HBASE-24986 Move ReplicationBarrier related methods to a separated class (#2354)

Signed-off-by: Guanghao Zhang <zghao@apache.org>
Duo Zhang 2020-09-07 20:35:27 +08:00 committed by GitHub
parent 1e8db480b3
commit be984cc8d4
27 changed files with 1073 additions and 893 deletions


@ -19,24 +19,17 @@ package org.apache.hadoop.hbase;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Cell.Type;
import org.apache.hadoop.hbase.ClientMetaTableAccessor.QueryType;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Delete;
@ -44,7 +37,6 @@ import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@ -52,15 +44,12 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.yetus.audience.InterfaceAudience;
@ -69,12 +58,6 @@ import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
/**
* Read/write operations on <code>hbase:meta</code> region as well as assignment information stored
* to <code>hbase:meta</code>.
@ -113,13 +96,6 @@ public final class MetaTableAccessor {
private MetaTableAccessor() {
}
@VisibleForTesting
public static final byte[] REPLICATION_PARENT_QUALIFIER = Bytes.toBytes("parent");
private static final byte ESCAPE_BYTE = (byte) 0xFF;
private static final byte SEPARATED_BYTE = 0x00;
////////////////////////
// Reading operations //
////////////////////////
@ -277,83 +253,6 @@ public final class MetaTableAccessor {
return resultScanner.next();
}
/**
* @return Return all regioninfos listed in the 'info:merge*' columns of the
* <code>regionName</code> row.
*/
@Nullable
public static List<RegionInfo> getMergeRegions(Connection connection, byte[] regionName)
throws IOException {
return getMergeRegions(getRegionResult(connection, regionName).rawCells());
}
/**
* Check whether the given {@code regionName} has any 'info:merge*' columns.
*/
public static boolean hasMergeRegions(Connection conn, byte[] regionName) throws IOException {
return hasMergeRegions(getRegionResult(conn, regionName).rawCells());
}
/**
* @return Deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
* match the regex 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
if (cells == null) {
return null;
}
Map<String, RegionInfo> regionsToMerge = null;
for (Cell cell : cells) {
if (!isMergeQualifierPrefix(cell)) {
continue;
}
// Ok. This cell is that of an info:merge* column.
RegionInfo ri = RegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength());
if (ri != null) {
if (regionsToMerge == null) {
regionsToMerge = new LinkedHashMap<>();
}
regionsToMerge.put(Bytes.toString(CellUtil.cloneQualifier(cell)), ri);
}
}
return regionsToMerge;
}
/**
* @return Deserialized regioninfo values taken from column values that match the regex
* 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static List<RegionInfo> getMergeRegions(Cell[] cells) {
Map<String, RegionInfo> mergeRegionsWithName = getMergeRegionsWithName(cells);
return (mergeRegionsWithName == null) ? null : new ArrayList<>(mergeRegionsWithName.values());
}
/**
* @return True if any merge regions are present in <code>cells</code>; i.e. the column in
* <code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean hasMergeRegions(Cell[] cells) {
for (Cell cell : cells) {
if (!isMergeQualifierPrefix(cell)) {
continue;
}
return true;
}
return false;
}
/**
* @return True if the column in <code>cell</code> matches the regex 'info:merge.*'.
*/
private static boolean isMergeQualifierPrefix(Cell cell) {
// Check that the cell has the catalog family and that its qualifier starts with the merge prefix 'merge'
return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) &&
PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX);
}
/**
* Lists all of the regions currently in META.
* @param connection to connect with
@ -521,7 +420,7 @@ public final class MetaTableAccessor {
ClientMetaTableAccessor.getTableStopRowForMeta(table, type), type, maxRows, visitor);
}
private static void scanMeta(Connection connection, @Nullable final byte[] startRow,
public static void scanMeta(Connection connection, @Nullable final byte[] startRow,
@Nullable final byte[] stopRow, QueryType type, final ClientMetaTableAccessor.Visitor visitor)
throws IOException {
scanMeta(connection, startRow, stopRow, type, Integer.MAX_VALUE, visitor);
@ -566,7 +465,7 @@ public final class MetaTableAccessor {
scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor);
}
private static void scanMeta(Connection connection, @Nullable final byte[] startRow,
public static void scanMeta(Connection connection, @Nullable final byte[] startRow,
@Nullable final byte[] stopRow, QueryType type, @Nullable Filter filter, int maxRows,
final ClientMetaTableAccessor.Visitor visitor) throws IOException {
int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE;
@ -739,7 +638,7 @@ public final class MetaTableAccessor {
/**
* Generates and returns a Delete containing the region info for the catalog table
*/
private static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) {
if (regionInfo == null) {
throw new IllegalArgumentException("Can't make a delete for null region");
}
@ -751,7 +650,7 @@ public final class MetaTableAccessor {
/**
* Adds split daughters to the Put
*/
private static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB)
public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB)
throws IOException {
if (splitA != null) {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
@ -865,7 +764,7 @@ public final class MetaTableAccessor {
}
}
private static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException {
public static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.STATE_QUALIFIER)
.setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(state.name()))
@ -962,131 +861,6 @@ public final class MetaTableAccessor {
LOG.info("Added {} regions to meta.", puts.size());
}
@VisibleForTesting
static Put addMergeRegions(Put put, Collection<RegionInfo> mergeRegions) throws IOException {
int limit = 10000; // Arbitrary limit. No room in our formatted 'merge0000' below for more.
int max = mergeRegions.size();
if (max > limit) {
// Should never happen!!!!! But just in case.
throw new RuntimeException(
"Can't merge " + max + " regions in one go; " + limit + " is upper-limit.");
}
int counter = 0;
for (RegionInfo ri : mergeRegions) {
String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter++);
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY).setQualifier(Bytes.toBytes(qualifier))
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(ri))
.build());
}
return put;
}
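
The formatted qualifier yields columns named merge0000, merge0001, and so on; the four-digit counter is also why the 10000 limit above exists. A minimal sketch of the naming, assuming HConstants.MERGE_QUALIFIER_PREFIX_STR has its usual value "merge":

// Column qualifiers written for three merge parents: a "merge" prefix plus a
// zero-padded four-digit counter.
for (int counter = 0; counter < 3; counter++) {
  String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter);
  System.out.println(qualifier); // merge0000, merge0001, merge0002
}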
/**
* Merge regions into one in an atomic operation. Deletes the merging regions in hbase:meta and
* adds the merged region.
* @param connection connection we're using
* @param mergedRegion the merged region
* @param parentSeqNum Parent regions to merge and their next open sequence id used by serial
* replication. Set to -1 if not needed by this table.
* @param sn the location of the region
*/
public static void mergeRegions(Connection connection, RegionInfo mergedRegion,
Map<RegionInfo, Long> parentSeqNum, ServerName sn, int regionReplication) throws IOException {
long time = HConstants.LATEST_TIMESTAMP;
List<Mutation> mutations = new ArrayList<>();
List<RegionInfo> replicationParents = new ArrayList<>();
for (Map.Entry<RegionInfo, Long> e : parentSeqNum.entrySet()) {
RegionInfo ri = e.getKey();
long seqNum = e.getValue();
// Deletes for merging regions
mutations.add(makeDeleteFromRegionInfo(ri, time));
if (seqNum > 0) {
mutations.add(makePutForReplicationBarrier(ri, seqNum, time));
replicationParents.add(ri);
}
}
// Put for parent
Put putOfMerged = makePutFromRegionInfo(mergedRegion, time);
putOfMerged = addMergeRegions(putOfMerged, parentSeqNum.keySet());
// Set initial state to CLOSED.
// NOTE: If the initial state is not set to CLOSED, the merged region gets added with the
// default OFFLINE state. If the Master is restarted after this step, its startup sequence
// will try to assign this offline region, followed by re-assignments of the merged region
// from the resumed {@link MergeTableRegionsProcedure}.
addRegionStateToPut(putOfMerged, RegionState.State.CLOSED);
mutations.add(putOfMerged);
// The merged region is a new region, so openSeqNum = 1 is fine. ServerName may be null
// if we crashed after the merge happened but before we got here; that means the in-memory
// locations of the offlined, now-closed merged regions are lost. Should be ok; we
// assign the merged region later.
if (sn != null) {
addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
}
// Add empty locations for region replicas of the merged region so that number of replicas
// can be cached whenever the primary region is looked up from meta
for (int i = 1; i < regionReplication; i++) {
addEmptyLocation(putOfMerged, i);
}
// add parent reference for serial replication
if (!replicationParents.isEmpty()) {
addReplicationParent(putOfMerged, replicationParents);
}
byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString() + HConstants.DELIMITER);
multiMutate(connection, tableRow, mutations);
}
/**
* Splits the region into two in an atomic operation. Offlines the parent region with the
* information that it is split into two, and also adds the daughter regions. Does not add the
* location information to the daughter regions since they are not open yet.
* @param connection connection we're using
* @param parent the parent region which is split
* @param parentOpenSeqNum the next open sequence id for parent region, used by serial
* replication. -1 if not necessary.
* @param splitA Split daughter region A
* @param splitB Split daughter region B
* @param sn the location of the region
*/
public static void splitRegion(Connection connection, RegionInfo parent, long parentOpenSeqNum,
RegionInfo splitA, RegionInfo splitB, ServerName sn, int regionReplication) throws IOException {
long time = EnvironmentEdgeManager.currentTime();
// Put for parent
Put putParent = makePutFromRegionInfo(
RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), time);
addDaughtersToPut(putParent, splitA, splitB);
// Puts for daughters
Put putA = makePutFromRegionInfo(splitA, time);
Put putB = makePutFromRegionInfo(splitB, time);
if (parentOpenSeqNum > 0) {
addReplicationBarrier(putParent, parentOpenSeqNum);
addReplicationParent(putA, Collections.singletonList(parent));
addReplicationParent(putB, Collections.singletonList(parent));
}
// Set initial state to CLOSED
// NOTE: If the initial state is not set to CLOSED, the daughter regions get added with the
// default OFFLINE state. If the Master is restarted after this step, its startup sequence
// will try to assign these offline regions, followed by re-assignments of the daughter
// regions from the resumed {@link SplitTableRegionProcedure}.
addRegionStateToPut(putA, RegionState.State.CLOSED);
addRegionStateToPut(putB, RegionState.State.CLOSED);
addSequenceNum(putA, 1, splitA.getReplicaId()); // new regions, openSeqNum = 1 is fine.
addSequenceNum(putB, 1, splitB.getReplicaId());
// Add empty locations for region replicas of daughters so that number of replicas can be
// cached whenever the primary region is looked up from meta
for (int i = 1; i < regionReplication; i++) {
addEmptyLocation(putA, i);
addEmptyLocation(putB, i);
}
byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
multiMutate(connection, tableRow, Arrays.asList(putParent, putA, putB));
}
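
For orientation, a sketch of the hbase:meta columns this method writes, assuming regionReplication = 2, serial replication enabled, and the usual string values of the constants involved ('info', 'rep_barrier', 'splitA'/'splitB', 'seqnumDuringOpen'):

parent row:     info:regioninfo (offline=true, split=true), info:splitA, info:splitB,
                rep_barrier:seqnumDuringOpen = parentOpenSeqNum
daughter rows:  info:regioninfo, info:state = CLOSED, info:seqnumDuringOpen = 1,
                rep_barrier:parent = encoded parent region name,
                plus empty info:server_0001 / info:serverstartcode_0001 /
                info:seqnumDuringOpen_0001 cells for the second replica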
/**
* Update state of the table in meta.
* @param connection what we use for update
@ -1121,35 +895,6 @@ public final class MetaTableAccessor {
deleteFromMetaTable(connection, delete);
LOG.info("Deleted table " + table + " state from META");
}
/**
* Performs an atomic multi-mutate operation against the given table. Used by the likes of merge
* and split as these want to make atomic mutations across multiple rows.
* @throws IOException even if we encounter a RuntimeException, we'll still wrap it in an IOE.
*/
private static void multiMutate(Connection conn, byte[] row, List<Mutation> mutations)
throws IOException {
debugLogMutations(mutations);
MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
for (Mutation mutation : mutations) {
if (mutation instanceof Put) {
builder.addMutationRequest(
ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
} else if (mutation instanceof Delete) {
builder.addMutationRequest(
ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
} else {
throw new DoNotRetryIOException(
"multi in MetaEditor doesn't support " + mutation.getClass().getName());
}
}
MutateRowsRequest request = builder.build();
AsyncTable<?> table = conn.toAsyncConnection().getTable(TableName.META_TABLE_NAME);
CompletableFuture<MutateRowsResponse> future =
table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
MultiRowMutationService::newStub,
(stub, controller, done) -> stub.mutateRows(controller, request, done), row);
FutureUtils.get(future);
}
/**
* Updates the location of the specified region in hbase:meta to be the specified server hostname
@ -1255,43 +1000,6 @@ public final class MetaTableAccessor {
LOG.debug("Overwritten regions: {} ", regionInfos);
}
/**
* Deletes merge qualifiers for the specified merge region.
* @param connection connection we're using
* @param mergeRegion the merged region
*/
public static void deleteMergeQualifiers(Connection connection, final RegionInfo mergeRegion)
throws IOException {
Delete delete = new Delete(mergeRegion.getRegionName());
// NOTE: We are doing a new hbase:meta read here.
Cell[] cells = getRegionResult(connection, mergeRegion.getRegionName()).rawCells();
if (cells == null || cells.length == 0) {
return;
}
List<byte[]> qualifiers = new ArrayList<>();
for (Cell cell : cells) {
if (!isMergeQualifierPrefix(cell)) {
continue;
}
byte[] qualifier = CellUtil.cloneQualifier(cell);
qualifiers.add(qualifier);
delete.addColumns(HConstants.CATALOG_FAMILY, qualifier, HConstants.LATEST_TIMESTAMP);
}
// There is a race condition: a GCMultipleMergedRegionsProcedure can be scheduled while
// the previous GCMultipleMergedRegionsProcedure is still going on; in this case, the second
// GCMultipleMergedRegionsProcedure could delete the merged region by accident!
if (qualifiers.isEmpty()) {
LOG.info("No merged qualifiers for region " + mergeRegion.getRegionNameAsString() +
" in meta table, they are cleaned up already, Skip.");
return;
}
deleteFromMetaTable(connection, delete);
LOG.info("Deleted merge references in " + mergeRegion.getRegionNameAsString() +
", deleted qualifiers " +
qualifiers.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")));
}
public static Put addRegionInfo(final Put p, final RegionInfo hri) throws IOException {
p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow())
@ -1321,74 +1029,7 @@ public final class MetaTableAccessor {
.setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)).build());
}
private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName) {
for (byte b : regionName) {
if (b == ESCAPE_BYTE) {
out.write(ESCAPE_BYTE);
}
out.write(b);
}
}
@VisibleForTesting
public static byte[] getParentsBytes(List<RegionInfo> parents) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
Iterator<RegionInfo> iter = parents.iterator();
writeRegionName(bos, iter.next().getRegionName());
while (iter.hasNext()) {
bos.write(ESCAPE_BYTE);
bos.write(SEPARATED_BYTE);
writeRegionName(bos, iter.next().getRegionName());
}
return bos.toByteArray();
}
private static List<byte[]> parseParentsBytes(byte[] bytes) {
List<byte[]> parents = new ArrayList<>();
ByteArrayOutputStream bos = new ByteArrayOutputStream();
for (int i = 0; i < bytes.length; i++) {
if (bytes[i] == ESCAPE_BYTE) {
i++;
if (bytes[i] == SEPARATED_BYTE) {
parents.add(bos.toByteArray());
bos.reset();
continue;
}
// fall through to append the byte
}
bos.write(bytes[i]);
}
if (bos.size() > 0) {
parents.add(bos.toByteArray());
}
return parents;
}
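
The encoding above is plain byte-stuffing: 0xFF is the escape byte, the two-byte sequence 0xFF 0x00 separates region names, and a literal 0xFF inside a name is doubled on write and collapsed on read. A minimal standalone sketch of the same write-side idea (an illustration, not the HBase API):

// Encode a list of names with 0xFF-escaping; parseParentsBytes above is the inverse.
static byte[] encodeNames(java.util.List<byte[]> names) {
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  boolean first = true;
  for (byte[] name : names) {
    if (!first) {
      out.write(0xFF); // escape byte...
      out.write(0x00); // ...followed by the separator marks a name boundary
    }
    for (byte b : name) {
      if (b == (byte) 0xFF) {
        out.write(0xFF); // double a literal 0xFF so it is not read as a boundary
      }
      out.write(b);
    }
    first = false;
  }
  return out.toByteArray();
}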
private static void addReplicationParent(Put put, List<RegionInfo> parents) throws IOException {
byte[] value = getParentsBytes(parents);
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER)
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build());
}
public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openSeqNum, long ts)
throws IOException {
Put put = new Put(regionInfo.getRegionName(), ts);
addReplicationBarrier(put, openSeqNum);
return put;
}
/**
* See class comment on SerialReplicationChecker
*/
public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER)
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum))
.build());
}
private static Put addEmptyLocation(Put p, int replicaId) throws IOException {
public static Put addEmptyLocation(Put p, int replicaId) throws IOException {
CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY);
return p
.add(builder.clear().setRow(p.getRow()).setFamily(HConstants.CATALOG_FAMILY)
@ -1402,129 +1043,6 @@ public final class MetaTableAccessor {
.setType(Cell.Type.Put).build());
}
public static final class ReplicationBarrierResult {
private final long[] barriers;
private final RegionState.State state;
private final List<byte[]> parentRegionNames;
ReplicationBarrierResult(long[] barriers, State state, List<byte[]> parentRegionNames) {
this.barriers = barriers;
this.state = state;
this.parentRegionNames = parentRegionNames;
}
public long[] getBarriers() {
return barriers;
}
public RegionState.State getState() {
return state;
}
public List<byte[]> getParentRegionNames() {
return parentRegionNames;
}
@Override
public String toString() {
return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" +
state + ", parentRegionNames=" +
parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) +
"]";
}
}
private static long getReplicationBarrier(Cell c) {
return Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength());
}
public static long[] getReplicationBarriers(Result result) {
return result.getColumnCells(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER)
.stream().mapToLong(MetaTableAccessor::getReplicationBarrier).sorted().distinct().toArray();
}
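
Because the barrier column is read with all cell versions, the same sequence number can appear more than once; the stream pipeline sorts and de-duplicates. A small illustration with made-up values:

// Cell values 9, 5, 5, 3 across versions become the barrier array {3, 5, 9}.
long[] barriers = java.util.stream.LongStream.of(9, 5, 5, 3).sorted().distinct().toArray();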
private static ReplicationBarrierResult getReplicationBarrierResult(Result result) {
long[] barriers = getReplicationBarriers(result);
byte[] stateBytes = result.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER);
RegionState.State state =
stateBytes != null ? RegionState.State.valueOf(Bytes.toString(stateBytes)) : null;
byte[] parentRegionsBytes =
result.getValue(HConstants.REPLICATION_BARRIER_FAMILY, REPLICATION_PARENT_QUALIFIER);
List<byte[]> parentRegionNames =
parentRegionsBytes != null ? parseParentsBytes(parentRegionsBytes) : Collections.emptyList();
return new ReplicationBarrierResult(barriers, state, parentRegionNames);
}
public static ReplicationBarrierResult getReplicationBarrierResult(Connection conn,
TableName tableName, byte[] row, byte[] encodedRegionName) throws IOException {
byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
byte[] metaStopKey =
RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false);
Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey)
.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)
.addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true)
.setCaching(10);
try (Table table = getMetaHTable(conn); ResultScanner scanner = table.getScanner(scan)) {
for (Result result;;) {
result = scanner.next();
if (result == null) {
return new ReplicationBarrierResult(new long[0], null, Collections.emptyList());
}
byte[] regionName = result.getRow();
// TODO: we may look up a region which has already been split or merged so we need to check
// whether the encoded name matches. Need to find a way to quit earlier when there is no
// record for the given region, for now it will scan to the end of the table.
if (!Bytes.equals(encodedRegionName,
Bytes.toBytes(RegionInfo.encodeRegionName(regionName)))) {
continue;
}
return getReplicationBarrierResult(result);
}
}
}
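
The key construction above is the usual reverse-lookup trick for hbase:meta: a start key suffixed with HConstants.NINES sorts just after every meta row whose region start key is <= the wanted row, so the reversed scan reaches the hosting region's row first. A sketch with hypothetical table and row values:

// Hypothetical lookup key for table "t1" and user row "row-42":
byte[] metaStartKey = RegionInfo.createRegionName(TableName.valueOf("t1"),
  Bytes.toBytes("row-42"), HConstants.NINES, false);
// metaStartKey is roughly "t1,row-42,99999999999999"; a reversed scan starting
// here visits t1's meta rows in descending order, hitting the region that
// contains "row-42" before any earlier region.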
public static long[] getReplicationBarrier(Connection conn, byte[] regionName)
throws IOException {
try (Table table = getMetaHTable(conn)) {
Result result = table.get(new Get(regionName)
.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER)
.readAllVersions());
return getReplicationBarriers(result);
}
}
public static List<Pair<String, Long>> getTableEncodedRegionNameAndLastBarrier(Connection conn,
TableName tableName) throws IOException {
List<Pair<String, Long>> list = new ArrayList<>();
scanMeta(conn,
ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REPLICATION),
ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REPLICATION),
QueryType.REPLICATION, r -> {
byte[] value =
r.getValue(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER);
if (value == null) {
return true;
}
long lastBarrier = Bytes.toLong(value);
String encodedRegionName = RegionInfo.encodeRegionName(r.getRow());
list.add(Pair.newPair(encodedRegionName, lastBarrier));
return true;
});
return list;
}
public static List<String> getTableEncodedRegionNamesForSerialReplication(Connection conn,
TableName tableName) throws IOException {
List<String> list = new ArrayList<>();
scanMeta(conn,
ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REPLICATION),
ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REPLICATION),
QueryType.REPLICATION, new FirstKeyOnlyFilter(), Integer.MAX_VALUE, r -> {
list.add(RegionInfo.encodeRegionName(r.getRow()));
return true;
});
return list;
}
private static void debugLogMutations(List<? extends Mutation> mutations) throws IOException {
if (!METALOG.isDebugEnabled()) {
@ -1540,11 +1058,4 @@ public final class MetaTableAccessor {
private static void debugLogMutation(Mutation p) throws IOException {
METALOG.debug("{} {}", p.getClass().getSimpleName(), p.toJSON());
}
private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException {
return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow())
.setFamily(HConstants.CATALOG_FAMILY)
.setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp())
.setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)).build());
}
}


@ -21,6 +21,7 @@ import edu.umd.cs.findbugs.annotations.Nullable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
@ -346,4 +347,63 @@ public class CatalogFamilyFormat {
throw new IOException(e);
}
}
/**
* @return Deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
* match the regex 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
if (cells == null) {
return null;
}
Map<String, RegionInfo> regionsToMerge = null;
for (Cell cell : cells) {
if (!isMergeQualifierPrefix(cell)) {
continue;
}
// Ok. This cell is that of an info:merge* column.
RegionInfo ri = RegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength());
if (ri != null) {
if (regionsToMerge == null) {
regionsToMerge = new LinkedHashMap<>();
}
regionsToMerge.put(Bytes.toString(CellUtil.cloneQualifier(cell)), ri);
}
}
return regionsToMerge;
}
/**
* @return Deserialized regioninfo values taken from column values that match the regex
* 'info:merge.*' in array of <code>cells</code>.
*/
@Nullable
public static List<RegionInfo> getMergeRegions(Cell[] cells) {
Map<String, RegionInfo> mergeRegionsWithName = getMergeRegionsWithName(cells);
return (mergeRegionsWithName == null) ? null : new ArrayList<>(mergeRegionsWithName.values());
}
/**
* @return True if any merge regions are present in <code>cells</code>; i.e. the column in
* <code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean hasMergeRegions(Cell[] cells) {
for (Cell cell : cells) {
if (isMergeQualifierPrefix(cell)) {
return true;
}
}
return false;
}
/**
* @return True if the column in <code>cell</code> matches the regex 'info:merge.*'.
*/
public static boolean isMergeQualifierPrefix(Cell cell) {
// Check that the cell has the catalog family and that its qualifier starts with the merge prefix 'merge'
return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) &&
PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX);
}
}
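
A minimal sketch of how a caller might use these helpers on a fetched hbase:meta row (metaTable and regionName are assumed to already exist):

// Read the catalog family for one region row and list its merge parents, if any.
Result r = metaTable.get(new Get(regionName).addFamily(HConstants.CATALOG_FAMILY));
if (CatalogFamilyFormat.hasMergeRegions(r.rawCells())) {
  // LinkedHashMap preserves column order: merge0000, merge0001, ...
  Map<String, RegionInfo> parents = CatalogFamilyFormat.getMergeRegionsWithName(r.rawCells());
  parents.forEach((qualifier, ri) -> System.out.println(qualifier + " -> " + ri.getEncodedName()));
}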


@ -189,7 +189,7 @@ public class CatalogJanitor extends ScheduledChore {
break;
}
List<RegionInfo> parents = MetaTableAccessor.getMergeRegions(e.getValue().rawCells());
List<RegionInfo> parents = CatalogFamilyFormat.getMergeRegions(e.getValue().rawCells());
if (parents != null && cleanMergeRegion(e.getKey(), parents)) {
gcs++;
}
@ -323,7 +323,7 @@ public class CatalogJanitor extends ScheduledChore {
boolean cleanParent(final RegionInfo parent, Result rowContent)
throws IOException {
// Check whether it is a merged region and if it is clean of references.
if (MetaTableAccessor.hasMergeRegions(rowContent.rawCells())) {
if (CatalogFamilyFormat.hasMergeRegions(rowContent.rawCells())) {
// Wait until clean of merge parent regions first
return false;
}
@ -580,7 +580,7 @@ public class CatalogJanitor extends ScheduledChore {
if (regionInfo.isSplitParent()) { // splitParent means split and offline.
this.report.splitParents.put(regionInfo, r);
}
if (MetaTableAccessor.hasMergeRegions(r.rawCells())) {
if (CatalogFamilyFormat.hasMergeRegions(r.rawCells())) {
this.report.mergedRegions.put(regionInfo, r);
}
}


@ -25,9 +25,9 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerName;
@ -198,7 +198,7 @@ public class HbckChore extends ScheduledChore {
// Null tablename means scan all of meta.
MetaTableAccessor.scanMetaForTableRegions(this.master.getConnection(),
r -> {
List<RegionInfo> mergeParents = MetaTableAccessor.getMergeRegions(r.rawCells());
List<RegionInfo> mergeParents = CatalogFamilyFormat.getMergeRegions(r.rawCells());
if (mergeParents != null) {
for (RegionInfo mergeRegion : mergeParents) {
if (mergeRegion != null) {


@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionStatesCount;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
@ -83,7 +84,9 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
@ -1894,7 +1897,8 @@ public class AssignmentManager {
final RegionStateNode nodeB = regionStates.getOrCreateRegionStateNode(daughterB);
nodeB.setState(State.SPLITTING_NEW);
regionStateStore.splitRegion(parent, daughterA, daughterB, serverName);
TableDescriptor td = master.getTableDescriptors().get(parent.getTable());
regionStateStore.splitRegion(parent, daughterA, daughterB, serverName, td);
if (shouldAssignFavoredNodes(parent)) {
List<ServerName> onlineServers = this.master.getServerManager().getOnlineServersList();
getFavoredNodePromoter().generateFavoredNodesForDaughter(onlineServers, parent, daughterA,
@ -1919,9 +1923,9 @@ public class AssignmentManager {
node.setState(State.MERGED);
for (RegionInfo ri: mergeParents) {
regionStates.deleteRegion(ri);
}
regionStateStore.mergeRegions(child, mergeParents, serverName);
TableDescriptor td = master.getTableDescriptors().get(child.getTable());
regionStateStore.mergeRegions(child, mergeParents, serverName, td);
if (shouldAssignFavoredNodes(child)) {
getFavoredNodePromoter().generateFavoredNodesForMergedRegion(child, mergeParents);
}


@ -18,8 +18,6 @@
package org.apache.hadoop.hbase.master.assignment;
import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
@ -30,6 +28,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCMergedRegionsState;
@ -73,30 +72,30 @@ extends AbstractStateMachineTableProcedure<GCMergedRegionsState> {
@Override
protected Flow executeFromState(MasterProcedureEnv env, GCMergedRegionsState state)
throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(this + " execute state=" + state);
}
try {
switch (state) {
case GC_MERGED_REGIONS_PREPARE:
// Nothing to do to prepare.
setNextState(GCMergedRegionsState.GC_MERGED_REGIONS_PURGE);
break;
case GC_MERGED_REGIONS_PURGE:
addChildProcedure(createGCRegionProcedures(env));
setNextState(GCMergedRegionsState.GC_REGION_EDIT_METADATA);
break;
case GC_REGION_EDIT_METADATA:
MetaTableAccessor.deleteMergeQualifiers(env.getMasterServices().getConnection(), mergedChild);
return Flow.NO_MORE_STATE;
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);
case GC_MERGED_REGIONS_PREPARE:
// Nothing to do to prepare.
setNextState(GCMergedRegionsState.GC_MERGED_REGIONS_PURGE);
break;
case GC_MERGED_REGIONS_PURGE:
addChildProcedure(createGCRegionProcedures(env));
setNextState(GCMergedRegionsState.GC_REGION_EDIT_METADATA);
break;
case GC_REGION_EDIT_METADATA:
env.getAssignmentManager().getRegionStateStore().deleteMergeQualifiers(mergedChild);
return Flow.NO_MORE_STATE;
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);
}
} catch (IOException ioe) {
// TODO: This is going to spew log?
LOG.warn("Error trying to GC merged regions " + this.father.getShortNameToLog() +
" & " + this.mother.getShortNameToLog() + "; retrying...", ioe);
LOG.warn("Error trying to GC merged regions " + this.father.getShortNameToLog() + " & " +
this.mother.getShortNameToLog() + "; retrying...", ioe);
}
return Flow.HAS_MORE_STATE;
}


@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.master.assignment;
import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
@ -29,13 +27,13 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCMergedRegionsState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCMultipleMergedRegionsStateData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GC regions that have been Merged. Caller determines if it is GC time. This Procedure does not
* check. This is a Table Procedure. We take a read lock on the Table. We do NOT keep a lock for
@ -100,8 +98,8 @@ public class GCMultipleMergedRegionsProcedure extends
// If GCMultipleMergedRegionsProcedure processing is slower than the CatalogJanitor's scan
// interval, it will end up resubmitting GCMultipleMergedRegionsProcedure for the same
// region. We can skip a duplicate GCMultipleMergedRegionsProcedure once the previous one finished
List<RegionInfo> parents = MetaTableAccessor.getMergeRegions(
env.getMasterServices().getConnection(), mergedChild.getRegionName());
List<RegionInfo> parents =
env.getAssignmentManager().getRegionStateStore().getMergeRegions(mergedChild);
if (parents == null || parents.isEmpty()) {
LOG.info("{} mergeXXX qualifiers have ALL been deleted", mergedChild.getShortNameToLog());
return Flow.NO_MORE_STATE;
@ -113,8 +111,7 @@ public class GCMultipleMergedRegionsProcedure extends
setNextState(GCMergedRegionsState.GC_REGION_EDIT_METADATA);
break;
case GC_REGION_EDIT_METADATA:
MetaTableAccessor.deleteMergeQualifiers(env.getMasterServices().getConnection(),
mergedChild);
env.getAssignmentManager().getRegionStateStore().deleteMergeQualifiers(mergedChild);
return Flow.NO_MORE_STATE;
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);


@ -27,7 +27,6 @@ import java.util.stream.Stream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaMutationAnnotation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
@ -40,7 +39,6 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.master.CatalogJanitor;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.RegionState;
@ -61,6 +59,7 @@ import org.apache.hadoop.hbase.wal.WALSplitUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@ -474,9 +473,9 @@ public class MergeTableRegionsProcedure
}
RegionStates regionStates = env.getAssignmentManager().getRegionStates();
RegionStateStore regionStateStore = env.getAssignmentManager().getRegionStateStore();
for (RegionInfo ri : this.regionsToMerge) {
if (MetaTableAccessor.hasMergeRegions(env.getMasterServices().getConnection(),
ri.getRegionName())) {
if (regionStateStore.hasMergeRegions(ri)) {
String msg = "Skip merging " + RegionInfo.getShortNameToLog(regionsToMerge) +
", because a parent, " + RegionInfo.getShortNameToLog(ri) + ", has a merge qualifier " +
"(if a 'merge column' in parent, it was recently merged but still has outstanding " +


@ -18,33 +18,48 @@
package org.apache.hadoop.hbase.master.assignment;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Cell.Type;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.apache.hadoop.hbase.wal.WALSplitUtil;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.yetus.audience.InterfaceAudience;
@ -52,14 +67,22 @@ import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
/**
* Store Region State to hbase:meta table.
*/
@InterfaceAudience.Private
public class RegionStateStore {
private static final Logger LOG = LoggerFactory.getLogger(RegionStateStore.class);
private static final Logger METALOG = LoggerFactory.getLogger("org.apache.hadoop.hbase.META");
/** The delimiter for meta columns for replicaIds &gt; 0 */
protected static final char META_REPLICA_ID_DELIMITER = '_';
@ -190,7 +213,7 @@ public class RegionStateStore {
// only update replication barrier for default replica
if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID &&
hasGlobalReplicationScope(regionInfo.getTable())) {
MetaTableAccessor.addReplicationBarrier(put, openSeqNum);
ReplicationBarrierFamilyFormat.addReplicationBarrier(put, openSeqNum);
info.append(", repBarrier=").append(openSeqNum);
}
info.append(", openSeqNum=").append(openSeqNum);
@ -242,33 +265,226 @@ public class RegionStateStore {
return maxSeqId > 0 ? maxSeqId + 1 : HConstants.NO_SEQNUM;
}
/**
* Performs an atomic multi-mutate operation against the given table. Used by the likes of merge
* and split as these want to make atomic mutations across multiple rows.
*/
private void multiMutate(RegionInfo ri, List<Mutation> mutations) throws IOException {
debugLogMutations(mutations);
byte[] row =
Bytes.toBytes(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionNameAsString() +
HConstants.DELIMITER);
MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
for (Mutation mutation : mutations) {
if (mutation instanceof Put) {
builder.addMutationRequest(
ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
} else if (mutation instanceof Delete) {
builder.addMutationRequest(
ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
} else {
throw new DoNotRetryIOException(
"multi in MetaEditor doesn't support " + mutation.getClass().getName());
}
}
MutateRowsRequest request = builder.build();
AsyncTable<?> table =
master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME);
CompletableFuture<MutateRowsResponse> future =
table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
MultiRowMutationService::newStub,
(stub, controller, done) -> stub.mutateRows(controller, request, done), row);
FutureUtils.get(future);
}
private Result getRegionCatalogResult(RegionInfo region) throws IOException {
Get get =
new Get(CatalogFamilyFormat.getMetaKeyForRegion(region)).addFamily(HConstants.CATALOG_FAMILY);
try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
return table.get(get);
}
}
private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException {
return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow())
.setFamily(HConstants.CATALOG_FAMILY)
.setQualifier(CatalogFamilyFormat.getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp())
.setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)).build());
}
// ============================================================================================
// Update Region Splitting State helpers
// ============================================================================================
public void splitRegion(RegionInfo parent, RegionInfo hriA, RegionInfo hriB,
ServerName serverName) throws IOException {
TableDescriptor htd = getDescriptor(parent.getTable());
/**
* Splits the region into two in an atomic operation. Offlines the parent region with the
* information that it is split into two, and also adds the daughter regions. Does not add the
* location information to the daughter regions since they are not open yet.
*/
public void splitRegion(RegionInfo parent, RegionInfo splitA, RegionInfo splitB,
ServerName serverName, TableDescriptor htd) throws IOException {
long parentOpenSeqNum = HConstants.NO_SEQNUM;
if (htd.hasGlobalReplicationScope()) {
parentOpenSeqNum = getOpenSeqNumForParentRegion(parent);
}
MetaTableAccessor.splitRegion(master.getConnection(), parent, parentOpenSeqNum, hriA, hriB,
serverName, getRegionReplication(htd));
long time = EnvironmentEdgeManager.currentTime();
// Put for parent
Put putParent = MetaTableAccessor.makePutFromRegionInfo(
RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), time);
MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
// Puts for daughters
Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA, time);
Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB, time);
if (parentOpenSeqNum > 0) {
ReplicationBarrierFamilyFormat.addReplicationBarrier(putParent, parentOpenSeqNum);
ReplicationBarrierFamilyFormat.addReplicationParent(putA, Collections.singletonList(parent));
ReplicationBarrierFamilyFormat.addReplicationParent(putB, Collections.singletonList(parent));
}
// Set initial state to CLOSED
// NOTE: If the initial state is not set to CLOSED, the daughter regions get added with the
// default OFFLINE state. If the Master is restarted after this step, its startup sequence
// will try to assign these offline regions, followed by re-assignments of the daughter
// regions from the resumed {@link SplitTableRegionProcedure}.
MetaTableAccessor.addRegionStateToPut(putA, RegionState.State.CLOSED);
MetaTableAccessor.addRegionStateToPut(putB, RegionState.State.CLOSED);
// new regions, openSeqNum = 1 is fine.
addSequenceNum(putA, 1, splitA.getReplicaId());
addSequenceNum(putB, 1, splitB.getReplicaId());
// Add empty locations for region replicas of daughters so that number of replicas can be
// cached whenever the primary region is looked up from meta
int regionReplication = getRegionReplication(htd);
for (int i = 1; i < regionReplication; i++) {
MetaTableAccessor.addEmptyLocation(putA, i);
MetaTableAccessor.addEmptyLocation(putB, i);
}
multiMutate(parent, Arrays.asList(putParent, putA, putB));
}
// ============================================================================================
// Update Region Merging State helpers
// ============================================================================================
public void mergeRegions(RegionInfo child, RegionInfo [] parents, ServerName serverName)
throws IOException {
TableDescriptor htd = getDescriptor(child.getTable());
public void mergeRegions(RegionInfo child, RegionInfo[] parents, ServerName serverName,
TableDescriptor htd) throws IOException {
boolean globalScope = htd.hasGlobalReplicationScope();
SortedMap<RegionInfo, Long> parentSeqNums = new TreeMap<>();
long time = HConstants.LATEST_TIMESTAMP;
List<Mutation> mutations = new ArrayList<>();
List<RegionInfo> replicationParents = new ArrayList<>();
for (RegionInfo ri: parents) {
parentSeqNums.put(ri, globalScope? getOpenSeqNumForParentRegion(ri): -1);
long seqNum = globalScope ? getOpenSeqNumForParentRegion(ri) : -1;
// Deletes for merging regions
mutations.add(MetaTableAccessor.makeDeleteFromRegionInfo(ri, time));
if (seqNum > 0) {
mutations
.add(ReplicationBarrierFamilyFormat.makePutForReplicationBarrier(ri, seqNum, time));
replicationParents.add(ri);
}
}
MetaTableAccessor.mergeRegions(master.getConnection(), child, parentSeqNums,
serverName, getRegionReplication(htd));
// Put for parent
Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(child, time);
putOfMerged = addMergeRegions(putOfMerged, Arrays.asList(parents));
// Set initial state to CLOSED.
// NOTE: If the initial state is not set to CLOSED, the merged region gets added with the
// default OFFLINE state. If the Master is restarted after this step, its startup sequence
// will try to assign this offline region, followed by re-assignments of the merged region
// from the resumed {@link MergeTableRegionsProcedure}.
MetaTableAccessor.addRegionStateToPut(putOfMerged, RegionState.State.CLOSED);
mutations.add(putOfMerged);
// The merged region is a new region, so openSeqNum = 1 is fine. ServerName may be null
// if we crashed after the merge happened but before we got here; that means the in-memory
// locations of the offlined, now-closed merged regions are lost. Should be ok; we
// assign the merged region later.
if (serverName != null) {
MetaTableAccessor.addLocation(putOfMerged, serverName, 1, child.getReplicaId());
}
// Add empty locations for region replicas of the merged region so that number of replicas
// can be cached whenever the primary region is looked up from meta
int regionReplication = getRegionReplication(htd);
for (int i = 1; i < regionReplication; i++) {
MetaTableAccessor.addEmptyLocation(putOfMerged, i);
}
// add parent reference for serial replication
if (!replicationParents.isEmpty()) {
ReplicationBarrierFamilyFormat.addReplicationParent(putOfMerged, replicationParents);
}
multiMutate(child, mutations);
}
/**
* Check whether the given {@code region} has any 'info:merge*' columns.
*/
public boolean hasMergeRegions(RegionInfo region) throws IOException {
return CatalogFamilyFormat.hasMergeRegions(getRegionCatalogResult(region).rawCells());
}
/**
* @return Return all regioninfos listed in the 'info:merge*' columns of the given {@code region}.
*/
public List<RegionInfo> getMergeRegions(RegionInfo region) throws IOException {
return CatalogFamilyFormat.getMergeRegions(getRegionCatalogResult(region).rawCells());
}
/**
* Deletes merge qualifiers for the specified merge region.
* @param mergeRegion the merged region
*/
public void deleteMergeQualifiers(RegionInfo mergeRegion)
throws IOException {
// NOTE: We are doing a new hbase:meta read here.
Cell[] cells = getRegionCatalogResult(mergeRegion).rawCells();
if (cells == null || cells.length == 0) {
return;
}
Delete delete = new Delete(mergeRegion.getRegionName());
List<byte[]> qualifiers = new ArrayList<>();
for (Cell cell : cells) {
if (!CatalogFamilyFormat.isMergeQualifierPrefix(cell)) {
continue;
}
byte[] qualifier = CellUtil.cloneQualifier(cell);
qualifiers.add(qualifier);
delete.addColumns(HConstants.CATALOG_FAMILY, qualifier, HConstants.LATEST_TIMESTAMP);
}
// There is a race condition: a GCMultipleMergedRegionsProcedure can be scheduled while
// the previous GCMultipleMergedRegionsProcedure is still going on; in this case, the second
// GCMultipleMergedRegionsProcedure could delete the merged region by accident!
if (qualifiers.isEmpty()) {
LOG.info("No merged qualifiers for region " + mergeRegion.getRegionNameAsString() +
" in meta table, they are cleaned up already, Skip.");
return;
}
try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
table.delete(delete);
}
LOG.info("Deleted merge references in " + mergeRegion.getRegionNameAsString() +
", deleted qualifiers " +
qualifiers.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")));
}
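
Together with hasMergeRegions and getMergeRegions above, this completes the meta-side surface used by the merged-region GC flow; a hypothetical sketch mirroring GCMultipleMergedRegionsProcedure (regionStateStore and mergedChild assumed):

List<RegionInfo> parents = regionStateStore.getMergeRegions(mergedChild);
if (parents != null && !parents.isEmpty()) {
  // ... GC each parent region's leftover data first, then drop the references:
  regionStateStore.deleteMergeQualifiers(mergedChild);
}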
@VisibleForTesting
static Put addMergeRegions(Put put, Collection<RegionInfo> mergeRegions) throws IOException {
int limit = 10000; // Arbitrary limit. No room in our formatted 'merge0000' below for more.
int max = mergeRegions.size();
if (max > limit) {
// Should never happen!!!!! But just in case.
throw new RuntimeException(
"Can't merge " + max + " regions in one go; " + limit + " is upper-limit.");
}
int counter = 0;
for (RegionInfo ri : mergeRegions) {
String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter++);
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY).setQualifier(Bytes.toBytes(qualifier))
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(ri))
.build());
}
return put;
}
// ============================================================================================
@ -334,4 +550,19 @@ public class RegionStateStore {
: Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
+ String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
}
private static void debugLogMutations(List<? extends Mutation> mutations) throws IOException {
if (!METALOG.isDebugEnabled()) {
return;
}
// Logging each mutation on a separate line makes it easier to see the diff between them
// visually because of the common starting indentation.
for (Mutation mutation : mutations) {
debugLogMutation(mutation);
}
}
private static void debugLogMutation(Mutation p) throws IOException {
METALOG.debug("{} {}", p.getClass().getSimpleName(), p.toJSON());
}
}


@ -23,7 +23,6 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
@ -36,6 +35,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.yetus.audience.InterfaceAudience;
@ -89,7 +89,7 @@ public class ReplicationBarrierCleaner extends ScheduledChore {
break;
}
totalRows++;
long[] barriers = MetaTableAccessor.getReplicationBarriers(result);
long[] barriers = ReplicationBarrierFamilyFormat.getReplicationBarriers(result);
if (barriers.length == 0) {
continue;
}


@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@ -33,6 +32,7 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.wal.WALSplitUtil;
import org.apache.yetus.audience.InterfaceAudience;
@ -119,8 +119,8 @@ public class DisableTableProcedure
long maxSequenceId = WALSplitUtil.getMaxRegionSequenceId(
env.getMasterConfiguration(), region, fs::getFileSystem, fs::getWALFileSystem);
long openSeqNum = maxSequenceId > 0 ? maxSequenceId + 1 : HConstants.NO_SEQNUM;
mutator.mutate(MetaTableAccessor.makePutForReplicationBarrier(region, openSeqNum,
EnvironmentEdgeManager.currentTime()));
mutator.mutate(ReplicationBarrierFamilyFormat.makePutForReplicationBarrier(region,
openSeqNum, EnvironmentEdgeManager.currentTime()));
}
}
}

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Connection;
@ -30,6 +29,7 @@ import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@ -160,7 +160,7 @@ public abstract class AbstractPeerProcedure<TState> extends AbstractPeerNoLockPr
LOG.debug("Skip settting last pushed sequence id for {}", tableName);
return;
}
for (Pair<String, Long> name2Barrier : MetaTableAccessor
for (Pair<String, Long> name2Barrier : ReplicationBarrierFamilyFormat
.getTableEncodedRegionNameAndLastBarrier(conn, tableName)) {
LOG.trace("Update last pushed sequence id for {}, {}", tableName, name2Barrier);
addToMap(lastSeqIds, name2Barrier.getFirst(), name2Barrier.getSecond().longValue() - 1,

View File

@ -22,7 +22,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
@ -30,6 +29,7 @@ import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
@ -131,7 +131,7 @@ public class UpdatePeerConfigProcedure extends ModifyPeerProcedure {
if (oldPeerConfig.needToReplicate(tn)) {
if (!peerConfig.needToReplicate(tn)) {
// removed from peer config
for (String encodedRegionName : MetaTableAccessor
for (String encodedRegionName : ReplicationBarrierFamilyFormat
.getTableEncodedRegionNamesForSerialReplication(conn, tn)) {
addToList(encodedRegionNames, encodedRegionName, queueStorage);
}

View File

@ -68,7 +68,7 @@ public final class RegionReplicaInfo {
? MetaTableAccessor.getTargetServerName(result, regionInfo.getReplicaId())
: null;
this.mergeRegionInfo = (result != null)
? MetaTableAccessor.getMergeRegionsWithName(result.rawCells())
? CatalogFamilyFormat.getMergeRegionsWithName(result.rawCells())
: null;
if (result != null) {

View File

@ -0,0 +1,261 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.Cell.Type;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.ClientMetaTableAccessor.QueryType;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
 * Helper class for storing replication barriers in the 'rep_barrier' family of the meta table.
* <p/>
* See SerialReplicationChecker on how to make use of the barriers.
*/
@InterfaceAudience.Private
public final class ReplicationBarrierFamilyFormat {
@VisibleForTesting
public static final byte[] REPLICATION_PARENT_QUALIFIER = Bytes.toBytes("parent");
private static final byte ESCAPE_BYTE = (byte) 0xFF;
private static final byte SEPARATED_BYTE = 0x00;
private ReplicationBarrierFamilyFormat() {
}
public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER)
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum))
.build());
}
private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName) {
for (byte b : regionName) {
if (b == ESCAPE_BYTE) {
out.write(ESCAPE_BYTE);
}
out.write(b);
}
}
@VisibleForTesting
public static byte[] getParentsBytes(List<RegionInfo> parents) {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
Iterator<RegionInfo> iter = parents.iterator();
writeRegionName(bos, iter.next().getRegionName());
while (iter.hasNext()) {
bos.write(ESCAPE_BYTE);
bos.write(SEPARATED_BYTE);
writeRegionName(bos, iter.next().getRegionName());
}
return bos.toByteArray();
}
private static List<byte[]> parseParentsBytes(byte[] bytes) {
List<byte[]> parents = new ArrayList<>();
ByteArrayOutputStream bos = new ByteArrayOutputStream();
for (int i = 0; i < bytes.length; i++) {
if (bytes[i] == ESCAPE_BYTE) {
i++;
if (bytes[i] == SEPARATED_BYTE) {
parents.add(bos.toByteArray());
bos.reset();
continue;
}
// fall through to append the byte
}
bos.write(bytes[i]);
}
if (bos.size() > 0) {
parents.add(bos.toByteArray());
}
return parents;
}
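The 0xFF escaping used by writeRegionName and parseParentsBytes above deserves a worked example: a literal 0xFF inside a region name is doubled, and the pair 0xFF 0x00 separates names. A minimal, self-contained sketch of the same scheme (the demo class and its encode/decode helpers are hypothetical stand-ins, not part of this patch):

    import java.io.ByteArrayOutputStream;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ParentsEncodingDemo {
      private static final byte ESCAPE_BYTE = (byte) 0xFF;
      private static final byte SEPARATED_BYTE = 0x00;

      // Encode: double every 0xFF inside a name, join names with 0xFF 0x00.
      static byte[] encode(List<byte[]> names) {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        for (int n = 0; n < names.size(); n++) {
          if (n > 0) {
            bos.write(ESCAPE_BYTE);
            bos.write(SEPARATED_BYTE);
          }
          for (byte b : names.get(n)) {
            if (b == ESCAPE_BYTE) {
              bos.write(ESCAPE_BYTE); // escape the escape byte by doubling it
            }
            bos.write(b);
          }
        }
        return bos.toByteArray();
      }

      // Decode: after 0xFF, a 0x00 means "name boundary"; anything else is a
      // literal byte that was escaped.
      static List<byte[]> decode(byte[] bytes) {
        List<byte[]> names = new ArrayList<>();
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        for (int i = 0; i < bytes.length; i++) {
          if (bytes[i] == ESCAPE_BYTE) {
            i++;
            if (bytes[i] == SEPARATED_BYTE) {
              names.add(bos.toByteArray());
              bos.reset();
              continue;
            }
          }
          bos.write(bytes[i]);
        }
        if (bos.size() > 0) {
          names.add(bos.toByteArray());
        }
        return names;
      }

      public static void main(String[] args) {
        List<byte[]> in = Arrays.asList(
          new byte[] { 1, (byte) 0xFF, 2 }, // contains the escape byte itself
          new byte[] { 3, 4 });
        List<byte[]> out = decode(encode(in));
        System.out.println(out.size() == 2 &&
          Arrays.equals(in.get(0), out.get(0)) &&
          Arrays.equals(in.get(1), out.get(1))); // prints: true
      }
    }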
public static void addReplicationParent(Put put, List<RegionInfo> parents) throws IOException {
byte[] value = getParentsBytes(parents);
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
.setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER)
.setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build());
}
public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openSeqNum, long ts)
throws IOException {
Put put = new Put(regionInfo.getRegionName(), ts);
addReplicationBarrier(put, openSeqNum);
return put;
}
public static final class ReplicationBarrierResult {
private final long[] barriers;
private final RegionState.State state;
private final List<byte[]> parentRegionNames;
ReplicationBarrierResult(long[] barriers, State state, List<byte[]> parentRegionNames) {
this.barriers = barriers;
this.state = state;
this.parentRegionNames = parentRegionNames;
}
public long[] getBarriers() {
return barriers;
}
public RegionState.State getState() {
return state;
}
public List<byte[]> getParentRegionNames() {
return parentRegionNames;
}
@Override
public String toString() {
return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" +
state + ", parentRegionNames=" +
parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) +
"]";
}
}
private static long getReplicationBarrier(Cell c) {
return Bytes.toLong(c.getValueArray(), c.getValueOffset(), c.getValueLength());
}
public static long[] getReplicationBarriers(Result result) {
return result.getColumnCells(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER)
.stream().mapToLong(ReplicationBarrierFamilyFormat::getReplicationBarrier).sorted().distinct()
.toArray();
}
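Because the barriers come back sorted and de-duplicated, a caller can place a WAL entry's sequence id between two barriers with Arrays.binarySearch, which is how SerialReplicationChecker (see its hunk further below) makes use of them. A minimal sketch of that positioning, with a hypothetical rangeOf helper and the range convention stated in its comment, not the checker's exact decision logic:

    import java.util.Arrays;

    public class BarrierRangeDemo {
      // Returns which barrier range contains seqId: range 0 is everything
      // before the first barrier, range k (k >= 1) is [barriers[k-1],
      // barriers[k]), and the last range is open-ended.
      static int rangeOf(long[] barriers, long seqId) {
        int index = Arrays.binarySearch(barriers, seqId);
        // On a miss binarySearch returns (-insertionPoint - 1); on a hit the
        // entry belongs to the range that the matched barrier opens.
        return index >= 0 ? index + 1 : -index - 1;
      }

      public static void main(String[] args) {
        long[] barriers = { 10, 20, 30 }; // open sequence numbers of a region
        System.out.println(rangeOf(barriers, 5));  // 0
        System.out.println(rangeOf(barriers, 10)); // 1
        System.out.println(rangeOf(barriers, 25)); // 2
        System.out.println(rangeOf(barriers, 40)); // 3
      }
    }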
private static ReplicationBarrierResult getReplicationBarrierResult(Result result) {
long[] barriers = getReplicationBarriers(result);
byte[] stateBytes = result.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER);
RegionState.State state =
stateBytes != null ? RegionState.State.valueOf(Bytes.toString(stateBytes)) : null;
byte[] parentRegionsBytes =
result.getValue(HConstants.REPLICATION_BARRIER_FAMILY, REPLICATION_PARENT_QUALIFIER);
List<byte[]> parentRegionNames =
parentRegionsBytes != null ? parseParentsBytes(parentRegionsBytes) : Collections.emptyList();
return new ReplicationBarrierResult(barriers, state, parentRegionNames);
}
public static ReplicationBarrierResult getReplicationBarrierResult(Connection conn,
TableName tableName, byte[] row, byte[] encodedRegionName) throws IOException {
byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
byte[] metaStopKey =
RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false);
Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey)
.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)
.addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true)
.setCaching(10);
try (Table table = conn.getTable(TableName.META_TABLE_NAME);
ResultScanner scanner = table.getScanner(scan)) {
for (Result result;;) {
result = scanner.next();
if (result == null) {
return new ReplicationBarrierResult(new long[0], null, Collections.emptyList());
}
byte[] regionName = result.getRow();
// TODO: we may look up a region which has already been split or merged, so we need to check
// whether the encoded name matches. Need to find a way to quit earlier when there is no
// record for the given region; for now it will scan to the end of the table.
if (!Bytes.equals(encodedRegionName,
Bytes.toBytes(RegionInfo.encodeRegionName(regionName)))) {
continue;
}
return getReplicationBarrierResult(result);
}
}
}
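A quick illustration of the scan boundaries built above: appending HConstants.NINES (the largest possible region-id string) to the queried row yields a key that sorts after the meta row of whichever region of the table covers that row, so the reversed scan reaches that region's row first. A minimal sketch with a hypothetical table "t" and row "k1":

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanKeyDemo {
      public static void main(String[] args) {
        TableName tn = TableName.valueOf("t");
        // Start key sorts after any meta row of a region of "t" containing "k1".
        byte[] start = RegionInfo.createRegionName(tn, Bytes.toBytes("k1"),
          HConstants.NINES, false);
        // Stop key sorts before every meta row of table "t".
        byte[] stop =
          RegionInfo.createRegionName(tn, HConstants.EMPTY_START_ROW, "", false);
        System.out.println(Bytes.toStringBinary(start)); // t,k1,99999999999999
        System.out.println(Bytes.toStringBinary(stop));  // t,,
      }
    }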
public static long[] getReplicationBarriers(Connection conn, byte[] regionName)
throws IOException {
try (Table table = conn.getTable(TableName.META_TABLE_NAME)) {
Result result = table.get(new Get(regionName)
.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER)
.readAllVersions());
return getReplicationBarriers(result);
}
}
public static List<Pair<String, Long>> getTableEncodedRegionNameAndLastBarrier(Connection conn,
TableName tableName) throws IOException {
List<Pair<String, Long>> list = new ArrayList<>();
MetaTableAccessor.scanMeta(conn,
ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REPLICATION),
ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REPLICATION),
QueryType.REPLICATION, r -> {
byte[] value =
r.getValue(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER);
if (value == null) {
return true;
}
long lastBarrier = Bytes.toLong(value);
String encodedRegionName = RegionInfo.encodeRegionName(r.getRow());
list.add(Pair.newPair(encodedRegionName, lastBarrier));
return true;
});
return list;
}
public static List<String> getTableEncodedRegionNamesForSerialReplication(Connection conn,
TableName tableName) throws IOException {
List<String> list = new ArrayList<>();
MetaTableAccessor.scanMeta(conn,
ClientMetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REPLICATION),
ClientMetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REPLICATION),
QueryType.REPLICATION, new FirstKeyOnlyFilter(), Integer.MAX_VALUE, r -> {
list.add(RegionInfo.encodeRegionName(r.getRow()));
return true;
});
return list;
}
}
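Taken together, the class concentrates the rep_barrier read/write path that used to live in MetaTableAccessor. A minimal usage sketch, assuming a live Connection and a RegionInfo (the wrapper class and method are hypothetical; cluster setup is omitted):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class BarrierUsageSketch {
      // Records a barrier for `region` at the given open sequence number and
      // then reads back every barrier stored for that region.
      static long[] writeAndReadBarriers(Connection conn, RegionInfo region,
          long openSeqNum) throws IOException {
        Put put = ReplicationBarrierFamilyFormat.makePutForReplicationBarrier(
          region, openSeqNum, EnvironmentEdgeManager.currentTime());
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          meta.put(put);
        }
        return ReplicationBarrierFamilyFormat.getReplicationBarriers(
          conn, region.getRegionName());
      }
    }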

View File

@ -25,11 +25,11 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor.ReplicationBarrierResult;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat.ReplicationBarrierResult;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.util.Bytes;
@ -160,7 +160,7 @@ class SerialReplicationChecker {
}
private boolean isParentFinished(byte[] regionName) throws IOException {
long[] barriers = MetaTableAccessor.getReplicationBarrier(conn, regionName);
long[] barriers = ReplicationBarrierFamilyFormat.getReplicationBarriers(conn, regionName);
if (barriers.length == 0) {
return true;
}
@ -185,8 +185,9 @@ class SerialReplicationChecker {
private boolean canPush(Entry entry, byte[] row) throws IOException {
String encodedNameAsString = Bytes.toString(entry.getKey().getEncodedRegionName());
long seqId = entry.getKey().getSequenceId();
ReplicationBarrierResult barrierResult = MetaTableAccessor.getReplicationBarrierResult(conn,
entry.getKey().getTableName(), row, entry.getKey().getEncodedRegionName());
ReplicationBarrierResult barrierResult =
ReplicationBarrierFamilyFormat.getReplicationBarrierResult(conn,
entry.getKey().getTableName(), row, entry.getKey().getEncodedRegionName());
LOG.debug("Replication barrier for {}: {}", entry, barrierResult);
long[] barriers = barrierResult.getBarriers();
int index = Arrays.binarySearch(barriers, seqId);

View File

@ -2799,7 +2799,7 @@ public class HBaseFsck extends Configured implements Closeable {
throw new IOException("Two entries in hbase:meta are same " + previous);
}
}
List<RegionInfo> mergeParents = MetaTableAccessor.getMergeRegions(result.rawCells());
List<RegionInfo> mergeParents = CatalogFamilyFormat.getMergeRegions(result.rawCells());
if (mergeParents != null) {
for (RegionInfo mergeRegion : mergeParents) {
if (mergeRegion != null) {

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
@ -30,18 +29,14 @@ import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionLocator;
@ -54,13 +49,11 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.junit.AfterClass;
@ -112,72 +105,6 @@ public class TestMetaTableAccessor {
UTIL.shutdownMiniCluster();
}
/**
* Test for HBASE-23044.
*/
@Test
public void testGetMergeRegions() throws Exception {
TableName tn = TableName.valueOf(this.name.getMethodName());
UTIL.createMultiRegionTable(tn, Bytes.toBytes("CF"), 4);
UTIL.waitTableAvailable(tn);
try (Admin admin = UTIL.getAdmin()) {
List<RegionInfo> regions = admin.getRegions(tn);
assertEquals(4, regions.size());
admin.mergeRegionsAsync(regions.get(0).getRegionName(), regions.get(1).getRegionName(), false)
.get(60, TimeUnit.SECONDS);
admin.mergeRegionsAsync(regions.get(2).getRegionName(), regions.get(3).getRegionName(), false)
.get(60, TimeUnit.SECONDS);
List<RegionInfo> mergedRegions = admin.getRegions(tn);
assertEquals(2, mergedRegions.size());
RegionInfo mergedRegion0 = mergedRegions.get(0);
RegionInfo mergedRegion1 = mergedRegions.get(1);
List<RegionInfo> mergeParents =
MetaTableAccessor.getMergeRegions(connection, mergedRegion0.getRegionName());
assertTrue(mergeParents.contains(regions.get(0)));
assertTrue(mergeParents.contains(regions.get(1)));
mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion1.getRegionName());
assertTrue(mergeParents.contains(regions.get(2)));
assertTrue(mergeParents.contains(regions.get(3)));
// Delete merge qualifiers for mergedRegion0, after which getMergeRegions cannot find them again
MetaTableAccessor.deleteMergeQualifiers(connection, mergedRegion0);
mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion0.getRegionName());
assertNull(mergeParents);
mergeParents = MetaTableAccessor.getMergeRegions(connection, mergedRegion1.getRegionName());
assertTrue(mergeParents.contains(regions.get(2)));
assertTrue(mergeParents.contains(regions.get(3)));
}
UTIL.deleteTable(tn);
}
@Test
public void testAddMergeRegions() throws IOException {
TableName tn = TableName.valueOf(this.name.getMethodName());
Put put = new Put(Bytes.toBytes(this.name.getMethodName()));
List<RegionInfo> ris = new ArrayList<>();
int limit = 10;
byte[] previous = HConstants.EMPTY_START_ROW;
for (int i = 0; i < limit; i++) {
RegionInfo ri =
RegionInfoBuilder.newBuilder(tn).setStartKey(previous).setEndKey(Bytes.toBytes(i)).build();
ris.add(ri);
}
put = MetaTableAccessor.addMergeRegions(put, ris);
List<Cell> cells = put.getFamilyCellMap().get(HConstants.CATALOG_FAMILY);
String previousQualifier = null;
assertEquals(limit, cells.size());
for (Cell cell : cells) {
LOG.info(cell.toString());
String qualifier = Bytes.toString(cell.getQualifierArray());
assertTrue(qualifier.startsWith(HConstants.MERGE_QUALIFIER_PREFIX_STR));
assertNotEquals(qualifier, previousQualifier);
previousQualifier = qualifier;
}
}
@Test
public void testIsMetaWhenAllHealthy() throws InterruptedException {
HMaster m = UTIL.getMiniHBaseCluster().getMaster();
@ -507,60 +434,6 @@ public class TestMetaTableAccessor {
}
}
@Test
public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException {
long regionId = System.currentTimeMillis();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo splitA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(Bytes.toBytes("a")).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(parent);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);
MetaTableAccessor.splitRegion(connection, parent, -1L, splitA, splitB, serverName0, 3);
assertEmptyMetaLocation(meta, splitA.getRegionName(), 1);
assertEmptyMetaLocation(meta, splitA.getRegionName(), 2);
assertEmptyMetaLocation(meta, splitB.getRegionName(), 1);
assertEmptyMetaLocation(meta, splitB.getRegionName(), 2);
}
}
@Test
public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException {
long regionId = System.currentTimeMillis();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
RegionInfo parentA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(Bytes.toBytes("a")).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo parentB = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo merged = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(parentA, parentB);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);
MetaTableAccessor.mergeRegions(connection, merged, getMapOfRegionsToSeqNum(parentA, parentB),
serverName0, 3);
assertEmptyMetaLocation(meta, merged.getRegionName(), 1);
assertEmptyMetaLocation(meta, merged.getRegionName(), 2);
}
}
private Map<RegionInfo, Long> getMapOfRegionsToSeqNum(RegionInfo... regions) {
Map<RegionInfo, Long> mids = new HashMap<>(regions.length);
for (RegionInfo region : regions) {
@ -650,68 +523,6 @@ public class TestMetaTableAccessor {
}
}
@Test
public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException {
long regionId = System.currentTimeMillis();
RegionInfo regionInfoA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(new byte[] { 'a' }).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo regionInfoB = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(new byte[] { 'a' }).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo mergedRegionInfo =
RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
ServerName sn = ServerName.valueOf("bar", 0, 0);
try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(regionInfoA, regionInfoB);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1);
// write the serverName column with a big current time, but set the master's time as even
// bigger. When region merge deletes the rows for regionA and regionB, the serverName columns
// should not be seen by the following get.
long serverNameTime = EnvironmentEdgeManager.currentTime() + 100000000;
long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;
// write the serverName columns
MetaTableAccessor.updateRegionLocation(connection, regionInfoA, sn, 1, serverNameTime);
// assert that we have the serverName column with expected ts
Get get = new Get(mergedRegionInfo.getRegionName());
Result result = meta.get(get);
Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(0));
assertNotNull(serverCell);
assertEquals(serverNameTime, serverCell.getTimestamp());
ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
edge.setValue(masterSystemTime);
EnvironmentEdgeManager.injectEdge(edge);
try {
// now merge the regions, effectively deleting the rows for region a and b.
MetaTableAccessor.mergeRegions(connection, mergedRegionInfo,
getMapOfRegionsToSeqNum(regionInfoA, regionInfoB), sn, 1);
} finally {
EnvironmentEdgeManager.reset();
}
result = meta.get(get);
serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(0));
Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getStartCodeColumn(0));
Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getSeqNumColumn(0));
assertNull(serverCell);
assertNull(startCodeCell);
assertNull(seqNumCell);
}
}
public static class SpyingRpcSchedulerFactory extends SimpleRpcSchedulerFactory {
@Override
public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
@ -738,98 +549,6 @@ public class TestMetaTableAccessor {
}
}
@Test
public void testMetaUpdatesGoToPriorityQueue() throws Exception {
// This test has to be end-to-end, and do the verification from the server side
Configuration c = UTIL.getConfiguration();
c.set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
SpyingRpcSchedulerFactory.class.getName());
// restart so that the new config takes effect
afterClass();
beforeClass();
final TableName tableName = TableName.valueOf(name.getMethodName());
try (Admin admin = connection.getAdmin();
RegionLocator rl = connection.getRegionLocator(tableName)) {
// create a table and prepare for a manual split
UTIL.createTable(tableName, "cf1");
HRegionLocation loc = rl.getAllRegionLocations().get(0);
RegionInfo parent = loc.getRegion();
long rid = 1000;
byte[] splitKey = Bytes.toBytes("a");
RegionInfo splitA =
RegionInfoBuilder.newBuilder(parent.getTable()).setStartKey(parent.getStartKey())
.setEndKey(splitKey).setSplit(false).setRegionId(rid).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(parent.getTable()).setStartKey(splitKey)
.setEndKey(parent.getEndKey()).setSplit(false).setRegionId(rid).build();
// find the meta server
MiniHBaseCluster cluster = UTIL.getMiniHBaseCluster();
int rsIndex = cluster.getServerWithMeta();
HRegionServer rs;
if (rsIndex >= 0) {
rs = cluster.getRegionServer(rsIndex);
} else {
// it is in master
rs = cluster.getMaster();
}
SpyingRpcScheduler scheduler = (SpyingRpcScheduler) rs.getRpcServer().getScheduler();
long prevCalls = scheduler.numPriorityCalls;
MetaTableAccessor.splitRegion(connection, parent, -1L, splitA, splitB, loc.getServerName(),
1);
assertTrue(prevCalls < scheduler.numPriorityCalls);
}
}
@Test
public void testEmptyMetaDaughterLocationDuringSplit() throws IOException {
long regionId = System.currentTimeMillis();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo splitA = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
.setStartKey(Bytes.toBytes("a")).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
Table meta = MetaTableAccessor.getMetaHTable(connection);
try {
List<RegionInfo> regionInfos = Lists.newArrayList(parent);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);
MetaTableAccessor.splitRegion(connection, parent, -1L, splitA, splitB, serverName0, 3);
Get get1 = new Get(splitA.getRegionName());
Result resultA = meta.get(get1);
Cell serverCellA = resultA.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(splitA.getReplicaId()));
Cell startCodeCellA = resultA.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getStartCodeColumn(splitA.getReplicaId()));
assertNull(serverCellA);
assertNull(startCodeCellA);
Get get2 = new Get(splitB.getRegionName());
Result resultB = meta.get(get2);
Cell serverCellB = resultB.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(splitB.getReplicaId()));
Cell startCodeCellB = resultB.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getStartCodeColumn(splitB.getReplicaId()));
assertNull(serverCellB);
assertNull(startCodeCellB);
} finally {
if (meta != null) {
meta.close();
}
}
}
@Test
public void testScanByRegionEncodedNameExistingRegion() throws Exception {
final TableName tableName = TableName.valueOf("testScanByRegionEncodedNameExistingRegion");

View File

@ -0,0 +1,134 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TestMetaTableAccessor.SpyingRpcScheduler;
import org.apache.hadoop.hbase.TestMetaTableAccessor.SpyingRpcSchedulerFactory;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FutureUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
@Category({ MiscTests.class, MediumTests.class })
public class TestMetaUpdatesGoToPriorityQueue {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestMetaUpdatesGoToPriorityQueue.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
@BeforeClass
public static void beforeClass() throws Exception {
// This test has to be end-to-end, and do the verification from the server side
UTIL.getConfiguration().set(RSRpcServices.REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
SpyingRpcSchedulerFactory.class.getName());
UTIL.startMiniCluster();
}
@AfterClass
public static void afterClass() throws Exception {
UTIL.shutdownMiniCluster();
}
private void multiMutate(byte[] row, List<Mutation> mutations) throws IOException {
MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
for (Mutation mutation : mutations) {
if (mutation instanceof Put) {
builder.addMutationRequest(
ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.PUT, mutation));
} else if (mutation instanceof Delete) {
builder.addMutationRequest(
ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation));
} else {
throw new DoNotRetryIOException(
"multi in MetaEditor doesn't support " + mutation.getClass().getName());
}
}
MutateRowsRequest request = builder.build();
AsyncTable<?> table = UTIL.getAsyncConnection().getTable(TableName.META_TABLE_NAME);
CompletableFuture<MutateRowsResponse> future =
table.<MultiRowMutationService, MutateRowsResponse> coprocessorService(
MultiRowMutationService::newStub,
(stub, controller, done) -> stub.mutateRows(controller, request, done), row);
FutureUtils.get(future);
}
@Test
public void test() throws IOException, InterruptedException {
TableName tableName = TableName.valueOf(getClass().getSimpleName());
// create a table and prepare for a manual split
UTIL.createTable(tableName, "cf1");
UTIL.waitTableAvailable(tableName);
RegionInfo parent = UTIL.getAdmin().getRegions(tableName).get(0);
long rid = 1000;
byte[] splitKey = Bytes.toBytes("a");
RegionInfo splitA =
RegionInfoBuilder.newBuilder(parent.getTable()).setStartKey(parent.getStartKey())
.setEndKey(splitKey).setSplit(false).setRegionId(rid).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(parent.getTable()).setStartKey(splitKey)
.setEndKey(parent.getEndKey()).setSplit(false).setRegionId(rid).build();
// find the meta server
MiniHBaseCluster cluster = UTIL.getMiniHBaseCluster();
int rsIndex = cluster.getServerWithMeta();
HRegionServer rs;
if (rsIndex >= 0) {
rs = cluster.getRegionServer(rsIndex);
} else {
// it is in master
rs = cluster.getMaster();
}
SpyingRpcScheduler scheduler = (SpyingRpcScheduler) rs.getRpcServer().getScheduler();
long prevCalls = scheduler.numPriorityCalls;
long time = System.currentTimeMillis();
Put putParent = MetaTableAccessor.makePutFromRegionInfo(
RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), time);
MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA, time);
Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB, time);
multiMutate(putParent.getRow(), Arrays.asList(putParent, putA, putB));
assertTrue(prevCalls < scheduler.numPriorityCalls);
}
}

View File

@ -141,8 +141,8 @@ public class TestSplitMerge {
RegionInfo mergedRegion = mergedRegions.get(0);
List<RegionInfo> mergeParentRegions = MetaTableAccessor.getMergeRegions(UTIL.getConnection(),
mergedRegion.getRegionName());
List<RegionInfo> mergeParentRegions = UTIL.getMiniHBaseCluster().getMaster()
.getAssignmentManager().getRegionStateStore().getMergeRegions(mergedRegion);
assertEquals(mergeParentRegions.size(), regionCount);

View File

@ -17,14 +17,15 @@
*/
package org.apache.hadoop.hbase.master;
import static org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils.isNotEmpty;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
@ -39,8 +40,9 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.GCRegionProcedure;
import org.apache.hadoop.hbase.master.assignment.GCMultipleMergedRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.GCRegionProcedure;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@ -212,7 +214,7 @@ public class TestMetaFixer {
// a hole.
Map<RegionInfo, Result> mergedRegions = cj.getLastReport().mergedRegions;
for (Map.Entry<RegionInfo, Result> e : mergedRegions.entrySet()) {
List<RegionInfo> parents = MetaTableAccessor.getMergeRegions(e.getValue().rawCells());
List<RegionInfo> parents = CatalogFamilyFormat.getMergeRegions(e.getValue().rawCells());
if (parents != null) {
ProcedureExecutor<MasterProcedureEnv> pe = services.getMasterProcedureExecutor();
pe.submitProcedure(new GCMultipleMergedRegionsProcedure(pe.getEnvironment(),
@ -276,7 +278,7 @@ public class TestMetaFixer {
cj.scan();
final CatalogJanitor.Report postReport = cj.getLastReport();
RegionStates regionStates = am.getRegionStates();
RegionStateStore regionStateStore = am.getRegionStateStore();
// Make sure that two merged regions are opened and GCs are done.
if (postReport.getOverlaps().size() == 1) {
Pair<RegionInfo, RegionInfo> pair = postReport.getOverlaps().get(0);
@ -285,10 +287,8 @@ public class TestMetaFixer {
(!overlapRegions.contains(pair.getSecond().getRegionNameAsString()) &&
regionStates.getRegionState(pair.getSecond()).isOpened())) {
// Make sure GC is done.
List<RegionInfo> firstParents = MetaTableAccessor.getMergeRegions(
services.getConnection(), pair.getFirst().getRegionName());
List<RegionInfo> secondParents = MetaTableAccessor.getMergeRegions(
services.getConnection(), pair.getSecond().getRegionName());
List<RegionInfo> firstParents = regionStateStore.getMergeRegions(pair.getFirst());
List<RegionInfo> secondParents = regionStateStore.getMergeRegions(pair.getSecond());
return (firstParents == null || firstParents.isEmpty()) &&
(secondParents == null || secondParents.isEmpty());

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.assignment;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@ -205,10 +206,12 @@ public class TestMergeTableRegionsProcedure {
// the merged regions cleanup.
UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(true);
UTIL.getHBaseCluster().getMaster().getCatalogJanitor().triggerNow();
byte [] mergedRegion = proc.getMergedRegion().getRegionName();
RegionInfo mergedRegion = proc.getMergedRegion();
RegionStateStore regionStateStore =
UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
while (ris != null && ris.get(0) != null && ris.get(1) != null) {
ris = MetaTableAccessor.getMergeRegions(UTIL.getConnection(), mergedRegion);
LOG.info("{} {}", Bytes.toStringBinary(mergedRegion), ris);
ris = regionStateStore.getMergeRegions(mergedRegion);
LOG.info("{} {}", Bytes.toStringBinary(mergedRegion.getRegionName()), ris);
Threads.sleep(1000);
}
assertEquals(countOfRowsLoaded, UTIL.countRows(tableName));

View File

@ -17,34 +17,52 @@
*/
package org.apache.hadoop.hbase.master.assignment;
import static org.apache.hadoop.hbase.TestMetaTableAccessor.assertEmptyMetaLocation;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
@Category({ MasterTests.class, MediumTests.class })
@ -54,10 +72,11 @@ public class TestRegionStateStore {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestRegionStateStore.class);
private static final Logger LOG = LoggerFactory.getLogger(TestRegionStateStore.class);
private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
@Rule
public final TableNameTestRule name = new TableNameTestRule();
@BeforeClass
public static void beforeClass() throws Exception {
UTIL.startMiniCluster();
@ -136,4 +155,243 @@ public class TestRegionStateStore {
});
assertFalse("Visitor has been called, but it shouldn't.", visitorCalled.get());
}
@Test
public void testMetaLocationForRegionReplicasIsAddedAtRegionSplit() throws IOException {
long regionId = System.currentTimeMillis();
ServerName serverName0 =
ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong());
TableName tableName = name.getTableName();
RegionInfo parent = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo splitA = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a"))
.setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId + 1).setReplicaId(0)
.build();
List<RegionInfo> regionInfos = Lists.newArrayList(parent);
MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3);
final RegionStateStore regionStateStore =
UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
regionStateStore.splitRegion(parent, splitA, splitB, serverName0,
TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3).build());
try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) {
assertEmptyMetaLocation(meta, splitA.getRegionName(), 1);
assertEmptyMetaLocation(meta, splitA.getRegionName(), 2);
assertEmptyMetaLocation(meta, splitB.getRegionName(), 1);
assertEmptyMetaLocation(meta, splitB.getRegionName(), 2);
}
}
@Test
public void testEmptyMetaDaughterLocationDuringSplit() throws IOException {
TableName tableName = name.getTableName();
long regionId = System.currentTimeMillis();
ServerName serverName0 =
ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong());
RegionInfo parent = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo splitA = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a"))
.setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId + 1).setReplicaId(0)
.build();
List<RegionInfo> regionInfos = Lists.newArrayList(parent);
MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3);
final RegionStateStore regionStateStore =
UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
regionStateStore.splitRegion(parent, splitA, splitB, serverName0,
TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3).build());
try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) {
Get get1 = new Get(splitA.getRegionName());
Result resultA = meta.get(get1);
Cell serverCellA = resultA.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(splitA.getReplicaId()));
Cell startCodeCellA = resultA.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getStartCodeColumn(splitA.getReplicaId()));
assertNull(serverCellA);
assertNull(startCodeCellA);
Get get2 = new Get(splitB.getRegionName());
Result resultB = meta.get(get2);
Cell serverCellB = resultB.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(splitB.getReplicaId()));
Cell startCodeCellB = resultB.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getStartCodeColumn(splitB.getReplicaId()));
assertNull(serverCellB);
assertNull(startCodeCellB);
}
}
@Test
public void testMetaLocationForRegionReplicasIsAddedAtRegionMerge() throws IOException {
long regionId = System.currentTimeMillis();
ServerName serverName0 =
ServerName.valueOf("foo", 60010, ThreadLocalRandom.current().nextLong());
TableName tableName = name.getTableName();
RegionInfo parentA = RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes("a"))
.setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId).setReplicaId(0)
.build();
RegionInfo parentB = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo merged = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
final RegionStateStore regionStateStore =
UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) {
List<RegionInfo> regionInfos = Lists.newArrayList(parentA, parentB);
MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 3);
regionStateStore.mergeRegions(merged, new RegionInfo[] { parentA, parentB }, serverName0,
TableDescriptorBuilder.newBuilder(tableName).setRegionReplication(3).build());
assertEmptyMetaLocation(meta, merged.getRegionName(), 1);
assertEmptyMetaLocation(meta, merged.getRegionName(), 2);
}
}
@Test
public void testMastersSystemTimeIsUsedInMergeRegions() throws IOException {
long regionId = System.currentTimeMillis();
TableName tableName = name.getTableName();
RegionInfo regionInfoA = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(new byte[] { 'a' }).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo regionInfoB = RegionInfoBuilder.newBuilder(tableName).setStartKey(new byte[] { 'a' })
.setEndKey(HConstants.EMPTY_END_ROW).setSplit(false).setRegionId(regionId).setReplicaId(0)
.build();
RegionInfo mergedRegionInfo = RegionInfoBuilder.newBuilder(tableName)
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
ServerName sn = ServerName.valueOf("bar", 0, 0);
try (Table meta = MetaTableAccessor.getMetaHTable(UTIL.getConnection())) {
List<RegionInfo> regionInfos = Lists.newArrayList(regionInfoA, regionInfoB);
MetaTableAccessor.addRegionsToMeta(UTIL.getConnection(), regionInfos, 1);
// write the serverName column with a big current time, but set the master's time as even
// bigger. When region merge deletes the rows for regionA and regionB, the serverName columns
// should not be seen by the following get.
long serverNameTime = EnvironmentEdgeManager.currentTime() + 100000000;
long masterSystemTime = EnvironmentEdgeManager.currentTime() + 123456789;
// write the serverName columns
MetaTableAccessor.updateRegionLocation(UTIL.getConnection(), regionInfoA, sn, 1,
serverNameTime);
// assert that we have the serverName column with expected ts
Get get = new Get(mergedRegionInfo.getRegionName());
Result result = meta.get(get);
Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(0));
assertNotNull(serverCell);
assertEquals(serverNameTime, serverCell.getTimestamp());
final RegionStateStore regionStateStore =
UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
edge.setValue(masterSystemTime);
EnvironmentEdgeManager.injectEdge(edge);
try {
// now merge the regions, effectively deleting the rows for region a and b.
regionStateStore.mergeRegions(mergedRegionInfo,
new RegionInfo[] { regionInfoA, regionInfoB }, sn,
TableDescriptorBuilder.newBuilder(tableName).build());
} finally {
EnvironmentEdgeManager.reset();
}
result = meta.get(get);
serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getServerColumn(0));
Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getStartCodeColumn(0));
Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
CatalogFamilyFormat.getSeqNumColumn(0));
assertNull(serverCell);
assertNull(startCodeCell);
assertNull(seqNumCell);
}
}
/**
* Test for HBASE-23044.
*/
@Test
public void testGetMergeRegions() throws Exception {
TableName tn = name.getTableName();
UTIL.createMultiRegionTable(tn, Bytes.toBytes("CF"), 4);
UTIL.waitTableAvailable(tn);
Admin admin = UTIL.getAdmin();
List<RegionInfo> regions = admin.getRegions(tn);
assertEquals(4, regions.size());
admin
.mergeRegionsAsync(
new byte[][] { regions.get(0).getRegionName(), regions.get(1).getRegionName() }, false)
.get(60, TimeUnit.SECONDS);
admin
.mergeRegionsAsync(
new byte[][] { regions.get(2).getRegionName(), regions.get(3).getRegionName() }, false)
.get(60, TimeUnit.SECONDS);
List<RegionInfo> mergedRegions = admin.getRegions(tn);
assertEquals(2, mergedRegions.size());
RegionInfo mergedRegion0 = mergedRegions.get(0);
RegionInfo mergedRegion1 = mergedRegions.get(1);
final RegionStateStore regionStateStore =
UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStateStore();
List<RegionInfo> mergeParents = regionStateStore.getMergeRegions(mergedRegion0);
assertTrue(mergeParents.contains(regions.get(0)));
assertTrue(mergeParents.contains(regions.get(1)));
mergeParents = regionStateStore.getMergeRegions(mergedRegion1);
assertTrue(mergeParents.contains(regions.get(2)));
assertTrue(mergeParents.contains(regions.get(3)));
// Delete merge qualifiers for mergedRegion0, after which getMergeRegions cannot find them again
regionStateStore.deleteMergeQualifiers(mergedRegion0);
mergeParents = regionStateStore.getMergeRegions(mergedRegion0);
assertNull(mergeParents);
mergeParents = regionStateStore.getMergeRegions(mergedRegion1);
assertTrue(mergeParents.contains(regions.get(2)));
assertTrue(mergeParents.contains(regions.get(3)));
}
@Test
public void testAddMergeRegions() throws IOException {
TableName tn = name.getTableName();
Put put = new Put(Bytes.toBytes(name.getTableName().getNameAsString()));
List<RegionInfo> ris = new ArrayList<>();
int limit = 10;
byte[] previous = HConstants.EMPTY_START_ROW;
for (int i = 0; i < limit; i++) {
RegionInfo ri =
RegionInfoBuilder.newBuilder(tn).setStartKey(previous).setEndKey(Bytes.toBytes(i)).build();
ris.add(ri);
}
put = RegionStateStore.addMergeRegions(put, ris);
List<Cell> cells = put.getFamilyCellMap().get(HConstants.CATALOG_FAMILY);
String previousQualifier = null;
assertEquals(limit, cells.size());
for (Cell cell : cells) {
String qualifier = Bytes.toString(cell.getQualifierArray());
assertTrue(qualifier.startsWith(HConstants.MERGE_QUALIFIER_PREFIX_STR));
assertNotEquals(qualifier, previousQualifier);
previousQualifier = qualifier;
}
}
}

View File

@ -35,7 +35,6 @@ import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
@ -49,6 +48,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -208,15 +208,15 @@ public class TestReplicationBarrierCleaner {
// should only be called twice although we have 4 regions to clean
verify(peerManager, times(2)).getSerialPeerIdsBelongsTo(any(TableName.class));
assertArrayEquals(new long[] { 60 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region11.getRegionName()));
assertArrayEquals(new long[] { 70 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region12.getRegionName()));
assertArrayEquals(new long[] { 60 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region11.getRegionName()));
assertArrayEquals(new long[] { 70 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region12.getRegionName()));
assertArrayEquals(new long[] { 400 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region21.getRegionName()));
assertArrayEquals(new long[] { 600 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region22.getRegionName()));
assertArrayEquals(new long[] { 400 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region21.getRegionName()));
assertArrayEquals(new long[] { 600 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region22.getRegionName()));
}
@Test
@ -235,28 +235,28 @@ public class TestReplicationBarrierCleaner {
// beyond the first barrier, no deletion
cleaner.chore();
assertArrayEquals(new long[] { 10, 20, 30, 40, 50, 60 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region.getRegionName()));
assertArrayEquals(new long[] { 10, 20, 30, 40, 50, 60 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region.getRegionName()));
// in the first range, still no deletion
cleaner.chore();
assertArrayEquals(new long[] { 10, 20, 30, 40, 50, 60 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region.getRegionName()));
assertArrayEquals(new long[] { 10, 20, 30, 40, 50, 60 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region.getRegionName()));
// in the second range, 10 is deleted
cleaner.chore();
assertArrayEquals(new long[] { 20, 30, 40, 50, 60 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region.getRegionName()));
assertArrayEquals(new long[] { 20, 30, 40, 50, 60 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region.getRegionName()));
// between 50 and 60, so the barriers before 50 will be deleted
cleaner.chore();
assertArrayEquals(new long[] { 50, 60 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region.getRegionName()));
assertArrayEquals(new long[] { 50, 60 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region.getRegionName()));
// in the last open range, 50 is deleted
cleaner.chore();
assertArrayEquals(new long[] { 60 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region.getRegionName()));
assertArrayEquals(new long[] { 60 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region.getRegionName()));
}
@Test
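The retention rule that the comments above step through can be stated compactly: every barrier strictly below the range containing the last pushed sequence id is deletable. A sketch of that rule follows; it is illustrative only, not the actual ReplicationBarrierCleaner implementation:

  // Given a region's sorted barrier list and the last sequence id already
  // pushed to a serial peer, keep the barrier that opens the range containing
  // lastPushed, plus everything after it; earlier barriers may be deleted.
  // This mirrors the progression asserted by the chore() calls above.
  static long[] retainedBarriers(long[] barriers, long lastPushed) {
    int firstKept = 0;
    for (int i = 0; i < barriers.length; i++) {
      if (barriers[i] <= lastPushed) {
        firstKept = i; // lastPushed falls in [barriers[i], barriers[i + 1])
      }
    }
    return java.util.Arrays.copyOfRange(barriers, firstKept, barriers.length);
  }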
@@ -274,8 +274,8 @@ public class TestReplicationBarrierCleaner {
// we have something in the catalog family, so only 40 is deleted
cleaner.chore();
assertArrayEquals(new long[] { 50, 60 },
MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(), region.getRegionName()));
assertArrayEquals(new long[] { 50, 60 }, ReplicationBarrierFamilyFormat
.getReplicationBarriers(UTIL.getConnection(), region.getRegionName()));
verify(queueStorage, never()).removeLastSequenceIds(anyString(), anyList());
// No catalog family, so we should remove the whole row
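A sketch of that whole-row rule, under the assumption that the cleaner sees the scanned Result and a meta Table handle (names illustrative, standard client imports assumed):

  // If the row has no catalog (info) family cell, the region itself is gone,
  // so the whole rep_barrier row can be dropped.
  static void deleteRowIfRegionGone(Table meta, Result row) throws IOException {
    for (Cell c : row.rawCells()) {
      if (CellUtil.matchingFamily(c, HConstants.CATALOG_FAMILY)) {
        return; // region still present in meta, keep the row
      }
    }
    meta.delete(new Delete(row.getRow()));
  }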

View File

@@ -33,6 +33,7 @@ import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -223,10 +224,10 @@ public class TestRegionMergeTransactionOnCluster {
MASTER.getConnection(), mergedRegionInfo.getRegionName());
// contains merge references in META
assertTrue(MetaTableAccessor.hasMergeRegions(mergedRegionResult.rawCells()));
assertTrue(CatalogFamilyFormat.hasMergeRegions(mergedRegionResult.rawCells()));
// the merging regions' directories are still present in the file system
List<RegionInfo> p = MetaTableAccessor.getMergeRegions(mergedRegionResult.rawCells());
List<RegionInfo> p = CatalogFamilyFormat.getMergeRegions(mergedRegionResult.rawCells());
RegionInfo regionA = p.get(0);
RegionInfo regionB = p.get(1);
FileSystem fs = MASTER.getMasterFileSystem().getFileSystem();
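Taken together, these calls form the merge-reference read path on the relocated CatalogFamilyFormat class. A minimal sketch, with conn and mergedRegionName standing in for the test's connection and merged region name:

  // Fetch the merged region's row from hbase:meta, then resolve its parents.
  Result row = MetaTableAccessor.getRegionResult(conn, mergedRegionName);
  if (CatalogFamilyFormat.hasMergeRegions(row.rawCells())) {
    List<RegionInfo> parents = CatalogFamilyFormat.getMergeRegions(row.rawCells());
    RegionInfo regionA = parents.get(0); // the regions that were merged away
    RegionInfo regionB = parents.get(1);
  }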
@@ -297,7 +298,7 @@ public class TestRegionMergeTransactionOnCluster {
while (true) {
mergedRegionResult = MetaTableAccessor
.getRegionResult(TEST_UTIL.getConnection(), mergedRegionInfo.getRegionName());
if (MetaTableAccessor.hasMergeRegions(mergedRegionResult.rawCells())) {
if (CatalogFamilyFormat.hasMergeRegions(mergedRegionResult.rawCells())) {
LOG.info("Waiting on cleanup of merge columns {}",
Arrays.asList(mergedRegionResult.rawCells()).stream().
map(c -> c.toString()).collect(Collectors.joining(",")));
@@ -306,7 +307,7 @@ public class TestRegionMergeTransactionOnCluster {
break;
}
}
assertFalse(MetaTableAccessor.hasMergeRegions(mergedRegionResult.rawCells()));
assertFalse(CatalogFamilyFormat.hasMergeRegions(mergedRegionResult.rawCells()));
} finally {
ADMIN.catalogJanitorSwitch(true);
TEST_UTIL.deleteTable(tableName);

View File

@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
@@ -45,6 +44,7 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
@@ -181,7 +181,8 @@ public class TestSerialReplicationChecker {
private void addParents(RegionInfo region, List<RegionInfo> parents) throws IOException {
Put put = new Put(region.getRegionName(), EnvironmentEdgeManager.currentTime());
put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY,
MetaTableAccessor.REPLICATION_PARENT_QUALIFIER, MetaTableAccessor.getParentsBytes(parents));
ReplicationBarrierFamilyFormat.REPLICATION_PARENT_QUALIFIER,
ReplicationBarrierFamilyFormat.getParentsBytes(parents));
try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
table.put(put);
}
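The column written here can be read back with the plain client API; only the qualifier constant has moved to ReplicationBarrierFamilyFormat. A sketch, assuming the same UTIL connection and region as the test:

  try (Table meta = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
    Get get = new Get(region.getRegionName()).addColumn(HConstants.REPLICATION_BARRIER_FAMILY,
      ReplicationBarrierFamilyFormat.REPLICATION_PARENT_QUALIFIER);
    // the value is whatever getParentsBytes(parents) serialized above
    byte[] parentsBytes = meta.get(get).getValue(HConstants.REPLICATION_BARRIER_FAMILY,
      ReplicationBarrierFamilyFormat.REPLICATION_PARENT_QUALIFIER);
  }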

View File

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.replication.ReplicationBarrierFamilyFormat;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
@@ -121,7 +122,7 @@ public class TestHBaseFsckCleanReplicationBarriers {
try (ResultScanner scanner =
MetaTableAccessor.getMetaHTable(UTIL.getConnection()).getScanner(barrierScan)) {
while ((result = scanner.next()) != null) {
assertTrue(MetaTableAccessor.getReplicationBarriers(result).length > 0);
assertTrue(ReplicationBarrierFamilyFormat.getReplicationBarriers(result).length > 0);
}
}
boolean cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
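The barrier scan used above, as a self-contained sketch; conn stands in for UTIL.getConnection(), and the Scan is assumed to be set up the same way as the test's barrierScan:

  // Scan hbase:meta restricted to the rep_barrier family and decode each
  // row's barrier list with the relocated helper.
  Scan scan = new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
  try (Table meta = MetaTableAccessor.getMetaHTable(conn);
      ResultScanner scanner = meta.getScanner(scan)) {
    for (Result r : scanner) {
      long[] barriers = ReplicationBarrierFamilyFormat.getReplicationBarriers(r);
      // every row carrying this family should decode to at least one barrier
    }
  }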
@@ -135,7 +136,7 @@ public class TestHBaseFsckCleanReplicationBarriers {
cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
assertFalse(cleaned);
for (RegionInfo region : regionInfos) {
assertEquals(0, MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(),
assertEquals(0, ReplicationBarrierFamilyFormat.getReplicationBarriers(UTIL.getConnection(),
region.getRegionName()).length);
}
}
@@ -168,7 +169,7 @@ public class TestHBaseFsckCleanReplicationBarriers {
cleaned = HbckTestingUtil.cleanReplicationBarrier(UTIL.getConfiguration(), tableName);
assertFalse(cleaned);
for (RegionInfo region : UTIL.getAdmin().getRegions(tableName)) {
assertEquals(0, MetaTableAccessor.getReplicationBarrier(UTIL.getConnection(),
assertEquals(0, ReplicationBarrierFamilyFormat.getReplicationBarriers(UTIL.getConnection(),
region.getRegionName()).length);
}
}