HBASE-24609 Move MetaTableAccessor out of hbase-client (#1943)
Signed-off-by: Guanghao Zhang <zghao@apache.org>
parent 4b89ee24f3
commit 22103067b6
File diff suppressed because it is too large
SnapshotOfRegionAssignmentFromMeta.java:

@@ -31,11 +31,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -106,12 +106,12 @@ public class SnapshotOfRegionAssignmentFromMeta {
     LOG.info("Start to scan the hbase:meta for the current region assignment " +
       "snappshot");
     // TODO: at some point this code could live in the MetaTableAccessor
-    Visitor v = new Visitor() {
+    ClientMetaTableAccessor.Visitor v = new ClientMetaTableAccessor.Visitor() {
       @Override
       public boolean visit(Result result) throws IOException {
         try {
           if (result == null || result.isEmpty()) return true;
-          RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
+          RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result);
          if (rl == null) return true;
          RegionInfo hri = rl.getRegionLocation(0).getRegion();
          if (hri == null) return true;
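The hunk above swaps the old anonymous `Visitor` for the relocated `ClientMetaTableAccessor.Visitor` and parses the meta row through `CatalogFamilyFormat`. A minimal sketch of the same pattern from calling code, assuming an open `Connection` and the `MetaTableAccessor.fullScanRegions(Connection, ClientMetaTableAccessor.Visitor)` entry point shown in this diff; the class and method names below are illustrative, not part of the commit:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;

public final class MetaRowCounter {

  /** Counts hbase:meta rows that carry a parsable region location. Illustrative only. */
  public static int countRowsWithLocation(Connection connection) throws IOException {
    final int[] count = {0};
    MetaTableAccessor.fullScanRegions(connection, new ClientMetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result result) throws IOException {
        // getRegionLocations() moved from MetaTableAccessor to CatalogFamilyFormat in this commit.
        RegionLocations locations = CatalogFamilyFormat.getRegionLocations(result);
        if (locations != null) {
          count[0]++;
        }
        return true; // returning false would stop the meta scan early
      }
    });
    return count[0];
  }
}
```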
CatalogFamilyFormat.java (new file):

@@ -0,0 +1,349 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.SortedMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Helper class for generating/parsing
+ * {@value org.apache.hadoop.hbase.HConstants#CATALOG_FAMILY_STR} family cells in meta table.
+ * <p/>
+ * The cells in catalog family are:
+ *
+ * <pre>
+ * For each table range ('Region'), there is a single row, formatted as:
+ * <tableName>,<startKey>,<regionId>,<encodedRegionName>.
+ * This row is the serialized regionName of the default region replica.
+ * Columns are:
+ * info:regioninfo => contains serialized HRI for the default region replica
+ * info:server => contains hostname:port (in string form) for the server hosting
+ *                the default regionInfo replica
+ * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting
+ *                            the regionInfo replica with replicaId
+ * info:serverstartcode => contains server start code (in binary long form) for the server
+ *                         hosting the default regionInfo replica
+ * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for
+ *                                     the server hosting the regionInfo replica with
+ *                                     replicaId
+ * info:seqnumDuringOpen => contains seqNum (in binary long form) for the region at the time
+ *                          the server opened the region with default replicaId
+ * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region
+ *                                      at the time the server opened the region with
+ *                                      replicaId
+ * info:splitA => contains a serialized HRI for the first daughter region if the
+ *                region is split
+ * info:splitB => contains a serialized HRI for the second daughter region if the
+ *                region is split
+ * info:merge* => contains a serialized HRI for a merge parent region. There will be two
+ *                or more of these columns in a row. A row that has these columns is
+ *                undergoing a merge and is the result of the merge. Columns listed
+ *                in marge* columns are the parents of this merged region. Example
+ *                columns: info:merge0001, info:merge0002. You make also see 'mergeA',
+ *                and 'mergeB'. This is old form replaced by the new format that allows
+ *                for more than two parents to be merged at a time.
+ * </pre>
+ */
+@InterfaceAudience.Private
+public class CatalogFamilyFormat {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CatalogFamilyFormat.class);
+
+  /** A regex for parsing server columns from meta. See above javadoc for meta layout */
+  private static final Pattern SERVER_COLUMN_PATTERN =
+    Pattern.compile("^server(_[0-9a-fA-F]{4})?$");
+
+  /**
+   * Returns an HRI parsed from this regionName. Not all the fields of the HRI is stored in the
+   * name, so the returned object should only be used for the fields in the regionName.
+   * <p/>
+   * Since the returned object does not contain all the fields, we do not expose this method in
+   * public API, such as {@link RegionInfo} or {@link RegionInfoBuilder}.
+   */
+  public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws IOException {
+    byte[][] fields = RegionInfo.parseRegionName(regionName);
+    long regionId = Long.parseLong(Bytes.toString(fields[2]));
+    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
+    return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1])
+      .setEndKey(fields[2]).setSplit(false).setRegionId(regionId).setReplicaId(replicaId).build();
+  }
+
+  /**
+   * Returns the RegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
+   * <code>qualifier</code> of the catalog table result.
+   * @param r a Result object from the catalog table scan
+   * @param qualifier Column family qualifier
+   * @return An RegionInfo instance or null.
+   */
+  @Nullable
+  public static RegionInfo getRegionInfo(final Result r, byte[] qualifier) {
+    Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, qualifier);
+    if (cell == null) {
+      return null;
+    }
+    return RegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(),
+      cell.getValueLength());
+  }
+
+  /**
+   * Returns RegionInfo object from the column
+   * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog table Result.
+   * @param data a Result object from the catalog table scan
+   * @return RegionInfo or null
+   */
+  public static RegionInfo getRegionInfo(Result data) {
+    return getRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
+  }
+
+  /**
+   * Returns the HRegionLocation parsed from the given meta row Result for the given regionInfo and
+   * replicaId. The regionInfo can be the default region info for the replica.
+   * @param r the meta row result
+   * @param regionInfo RegionInfo for default replica
+   * @param replicaId the replicaId for the HRegionLocation
+   * @return HRegionLocation parsed from the given meta row Result for the given replicaId
+   */
+  public static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo,
+    final int replicaId) {
+    ServerName serverName = getServerName(r, replicaId);
+    long seqNum = getSeqNumDuringOpen(r, replicaId);
+    RegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
+    return new HRegionLocation(replicaInfo, serverName, seqNum);
+  }
+
+  /**
+   * Returns an HRegionLocationList extracted from the result.
+   * @return an HRegionLocationList containing all locations for the region range or null if we
+   *         can't deserialize the result.
+   */
+  @Nullable
+  public static RegionLocations getRegionLocations(final Result r) {
+    if (r == null) {
+      return null;
+    }
+    RegionInfo regionInfo = getRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+    if (regionInfo == null) {
+      return null;
+    }
+
+    List<HRegionLocation> locations = new ArrayList<>(1);
+    NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyMap = r.getNoVersionMap();
+
+    locations.add(getRegionLocation(r, regionInfo, 0));
+
+    NavigableMap<byte[], byte[]> infoMap = familyMap.get(HConstants.CATALOG_FAMILY);
+    if (infoMap == null) {
+      return new RegionLocations(locations);
+    }
+
+    // iterate until all serverName columns are seen
+    int replicaId = 0;
+    byte[] serverColumn = getServerColumn(replicaId);
+    SortedMap<byte[], byte[]> serverMap;
+    serverMap = infoMap.tailMap(serverColumn, false);
+
+    if (serverMap.isEmpty()) {
+      return new RegionLocations(locations);
+    }
+
+    for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
+      replicaId = parseReplicaIdFromServerColumn(entry.getKey());
+      if (replicaId < 0) {
+        break;
+      }
+      HRegionLocation location = getRegionLocation(r, regionInfo, replicaId);
+      // In case the region replica is newly created, it's location might be null. We usually do not
+      // have HRL's in RegionLocations object with null ServerName. They are handled as null HRLs.
+      if (location.getServerName() == null) {
+        locations.add(null);
+      } else {
+        locations.add(location);
+      }
+    }
+
+    return new RegionLocations(locations);
+  }
+
+  /**
+   * Returns a {@link ServerName} from catalog table {@link Result}.
+   * @param r Result to pull from
+   * @return A ServerName instance or null if necessary fields not found or empty.
+   */
+  @Nullable
+  public static ServerName getServerName(Result r, int replicaId) {
+    byte[] serverColumn = getServerColumn(replicaId);
+    Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, serverColumn);
+    if (cell == null || cell.getValueLength() == 0) {
+      return null;
+    }
+    String hostAndPort =
+      Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+    byte[] startcodeColumn = getStartCodeColumn(replicaId);
+    cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, startcodeColumn);
+    if (cell == null || cell.getValueLength() == 0) {
+      return null;
+    }
+    try {
+      return ServerName.valueOf(hostAndPort,
+        Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
+    } catch (IllegalArgumentException e) {
+      LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e);
+      return null;
+    }
+  }
+
+  /**
+   * Returns the column qualifier for server column for replicaId
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for server column qualifier
+   */
+  public static byte[] getServerColumn(int replicaId) {
+    return replicaId == 0 ? HConstants.SERVER_QUALIFIER :
+      Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER +
+        String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /**
+   * Returns the column qualifier for server start code column for replicaId
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for server start code column qualifier
+   */
+  public static byte[] getStartCodeColumn(int replicaId) {
+    return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER :
+      Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER +
+        String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /**
+   * The latest seqnum that the server writing to meta observed when opening the region. E.g. the
+   * seqNum when the result of {@link getServerName} was written.
+   * @param r Result to pull the seqNum from
+   * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
+   */
+  private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
+    Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId));
+    if (cell == null || cell.getValueLength() == 0) {
+      return HConstants.NO_SEQNUM;
+    }
+    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+  }
+
+  /**
+   * Returns the column qualifier for seqNum column for replicaId
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for seqNum column qualifier
+   */
+  public static byte[] getSeqNumColumn(int replicaId) {
+    return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER :
+      Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER +
+        String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /** The delimiter for meta columns for replicaIds > 0 */
+  @VisibleForTesting
+  static final char META_REPLICA_ID_DELIMITER = '_';
+
+  /**
+   * Parses the replicaId from the server column qualifier. See top of the class javadoc for the
+   * actual meta layout
+   * @param serverColumn the column qualifier
+   * @return an int for the replicaId
+   */
+  @VisibleForTesting
+  static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
+    String serverStr = Bytes.toString(serverColumn);
+
+    Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
+    if (matcher.matches() && matcher.groupCount() > 0) {
+      String group = matcher.group(1);
+      if (group != null && group.length() > 0) {
+        return Integer.parseInt(group.substring(1), 16);
+      } else {
+        return 0;
+      }
+    }
+    return -1;
+  }
+
+  /** Returns the row key to use for this regionInfo */
+  public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) {
+    return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
+  }
+
+  /**
+   * Returns the column qualifier for serialized region state
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for state qualifier
+   */
+  @VisibleForTesting
+  static byte[] getRegionStateColumn(int replicaId) {
+    return replicaId == 0 ? HConstants.STATE_QUALIFIER :
+      Bytes.toBytes(HConstants.STATE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER +
+        String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /**
+   * Returns the column qualifier for serialized region state
+   * @param replicaId the replicaId of the region
+   * @return a byte[] for sn column qualifier
+   */
+  public static byte[] getServerNameColumn(int replicaId) {
+    return replicaId == 0 ? HConstants.SERVERNAME_QUALIFIER :
+      Bytes.toBytes(HConstants.SERVERNAME_QUALIFIER_STR + META_REPLICA_ID_DELIMITER +
+        String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
+  }
+
+  /**
+   * Decode table state from META Result. Should contain cell from HConstants.TABLE_FAMILY
+   * @return null if not found
+   */
+  @Nullable
+  public static TableState getTableState(Result r) throws IOException {
+    Cell cell = r.getColumnLatestCell(HConstants.TABLE_FAMILY, HConstants.TABLE_STATE_QUALIFIER);
+    if (cell == null) {
+      return null;
+    }
+    try {
+      return TableState.parseFrom(TableName.valueOf(r.getRow()),
+        Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(),
+          cell.getValueOffset() + cell.getValueLength()));
+    } catch (DeserializationException e) {
+      throw new IOException(e);
+    }
+  }
+}
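CatalogFamilyFormat is a stateless helper: every method either builds an info:* qualifier or parses one out of a meta Result. A hedged sketch of reading region locations straight from hbase:meta with it, assuming a running cluster and an ordinary client Connection; only the parsing call comes from the new class, the scan plumbing is the standard client API:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class CatalogFamilyFormatExample {

  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection();
        Table meta = conn.getTable(TableName.META_TABLE_NAME);
        ResultScanner scanner = meta.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY))) {
      for (Result row : scanner) {
        // All parsing of info:* cells now goes through CatalogFamilyFormat.
        RegionLocations locations = CatalogFamilyFormat.getRegionLocations(row);
        if (locations == null) {
          continue; // row without a deserializable regioninfo cell
        }
        for (HRegionLocation loc : locations.getRegionLocations()) {
          if (loc == null) {
            continue; // replica with no assigned server yet
          }
          ServerName server = loc.getServerName();
          System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + server);
        }
      }
    }
  }
}
```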
AsyncMetaTableAccessor.java (renamed to ClientMetaTableAccessor.java):

@@ -19,33 +19,23 @@ package org.apache.hadoop.hbase;

 import static org.apache.hadoop.hbase.util.FutureUtils.addListener;

 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
 import java.util.Optional;
-import java.util.SortedMap;
 import java.util.concurrent.CompletableFuture;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-import org.apache.hadoop.hbase.MetaTableAccessor.CollectingVisitor;
-import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
 import org.apache.hadoop.hbase.client.AsyncTable;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -53,22 +43,34 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 /**
- * The asynchronous meta table accessor. Used to read/write region and assignment information store
- * in <code>hbase:meta</code>.
+ * The (asynchronous) meta table accessor used at client side. Used to read/write region and
+ * assignment information store in <code>hbase:meta</code>.
  * @since 2.0.0
+ * @see CatalogFamilyFormat
  */
 @InterfaceAudience.Private
-public class AsyncMetaTableAccessor {
+public final class ClientMetaTableAccessor {

-  private static final Logger LOG = LoggerFactory.getLogger(AsyncMetaTableAccessor.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ClientMetaTableAccessor.class);

+  private ClientMetaTableAccessor() {
+  }

-  /** The delimiter for meta columns for replicaIds > 0 */
-  private static final char META_REPLICA_ID_DELIMITER = '_';
+  @InterfaceAudience.Private
+  public enum QueryType {
+    ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), REGION(HConstants.CATALOG_FAMILY),
+    TABLE(HConstants.TABLE_FAMILY), REPLICATION(HConstants.REPLICATION_BARRIER_FAMILY);
+
-  /** A regex for parsing server columns from meta. See above javadoc for meta layout */
-  private static final Pattern SERVER_COLUMN_PATTERN = Pattern
-    .compile("^server(_[0-9a-fA-F]{4})?$");
+    private final byte[][] families;
+
+    QueryType(byte[]... families) {
+      this.families = families;
+    }
+
+    byte[][] getFamilies() {
+      return this.families;
+    }
+  }

   public static CompletableFuture<Boolean> tableExists(AsyncTable<?> metaTable,
     TableName tableName) {
@@ -78,7 +80,8 @@ public class AsyncMetaTableAccessor {
   public static CompletableFuture<Optional<TableState>> getTableState(AsyncTable<?> metaTable,
     TableName tableName) {
     CompletableFuture<Optional<TableState>> future = new CompletableFuture<>();
-    Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn());
+    Get get = new Get(tableName.getName()).addColumn(HConstants.TABLE_FAMILY,
+      HConstants.TABLE_STATE_QUALIFIER);
     addListener(metaTable.get(get), (result, error) -> {
       if (error != null) {
         future.completeExceptionally(error);
@@ -99,12 +102,12 @@ public class AsyncMetaTableAccessor {
    * @param regionName region we're looking for
    * @return HRegionLocation for the given region
    */
-  public static CompletableFuture<Optional<HRegionLocation>> getRegionLocation(
-    AsyncTable<?> metaTable, byte[] regionName) {
+  public static CompletableFuture<Optional<HRegionLocation>>
+    getRegionLocation(AsyncTable<?> metaTable, byte[] regionName) {
     CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
     try {
-      RegionInfo parsedRegionInfo = MetaTableAccessor.parseRegionInfoFromRegionName(regionName);
-      addListener(metaTable.get(new Get(MetaTableAccessor.getMetaKeyForRegion(parsedRegionInfo))
+      RegionInfo parsedRegionInfo = CatalogFamilyFormat.parseRegionInfoFromRegionName(regionName);
+      addListener(metaTable.get(new Get(CatalogFamilyFormat.getMetaKeyForRegion(parsedRegionInfo))
        .addFamily(HConstants.CATALOG_FAMILY)), (r, err) -> {
          if (err != null) {
            future.completeExceptionally(err);
@@ -126,8 +129,8 @@ public class AsyncMetaTableAccessor {
    * @param encodedRegionName region we're looking for
    * @return HRegionLocation for the given region
    */
-  public static CompletableFuture<Optional<HRegionLocation>> getRegionLocationWithEncodedName(
-    AsyncTable<?> metaTable, byte[] encodedRegionName) {
+  public static CompletableFuture<Optional<HRegionLocation>>
+    getRegionLocationWithEncodedName(AsyncTable<?> metaTable, byte[] encodedRegionName) {
     CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
     addListener(
       metaTable
@@ -139,7 +142,7 @@ public class AsyncMetaTableAccessor {
          }
          String encodedRegionNameStr = Bytes.toString(encodedRegionName);
          results.stream().filter(result -> !result.isEmpty())
-            .filter(result -> MetaTableAccessor.getRegionInfo(result) != null).forEach(result -> {
+            .filter(result -> CatalogFamilyFormat.getRegionInfo(result) != null).forEach(result -> {
              getRegionLocations(result).ifPresent(locations -> {
                for (HRegionLocation location : locations.getRegionLocations()) {
                  if (location != null &&
@@ -156,16 +159,7 @@ public class AsyncMetaTableAccessor {
   }

   private static Optional<TableState> getTableState(Result r) throws IOException {
-    Cell cell = r.getColumnLatestCell(getTableFamily(), getStateColumn());
-    if (cell == null) return Optional.empty();
-    try {
-      return Optional.of(TableState.parseFrom(
-        TableName.valueOf(r.getRow()),
-        Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(), cell.getValueOffset()
-          + cell.getValueLength())));
-    } catch (DeserializationException e) {
-      throw new IOException("Failed to parse table state from result: " + r, e);
-    }
+    return Optional.ofNullable(CatalogFamilyFormat.getTableState(r));
   }

   /**
@@ -202,8 +196,8 @@ public class AsyncMetaTableAccessor {
    * {@link CompletableFuture}.
    */
   private static CompletableFuture<List<Pair<RegionInfo, ServerName>>> getTableRegionsAndLocations(
-    final AsyncTable<AdvancedScanResultConsumer> metaTable,
-    final TableName tableName, final boolean excludeOfflinedSplitParents) {
+    final AsyncTable<AdvancedScanResultConsumer> metaTable, final TableName tableName,
+    final boolean excludeOfflinedSplitParents) {
     CompletableFuture<List<Pair<RegionInfo, ServerName>>> future = new CompletableFuture<>();
     if (TableName.META_TABLE_NAME.equals(tableName)) {
       future.completeExceptionally(new IOException(
@@ -211,37 +205,8 @@ public class AsyncMetaTableAccessor {
     }

-    // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress
-    CollectingVisitor<Pair<RegionInfo, ServerName>> visitor =
-      new CollectingVisitor<Pair<RegionInfo, ServerName>>() {
-        private RegionLocations current = null;
-
-        @Override
-        public boolean visit(Result r) throws IOException {
-          Optional<RegionLocations> currentRegionLocations = getRegionLocations(r);
-          current = currentRegionLocations.orElse(null);
-          if (current == null || current.getRegionLocation().getRegion() == null) {
-            LOG.warn("No serialized RegionInfo in " + r);
-            return true;
-          }
-          RegionInfo hri = current.getRegionLocation().getRegion();
-          if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
-          // Else call super and add this Result to the collection.
-          return super.visit(r);
-        }
-
-        @Override
-        void add(Result r) {
-          if (current == null) {
-            return;
-          }
-          for (HRegionLocation loc : current.getRegionLocations()) {
-            if (loc != null) {
-              this.results.add(new Pair<RegionInfo, ServerName>(loc.getRegion(), loc
-                .getServerName()));
-            }
-          }
-        }
-      };
+    CollectRegionLocationsVisitor visitor =
+      new CollectRegionLocationsVisitor(excludeOfflinedSplitParents);

     addListener(scanMeta(metaTable, tableName, QueryType.REGION, visitor), (v, error) -> {
       if (error != null) {
@@ -290,9 +255,9 @@ public class AsyncMetaTableAccessor {
     }

     if (LOG.isDebugEnabled()) {
-      LOG.debug("Scanning META" + " starting at row=" + Bytes.toStringBinary(scan.getStartRow())
-        + " stopping at row=" + Bytes.toStringBinary(scan.getStopRow()) + " for max="
-        + rowUpperLimit + " with caching=" + scan.getCaching());
+      LOG.debug("Scanning META" + " starting at row=" + Bytes.toStringBinary(scan.getStartRow()) +
+        " stopping at row=" + Bytes.toStringBinary(scan.getStopRow()) + " for max=" +
+        rowUpperLimit + " with caching=" + scan.getCaching());
     }

     CompletableFuture<Void> future = new CompletableFuture<Void>();
@@ -355,6 +320,98 @@ public class AsyncMetaTableAccessor {
     }
   }

+  /**
+   * Implementations 'visit' a catalog table row.
+   */
+  public interface Visitor {
+    /**
+     * Visit the catalog table row.
+     * @param r A row from catalog table
+     * @return True if we are to proceed scanning the table, else false if we are to stop now.
+     */
+    boolean visit(final Result r) throws IOException;
+  }
+
+  /**
+   * Implementations 'visit' a catalog table row but with close() at the end.
+   */
+  public interface CloseableVisitor extends Visitor, Closeable {
+  }
+
+  /**
+   * A {@link Visitor} that collects content out of passed {@link Result}.
+   */
+  private static abstract class CollectingVisitor<T> implements Visitor {
+    final List<T> results = new ArrayList<>();
+
+    @Override
+    public boolean visit(Result r) throws IOException {
+      if (r != null && !r.isEmpty()) {
+        add(r);
+      }
+      return true;
+    }
+
+    abstract void add(Result r);
+
+    /**
+     * @return Collected results; wait till visits complete to collect all possible results
+     */
+    List<T> getResults() {
+      return this.results;
+    }
+  }
+
+  static class CollectRegionLocationsVisitor
+    extends CollectingVisitor<Pair<RegionInfo, ServerName>> {
+
+    private final boolean excludeOfflinedSplitParents;
+
+    private RegionLocations current = null;
+
+    CollectRegionLocationsVisitor(boolean excludeOfflinedSplitParents) {
+      this.excludeOfflinedSplitParents = excludeOfflinedSplitParents;
+    }
+
+    @Override
+    public boolean visit(Result r) throws IOException {
+      Optional<RegionLocations> currentRegionLocations = getRegionLocations(r);
+      current = currentRegionLocations.orElse(null);
+      if (current == null || current.getRegionLocation().getRegion() == null) {
+        LOG.warn("No serialized RegionInfo in " + r);
+        return true;
+      }
+      RegionInfo hri = current.getRegionLocation().getRegion();
+      if (excludeOfflinedSplitParents && hri.isSplitParent()) {
+        return true;
+      }
+      // Else call super and add this Result to the collection.
+      return super.visit(r);
+    }
+
+    @Override
+    void add(Result r) {
+      if (current == null) {
+        return;
+      }
+      for (HRegionLocation loc : current.getRegionLocations()) {
+        if (loc != null) {
+          this.results.add(new Pair<RegionInfo, ServerName>(loc.getRegion(), loc.getServerName()));
+        }
+      }
+    }
+  }
+
+  /**
+   * Collects all returned.
+   */
+  static class CollectAllVisitor extends CollectingVisitor<Result> {
+    @Override
+    void add(Result r) {
+      this.results.add(r);
+    }
+  }
+
   private static Scan getMetaScan(AsyncTable<?> metaTable, int rowUpperLimit) {
     Scan scan = new Scan();
     int scannerCaching = metaTable.getConfiguration().getInt(HConstants.HBASE_META_SCANNER_CACHING,
@@ -376,102 +433,15 @@ public class AsyncMetaTableAccessor {
    * @return an HRegionLocationList containing all locations for the region range or null if we
    *         can't deserialize the result.
    */
-  private static Optional<RegionLocations> getRegionLocations(final Result r) {
-    if (r == null) return Optional.empty();
-    Optional<RegionInfo> regionInfo = getHRegionInfo(r, getRegionInfoColumn());
-    if (!regionInfo.isPresent()) return Optional.empty();
-
-    List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
-    NavigableMap<byte[], NavigableMap<byte[], byte[]>> familyMap = r.getNoVersionMap();
-
-    locations.add(getRegionLocation(r, regionInfo.get(), 0));
-
-    NavigableMap<byte[], byte[]> infoMap = familyMap.get(getCatalogFamily());
-    if (infoMap == null) return Optional.of(new RegionLocations(locations));
-
-    // iterate until all serverName columns are seen
-    int replicaId = 0;
-    byte[] serverColumn = getServerColumn(replicaId);
-    SortedMap<byte[], byte[]> serverMap = null;
-    serverMap = infoMap.tailMap(serverColumn, false);
-
-    if (serverMap.isEmpty()) return Optional.of(new RegionLocations(locations));
-
-    for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
-      replicaId = parseReplicaIdFromServerColumn(entry.getKey());
-      if (replicaId < 0) {
-        break;
-      }
-      HRegionLocation location = getRegionLocation(r, regionInfo.get(), replicaId);
-      // In case the region replica is newly created, it's location might be null. We usually do not
-      // have HRL's in RegionLocations object with null ServerName. They are handled as null HRLs.
-      if (location == null || location.getServerName() == null) {
-        locations.add(null);
-      } else {
-        locations.add(location);
-      }
-    }
-
-    return Optional.of(new RegionLocations(locations));
-  }
-
-  /**
-   * Returns the HRegionLocation parsed from the given meta row Result
-   * for the given regionInfo and replicaId. The regionInfo can be the default region info
-   * for the replica.
-   * @param r the meta row result
-   * @param regionInfo RegionInfo for default replica
-   * @param replicaId the replicaId for the HRegionLocation
-   * @return HRegionLocation parsed from the given meta row Result for the given replicaId
-   */
-  private static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo,
-    final int replicaId) {
-    Optional<ServerName> serverName = getServerName(r, replicaId);
-    long seqNum = getSeqNumDuringOpen(r, replicaId);
-    RegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
-    return new HRegionLocation(replicaInfo, serverName.orElse(null), seqNum);
-  }
-
-  /**
-   * Returns a {@link ServerName} from catalog table {@link Result}.
-   * @param r Result to pull from
-   * @return A ServerName instance.
-   */
-  private static Optional<ServerName> getServerName(final Result r, final int replicaId) {
-    byte[] serverColumn = getServerColumn(replicaId);
-    Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn);
-    if (cell == null || cell.getValueLength() == 0) return Optional.empty();
-    String hostAndPort = Bytes.toString(cell.getValueArray(), cell.getValueOffset(),
-      cell.getValueLength());
-    byte[] startcodeColumn = getStartCodeColumn(replicaId);
-    cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn);
-    if (cell == null || cell.getValueLength() == 0) return Optional.empty();
-    try {
-      return Optional.of(ServerName.valueOf(hostAndPort,
-        Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())));
-    } catch (IllegalArgumentException e) {
-      LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e);
-      return Optional.empty();
-    }
-  }
-
-  /**
-   * The latest seqnum that the server writing to meta observed when opening the region.
-   * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written.
-   * @param r Result to pull the seqNum from
-   * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
-   */
-  private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
-    Cell cell = r.getColumnLatestCell(getCatalogFamily(), getSeqNumColumn(replicaId));
-    if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
-    return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+  private static Optional<RegionLocations> getRegionLocations(Result r) {
+    return Optional.ofNullable(CatalogFamilyFormat.getRegionLocations(r));
   }

   /**
    * @param tableName table we're working with
    * @return start row for scanning META according to query type
    */
-  private static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
+  public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
     if (tableName == null) {
       return null;
     }
@@ -496,7 +466,7 @@ public class AsyncMetaTableAccessor {
    * @param tableName table we're working with
    * @return stop row for scanning META according to query type
    */
-  private static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
+  public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
     if (tableName == null) {
       return null;
     }
@@ -522,107 +492,4 @@ public class AsyncMetaTableAccessor {
     }
     return stopRow;
   }
-
-  /**
-   * Returns the RegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
-   * <code>qualifier</code> of the catalog table result.
-   * @param r a Result object from the catalog table scan
-   * @param qualifier Column family qualifier
-   * @return An RegionInfo instance.
-   */
-  private static Optional<RegionInfo> getHRegionInfo(final Result r, byte[] qualifier) {
-    Cell cell = r.getColumnLatestCell(getCatalogFamily(), qualifier);
-    if (cell == null) return Optional.empty();
-    return Optional.ofNullable(RegionInfo.parseFromOrNull(cell.getValueArray(),
-      cell.getValueOffset(), cell.getValueLength()));
-  }
-
-  /**
-   * Returns the column family used for meta columns.
-   * @return HConstants.CATALOG_FAMILY.
-   */
-  private static byte[] getCatalogFamily() {
-    return HConstants.CATALOG_FAMILY;
-  }
-
-  /**
-   * Returns the column family used for table columns.
-   * @return HConstants.TABLE_FAMILY.
-   */
-  private static byte[] getTableFamily() {
-    return HConstants.TABLE_FAMILY;
-  }
-
-  /**
-   * Returns the column qualifier for serialized region info
-   * @return HConstants.REGIONINFO_QUALIFIER
-   */
-  private static byte[] getRegionInfoColumn() {
-    return HConstants.REGIONINFO_QUALIFIER;
-  }
-
-  /**
-   * Returns the column qualifier for serialized table state
-   * @return HConstants.TABLE_STATE_QUALIFIER
-   */
-  private static byte[] getStateColumn() {
-    return HConstants.TABLE_STATE_QUALIFIER;
-  }
-
-  /**
-   * Returns the column qualifier for server column for replicaId
-   * @param replicaId the replicaId of the region
-   * @return a byte[] for server column qualifier
-   */
-  private static byte[] getServerColumn(int replicaId) {
-    return replicaId == 0
-      ? HConstants.SERVER_QUALIFIER
-      : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
-        + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
-  }
-
-  /**
-   * Returns the column qualifier for server start code column for replicaId
-   * @param replicaId the replicaId of the region
-   * @return a byte[] for server start code column qualifier
-   */
-  private static byte[] getStartCodeColumn(int replicaId) {
-    return replicaId == 0
-      ? HConstants.STARTCODE_QUALIFIER
-      : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
-        + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
-  }
-
-  /**
-   * Returns the column qualifier for seqNum column for replicaId
-   * @param replicaId the replicaId of the region
-   * @return a byte[] for seqNum column qualifier
-   */
-  private static byte[] getSeqNumColumn(int replicaId) {
-    return replicaId == 0
-      ? HConstants.SEQNUM_QUALIFIER
-      : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
-        + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId));
-  }
-
-  /**
-   * Parses the replicaId from the server column qualifier. See top of the class javadoc
-   * for the actual meta layout
-   * @param serverColumn the column qualifier
-   * @return an int for the replicaId
-   */
-  private static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
-    String serverStr = Bytes.toString(serverColumn);
-
-    Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
-    if (matcher.matches() && matcher.groupCount() > 0) {
-      String group = matcher.group(1);
-      if (group != null && group.length() > 0) {
-        return Integer.parseInt(group.substring(1), 16);
-      } else {
-        return 0;
-      }
-    }
-    return -1;
-  }
 }
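ClientMetaTableAccessor keeps the asynchronous, client-facing operations (tableExists, getTableState, getRegionLocation, getTableHRegionLocations) and delegates all cell parsing to CatalogFamilyFormat. A hedged sketch of driving it directly, assuming an AsyncConnection and a hypothetical table name; it mirrors the calls RawAsyncHBaseAdmin makes later in this diff:

```java
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableState;

public final class ClientMetaTableAccessorExample {

  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("my_table"); // hypothetical table name
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
      AsyncTable<AdvancedScanResultConsumer> meta = conn.getTable(TableName.META_TABLE_NAME);

      // Same entry points the async admin uses, just issued directly against hbase:meta.
      CompletableFuture<Boolean> exists = ClientMetaTableAccessor.tableExists(meta, table);
      System.out.println("exists: " + exists.join());

      ClientMetaTableAccessor.getTableState(meta, table)
        .thenAccept(state -> System.out.println("state: " + state.map(TableState::getState)))
        .join();
    }
  }
}
```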
AsyncNonMetaRegionLocator.java:

@@ -47,10 +47,10 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import org.apache.commons.lang3.ObjectUtils;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -315,7 +315,7 @@ class AsyncNonMetaRegionLocator {

   // return whether we should stop the scan
   private boolean onScanNext(TableName tableName, LocateRequest req, Result result) {
-    RegionLocations locs = MetaTableAccessor.getRegionLocations(result);
+    RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
     if (LOG.isDebugEnabled()) {
       LOG.debug("The fetched location of '{}', row='{}', locateType={} is {}", tableName,
         Bytes.toStringBinary(req.row), req.locateType, locs);
@@ -476,7 +476,7 @@ class AsyncNonMetaRegionLocator {
     if (i < results.length) {
       TableCache tableCache = getTableCache(tableName);
       for (; i < results.length; i++) {
-        RegionLocations locs = MetaTableAccessor.getRegionLocations(results[i]);
+        RegionLocations locs = CatalogFamilyFormat.getRegionLocations(results[i]);
         if (locs == null) {
           continue;
         }
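For the locator the change is purely mechanical: the same meta Result is parsed, only the class holding the parser moved. For any downstream code doing the same thing, the migration is (a sketch, assuming the Result comes from a meta scan):

```java
// before HBASE-24609
RegionLocations locs = MetaTableAccessor.getRegionLocations(result);
// after HBASE-24609 (same contract: null means the row could not be deserialized)
RegionLocations locs = CatalogFamilyFormat.getRegionLocations(result);
```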
AsyncTableRegionLocatorImpl.java:

@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.client;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
-import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -58,8 +58,8 @@ class AsyncTableRegionLocatorImpl implements AsyncTableRegionLocator {
       return conn.registry.getMetaRegionLocations()
         .thenApply(locs -> Arrays.asList(locs.getRegionLocations()));
     }
-    return AsyncMetaTableAccessor.getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME),
-      tableName);
+    return ClientMetaTableAccessor
+      .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName);
   }

   @Override
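End-user code normally does not call ClientMetaTableAccessor itself; the public route that ends up in getTableHRegionLocations above is the async region locator. A short sketch, assuming an async connection and a hypothetical table:

```java
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RegionLocatorExample {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
      // Internally this routes through ClientMetaTableAccessor.getTableHRegionLocations.
      List<HRegionLocation> locations =
        conn.getRegionLocator(TableName.valueOf("my_table")).getAllRegionLocations().join();
      locations.forEach(loc -> System.out.println(loc));
    }
  }
}
```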
RawAsyncHBaseAdmin.java:

@@ -46,16 +46,15 @@ import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.CacheEvictionStats;
 import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.ClusterMetrics;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.ClusterMetricsBuilder;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RegionMetrics;
@@ -507,7 +506,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
     if (TableName.isMetaTableName(tableName)) {
       return CompletableFuture.completedFuture(true);
     }
-    return AsyncMetaTableAccessor.tableExists(metaTable, tableName);
+    return ClientMetaTableAccessor.tableExists(metaTable, tableName);
   }

   @Override
@@ -722,7 +721,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       return CompletableFuture.completedFuture(true);
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
+    addListener(ClientMetaTableAccessor.getTableState(metaTable, tableName),
+      (tableState, error) -> {
        completeCheckTableState(future, tableState.isPresent() ? tableState.get() : null, error,
          TableState.State.ENABLED, tableName);
      });
@@ -735,7 +735,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       return CompletableFuture.completedFuture(false);
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
+    addListener(ClientMetaTableAccessor.getTableState(metaTable, tableName),
+      (tableState, error) -> {
        completeCheckTableState(future, tableState.isPresent() ? tableState.get() : null, error,
          TableState.State.DISABLED, tableName);
      });
@@ -762,7 +763,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       future.complete(false);
     } else {
       addListener(
-        AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName),
+        ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName),
         (locations, error1) -> {
           if (error1 != null) {
             future.completeExceptionally(error1);
@@ -887,7 +888,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
         .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion)
           .collect(Collectors.toList()));
     } else {
-      return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName)
+      return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName)
        .thenApply(
          locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList()));
     }
@@ -1123,7 +1124,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       return future;
     } else {
       // For non-meta table, we fetch all locations by scanning hbase:meta table
-      return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName);
+      return ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName);
     }
   }

@@ -1333,11 +1334,12 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
         future.completeExceptionally(new TableNotFoundException(tableName));
         return;
       }
-      addListener(
-        metaTable
+      addListener(metaTable
        .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)
-          .withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION))
-          .withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION))),
+          .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(tableName,
+            ClientMetaTableAccessor.QueryType.REGION))
+          .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(tableName,
+            ClientMetaTableAccessor.QueryType.REGION))),
        (results, err2) -> {
          if (err2 != null) {
            future.completeExceptionally(err2);
@@ -1346,10 +1348,10 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
          if (results != null && !results.isEmpty()) {
            List<CompletableFuture<Void>> splitFutures = new ArrayList<>();
            for (Result r : results) {
-              if (r.isEmpty() || MetaTableAccessor.getRegionInfo(r) == null) {
+              if (r.isEmpty() || CatalogFamilyFormat.getRegionInfo(r) == null) {
                continue;
              }
-              RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
+              RegionLocations rl = CatalogFamilyFormat.getRegionLocations(r);
              if (rl != null) {
                for (HRegionLocation h : rl.getRegionLocations()) {
                  if (h != null && h.getServerName() != null) {
@@ -2377,12 +2379,12 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
          .thenApply(locs -> Stream.of(locs.getRegionLocations())
            .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst());
       } else {
-        future = AsyncMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
+        future = ClientMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
          regionNameOrEncodedRegionName);
       }
     } else {
       RegionInfo regionInfo =
-        MetaTableAccessor.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName);
+        CatalogFamilyFormat.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName);
       if (regionInfo.isMetaRegion()) {
         future = connection.registry.getMetaRegionLocations()
           .thenApply(locs -> Stream.of(locs.getRegionLocations())
@@ -2390,7 +2392,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
             .findFirst());
       } else {
         future =
-          AsyncMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
+          ClientMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
       }
     }

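Two helpers that used to be private in AsyncMetaTableAccessor, getTableStartRowForMeta and getTableStopRowForMeta, become public here so that the admin can build its own meta scan bounds. A sketch of the same scan construction in isolation (the wrapper class and method name below are hypothetical):

```java
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;

public final class MetaScanBounds {
  /** Builds a scan over the hbase:meta rows of one table, as the admin code above does. */
  public static Scan regionRowsOf(TableName tableName) {
    byte[] start = ClientMetaTableAccessor.getTableStartRowForMeta(tableName,
      ClientMetaTableAccessor.QueryType.REGION);
    byte[] stop = ClientMetaTableAccessor.getTableStopRowForMeta(tableName,
      ClientMetaTableAccessor.QueryType.REGION);
    return new Scan().withStartRow(start).withStopRow(stop)
      .addFamily(HConstants.CATALOG_FAMILY);
  }
}
```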
TestCatalogFamilyFormat.java (new file):

@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ClientTests.class, SmallTests.class })
+public class TestCatalogFamilyFormat {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestCatalogFamilyFormat.class);
+
+  @Test
+  public void testParseReplicaIdFromServerColumn() {
+    String column1 = HConstants.SERVER_QUALIFIER_STR;
+    assertEquals(0, CatalogFamilyFormat.parseReplicaIdFromServerColumn(Bytes.toBytes(column1)));
+    String column2 = column1 + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER;
+    assertEquals(-1, CatalogFamilyFormat.parseReplicaIdFromServerColumn(Bytes.toBytes(column2)));
+    String column3 = column2 + "00";
+    assertEquals(-1, CatalogFamilyFormat.parseReplicaIdFromServerColumn(Bytes.toBytes(column3)));
+    String column4 = column3 + "2A";
+    assertEquals(42, CatalogFamilyFormat.parseReplicaIdFromServerColumn(Bytes.toBytes(column4)));
+    String column5 = column4 + "2A";
+    assertEquals(-1, CatalogFamilyFormat.parseReplicaIdFromServerColumn(Bytes.toBytes(column5)));
+    String column6 = HConstants.STARTCODE_QUALIFIER_STR;
+    assertEquals(-1, CatalogFamilyFormat.parseReplicaIdFromServerColumn(Bytes.toBytes(column6)));
+  }
+
+  @Test
+  public void testMetaReaderGetColumnMethods() {
+    assertArrayEquals(HConstants.SERVER_QUALIFIER, CatalogFamilyFormat.getServerColumn(0));
+    assertArrayEquals(
+      Bytes.toBytes(
+        HConstants.SERVER_QUALIFIER_STR + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"),
+      CatalogFamilyFormat.getServerColumn(42));
+
+    assertArrayEquals(HConstants.STARTCODE_QUALIFIER, CatalogFamilyFormat.getStartCodeColumn(0));
+    assertArrayEquals(
+      Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR +
+        CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"),
+      CatalogFamilyFormat.getStartCodeColumn(42));
+
+    assertArrayEquals(HConstants.SEQNUM_QUALIFIER, CatalogFamilyFormat.getSeqNumColumn(0));
+    assertArrayEquals(
+      Bytes.toBytes(
+        HConstants.SEQNUM_QUALIFIER_STR + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER + "002A"),
+      CatalogFamilyFormat.getSeqNumColumn(42));
+  }
+}
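The expected strings in this test also document the qualifier encoding: the replica id is appended to the base qualifier after the '_' delimiter as four upper-case hex digits, so replica 42 (0x2A) yields server_002A. A tiny worked derivation, assuming the four-digit hex rendering that the assertions imply:

```java
// 42 decimal == 0x2A; the test expects getServerColumn(42) to equal Bytes.toBytes("server_002A").
String suffix = String.format("%04X", 42); // "002A" (format string assumed from the expected value)
String qualifier = "server" + '_' + suffix; // "server_002A"
```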
CatalogJanitor.java:

@@ -31,6 +31,8 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -537,7 +539,7 @@ public class CatalogJanitor extends ScheduledChore {
    * generate more report. Report is NOT ready until after this visitor has been
    * {@link #close()}'d.
    */
-  static class ReportMakingVisitor implements MetaTableAccessor.CloseableVisitor {
+  static class ReportMakingVisitor implements ClientMetaTableAccessor.CloseableVisitor {
     private final MasterServices services;
     private volatile boolean closed;

@@ -615,10 +617,10 @@ public class CatalogJanitor extends ScheduledChore {
       // If locations is null, ensure the regioninfo is for sure empty before progressing.
       // If really empty, report as missing regioninfo! Otherwise, can run server check
       // and get RegionInfo from locations.
-      RegionLocations locations = MetaTableAccessor.getRegionLocations(metaTableRow);
+      RegionLocations locations = CatalogFamilyFormat.getRegionLocations(metaTableRow);
       if (locations == null) {
-        ri = MetaTableAccessor.getRegionInfo(metaTableRow,
-          MetaTableAccessor.getRegionInfoColumn());
+        ri = CatalogFamilyFormat.getRegionInfo(metaTableRow,
+          HConstants.REGIONINFO_QUALIFIER);
       } else {
         ri = locations.getDefaultRegionLocation().getRegion();
         checkServer(locations);
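ReportMakingVisitor now implements ClientMetaTableAccessor.CloseableVisitor, i.e. a visitor whose accumulated results only become meaningful once the scan has closed it. A minimal sketch of that shape, with hypothetical names:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;

/** Minimal shape of a close-aware meta visitor, like ReportMakingVisitor above. */
final class CountingCloseableVisitor implements ClientMetaTableAccessor.CloseableVisitor {

  private int rows;
  private boolean closed;

  @Override
  public boolean visit(Result r) throws IOException {
    rows++;      // accumulate while the meta scan runs
    return true; // false would stop the scan early
  }

  @Override
  public void close() throws IOException {
    closed = true; // callers should only read results after the scan has called close()
  }

  int getRows() {
    if (!closed) {
      throw new IllegalStateException("scan not finished");
    }
    return rows;
  }
}
```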
@ -26,6 +26,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReadWriteLock;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

@ -133,10 +135,10 @@ public class TableStateManager {
Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
// Only be called in region normalizer, will not use cache.
final Set<TableName> rv = Sets.newHashSet();
MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() {
MetaTableAccessor.fullScanTables(master.getConnection(), new ClientMetaTableAccessor.Visitor() {
@Override
public boolean visit(Result r) throws IOException {
TableState tableState = MetaTableAccessor.getTableState(r);
TableState tableState = CatalogFamilyFormat.getTableState(r);
if (tableState != null && tableState.inStates(states)) {
rv.add(tableState.getTableName());
}

@ -212,10 +214,10 @@ public class TableStateManager {
throws IOException {
Map<String, TableState> states = new HashMap<>();
// NOTE: Full hbase:meta table scan!
MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
MetaTableAccessor.fullScanTables(connection, new ClientMetaTableAccessor.Visitor() {
@Override
public boolean visit(Result r) throws IOException {
TableState state = MetaTableAccessor.getTableState(r);
TableState state = CatalogFamilyFormat.getTableState(r);
states.put(state.getTableName().getNameAsString(), state);
return true;
}
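Note (editor's illustration, not part of the commit): every hunk in this file follows the same migration pattern — the scan entry point stays on MetaTableAccessor, the row callback moves to ClientMetaTableAccessor.Visitor, and cell parsing moves to CatalogFamilyFormat. A minimal sketch of the post-patch usage, assuming only the APIs that appear in the hunks above and a connection supplied by the surrounding class:

    // Sketch: collect the names of all tables that have a state cell in hbase:meta.
    final Set<TableName> tables = new HashSet<>();
    MetaTableAccessor.fullScanTables(connection, new ClientMetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result r) throws IOException {
        TableState state = CatalogFamilyFormat.getTableState(r); // parse the state cell, may be null
        if (state != null) {
          tables.add(state.getTableName());
        }
        return true; // keep scanning
      }
    });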
@ -34,10 +34,10 @@ import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;

@ -1123,7 +1123,7 @@ public class AssignmentManager {
*/
private void closeRegionSilently(ServerName sn, byte [] regionName) {
try {
RegionInfo ri = MetaTableAccessor.parseRegionInfoFromRegionName(regionName);
RegionInfo ri = CatalogFamilyFormat.parseRegionInfoFromRegionName(regionName);
// Pass -1 for timeout. Means do not wait.
ServerManager.closeRegionSilentlyAndWait(this.master.getAsyncClusterConnection(), sn, ri, -1);
} catch (Exception e) {
@ -22,10 +22,11 @@ import java.util.Collections;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;

@ -75,7 +76,8 @@ public class RegionStateStore {
}

public void visitMeta(final RegionStateVisitor visitor) throws IOException {
MetaTableAccessor.fullScanRegions(master.getConnection(), new MetaTableAccessor.Visitor() {
MetaTableAccessor.fullScanRegions(master.getConnection(),
new ClientMetaTableAccessor.Visitor() {
final boolean isDebugEnabled = LOG.isDebugEnabled();

@Override

@ -117,7 +119,7 @@ public class RegionStateStore {

private void visitMetaEntry(final RegionStateVisitor visitor, final Result result)
throws IOException {
final RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
final RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result);
if (rl == null) return;

final HRegionLocation[] locations = rl.getRegionLocations();

@ -176,7 +178,7 @@ public class RegionStateStore {
long pid) throws IOException {
long time = EnvironmentEdgeManager.currentTime();
final int replicaId = regionInfo.getReplicaId();
final Put put = new Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo), time);
final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time);
MetaTableAccessor.addRegionInfo(put, regionInfo);
final StringBuilder info =
new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")

@ -199,7 +201,7 @@ public class RegionStateStore {
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
.setRow(put.getRow())
.setFamily(HConstants.CATALOG_FAMILY)
.setQualifier(MetaTableAccessor.getServerNameColumn(replicaId))
.setQualifier(CatalogFamilyFormat.getServerNameColumn(replicaId))
.setTimestamp(put.getTimestamp())
.setType(Cell.Type.Put)
.setValue(Bytes.toBytes(regionLocation.getServerName()))
@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master.procedure;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;

@ -184,7 +185,7 @@ public class EnableTableProcedure
for (int i = 1; i < regionReplicaCount; i++) {
// Since we have already added the entries to the META we will be getting only that here
List<Cell> columnCells =
r.getColumnCells(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i));
r.getColumnCells(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i));
if (!columnCells.isEmpty()) {
replicasFound++;
}
@ -21,7 +21,8 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;

@ -112,10 +113,10 @@ public class HBCKServerCrashProcedure extends ServerCrashProcedure {
}

/**
* Visitor for hbase:meta that 'fixes' Unknown Server issues. Collects
* a List of Regions to reassign as 'result'.
* Visitor for hbase:meta that 'fixes' Unknown Server issues. Collects a List of Regions to
* reassign as 'result'.
*/
private static class UnknownServerVisitor implements MetaTableAccessor.Visitor {
private static final class UnknownServerVisitor implements ClientMetaTableAccessor.Visitor {
private final List<RegionInfo> reassigns = new ArrayList<>();
private final ServerName unknownServerName;
private final Connection connection;

@ -127,7 +128,7 @@ public class HBCKServerCrashProcedure extends ServerCrashProcedure {

@Override
public boolean visit(Result result) throws IOException {
RegionLocations rls = MetaTableAccessor.getRegionLocations(result);
RegionLocations rls = CatalogFamilyFormat.getRegionLocations(result);
if (rls == null) {
return true;
}
@ -27,6 +27,7 @@ import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;

@ -89,7 +90,7 @@ public final class RegionReplicaInfo {
return Collections.singletonList(null);
}

final RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
final RegionLocations locations = CatalogFamilyFormat.getRegionLocations(result);
if (locations == null) {
return Collections.singletonList(null);
}
@ -56,7 +56,6 @@ import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;

@ -68,8 +67,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseConfiguration;

@ -2052,12 +2053,12 @@ public class HBaseFsck extends Configured implements Closeable {
if (hi.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
int numReplicas = admin.getDescriptor(hi.getTableName()).getRegionReplication();
for (int i = 0; i < numReplicas; i++) {
get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(i));
get.addColumn(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(i));
get.addColumn(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(i));
get.addColumn(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(i));
}
}
Result r = meta.get(get);
RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
RegionLocations rl = CatalogFamilyFormat.getRegionLocations(r);
if (rl == null) {
LOG.warn("Unable to close region " + hi.getRegionNameAsString() +
" since meta does not have handle to reach it");

@ -2734,7 +2735,7 @@ public class HBaseFsck extends Configured implements Closeable {
* @throws IOException if an error is encountered
*/
boolean loadMetaEntries() throws IOException {
MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
int countRecord = 1;

// comparator to sort KeyValues with latest modtime

@ -2751,7 +2752,7 @@ public class HBaseFsck extends Configured implements Closeable {

// record the latest modification of this META record
long ts = Collections.max(result.listCells(), comp).getTimestamp();
RegionLocations rl = MetaTableAccessor.getRegionLocations(result);
RegionLocations rl = CatalogFamilyFormat.getRegionLocations(result);
if (rl == null) {
emptyRegionInfoQualifiers.add(result);
errors.reportError(ERROR_CODE.EMPTY_META_CELL,

@ -3896,10 +3897,10 @@ public class HBaseFsck extends Configured implements Closeable {
barrierScan.setCaching(100);
barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
barrierScan
.withStartRow(MetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable,
MetaTableAccessor.QueryType.REGION))
.withStopRow(MetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable,
MetaTableAccessor.QueryType.REGION));
.withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(cleanReplicationBarrierTable,
ClientMetaTableAccessor.QueryType.REGION))
.withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(cleanReplicationBarrierTable,
ClientMetaTableAccessor.QueryType.REGION));
Result result;
try (ResultScanner scanner = meta.getScanner(barrierScan)) {
while ((result = scanner.next()) != null) {
@ -2643,7 +2643,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
List<byte[]> rows = new ArrayList<>();
ResultScanner s = t.getScanner(new Scan());
for (Result result : s) {
RegionInfo info = MetaTableAccessor.getRegionInfo(result);
RegionInfo info = CatalogFamilyFormat.getRegionInfo(result);
if (info == null) {
LOG.error("No region info for row " + Bytes.toString(result.getRow()));
// TODO figure out what to do for this new hosed case.

@ -3268,13 +3268,13 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
@Nullable
public TableState findLastTableState(final TableName table) throws IOException {
final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
@Override
public boolean visit(Result r) throws IOException {
if (!Arrays.equals(r.getRow(), table.getName())) {
return false;
}
TableState state = MetaTableAccessor.getTableState(r);
TableState state = CatalogFamilyFormat.getTableState(r);
if (state != null) {
lastTableState.set(state);
}

@ -3282,7 +3282,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
}
};
MetaTableAccessor.scanMeta(asyncConnection.toConnection(), null, null,
MetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor);
ClientMetaTableAccessor.QueryType.TABLE, Integer.MAX_VALUE, visitor);
return lastTableState.get();
}
@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;

@ -38,7 +37,6 @@ import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

@ -97,7 +95,8 @@ public class TestMetaTableAccessor {
@Rule
public TestName name = new TestName();

@BeforeClass public static void beforeClass() throws Exception {
@BeforeClass
public static void beforeClass() throws Exception {
UTIL.startMiniCluster(3);

Configuration c = new Configuration(UTIL.getConfiguration());

@ -108,7 +107,8 @@ public class TestMetaTableAccessor {
connection = ConnectionFactory.createConnection(c);
}

@AfterClass public static void afterClass() throws Exception {
@AfterClass
public static void afterClass() throws Exception {
connection.close();
UTIL.shutdownMiniCluster();
}
@ -162,8 +162,8 @@ public class TestMetaTableAccessor {
int limit = 10;
byte[] previous = HConstants.EMPTY_START_ROW;
for (int i = 0; i < limit; i++) {
RegionInfo ri = RegionInfoBuilder.newBuilder(tn).
setStartKey(previous).setEndKey(Bytes.toBytes(i)).build();
RegionInfo ri =
RegionInfoBuilder.newBuilder(tn).setStartKey(previous).setEndKey(Bytes.toBytes(i)).build();
ris.add(ri);
}
put = MetaTableAccessor.addMergeRegions(put, ris);

@ -195,12 +195,11 @@ public class TestMetaTableAccessor {
}

/**
* Does {@link MetaTableAccessor#getRegion(Connection, byte[])} and a write
* against hbase:meta while its hosted server is restarted to prove our retrying
* works.
* Does {@link MetaTableAccessor#getRegion(Connection, byte[])} and a write against hbase:meta
* while its hosted server is restarted to prove our retrying works.
*/
@Test public void testRetrying()
throws IOException, InterruptedException {
@Test
public void testRetrying() throws IOException, InterruptedException {
final TableName tableName = TableName.valueOf(name.getMethodName());
LOG.info("Started " + tableName);
Table t = UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);

@ -209,8 +208,7 @@ public class TestMetaTableAccessor {
regionCount = r.getStartKeys().length;
}
// Test it works getting a region from just made user table.
final List<RegionInfo> regions =
testGettingTableRegions(connection, tableName, regionCount);
final List<RegionInfo> regions = testGettingTableRegions(connection, tableName, regionCount);
MetaTask reader = new MetaTask(connection, "reader") {
@Override
void metaTask() throws Throwable {

@ -247,8 +245,7 @@ public class TestMetaTableAccessor {
int index = -1;
do {
index = UTIL.getMiniHBaseCluster().getServerWithMeta();
} while (index == -1 &&
startTime + timeOut < System.currentTimeMillis());
} while (index == -1 && startTime + timeOut < System.currentTimeMillis());

if (index != -1) {
UTIL.getMiniHBaseCluster().abortRegionServer(index);

@ -313,8 +310,7 @@ public class TestMetaTableAccessor {

@Override
public String toString() {
return "count=" + this.count + ", t=" +
(this.t == null? "null": this.t.toString());
return "count=" + this.count + ", t=" + (this.t == null ? "null" : this.t.toString());
}

abstract void metaTask() throws Throwable;
@ -324,11 +320,11 @@ public class TestMetaTableAccessor {
public void testGetRegionsFromMetaTable() throws IOException, InterruptedException {
List<RegionInfo> regions = MetaTableLocator.getMetaRegions(UTIL.getZooKeeperWatcher());
assertTrue(regions.size() >= 1);
assertTrue(
MetaTableLocator.getMetaRegionsAndLocations(UTIL.getZooKeeperWatcher()).size() >= 1);
assertTrue(MetaTableLocator.getMetaRegionsAndLocations(UTIL.getZooKeeperWatcher()).size() >= 1);
}

@Test public void testTableExists() throws IOException {
@Test
public void testTableExists() throws IOException {
final TableName tableName = TableName.valueOf(name.getMethodName());
assertFalse(MetaTableAccessor.tableExists(connection, tableName));
UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);

@ -337,8 +333,7 @@ public class TestMetaTableAccessor {
admin.disableTable(tableName);
admin.deleteTable(tableName);
assertFalse(MetaTableAccessor.tableExists(connection, tableName));
assertTrue(MetaTableAccessor.tableExists(connection,
TableName.META_TABLE_NAME));
assertTrue(MetaTableAccessor.tableExists(connection, TableName.META_TABLE_NAME));
UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
assertTrue(MetaTableAccessor.tableExists(connection, tableName));
admin.disableTable(tableName);

@ -346,7 +341,8 @@ public class TestMetaTableAccessor {
assertFalse(MetaTableAccessor.tableExists(connection, tableName));
}

@Test public void testGetRegion() throws IOException, InterruptedException {
@Test
public void testGetRegion() throws IOException, InterruptedException {
final String name = this.name.getMethodName();
LOG.info("Started " + name);
// Test get on non-existent region.

@ -357,20 +353,18 @@ public class TestMetaTableAccessor {
}

// Test for the optimization made in HBASE-3650
@Test public void testScanMetaForTable()
throws IOException, InterruptedException {
@Test
public void testScanMetaForTable() throws IOException, InterruptedException {
final TableName tableName = TableName.valueOf(name.getMethodName());
LOG.info("Started " + tableName);

/** Create 2 tables
- testScanMetaForTable
- testScanMetaForTablf
/**
* Create 2 tables - testScanMetaForTable - testScanMetaForTablf
**/

UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
// name that is +1 greater than the first one (e+1=f)
TableName greaterName =
TableName.valueOf("testScanMetaForTablf");
TableName greaterName = TableName.valueOf("testScanMetaForTablf");
UTIL.createTable(greaterName, HConstants.CATALOG_FAMILY);

// Now make sure we only get the regions from 1 of the tables at a time
@ -380,66 +374,20 @@ public class TestMetaTableAccessor {
}

private static List<RegionInfo> testGettingTableRegions(final Connection connection,
final TableName name, final int regionCount)
throws IOException, InterruptedException {
final TableName name, final int regionCount) throws IOException, InterruptedException {
List<RegionInfo> regions = MetaTableAccessor.getTableRegions(connection, name);
assertEquals(regionCount, regions.size());
Pair<RegionInfo, ServerName> pair =
MetaTableAccessor.getRegion(connection, regions.get(0).getRegionName());
assertEquals(regions.get(0).getEncodedName(),
pair.getFirst().getEncodedName());
assertEquals(regions.get(0).getEncodedName(), pair.getFirst().getEncodedName());
return regions;
}

private static void testGetRegion(final Connection connection,
final RegionInfo region)
private static void testGetRegion(final Connection connection, final RegionInfo region)
throws IOException, InterruptedException {
Pair<RegionInfo, ServerName> pair =
MetaTableAccessor.getRegion(connection, region.getRegionName());
assertEquals(region.getEncodedName(),
pair.getFirst().getEncodedName());
}

@Test
public void testParseReplicaIdFromServerColumn() {
String column1 = HConstants.SERVER_QUALIFIER_STR;
assertEquals(0,
MetaTableAccessor.parseReplicaIdFromServerColumn(Bytes.toBytes(column1)));
String column2 = column1 + MetaTableAccessor.META_REPLICA_ID_DELIMITER;
assertEquals(-1,
MetaTableAccessor.parseReplicaIdFromServerColumn(Bytes.toBytes(column2)));
String column3 = column2 + "00";
assertEquals(-1,
MetaTableAccessor.parseReplicaIdFromServerColumn(Bytes.toBytes(column3)));
String column4 = column3 + "2A";
assertEquals(42,
MetaTableAccessor.parseReplicaIdFromServerColumn(Bytes.toBytes(column4)));
String column5 = column4 + "2A";
assertEquals(-1,
MetaTableAccessor.parseReplicaIdFromServerColumn(Bytes.toBytes(column5)));
String column6 = HConstants.STARTCODE_QUALIFIER_STR;
assertEquals(-1,
MetaTableAccessor.parseReplicaIdFromServerColumn(Bytes.toBytes(column6)));
}

@Test
public void testMetaReaderGetColumnMethods() {
assertArrayEquals(HConstants.SERVER_QUALIFIER, MetaTableAccessor.getServerColumn(0));
assertArrayEquals(Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR
+ MetaTableAccessor.META_REPLICA_ID_DELIMITER + "002A"),
MetaTableAccessor.getServerColumn(42));

assertArrayEquals(HConstants.STARTCODE_QUALIFIER,
MetaTableAccessor.getStartCodeColumn(0));
assertArrayEquals(Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR
+ MetaTableAccessor.META_REPLICA_ID_DELIMITER + "002A"),
MetaTableAccessor.getStartCodeColumn(42));

assertArrayEquals(HConstants.SEQNUM_QUALIFIER,
MetaTableAccessor.getSeqNumColumn(0));
assertArrayEquals(Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR
+ MetaTableAccessor.META_REPLICA_ID_DELIMITER + "002A"),
MetaTableAccessor.getSeqNumColumn(42));
assertEquals(region.getEncodedName(), pair.getFirst().getEncodedName());
}

@Test
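Note (editor's illustration, not part of the commit): the qualifiers compared in the hunk above are the base qualifier plus the meta replica-id delimiter plus the replica id in hex ("002A" is 42). A hedged sketch of how the expected byte[] for replica 42 could be assembled from the constants used in this test; the zero-padded upper-case hex format is inferred from the literal "002A" and is an assumption, not taken from the patch:

    // Sketch: rebuild the expected server column qualifier for replica 42.
    byte[] expectedServerColumn = Bytes.toBytes(
        HConstants.SERVER_QUALIFIER_STR
            + CatalogFamilyFormat.META_REPLICA_ID_DELIMITER
            + String.format("%04X", 42)); // assumed padding/format, matching the "002A" asserted above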
@ -450,26 +398,14 @@

long regionId = System.currentTimeMillis();
RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo replica1 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(1)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(1).build();
RegionInfo replica100 = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(100)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(100).build();

long seqNum0 = random.nextLong();
long seqNum1 = random.nextLong();
@ -502,19 +438,19 @@
}
}

public static void assertMetaLocation(Table meta, byte[] row, ServerName serverName,
long seqNum, int replicaId, boolean checkSeqNum) throws IOException {
public static void assertMetaLocation(Table meta, byte[] row, ServerName serverName, long seqNum,
int replicaId, boolean checkSeqNum) throws IOException {
Get get = new Get(row);
Result result = meta.get(get);
assertTrue(Bytes.equals(
result.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId)),
result.getValue(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(replicaId)),
Bytes.toBytes(serverName.getAddress().toString())));
assertTrue(Bytes.equals(
result.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId)),
result.getValue(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(replicaId)),
Bytes.toBytes(serverName.getStartcode())));
if (checkSeqNum) {
assertTrue(Bytes.equals(
result.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId)),
result.getValue(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(replicaId)),
Bytes.toBytes(seqNum)));
}
}

@ -524,9 +460,9 @@ public class TestMetaTableAccessor {
Get get = new Get(row);
Result result = meta.get(get);
Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerColumn(replicaId));
CatalogFamilyFormat.getServerColumn(replicaId));
Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getStartCodeColumn(replicaId));
CatalogFamilyFormat.getStartCodeColumn(replicaId));
assertNotNull(serverCell);
assertNotNull(startCodeCell);
assertEquals(0, serverCell.getValueLength());

@ -550,13 +486,13 @@ public class TestMetaTableAccessor {
Result result = meta.get(get);
for (int replicaId = 0; replicaId < 3; replicaId++) {
Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerColumn(replicaId));
CatalogFamilyFormat.getServerColumn(replicaId));
Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getStartCodeColumn(replicaId));
CatalogFamilyFormat.getStartCodeColumn(replicaId));
Cell stateCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getRegionStateColumn(replicaId));
CatalogFamilyFormat.getRegionStateColumn(replicaId));
Cell snCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerNameColumn(replicaId));
CatalogFamilyFormat.getServerNameColumn(replicaId));
if (replicaId == 0) {
assertNotNull(stateCell);
} else {

@ -575,12 +511,8 @@ public class TestMetaTableAccessor {
public void testMetaLocationForRegionReplicasIsAddedAtTableCreation() throws IOException {
long regionId = System.currentTimeMillis();
RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();

Table meta = MetaTableAccessor.getMetaHTable(connection);
try {
@ -599,27 +531,15 @@ public class TestMetaTableAccessor {
long regionId = System.currentTimeMillis();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();

RegionInfo splitA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(Bytes.toBytes("a"))
.setSplit(false)
.setRegionId(regionId + 1)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(Bytes.toBytes("a"))
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId + 1)
.setReplicaId(0)
.build();
.setStartKey(Bytes.toBytes("a")).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();

try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(parent);

@ -640,27 +560,15 @@ public class TestMetaTableAccessor {
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());

RegionInfo parentA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(Bytes.toBytes("a"))
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(Bytes.toBytes("a")).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();

RegionInfo parentB = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(Bytes.toBytes("a"))
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo merged = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId + 1)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();

try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {
List<RegionInfo> regionInfos = Lists.newArrayList(parentA, parentB);
@ -694,8 +602,7 @@ public class TestMetaTableAccessor {
// Make sure all the regions are deployed
UTIL.countRows(table);

MetaTableAccessor.Visitor visitor =
mock(MetaTableAccessor.Visitor.class);
ClientMetaTableAccessor.Visitor visitor = mock(ClientMetaTableAccessor.Visitor.class);
doReturn(true).when(visitor).visit((Result) anyObject());

// Scanning the entire table should give us three rows

@ -731,12 +638,8 @@ public class TestMetaTableAccessor {
public void testMastersSystemTimeIsUsedInUpdateLocations() throws IOException {
long regionId = System.currentTimeMillis();
RegionInfo regionInfo = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();

ServerName sn = ServerName.valueOf("bar", 0, 0);
try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {

@ -749,11 +652,11 @@ public class TestMetaTableAccessor {
Get get = new Get(regionInfo.getRegionName());
Result result = meta.get(get);
Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerColumn(0));
CatalogFamilyFormat.getServerColumn(0));
Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getStartCodeColumn(0));
CatalogFamilyFormat.getStartCodeColumn(0));
Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getSeqNumColumn(0));
CatalogFamilyFormat.getSeqNumColumn(0));
assertNotNull(serverCell);
assertNotNull(startCodeCell);
assertNotNull(seqNumCell);
@ -771,27 +674,16 @@ public class TestMetaTableAccessor {
long regionId = System.currentTimeMillis();

RegionInfo regionInfoA = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(new byte[] {'a'})
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(new byte[] { 'a' }).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();

RegionInfo regionInfoB = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(new byte[] {'a'})
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
RegionInfo mergedRegionInfo = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(new byte[] { 'a' }).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo mergedRegionInfo =
RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();

ServerName sn = ServerName.valueOf("bar", 0, 0);
try (Table meta = MetaTableAccessor.getMetaHTable(connection)) {

@ -811,7 +703,7 @@ public class TestMetaTableAccessor {
Get get = new Get(mergedRegionInfo.getRegionName());
Result result = meta.get(get);
Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerColumn(0));
CatalogFamilyFormat.getServerColumn(0));
assertNotNull(serverCell);
assertEquals(serverNameTime, serverCell.getTimestamp());

@ -826,14 +718,13 @@ public class TestMetaTableAccessor {
EnvironmentEdgeManager.reset();
}

result = meta.get(get);
serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerColumn(0));
CatalogFamilyFormat.getServerColumn(0));
Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getStartCodeColumn(0));
CatalogFamilyFormat.getStartCodeColumn(0));
Cell seqNumCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getSeqNumColumn(0));
CatalogFamilyFormat.getSeqNumColumn(0));
assertNull(serverCell);
assertNull(startCodeCell);
assertNull(seqNumCell);
@ -889,18 +780,11 @@ public class TestMetaTableAccessor {
RegionInfo parent = loc.getRegion();
long rid = 1000;
byte[] splitKey = Bytes.toBytes("a");
RegionInfo splitA = RegionInfoBuilder.newBuilder(parent.getTable())
.setStartKey(parent.getStartKey())
.setEndKey(splitKey)
.setSplit(false)
.setRegionId(rid)
.build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(parent.getTable())
.setStartKey(splitKey)
.setEndKey(parent.getEndKey())
.setSplit(false)
.setRegionId(rid)
.build();
RegionInfo splitA =
RegionInfoBuilder.newBuilder(parent.getTable()).setStartKey(parent.getStartKey())
.setEndKey(splitKey).setSplit(false).setRegionId(rid).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(parent.getTable()).setStartKey(splitKey)
.setEndKey(parent.getEndKey()).setSplit(false).setRegionId(rid).build();

// find the meta server
MiniHBaseCluster cluster = UTIL.getMiniHBaseCluster();

@ -926,49 +810,36 @@ public class TestMetaTableAccessor {
long regionId = System.currentTimeMillis();
ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
RegionInfo parent = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId).setReplicaId(0).build();
RegionInfo splitA = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(Bytes.toBytes("a"))
.setSplit(false)
.setRegionId(regionId + 1)
.setReplicaId(0)
.build();
.setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("a")).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();
RegionInfo splitB = RegionInfoBuilder.newBuilder(TableName.valueOf("table_foo"))
.setStartKey(Bytes.toBytes("a"))
.setEndKey(HConstants.EMPTY_END_ROW)
.setSplit(false)
.setRegionId(regionId + 1)
.setReplicaId(0)
.build();
.setStartKey(Bytes.toBytes("a")).setEndKey(HConstants.EMPTY_END_ROW).setSplit(false)
.setRegionId(regionId + 1).setReplicaId(0).build();

Table meta = MetaTableAccessor.getMetaHTable(connection);
try {
List<RegionInfo> regionInfos = Lists.newArrayList(parent);
MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 3);

MetaTableAccessor.splitRegion(connection, parent, -1L, splitA, splitB,
serverName0, 3);
MetaTableAccessor.splitRegion(connection, parent, -1L, splitA, splitB, serverName0, 3);
Get get1 = new Get(splitA.getRegionName());
Result resultA = meta.get(get1);
Cell serverCellA = resultA.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerColumn(splitA.getReplicaId()));
CatalogFamilyFormat.getServerColumn(splitA.getReplicaId()));
Cell startCodeCellA = resultA.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getStartCodeColumn(splitA.getReplicaId()));
CatalogFamilyFormat.getStartCodeColumn(splitA.getReplicaId()));
assertNull(serverCellA);
assertNull(startCodeCellA);

Get get2 = new Get(splitA.getRegionName());
Result resultB = meta.get(get2);
Cell serverCellB = resultB.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getServerColumn(splitB.getReplicaId()));
CatalogFamilyFormat.getServerColumn(splitB.getReplicaId()));
Cell startCodeCellB = resultB.getColumnLatestCell(HConstants.CATALOG_FAMILY,
MetaTableAccessor.getStartCodeColumn(splitB.getReplicaId()));
CatalogFamilyFormat.getStartCodeColumn(splitB.getReplicaId()));
assertNull(serverCellB);
assertNull(startCodeCellB);
} finally {
@ -984,8 +855,8 @@ public class TestMetaTableAccessor {
UTIL.createTable(tableName, "cf");
final List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
final String encodedName = regions.get(0).getRegionInfo().getEncodedName();
final Result result = MetaTableAccessor.scanByRegionEncodedName(UTIL.getConnection(),
encodedName);
final Result result =
MetaTableAccessor.scanByRegionEncodedName(UTIL.getConnection(), encodedName);
assertNotNull(result);
assertTrue(result.advance());
final String resultingRowKey = CellUtil.getCellKeyAsString(result.current());

@ -996,9 +867,8 @@ public class TestMetaTableAccessor {
@Test
public void testScanByRegionEncodedNameNonExistingRegion() throws Exception {
final String encodedName = "nonexistingregion";
final Result result = MetaTableAccessor.scanByRegionEncodedName(UTIL.getConnection(),
encodedName);
final Result result =
MetaTableAccessor.scanByRegionEncodedName(UTIL.getConnection(), encodedName);
assertNull(result);
}
}
@ -31,7 +31,7 @@ import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;

@ -85,7 +85,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {

AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
List<HRegionLocation> regionLocations =
AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
int originalCount = regionLocations.size();

initSplitMergeSwitch();

@ -116,7 +116,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {

AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
List<HRegionLocation> regionLocations =
AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
int originalCount = regionLocations.size();

initSplitMergeSwitch();

@ -160,7 +160,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
createTableWithDefaultConf(tableName, splitRows);

AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
List<HRegionLocation> regionLocations = AsyncMetaTableAccessor
List<HRegionLocation> regionLocations = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
RegionInfo regionA;
RegionInfo regionB;

@ -174,7 +174,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
regionC = regionLocations.get(2).getRegion();
admin.mergeRegions(regionA.getRegionName(), regionB.getRegionName(), false).get();

regionLocations = AsyncMetaTableAccessor
regionLocations = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();

assertEquals(2, regionLocations.size());

@ -198,7 +198,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
admin.mergeRegions(regionC.getRegionName(), mergedChildRegion.getRegionName(),
false).get();

regionLocations = AsyncMetaTableAccessor
regionLocations = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
assertEquals(1, regionLocations.size());
}

@ -243,7 +243,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
createTableWithDefaultConf(tableName);

AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
List<HRegionLocation> regionLocations = AsyncMetaTableAccessor
List<HRegionLocation> regionLocations = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
assertEquals(1, regionLocations.size());

@ -273,7 +273,7 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
int count = 0;
for (int i = 0; i < 45; i++) {
try {
regionLocations = AsyncMetaTableAccessor
regionLocations = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
count = regionLocations.size();
if (count >= 2) {
@ -29,7 +29,7 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletionException;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;

@ -74,7 +74,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
}

static TableState.State getStateFromMeta(TableName table) throws Exception {
Optional<TableState> state = AsyncMetaTableAccessor
Optional<TableState> state = ClientMetaTableAccessor
.getTableState(ASYNC_CONN.getTable(TableName.META_TABLE_NAME), table).get();
assertTrue(state.isPresent());
return state.get().getState();

@ -85,20 +85,20 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);

createTableWithDefaultConf(tableName);
List<HRegionLocation> regionLocations = AsyncMetaTableAccessor
List<HRegionLocation> regionLocations = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
assertEquals("Table should have only 1 region", 1, regionLocations.size());

final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "_2");
createTableWithDefaultConf(tableName2, new byte[][] { new byte[] { 42 } });
regionLocations = AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2).get();
regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2).get();
assertEquals("Table should have only 2 region", 2, regionLocations.size());

final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "_3");
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName3);
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
admin.createTable(builder.build(), Bytes.toBytes("a"), Bytes.toBytes("z"), 3).join();
regionLocations = AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3).get();
regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3).get();
assertEquals("Table should have only 3 region", 3, regionLocations.size());

final TableName tableName4 = TableName.valueOf(tableName.getNameAsString() + "_4");

@ -115,7 +115,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
builder = TableDescriptorBuilder.newBuilder(tableName5);
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
admin.createTable(builder.build(), new byte[] { 1 }, new byte[] { 127 }, 16).join();
regionLocations = AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName5).get();
regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName5).get();
assertEquals("Table should have 16 region", 16, regionLocations.size());
}

@ -132,7 +132,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable);

AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
List<HRegionLocation> regions = AsyncMetaTableAccessor
List<HRegionLocation> regions = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
Iterator<HRegionLocation> hris = regions.iterator();

@ -191,7 +191,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
admin.createTable(builder.build(), startKey, endKey, expectedRegions).join();

regions = AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2).get();
regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName2).get();
assertEquals(
"Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
expectedRegions, regions.size());

@ -243,7 +243,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
admin.createTable(builder.build(), startKey, endKey, expectedRegions).join();

regions = AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3)
regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName3)
.get();
assertEquals(
"Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
@ -30,7 +30,7 @@ import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HRegionLocation;

@ -283,7 +283,7 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
createTableWithDefaultConf(tableName, splitKeys);

AsyncTable<AdvancedScanResultConsumer> metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
List<HRegionLocation> regions = AsyncMetaTableAccessor
List<HRegionLocation> regions = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
assertEquals(
"Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),

@ -294,7 +294,7 @@ public class TestAsyncTableAdminApi3 extends TestAsyncAdminBase {
// Enable table, use retain assignment to assign regions.
admin.enableTable(tableName).join();

List<HRegionLocation> regions2 = AsyncMetaTableAccessor
List<HRegionLocation> regions2 = ClientMetaTableAccessor
.getTableHRegionLocations(metaTable, tableName).get();
// Check the assignment.
assertEquals(regions.size(), regions2.size());
@ -24,12 +24,12 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.junit.After;

@ -63,26 +63,26 @@ public class TestMetaTableAccessorNoCluster {

@Test
public void testGetHRegionInfo() throws IOException {
assertNull(MetaTableAccessor.getRegionInfo(new Result()));
assertNull(CatalogFamilyFormat.getRegionInfo(new Result()));

List<Cell> kvs = new ArrayList<>();
Result r = Result.create(kvs);
assertNull(MetaTableAccessor.getRegionInfo(r));
assertNull(CatalogFamilyFormat.getRegionInfo(r));

byte[] f = HConstants.CATALOG_FAMILY;
// Make a key value that doesn't have the expected qualifier.
kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.SERVER_QUALIFIER, f));
r = Result.create(kvs);
assertNull(MetaTableAccessor.getRegionInfo(r));
assertNull(CatalogFamilyFormat.getRegionInfo(r));
// Make a key that does not have a regioninfo value.
kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, f));
RegionInfo hri = MetaTableAccessor.getRegionInfo(Result.create(kvs));
RegionInfo hri = CatalogFamilyFormat.getRegionInfo(Result.create(kvs));
assertTrue(hri == null);
// OK, give it what it expects
kvs.clear();
kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER,
RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
hri = MetaTableAccessor.getRegionInfo(Result.create(kvs));
hri = CatalogFamilyFormat.getRegionInfo(Result.create(kvs));
assertNotNull(hri);
assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0);
}
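Note (editor's illustration, not part of the commit): the no-cluster test above shows the round trip that CatalogFamilyFormat.getRegionInfo performs on a hand-built meta row. A minimal sketch of the same round trip, using only calls that appear in that hunk:

    // Sketch: serialize FIRST_META_REGIONINFO into an info:regioninfo cell and parse it back.
    List<Cell> cells = new ArrayList<>();
    cells.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER,
        RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO)));
    RegionInfo parsed = CatalogFamilyFormat.getRegionInfo(Result.create(cells));
    // parsed compares equal to RegionInfoBuilder.FIRST_META_REGIONINFO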
@@ -24,7 +24,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
-
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;

@@ -145,8 +145,8 @@ public class TestCatalogJanitorCluster {
     List<RegionInfo> t1Ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), T1);
     RegionInfo t1Ri1 = t1Ris.get(1);
     Put pServer = new Put(t1Ri1.getRegionName());
-    pServer.addColumn(MetaTableAccessor.getCatalogFamily(),
-      MetaTableAccessor.getServerColumn(0), Bytes.toBytes("bad.server.example.org:1234"));
+    pServer.addColumn(HConstants.CATALOG_FAMILY,
+      CatalogFamilyFormat.getServerColumn(0), Bytes.toBytes("bad.server.example.org:1234"));
     MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(pServer));
     gc = janitor.scan();
     report = janitor.getLastReport();

@@ -157,8 +157,8 @@ public class TestCatalogJanitorCluster {
     // break if this happens.
     LOG.info("Make null info:server");
     Put emptyInfoServerPut = new Put(t1Ri1.getRegionName());
-    emptyInfoServerPut.addColumn(MetaTableAccessor.getCatalogFamily(),
-      MetaTableAccessor.getServerColumn(0), Bytes.toBytes(""));
+    emptyInfoServerPut.addColumn(HConstants.CATALOG_FAMILY,
+      CatalogFamilyFormat.getServerColumn(0), Bytes.toBytes(""));
     MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(emptyInfoServerPut));
     janitor.scan();
     report = janitor.getLastReport();

@@ -166,8 +166,8 @@ public class TestCatalogJanitorCluster {
     // Mke an empty regioninfo in t1.
     RegionInfo t1Ri2 = t1Ris.get(2);
     Put pEmptyRI = new Put(t1Ri2.getRegionName());
-    pEmptyRI.addColumn(MetaTableAccessor.getCatalogFamily(),
-      MetaTableAccessor.getRegionInfoColumn(), HConstants.EMPTY_BYTE_ARRAY);
+    pEmptyRI.addColumn(HConstants.CATALOG_FAMILY,
+      HConstants.REGIONINFO_QUALIFIER, HConstants.EMPTY_BYTE_ARRAY);
     MetaTableAccessor.putsToMetaTable(TEST_UTIL.getConnection(), Arrays.asList(pEmptyRI));
     janitor.scan();
     report = janitor.getLastReport();

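The TestCatalogJanitorCluster hunks show the second recurring pattern: the catalog-family qualifier helpers (getServerColumn and friends) now live on CatalogFamilyFormat, and call sites that used MetaTableAccessor.getCatalogFamily() switch to HConstants.CATALOG_FAMILY. A small sketch of building such a Put follows; the helper names come from the diff, while the row key and server string are made up for illustration.

import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CatalogColumnSketch {
  // Builds an info:server cell for replica 0 of the given catalog row,
  // using the relocated column helpers.
  static Put serverPut(byte[] metaRowKey, String serverAndPort) {
    Put put = new Put(metaRowKey);
    // Family comes straight from HConstants; the qualifier helper now lives
    // in CatalogFamilyFormat rather than MetaTableAccessor.
    put.addColumn(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(0),
      Bytes.toBytes(serverAndPort));
    return put;
  }

  public static void main(String[] args) {
    // The row key below is a hypothetical region name, only for illustration.
    Put put = serverPut(Bytes.toBytes("t1,,1.abcdef"), "bad.server.example.org:1234");
    System.out.println(put);
  }
}
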
@@ -26,6 +26,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionLocation;

@@ -174,7 +175,7 @@ public class TestCatalogJanitorInMemoryStates {
       if (result == null) {
         break;
       }
-      region = MetaTableAccessor.getRegionInfo(result);
+      region = CatalogFamilyFormat.getRegionInfo(result);
       if (region.isSplitParent()) {
         LOG.debug(region.toString() + " IS a parent!");
         pair = MetaTableAccessor.getDaughterRegions(result);

@@ -30,6 +30,8 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;

@@ -106,14 +108,14 @@ public class TestMaster {
       byte[] rowKey) throws IOException {
     final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null);

-    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
+    ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
       @Override
       public boolean visit(Result data) throws IOException {
         if (data == null || data.size() <= 0) {
           return true;
         }
-        Pair<RegionInfo, ServerName> pair = new Pair<>(MetaTableAccessor.getRegionInfo(data),
-          MetaTableAccessor.getServerName(data, 0));
+        Pair<RegionInfo, ServerName> pair = new Pair<>(CatalogFamilyFormat.getRegionInfo(data),
+          CatalogFamilyFormat.getServerName(data, 0));
         if (!pair.getFirst().getTable().equals(tableName)) {
           return false;
         }

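The TestMaster hunk above illustrates the third pattern: the meta-scan callback interface moves from MetaTableAccessor.Visitor to ClientMetaTableAccessor.Visitor, with cell parsing delegated to CatalogFamilyFormat. Below is a sketch of a counting visitor in the new style; how the visitor is handed to the actual meta scan is unchanged by this hunk and not shown here, and the wrapper class is illustrative.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;

public class MetaVisitorSketch {
  // A visitor that counts catalog rows belonging to one table. The Visitor
  // interface now lives on ClientMetaTableAccessor; the Result parsing moved
  // to CatalogFamilyFormat.
  static ClientMetaTableAccessor.Visitor countingVisitor(TableName table, AtomicInteger count) {
    return new ClientMetaTableAccessor.Visitor() {
      @Override
      public boolean visit(Result row) throws IOException {
        if (row == null || row.isEmpty()) {
          return true; // keep scanning
        }
        RegionInfo info = CatalogFamilyFormat.getRegionInfo(row);
        if (info != null && info.getTable().equals(table)) {
          count.incrementAndGet();
        }
        return true;
      }
    };
  }
}
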
@@ -33,16 +33,16 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.ClusterMetrics.Option;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.StartMiniClusterOption;

@@ -162,7 +162,7 @@ public class TestMasterOperationsForRegionReplicas {
     List<Result> metaRows = MetaTableAccessor.fullScanRegions(ADMIN.getConnection());
     int numRows = 0;
     for (Result result : metaRows) {
-      RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
+      RegionLocations locations = CatalogFamilyFormat.getRegionLocations(result);
       RegionInfo hri = locations.getRegionLocation().getRegion();
       if (!hri.getTable().equals(tableName)) continue;
       numRows += 1;

@@ -310,11 +310,11 @@ public class TestMasterOperationsForRegionReplicas {
       for (byte[] row : tableRows) {
         Delete deleteOneReplicaLocation = new Delete(row);
         deleteOneReplicaLocation.addColumns(HConstants.CATALOG_FAMILY,
-          MetaTableAccessor.getServerColumn(1));
+          CatalogFamilyFormat.getServerColumn(1));
         deleteOneReplicaLocation.addColumns(HConstants.CATALOG_FAMILY,
-          MetaTableAccessor.getSeqNumColumn(1));
+          CatalogFamilyFormat.getSeqNumColumn(1));
         deleteOneReplicaLocation.addColumns(HConstants.CATALOG_FAMILY,
-          MetaTableAccessor.getStartCodeColumn(1));
+          CatalogFamilyFormat.getStartCodeColumn(1));
         metaTable.delete(deleteOneReplicaLocation);
       }
       metaTable.close();

@@ -337,10 +337,12 @@ public class TestMasterOperationsForRegionReplicas {
       Connection connection) throws IOException {
     assert (ADMIN.tableExists(table));
     final AtomicInteger count = new AtomicInteger();
-    Visitor visitor = new Visitor() {
+    ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
       @Override
       public boolean visit(Result r) throws IOException {
-        if (MetaTableAccessor.getRegionInfo(r).getTable().equals(table)) count.incrementAndGet();
+        if (CatalogFamilyFormat.getRegionInfo(r).getTable().equals(table)) {
+          count.incrementAndGet();
+        }
         return true;
       }
     };

@@ -18,10 +18,10 @@
 package org.apache.hadoop.hbase.master;

 import java.io.IOException;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;

@@ -498,7 +498,7 @@ public class TestMasterTransitions {
       scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
       ResultScanner s = meta.getScanner(scan);
       for (Result r = null; (r = s.next()) != null;) {
-        RegionInfo hri = MetaTableAccessor.getRegionInfo(r);
+        RegionInfo hri = CatalogFamilyFormat.getRegionInfo(r);
         if (hri == null) break;
         if (!hri.getTable().equals(TABLENAME)) {
           continue;

@@ -34,6 +34,8 @@ import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;

@@ -473,12 +475,12 @@ public class TestRegionPlacement {
     final AtomicInteger regionOnPrimaryNum = new AtomicInteger(0);
     final AtomicInteger totalRegionNum = new AtomicInteger(0);
     LOG.info("The start of region placement verification");
-    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
+    ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
       @Override
       public boolean visit(Result result) throws IOException {
         try {
           @SuppressWarnings("deprecation")
-          RegionInfo info = MetaTableAccessor.getRegionInfo(result);
+          RegionInfo info = CatalogFamilyFormat.getRegionInfo(result);
           if(info.getTable().getNamespaceAsString()
               .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
             return true;

@@ -30,6 +30,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;

@@ -243,10 +245,10 @@ public class MasterProcedureTestingUtility {
   private static int countMetaRegions(final HMaster master, final TableName tableName)
       throws IOException {
     final AtomicInteger actualRegCount = new AtomicInteger(0);
-    final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
+    final ClientMetaTableAccessor.Visitor visitor = new ClientMetaTableAccessor.Visitor() {
       @Override
       public boolean visit(Result rowResult) throws IOException {
-        RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
+        RegionLocations list = CatalogFamilyFormat.getRegionLocations(rowResult);
         if (list == null) {
           LOG.warn("No serialized RegionInfo in " + rowResult);
           return true;

@@ -25,7 +25,7 @@ import static org.junit.Assert.assertTrue;

 import java.io.IOException;
 import java.util.List;
-
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;

@@ -103,7 +103,7 @@ public class TestHBCKSCP extends TestSCPBase {
     // Assert region is OPEN.
     assertEquals(RegionState.State.OPEN.toString(),
       Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
-    ServerName serverName = MetaTableAccessor.getServerName(r, 0);
+    ServerName serverName = CatalogFamilyFormat.getServerName(r, 0);
     assertEquals(rsServerName, serverName);
     // moveFrom adds to dead servers and adds it to processing list only we will
     // not be processing this server 'normally'. Remove it from processing by

@@ -129,7 +129,7 @@ public class TestHBCKSCP extends TestSCPBase {
     r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
     assertEquals(RegionState.State.OPEN.toString(),
       Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
-    serverName = MetaTableAccessor.getServerName(r, 0);
+    serverName = CatalogFamilyFormat.getServerName(r, 0);
     assertNotNull(cluster.getRegionServer(serverName));
     assertEquals(rsServerName, serverName);

@@ -149,7 +149,7 @@ public class TestHBCKSCP extends TestSCPBase {
     r = MetaTableAccessor.getRegionResult(master.getConnection(), rsRI.getRegionName());
     assertEquals(RegionState.State.OPEN.toString(),
       Bytes.toString(r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
-    serverName = MetaTableAccessor.getServerName(r, 0);
+    serverName = CatalogFamilyFormat.getServerName(r, 0);
     assertNotNull(cluster.getRegionServer(serverName));
     assertNotEquals(rsServerName, serverName);
     // Make sure no mention of old server post SCP.

@@ -31,6 +31,7 @@ import java.util.TreeSet;
 import java.util.stream.Collectors;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;

@@ -443,7 +444,7 @@ public class TestEndToEndSplitTransaction {
         break;
       }

-      region = MetaTableAccessor.getRegionInfo(result);
+      region = CatalogFamilyFormat.getRegionInfo(result);
       if (region.isSplitParent()) {
         log("found parent region: " + region.toString());
         PairOfSameType<RegionInfo> pair = MetaTableAccessor.getDaughterRegions(result);

@@ -17,7 +17,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-
+import org.apache.hadoop.hbase.ClientMetaTableAccessor;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;

@@ -113,10 +113,10 @@ public class TestHBaseFsckCleanReplicationBarriers {
     barrierScan.setCaching(100);
     barrierScan.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
     barrierScan
-      .withStartRow(
-        MetaTableAccessor.getTableStartRowForMeta(tableName, MetaTableAccessor.QueryType.REGION))
-      .withStopRow(
-        MetaTableAccessor.getTableStopRowForMeta(tableName, MetaTableAccessor.QueryType.REGION));
+      .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(tableName,
+        ClientMetaTableAccessor.QueryType.REGION))
+      .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(tableName,
+        ClientMetaTableAccessor.QueryType.REGION));
     Result result;
     try (ResultScanner scanner =
       MetaTableAccessor.getMetaHTable(UTIL.getConnection()).getScanner(barrierScan)) {

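The replication-barrier test shows a fourth relocation: the helpers that compute meta-table scan boundaries for a table, and the QueryType enum, now live on ClientMetaTableAccessor. A short sketch of building a meta Scan bounded to one table's region rows with the relocated helpers; the table name and wrapper class are illustrative.

import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;

public class MetaScanBoundsSketch {
  // Builds a Scan over hbase:meta limited to the catalog rows of one table,
  // using the relocated start/stop-row helpers and QueryType enum.
  static Scan regionRowsScan(TableName table) {
    return new Scan()
      .addFamily(HConstants.CATALOG_FAMILY)
      .withStartRow(ClientMetaTableAccessor.getTableStartRowForMeta(table,
        ClientMetaTableAccessor.QueryType.REGION))
      .withStopRow(ClientMetaTableAccessor.getTableStopRowForMeta(table,
        ClientMetaTableAccessor.QueryType.REGION));
  }

  public static void main(String[] args) {
    // "t1" is a placeholder table name for illustration only.
    System.out.println(regionRowsScan(TableName.valueOf("t1")));
  }
}
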
@@ -26,6 +26,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;

@@ -238,7 +239,7 @@ public class OfflineMetaRebuildTestCore {
     List<Delete> dels = new ArrayList<>();
     for (Result r : scanner) {
       RegionInfo info =
-        MetaTableAccessor.getRegionInfo(r);
+        CatalogFamilyFormat.getRegionInfo(r);
       if(info != null && !info.getTable().getNamespaceAsString()
           .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
         Delete d = new Delete(r.getRow());

@@ -31,6 +31,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CatalogFamilyFormat;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;

@@ -41,7 +42,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;

@@ -1130,7 +1130,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb
       }

       // find region start and end keys
-      RegionInfo regionInfo = MetaTableAccessor.getRegionInfo(startRowResult);
+      RegionInfo regionInfo = CatalogFamilyFormat.getRegionInfo(startRowResult);
       if (regionInfo == null) {
         throw new IOException("RegionInfo REGIONINFO was null or " +
           " empty in Meta for row="

@@ -1144,7 +1144,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb
       region.version = HREGION_VERSION; // version not used anymore, PB encoding used.

       // find region assignment to server
-      ServerName serverName = MetaTableAccessor.getServerName(startRowResult, 0);
+      ServerName serverName = CatalogFamilyFormat.getServerName(startRowResult, 0);
       if (serverName != null) {
         region.setServerName(Bytes.toBytes(serverName.getHostname()));
         region.port = serverName.getPort();

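The Thrift handler hunks are the one non-test consumer in this part of the diff: a single hbase:meta Result is decoded into a RegionInfo plus the hosting ServerName, both now via CatalogFamilyFormat. A sketch of that combined read, assuming only the two accessors shown above; the wrapper class and the formatting of the output are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;

public class MetaRowReadSketch {
  // Decodes one catalog row: the region descriptor plus the server currently
  // hosting replica 0, both parsed by CatalogFamilyFormat.
  static String describe(Result metaRow) throws IOException {
    RegionInfo regionInfo = CatalogFamilyFormat.getRegionInfo(metaRow);
    if (regionInfo == null) {
      throw new IOException("No info:regioninfo cell in row");
    }
    ServerName serverName = CatalogFamilyFormat.getServerName(metaRow, 0);
    return regionInfo.getRegionNameAsString() + " on "
      + (serverName == null ? "unassigned" : serverName.getHostname() + ":" + serverName.getPort());
  }
}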