HBASE-22721 Refactor HBaseFsck: move the inner class out

Signed-off-by: stack <stack@apache.org>
Guanghao Zhang 2019-07-24 08:02:41 +08:00
parent d88ee0447a
commit 56417284bf
14 changed files with 1578 additions and 1405 deletions


@@ -49,6 +49,7 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -74,7 +75,6 @@ import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -1209,10 +1209,10 @@ public abstract class FSUtils extends CommonFSUtils {
    * @throws IOException When scanning the directory fails.
    * @throws InterruptedException
    */
-  public static Map<String, Path> getTableStoreFilePathMap(
-      Map<String, Path> resultMap,
+  public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> resultMap,
     final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter,
-    ExecutorService executor, final ErrorReporter errors) throws IOException, InterruptedException {
+    ExecutorService executor, final HbckErrorReporter errors)
+    throws IOException, InterruptedException {
     final Map<String, Path> finalResultMap =
         resultMap == null ? new ConcurrentHashMap<>(128, 0.75f, 32) : resultMap;
@@ -1375,7 +1375,7 @@ public abstract class FSUtils extends CommonFSUtils {
    */
   public static Map<String, Path> getTableStoreFilePathMap(
     final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter,
-    ExecutorService executor, ErrorReporter errors)
+    ExecutorService executor, HbckErrorReporter errors)
     throws IOException, InterruptedException {
     ConcurrentHashMap<String, Path> map = new ConcurrentHashMap<>(1024, 0.75f, 32);
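Editor's note: the two overloads above now take the extracted HbckErrorReporter in place of the old nested HBaseFsck.ErrorReporter. Below is a hedged sketch of a caller under the new signature; the class name, method name, and thread-pool size are invented for illustration, and the reporter implementation is supplied by the caller.

import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HbckErrorReporter;

public class StoreFileMapExample {
  // Scans every table under the HBase root dir, returning store-file name -> Path.
  // Problems hit during the scan are funneled into the supplied reporter.
  static Map<String, Path> listStoreFiles(HbckErrorReporter reporter) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    ExecutorService executor = Executors.newFixedThreadPool(4);
    try {
      // null PathFilter: consider every store file.
      return FSUtils.getTableStoreFilePathMap(fs, rootDir, null, executor, reporter);
    } finally {
      executor.shutdown();
    }
  }
}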


@@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.util.ArrayList;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface HbckErrorReporter {
enum ERROR_CODE {
UNKNOWN, NO_META_REGION, NULL_META_REGION, NO_VERSION_FILE, NOT_IN_META_HDFS, NOT_IN_META,
NOT_IN_META_OR_DEPLOYED, NOT_IN_HDFS_OR_DEPLOYED, NOT_IN_HDFS, SERVER_DOES_NOT_MATCH_META,
NOT_DEPLOYED, MULTI_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, MULTI_META_REGION, RS_CONNECT_FAILURE,
FIRST_REGION_STARTKEY_NOT_EMPTY, LAST_REGION_ENDKEY_NOT_EMPTY, DUPE_STARTKEYS,
HOLE_IN_REGION_CHAIN, OVERLAP_IN_REGION_CHAIN, REGION_CYCLE, DEGENERATE_REGION,
ORPHAN_HDFS_REGION, LINGERING_SPLIT_PARENT, NO_TABLEINFO_FILE, LINGERING_REFERENCE_HFILE,
LINGERING_HFILELINK, WRONG_USAGE, EMPTY_META_CELL, EXPIRED_TABLE_LOCK, BOUNDARIES_ERROR,
ORPHAN_TABLE_STATE, NO_TABLE_STATE, UNDELETED_REPLICATION_QUEUE, DUPE_ENDKEYS,
UNSUPPORTED_OPTION, INVALID_TABLE
}
void clear();
void report(String message);
void reportError(String message);
void reportError(ERROR_CODE errorCode, String message);
void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table);
void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, HbckRegionInfo info);
void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table, HbckRegionInfo info1,
HbckRegionInfo info2);
int summarize();
void detail(String details);
ArrayList<ERROR_CODE> getErrorList();
void progress();
void print(String message);
void resetErrors();
boolean tableHasErrors(HbckTableInfo table);
}
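Editor's note: to make the contract above concrete, here is a hedged sketch of a minimal console-backed implementation. It is not part of the commit (HBaseFsck keeps its own reporter implementation), and the class name and behavior are invented for illustration.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.util.HbckErrorReporter;
import org.apache.hadoop.hbase.util.HbckRegionInfo;
import org.apache.hadoop.hbase.util.HbckTableInfo;

public class ConsoleErrorReporter implements HbckErrorReporter {
  private final ArrayList<ERROR_CODE> errors = new ArrayList<>();
  private final Set<HbckTableInfo> tablesWithErrors = new HashSet<>();

  @Override public void clear() { errors.clear(); tablesWithErrors.clear(); }
  @Override public void report(String message) { System.out.println(message); }
  @Override public void reportError(String message) { reportError(ERROR_CODE.UNKNOWN, message); }

  @Override public void reportError(ERROR_CODE code, String message) {
    errors.add(code);                        // remember the code for summarize()
    System.out.println("ERROR: " + message);
  }

  @Override public void reportError(ERROR_CODE code, String message, HbckTableInfo table) {
    tablesWithErrors.add(table);             // track which tables are affected
    reportError(code, message);
  }

  @Override public void reportError(ERROR_CODE code, String message, HbckTableInfo table,
      HbckRegionInfo info) {
    reportError(code, message, table);
  }

  @Override public void reportError(ERROR_CODE code, String message, HbckTableInfo table,
      HbckRegionInfo info1, HbckRegionInfo info2) {
    reportError(code, message, table);
  }

  @Override public int summarize() {
    System.out.println("Found " + errors.size() + " inconsistencies.");
    return errors.size();
  }

  @Override public void detail(String details) { System.out.println(details); }
  @Override public ArrayList<ERROR_CODE> getErrorList() { return errors; }
  @Override public void progress() { System.out.print("."); }
  @Override public void print(String message) { System.out.println(message); }
  @Override public void resetErrors() { errors.clear(); }
  @Override public boolean tableHasErrors(HbckTableInfo table) {
    return tablesWithErrors.contains(table);
  }
}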


@@ -0,0 +1,412 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
/**
* Maintain information about a particular region. It gathers information
* from three places -- HDFS, META, and region servers.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HbckRegionInfo implements KeyRange {
private static final Logger LOG = LoggerFactory.getLogger(HbckRegionInfo.class.getName());
private MetaEntry metaEntry = null; // info in META
private HdfsEntry hdfsEntry = null; // info in HDFS
private List<OnlineEntry> deployedEntries = Lists.newArrayList(); // on Region Server
private List<ServerName> deployedOn = Lists.newArrayList(); // info on RS's
private boolean skipChecks = false; // whether to skip further checks on this region info.
private boolean isMerged = false; // whether this region has already been merged into another one
private int deployedReplicaId = RegionInfo.DEFAULT_REPLICA_ID;
private RegionInfo primaryHRIForDeployedReplica = null;
public HbckRegionInfo(MetaEntry metaEntry) {
this.metaEntry = metaEntry;
}
public synchronized int getReplicaId() {
return metaEntry != null ? metaEntry.getReplicaId() : deployedReplicaId;
}
public synchronized void addServer(RegionInfo regionInfo, ServerName serverName) {
OnlineEntry rse = new OnlineEntry(regionInfo, serverName);
this.deployedEntries.add(rse);
this.deployedOn.add(serverName);
// save the replicaId that we see deployed in the cluster
this.deployedReplicaId = regionInfo.getReplicaId();
this.primaryHRIForDeployedReplica =
RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo);
}
@Override
public synchronized String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{ meta => ");
sb.append((metaEntry != null) ? metaEntry.getRegionNameAsString() : "null");
sb.append(", hdfs => " + getHdfsRegionDir());
sb.append(", deployed => " + Joiner.on(", ").join(deployedEntries));
sb.append(", replicaId => " + getReplicaId());
sb.append(" }");
return sb.toString();
}
@Override
public byte[] getStartKey() {
if (this.metaEntry != null) {
return this.metaEntry.getStartKey();
} else if (this.hdfsEntry != null) {
return this.hdfsEntry.hri.getStartKey();
} else {
LOG.error("Entry " + this + " has no meta or hdfs region start key.");
return null;
}
}
@Override
public byte[] getEndKey() {
if (this.metaEntry != null) {
return this.metaEntry.getEndKey();
} else if (this.hdfsEntry != null) {
return this.hdfsEntry.hri.getEndKey();
} else {
LOG.error("Entry " + this + " has no meta or hdfs region start key.");
return null;
}
}
public MetaEntry getMetaEntry() {
return this.metaEntry;
}
public void setMetaEntry(MetaEntry metaEntry) {
this.metaEntry = metaEntry;
}
public HdfsEntry getHdfsEntry() {
return this.hdfsEntry;
}
public void setHdfsEntry(HdfsEntry hdfsEntry) {
this.hdfsEntry = hdfsEntry;
}
public List<OnlineEntry> getOnlineEntries() {
return this.deployedEntries;
}
public List<ServerName> getDeployedOn() {
return this.deployedOn;
}
/**
* Read the .regioninfo file from the file system. If there is no
* .regioninfo, add it to the orphan hdfs region list.
*/
public void loadHdfsRegioninfo(Configuration conf) throws IOException {
Path regionDir = getHdfsRegionDir();
if (regionDir == null) {
if (getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
// Log warning only for default/primary replica with no region dir
LOG.warn("No HDFS region dir found: " + this + " meta=" + metaEntry);
}
return;
}
if (hdfsEntry.hri != null) {
// already loaded data
return;
}
FileSystem fs = FileSystem.get(conf);
RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
LOG.debug("RegionInfo read: " + hri.toString());
hdfsEntry.hri = hri;
}
public TableName getTableName() {
if (this.metaEntry != null) {
return this.metaEntry.getTable();
} else if (this.hdfsEntry != null) {
// we are only guaranteed to have a path and not an HRI for hdfsEntry,
// so we get the name from the Path
Path tableDir = this.hdfsEntry.regionDir.getParent();
return FSUtils.getTableName(tableDir);
} else {
// return the info from the first online/deployed hri
for (OnlineEntry e : deployedEntries) {
return e.getRegionInfo().getTable();
}
return null;
}
}
public String getRegionNameAsString() {
if (metaEntry != null) {
return metaEntry.getRegionNameAsString();
} else if (hdfsEntry != null) {
if (hdfsEntry.hri != null) {
return hdfsEntry.hri.getRegionNameAsString();
}
} else {
// return the info from the first online/deployed hri
for (OnlineEntry e : deployedEntries) {
return e.getRegionInfo().getRegionNameAsString();
}
}
return null;
}
public byte[] getRegionName() {
if (metaEntry != null) {
return metaEntry.getRegionName();
} else if (hdfsEntry != null) {
return hdfsEntry.hri.getRegionName();
} else {
// return the info from the first online/deployed hri
for (OnlineEntry e : deployedEntries) {
return e.getRegionInfo().getRegionName();
}
return null;
}
}
public RegionInfo getPrimaryHRIForDeployedReplica() {
return primaryHRIForDeployedReplica;
}
public Path getHdfsRegionDir() {
if (hdfsEntry == null) {
return null;
}
return hdfsEntry.regionDir;
}
public boolean containsOnlyHdfsEdits() {
if (hdfsEntry == null) {
return false;
}
return hdfsEntry.hdfsOnlyEdits;
}
public boolean isHdfsRegioninfoPresent() {
if (hdfsEntry == null) {
return false;
}
return hdfsEntry.hdfsRegioninfoFilePresent;
}
public long getModTime() {
if (hdfsEntry == null) {
return 0;
}
return hdfsEntry.regionDirModTime;
}
public RegionInfo getHdfsHRI() {
if (hdfsEntry == null) {
return null;
}
return hdfsEntry.hri;
}
public void setSkipChecks(boolean skipChecks) {
this.skipChecks = skipChecks;
}
public boolean isSkipChecks() {
return skipChecks;
}
public void setMerged(boolean isMerged) {
this.isMerged = isMerged;
}
public boolean isMerged() {
return this.isMerged;
}
/**
* Stores the regioninfo entries scanned from META
*/
public static class MetaEntry extends HRegionInfo {
ServerName regionServer; // server hosting this region
long modTime; // timestamp of the most recent metadata modification
RegionInfo splitA, splitB; // split daughters
public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime) {
this(rinfo, regionServer, modTime, null, null);
}
public MetaEntry(RegionInfo rinfo, ServerName regionServer, long modTime,
RegionInfo splitA, RegionInfo splitB) {
super(rinfo);
this.regionServer = regionServer;
this.modTime = modTime;
this.splitA = splitA;
this.splitB = splitB;
}
public ServerName getRegionServer() {
return this.regionServer;
}
@Override
public boolean equals(Object o) {
boolean superEq = super.equals(o);
if (!superEq) {
return superEq;
}
MetaEntry me = (MetaEntry) o;
if (!regionServer.equals(me.regionServer)) {
return false;
}
return (modTime == me.modTime);
}
@Override
public int hashCode() {
int hash = Arrays.hashCode(getRegionName());
hash = (int) (hash ^ getRegionId());
hash ^= Arrays.hashCode(getStartKey());
hash ^= Arrays.hashCode(getEndKey());
hash ^= Boolean.valueOf(isOffline()).hashCode();
hash ^= getTable().hashCode();
if (regionServer != null) {
hash ^= regionServer.hashCode();
}
hash = (int) (hash ^ modTime);
return hash;
}
}
/**
* Stores the regioninfo entries from HDFS
*/
public static class HdfsEntry {
RegionInfo hri;
Path regionDir = null;
long regionDirModTime = 0;
boolean hdfsRegioninfoFilePresent = false;
boolean hdfsOnlyEdits = false;
HdfsEntry() {
}
public HdfsEntry(Path regionDir, long regionDirModTime) {
this.regionDir = regionDir;
this.regionDirModTime = regionDirModTime;
}
}
/**
* Stores the regioninfo retrieved from Online region servers.
*/
static class OnlineEntry {
private RegionInfo regionInfo;
private ServerName serverName;
OnlineEntry(RegionInfo regionInfo, ServerName serverName) {
this.regionInfo = regionInfo;
this.serverName = serverName;
}
public RegionInfo getRegionInfo() {
return regionInfo;
}
public ServerName getServerName() {
return serverName;
}
@Override
public String toString() {
return serverName.toString() + ";" + regionInfo.getRegionNameAsString();
}
}
final static Comparator<HbckRegionInfo> COMPARATOR = new Comparator<HbckRegionInfo>() {
@Override
public int compare(HbckRegionInfo l, HbckRegionInfo r) {
if (l == r) {
// same instance
return 0;
}
int tableCompare = l.getTableName().compareTo(r.getTableName());
if (tableCompare != 0) {
return tableCompare;
}
int startComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare(
l.getStartKey(), r.getStartKey());
if (startComparison != 0) {
return startComparison;
}
// Special case for absolute endkey
byte[] endKey = r.getEndKey();
endKey = (endKey.length == 0) ? null : endKey;
byte[] endKey2 = l.getEndKey();
endKey2 = (endKey2.length == 0) ? null : endKey2;
int endComparison = RegionSplitCalculator.BYTES_COMPARATOR.compare(
endKey2, endKey);
if (endComparison != 0) {
return endComparison;
}
// use regionId as tiebreaker.
// Null is considered after all possible values so make it bigger.
if (l.getHdfsEntry() == null && r.getHdfsEntry() == null) {
return 0;
}
if (l.getHdfsEntry() == null && r.getHdfsEntry() != null) {
return 1;
}
// l.hdfsEntry must not be null
if (r.getHdfsEntry() == null) {
return -1;
}
// both l.hdfsEntry and r.hdfsEntry must not be null.
return Long.compare(l.getHdfsEntry().hri.getRegionId(), r.getHdfsEntry().hri.getRegionId());
}
};
}
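Editor's note: COMPARATOR orders regions by table name, then start key, then end key (an empty end key sorts after everything), and finally falls back to the HDFS regionId, with regions lacking an HdfsEntry sorting last. Below is a hedged sketch of that ordering; the table, keys, and server name are invented, and the snippet must live in org.apache.hadoop.hbase.util because COMPARATOR is package-private.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public class ComparatorDemo {
  static void demo() {
    TableName table = TableName.valueOf("t1");
    // Two adjacent regions of the same table: ["", "m") and ["m", "").
    RegionInfo lower = RegionInfoBuilder.newBuilder(table)
        .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("m")).build();
    RegionInfo upper = RegionInfoBuilder.newBuilder(table)
        .setStartKey(Bytes.toBytes("m")).setEndKey(HConstants.EMPTY_END_ROW).build();
    ServerName rs = ServerName.valueOf("rs1.example.org", 16020, 1L);
    HbckRegionInfo a = new HbckRegionInfo(new HbckRegionInfo.MetaEntry(lower, rs, 0));
    HbckRegionInfo b = new HbckRegionInfo(new HbckRegionInfo.MetaEntry(upper, rs, 0));
    // Same table, so start keys decide; the empty end key of 'b' would sort last anyway.
    assert HbckRegionInfo.COMPARATOR.compare(a, b) < 0;
  }
}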


@@ -0,0 +1,810 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.util;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
/**
* Maintain information about a particular table.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HbckTableInfo {
private static final Logger LOG = LoggerFactory.getLogger(HbckTableInfo.class.getName());
private static final String TO_BE_LOADED = "to_be_loaded";
TableName tableName;
TreeSet<ServerName> deployedOn;
// backwards regions
final List<HbckRegionInfo> backwards = new ArrayList<>();
// sidelined big overlapped regions
final Map<Path, HbckRegionInfo> sidelinedRegions = new HashMap<>();
// region split calculator
final RegionSplitCalculator<HbckRegionInfo> sc =
new RegionSplitCalculator<>(HbckRegionInfo.COMPARATOR);
// Histogram of different TableDescriptors found. Ideally there is only one!
final Set<TableDescriptor> htds = new HashSet<>();
// key = start split, values = set of splits in problem group
final Multimap<byte[], HbckRegionInfo> overlapGroups =
TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, HbckRegionInfo.COMPARATOR);
// list of regions derived from meta entries.
private ImmutableList<RegionInfo> regionsFromMeta = null;
HBaseFsck hbck;
HbckTableInfo(TableName name, HBaseFsck hbck) {
this.tableName = name;
this.hbck = hbck;
deployedOn = new TreeSet<>();
}
/**
* @return the descriptor common to all regions, or null if there are none or multiple
*/
TableDescriptor getTableDescriptor() {
if (htds.size() == 1) {
return (TableDescriptor)htds.toArray()[0];
} else {
LOG.error("None/Multiple table descriptors found for table '"
+ tableName + "' regions: " + htds);
}
return null;
}
public void addRegionInfo(HbckRegionInfo hir) {
if (Bytes.equals(hir.getEndKey(), HConstants.EMPTY_END_ROW)) {
// end key is absolute end key, just add it.
// ignore replicas other than primary for these checks
if (hir.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
sc.add(hir);
}
return;
}
// if not the absolute end key, check for cycle
if (Bytes.compareTo(hir.getStartKey(), hir.getEndKey()) > 0) {
hbck.getErrors().reportError(HbckErrorReporter.ERROR_CODE.REGION_CYCLE, String.format(
"The endkey for this region comes before the " + "startkey, startkey=%s, endkey=%s",
Bytes.toStringBinary(hir.getStartKey()), Bytes.toStringBinary(hir.getEndKey())), this,
hir);
backwards.add(hir);
return;
}
// main case, add to split calculator
// ignore replicas other than primary for these checks
if (hir.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
sc.add(hir);
}
}
public void addServer(ServerName server) {
this.deployedOn.add(server);
}
public TableName getName() {
return tableName;
}
public int getNumRegions() {
return sc.getStarts().size() + backwards.size();
}
public synchronized ImmutableList<RegionInfo> getRegionsFromMeta(
TreeMap<String, HbckRegionInfo> regionInfoMap) {
// lazy loaded, synchronized to ensure a single load
if (regionsFromMeta == null) {
List<RegionInfo> regions = new ArrayList<>();
for (HbckRegionInfo h : regionInfoMap.values()) {
if (tableName.equals(h.getTableName())) {
if (h.getMetaEntry() != null) {
regions.add(h.getMetaEntry());
}
}
}
regionsFromMeta = Ordering.from(RegionInfo.COMPARATOR).immutableSortedCopy(regions);
}
return regionsFromMeta;
}
class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl {
HbckErrorReporter errors;
IntegrityFixSuggester(HbckTableInfo ti, HbckErrorReporter errors) {
this.errors = errors;
setTableInfo(ti);
}
@Override
public void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
"First region should start with an empty key. You need to "
+ " create a new region and regioninfo in HDFS to plug the hole.",
getTableInfo(), hi);
}
@Override
public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
"Last region should end with an empty key. You need to "
+ "create a new region and regioninfo in HDFS to plug the hole.", getTableInfo());
}
@Override
public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.DEGENERATE_REGION,
"Region has the same start and end key.", getTableInfo(), hi);
}
@Override
public void handleDuplicateStartKeys(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException {
byte[] key = r1.getStartKey();
// dup start key
errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
"Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(),
r1);
errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
"Multiple regions have the same startkey: " + Bytes.toStringBinary(key), getTableInfo(),
r2);
}
@Override
public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) throws IOException {
byte[] key = r1.getStartKey();
// dup start key
errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS,
"Multiple regions have the same regionID: "
+ Bytes.toStringBinary(key), getTableInfo(), r1);
errors.reportError(HbckErrorReporter.ERROR_CODE.DUPE_ENDKEYS,
"Multiple regions have the same regionID: "
+ Bytes.toStringBinary(key), getTableInfo(), r2);
}
@Override
public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2)
throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
"There is an overlap in the region chain.", getTableInfo(), hi1, hi2);
}
@Override
public void handleHoleInRegionChain(byte[] holeStart, byte[] holeStop) throws IOException {
errors.reportError(
HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN,
"There is a hole in the region chain between "
+ Bytes.toStringBinary(holeStart) + " and "
+ Bytes.toStringBinary(holeStop)
+ ". You need to create a new .regioninfo and region "
+ "dir in hdfs to plug the hole.");
}
}
/**
* This handler fixes integrity errors from hdfs information. There are
* basically three classes of integrity problems: 1) holes, 2) overlaps, and
* 3) invalid regions.
*
* This class overrides methods that fix holes and the overlap group case.
* Individual cases of particular overlaps are handled by the general
* overlap group merge repair case.
*
* If hbase is online, this forces regions offline before doing merge
* operations.
*/
class HDFSIntegrityFixer extends IntegrityFixSuggester {
Configuration conf;
boolean fixOverlaps = true;
HDFSIntegrityFixer(HbckTableInfo ti, HbckErrorReporter errors, Configuration conf,
boolean fixHoles, boolean fixOverlaps) {
super(ti, errors);
this.conf = conf;
this.fixOverlaps = fixOverlaps;
// TODO properly use fixHoles
}
/**
* This is a special case hole -- when the first region of a table is
* missing from META, HBase doesn't acknowledge the existence of the
* table.
*/
@Override
public void handleRegionStartKeyNotEmpty(HbckRegionInfo next) throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
"First region should start with an empty key. Creating a new " +
"region and regioninfo in HDFS to plug the hole.",
getTableInfo(), next);
TableDescriptor htd = getTableInfo().getTableDescriptor();
// from special EMPTY_START_ROW to next region's startKey
RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
.setStartKey(HConstants.EMPTY_START_ROW)
.setEndKey(next.getStartKey())
.build();
// TODO test
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Table region start key was not empty. Created new empty region: "
+ newRegion + " " +region);
hbck.fixes++;
}
@Override
public void handleRegionEndKeyNotEmpty(byte[] curEndKey) throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
"Last region should end with an empty key. Creating a new "
+ "region and regioninfo in HDFS to plug the hole.", getTableInfo());
TableDescriptor htd = getTableInfo().getTableDescriptor();
// from curEndKey to EMPTY_START_ROW
RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
.setStartKey(curEndKey)
.setEndKey(HConstants.EMPTY_START_ROW)
.build();
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Table region end key was not empty. Created new empty region: " + newRegion
+ " " + region);
hbck.fixes++;
}
/**
* There is a hole in the hdfs regions that violates the table integrity
* rules. Create a new empty region that patches the hole.
*/
@Override
public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey)
throws IOException {
errors.reportError(HbckErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN,
"There is a hole in the region chain between " + Bytes.toStringBinary(holeStartKey) +
" and " + Bytes.toStringBinary(holeStopKey) +
". Creating a new regioninfo and region " + "dir in hdfs to plug the hole.");
TableDescriptor htd = getTableInfo().getTableDescriptor();
RegionInfo newRegion =
RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(holeStartKey)
.setEndKey(holeStopKey).build();
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Plugged hole by creating new empty region: " + newRegion + " " + region);
hbck.fixes++;
}
/**
* This takes set of overlapping regions and merges them into a single
* region. This covers cases like degenerate regions, shared start key,
* general overlaps, duplicate ranges, and partial overlapping regions.
*
* Cases:
* - Clean regions that overlap
* - Only .oldlogs regions (can't find start/stop range, or figure out)
*
* This is basically threadsafe, except for the fixer increment in mergeOverlaps.
*/
@Override
public void handleOverlapGroup(Collection<HbckRegionInfo> overlap)
throws IOException {
Preconditions.checkNotNull(overlap);
Preconditions.checkArgument(overlap.size() > 0);
if (!this.fixOverlaps) {
LOG.warn("Not attempting to repair overlaps.");
return;
}
if (overlap.size() > hbck.getMaxMerge()) {
LOG.warn("Overlap group has " + overlap.size() + " overlapping " +
"regions which is greater than " + hbck.getMaxMerge() +
", the max number of regions to merge");
if (hbck.shouldSidelineBigOverlaps()) {
// we only sideline big overlapped groups that exceed the max number of regions to merge
sidelineBigOverlaps(overlap);
}
return;
}
if (hbck.shouldRemoveParents()) {
removeParentsAndFixSplits(overlap);
}
mergeOverlaps(overlap);
}
void removeParentsAndFixSplits(Collection<HbckRegionInfo> overlap) throws IOException {
Pair<byte[], byte[]> range = null;
HbckRegionInfo parent = null;
HbckRegionInfo daughterA = null;
HbckRegionInfo daughterB = null;
Collection<HbckRegionInfo> daughters = new ArrayList<HbckRegionInfo>(overlap);
String thread = Thread.currentThread().getName();
LOG.info("== [" + thread + "] Attempting fix splits in overlap state.");
// we can only handle a single split per group at a time
if (overlap.size() > 3) {
LOG.info("Too many overlaps were found on this group, falling back to regular merge.");
return;
}
for (HbckRegionInfo hi : overlap) {
if (range == null) {
range = new Pair<byte[], byte[]>(hi.getStartKey(), hi.getEndKey());
} else {
if (RegionSplitCalculator.BYTES_COMPARATOR
.compare(hi.getStartKey(), range.getFirst()) < 0) {
range.setFirst(hi.getStartKey());
}
if (RegionSplitCalculator.BYTES_COMPARATOR
.compare(hi.getEndKey(), range.getSecond()) > 0) {
range.setSecond(hi.getEndKey());
}
}
}
LOG.info("This group range is [" + Bytes.toStringBinary(range.getFirst()) + ", "
+ Bytes.toStringBinary(range.getSecond()) + "]");
// attempt to find a possible parent for the edge case of a split
for (HbckRegionInfo hi : overlap) {
if (Bytes.compareTo(hi.getHdfsHRI().getStartKey(), range.getFirst()) == 0
&& Bytes.compareTo(hi.getHdfsHRI().getEndKey(), range.getSecond()) == 0) {
LOG.info("This is a parent for this group: " + hi.toString());
parent = hi;
}
}
// We need a parent to fix a split; fall back to a regular merge otherwise.
if (parent == null) {
return;
}
// Remove the parent region from the daughters collection
daughters.remove(parent);
// Let's verify that the daughters share the regionID at split time and that
// they were created after the parent
for (HbckRegionInfo hi : daughters) {
if (Bytes.compareTo(hi.getHdfsHRI().getStartKey(), range.getFirst()) == 0) {
if (parent.getHdfsHRI().getRegionId() < hi.getHdfsHRI().getRegionId()) {
daughterA = hi;
}
}
if (Bytes.compareTo(hi.getHdfsHRI().getEndKey(), range.getSecond()) == 0) {
if (parent.getHdfsHRI().getRegionId() < hi.getHdfsHRI().getRegionId()) {
daughterB = hi;
}
}
}
// both daughters must have been found and share the same regionID at split time
if (daughterA == null || daughterB == null
|| daughterA.getHdfsHRI().getRegionId() != daughterB.getHdfsHRI().getRegionId()) {
return;
}
FileSystem fs = FileSystem.get(conf);
LOG.info("Found parent: " + parent.getRegionNameAsString());
LOG.info("Found potential daughter a: " + daughterA.getRegionNameAsString());
LOG.info("Found potential daughter b: " + daughterB.getRegionNameAsString());
LOG.info("Trying to fix parent in overlap by removing the parent.");
try {
hbck.closeRegion(parent);
} catch (IOException ioe) {
LOG.warn("Parent region could not be closed, continuing with regular merge...", ioe);
return;
} catch (InterruptedException ie) {
LOG.warn("Parent region could not be closed, continuing with regular merge...", ie);
return;
}
try {
hbck.offline(parent.getRegionName());
} catch (IOException ioe) {
LOG.warn("Unable to offline parent region: " + parent.getRegionNameAsString()
+ ". Just continuing with regular merge... ", ioe);
return;
}
try {
HBaseFsckRepair.removeParentInMeta(conf, parent.getHdfsHRI());
} catch (IOException ioe) {
LOG.warn("Unable to remove parent region in META: " + parent.getRegionNameAsString()
+ ". Just continuing with regular merge... ", ioe);
return;
}
hbck.sidelineRegionDir(fs, parent);
LOG.info(
"[" + thread + "] Sidelined parent region dir " + parent.getHdfsRegionDir() + " into " +
hbck.getSidelineDir());
hbck.debugLsr(parent.getHdfsRegionDir());
// Make sure we don't have the parents and daughters around
overlap.remove(parent);
overlap.remove(daughterA);
overlap.remove(daughterB);
LOG.info("Done fixing split.");
}
void mergeOverlaps(Collection<HbckRegionInfo> overlap)
throws IOException {
String thread = Thread.currentThread().getName();
LOG.info("== [" + thread + "] Merging regions into one region: "
+ Joiner.on(",").join(overlap));
// get the min / max range and close all concerned regions
Pair<byte[], byte[]> range = null;
for (HbckRegionInfo hi : overlap) {
if (range == null) {
range = new Pair<>(hi.getStartKey(), hi.getEndKey());
} else {
if (RegionSplitCalculator.BYTES_COMPARATOR
.compare(hi.getStartKey(), range.getFirst()) < 0) {
range.setFirst(hi.getStartKey());
}
if (RegionSplitCalculator.BYTES_COMPARATOR
.compare(hi.getEndKey(), range.getSecond()) > 0) {
range.setSecond(hi.getEndKey());
}
}
// need to close files so delete can happen.
LOG.debug("[" + thread + "] Closing region before moving data around: " + hi);
LOG.debug("[" + thread + "] Contained region dir before close");
hbck.debugLsr(hi.getHdfsRegionDir());
try {
LOG.info("[" + thread + "] Closing region: " + hi);
hbck.closeRegion(hi);
} catch (IOException ioe) {
LOG.warn("[" + thread + "] Was unable to close region " + hi
+ ". Just continuing... ", ioe);
} catch (InterruptedException e) {
LOG.warn("[" + thread + "] Was unable to close region " + hi
+ ". Just continuing... ", e);
}
try {
LOG.info("[" + thread + "] Offlining region: " + hi);
hbck.offline(hi.getRegionName());
} catch (IOException ioe) {
LOG.warn("[" + thread + "] Unable to offline region from master: " + hi
+ ". Just continuing... ", ioe);
}
}
// create new empty container region.
TableDescriptor htd = getTableInfo().getTableDescriptor();
// from start key to end Key
RegionInfo newRegion = RegionInfoBuilder.newBuilder(htd.getTableName())
.setStartKey(range.getFirst())
.setEndKey(range.getSecond())
.build();
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("[" + thread + "] Created new empty container region: " +
newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
hbck.debugLsr(region.getRegionFileSystem().getRegionDir());
// all target regions are closed, should be able to safely cleanup.
boolean didFix = false;
Path target = region.getRegionFileSystem().getRegionDir();
for (HbckRegionInfo contained : overlap) {
LOG.info("[" + thread + "] Merging " + contained + " into " + target);
int merges = hbck.mergeRegionDirs(target, contained);
if (merges > 0) {
didFix = true;
}
}
if (didFix) {
hbck.fixes++;
}
}
/**
* Sideline some regions in a big overlap group so that it
* will have fewer regions, and it is easier to merge them later on.
*
* @param bigOverlap the overlap group containing more regions than maxMerge
*/
void sidelineBigOverlaps(Collection<HbckRegionInfo> bigOverlap) throws IOException {
int overlapsToSideline = bigOverlap.size() - hbck.getMaxMerge();
if (overlapsToSideline > hbck.getMaxOverlapsToSideline()) {
overlapsToSideline = hbck.getMaxOverlapsToSideline();
}
List<HbckRegionInfo> regionsToSideline =
RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline);
FileSystem fs = FileSystem.get(conf);
for (HbckRegionInfo regionToSideline: regionsToSideline) {
try {
LOG.info("Closing region: " + regionToSideline);
hbck.closeRegion(regionToSideline);
} catch (IOException ioe) {
LOG.warn("Was unable to close region " + regionToSideline
+ ". Just continuing... ", ioe);
} catch (InterruptedException e) {
LOG.warn("Was unable to close region " + regionToSideline
+ ". Just continuing... ", e);
}
try {
LOG.info("Offlining region: " + regionToSideline);
hbck.offline(regionToSideline.getRegionName());
} catch (IOException ioe) {
LOG.warn("Unable to offline region from master: " + regionToSideline
+ ". Just continuing... ", ioe);
}
LOG.info("Before sideline big overlapped region: " + regionToSideline.toString());
Path sidelineRegionDir = hbck.sidelineRegionDir(fs, TO_BE_LOADED, regionToSideline);
if (sidelineRegionDir != null) {
sidelinedRegions.put(sidelineRegionDir, regionToSideline);
LOG.info("After sidelined big overlapped region: "
+ regionToSideline.getRegionNameAsString()
+ " to " + sidelineRegionDir.toString());
hbck.fixes++;
}
}
}
}
/**
* Check the region chain (from META) of this table. We are looking for
* holes, overlaps, and cycles.
* @return false if there are errors
*/
public boolean checkRegionChain(TableIntegrityErrorHandler handler) throws IOException {
// When a table is disabled there is no need to check the region chain. If some of its
// regions were accidentally deployed, the code below might report issues such as missing
// start or end regions or a hole in the chain, and may try to fix them, which is unwanted.
if (hbck.isTableDisabled(this.tableName)) {
return true;
}
int originalErrorsCount = hbck.getErrors().getErrorList().size();
Multimap<byte[], HbckRegionInfo> regions = sc.calcCoverage();
SortedSet<byte[]> splits = sc.getSplits();
byte[] prevKey = null;
byte[] problemKey = null;
if (splits.isEmpty()) {
// no region for this table
handler.handleHoleInRegionChain(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
}
for (byte[] key : splits) {
Collection<HbckRegionInfo> ranges = regions.get(key);
if (prevKey == null && !Bytes.equals(key, HConstants.EMPTY_BYTE_ARRAY)) {
for (HbckRegionInfo rng : ranges) {
handler.handleRegionStartKeyNotEmpty(rng);
}
}
// check for degenerate ranges
for (HbckRegionInfo rng : ranges) {
// special endkey case converts '' to null
byte[] endKey = rng.getEndKey();
endKey = (endKey.length == 0) ? null : endKey;
if (Bytes.equals(rng.getStartKey(),endKey)) {
handler.handleDegenerateRegion(rng);
}
}
if (ranges.size() == 1) {
// this split key is ok -- no overlap, not a hole.
if (problemKey != null) {
LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
}
problemKey = null; // fell through, no more problem.
} else if (ranges.size() > 1) {
// set the new problem key group name, if already have problem key, just
// keep using it.
if (problemKey == null) {
// only for overlap regions.
LOG.warn("Naming new problem group: " + Bytes.toStringBinary(key));
problemKey = key;
}
overlapGroups.putAll(problemKey, ranges);
// record errors
ArrayList<HbckRegionInfo> subRange = new ArrayList<>(ranges);
// this is dumb and O(n^2), but it shouldn't happen often
for (HbckRegionInfo r1 : ranges) {
if (r1.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
continue;
}
subRange.remove(r1);
for (HbckRegionInfo r2 : subRange) {
if (r2.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
continue;
}
// general case of same start key
if (Bytes.compareTo(r1.getStartKey(), r2.getStartKey())==0) {
handler.handleDuplicateStartKeys(r1,r2);
} else if (Bytes.compareTo(r1.getEndKey(), r2.getStartKey())==0 &&
r1.getHdfsHRI().getRegionId() == r2.getHdfsHRI().getRegionId()) {
LOG.info("this is a split, log to splits");
handler.handleSplit(r1, r2);
} else {
// overlap
handler.handleOverlapInRegionChain(r1, r2);
}
}
}
} else if (ranges.isEmpty()) {
if (problemKey != null) {
LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
}
problemKey = null;
byte[] holeStopKey = sc.getSplits().higher(key);
// if higher key is null we reached the top.
if (holeStopKey != null) {
// hole
handler.handleHoleInRegionChain(key, holeStopKey);
}
}
prevKey = key;
}
// When the last region of a table is proper and has an empty end key, 'prevKey'
// will be null.
if (prevKey != null) {
handler.handleRegionEndKeyNotEmpty(prevKey);
}
// TODO fold this into the TableIntegrityHandler
if (hbck.getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) {
boolean ok = handleOverlapsParallel(handler, prevKey);
if (!ok) {
return false;
}
} else {
for (Collection<HbckRegionInfo> overlap : overlapGroups.asMap().values()) {
handler.handleOverlapGroup(overlap);
}
}
if (hbck.shouldDisplayFullReport()) {
// do full region split map dump
hbck.getErrors().print("---- Table '" + this.tableName
+ "': region split map");
dump(splits, regions);
hbck.getErrors().print("---- Table '" + this.tableName
+ "': overlap groups");
dumpOverlapProblems(overlapGroups);
hbck.getErrors().print("There are " + overlapGroups.keySet().size()
+ " overlap groups with " + overlapGroups.size()
+ " overlapping regions");
}
if (!sidelinedRegions.isEmpty()) {
LOG.warn("Sidelined big overlapped regions, please bulk load them!");
hbck.getErrors().print("---- Table '" + this.tableName
+ "': sidelined big overlapped regions");
dumpSidelinedRegions(sidelinedRegions);
}
return hbck.getErrors().getErrorList().size() == originalErrorsCount;
}
private boolean handleOverlapsParallel(TableIntegrityErrorHandler handler, byte[] prevKey)
throws IOException {
// we parallelize the overlap handler for the case where we have lots of groups to fix; we
// can safely assume each group is independent.
List<HBaseFsck.WorkItemOverlapMerge> merges = new ArrayList<>(overlapGroups.size());
List<Future<Void>> rets;
for (Collection<HbckRegionInfo> overlap : overlapGroups.asMap().values()) {
merges.add(new HBaseFsck.WorkItemOverlapMerge(overlap, handler));
}
try {
rets = hbck.executor.invokeAll(merges);
} catch (InterruptedException e) {
LOG.error("Overlap merges were interrupted", e);
return false;
}
for(int i=0; i<merges.size(); i++) {
HBaseFsck.WorkItemOverlapMerge work = merges.get(i);
Future<Void> f = rets.get(i);
try {
f.get();
} catch(ExecutionException e) {
LOG.warn("Failed to merge overlap group" + work, e.getCause());
} catch (InterruptedException e) {
LOG.error("Waiting for overlap merges was interrupted", e);
return false;
}
}
return true;
}
/**
* This dumps data in a visually reasonable way for debugging
*/
private void dump(SortedSet<byte[]> splits, Multimap<byte[], HbckRegionInfo> regions) {
// we display this way because the last end key should be displayed as well.
StringBuilder sb = new StringBuilder();
for (byte[] k : splits) {
sb.setLength(0); // clear out existing buffer, if any.
sb.append(Bytes.toStringBinary(k) + ":\t");
for (HbckRegionInfo r : regions.get(k)) {
sb.append("[ "+ r.toString() + ", "
+ Bytes.toStringBinary(r.getEndKey())+ "]\t");
}
hbck.getErrors().print(sb.toString());
}
}
private void dumpOverlapProblems(Multimap<byte[], HbckRegionInfo> regions) {
// we display this way because the last end key should be displayed as
// well.
for (byte[] k : regions.keySet()) {
hbck.getErrors().print(Bytes.toStringBinary(k) + ":");
for (HbckRegionInfo r : regions.get(k)) {
hbck.getErrors().print("[ " + r.toString() + ", "
+ Bytes.toStringBinary(r.getEndKey()) + "]");
}
hbck.getErrors().print("----");
}
}
private void dumpSidelinedRegions(Map<Path, HbckRegionInfo> regions) {
for (Map.Entry<Path, HbckRegionInfo> entry : regions.entrySet()) {
TableName tableName = entry.getValue().getTableName();
Path path = entry.getKey();
hbck.getErrors().print("This sidelined region dir should be bulk loaded: " + path.toString());
hbck.getErrors().print("Bulk load command looks like: " + LoadIncrementalHFiles.NAME + " " +
path.toUri().getPath() + " " + tableName);
}
}
}
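Editor's note: checkRegionChain() is the consumer of all these callbacks; it walks the split points computed by the RegionSplitCalculator and fires one handler callback per hole, overlap, degenerate region, or split. As a hedged illustration (not part of this commit; assumes a tableInfo populated by a running HBaseFsck), a read-only handler that merely counts holes might look like:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.util.HbckRegionInfo;
import org.apache.hadoop.hbase.util.HbckTableInfo;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;

public class RegionChainAudit {
  // Counts holes instead of repairing them; every other callback stays a no-op.
  static boolean audit(HbckTableInfo tableInfo) throws IOException {
    final AtomicInteger holes = new AtomicInteger();
    TableIntegrityErrorHandler handler = new TableIntegrityErrorHandlerImpl() {
      @Override
      public void handleHoleInRegionChain(byte[] holeStart, byte[] holeStop) {
        holes.incrementAndGet();
      }
      @Override
      public void handleSplit(HbckRegionInfo r1, HbckRegionInfo r2) {
        // no-op; overridden defensively in case the base class provides no default
      }
    };
    handler.setTableInfo(tableInfo);
    boolean consistent = tableInfo.checkRegionChain(handler);
    System.out.println(holes.get() + " hole(s) found; chain consistent? " + consistent);
    return consistent;
  }
}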


@@ -31,6 +31,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.hbase.util.HbckErrorReporter;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -48,7 +49,6 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.FSUtils.FamilyDirFilter;
 import org.apache.hadoop.hbase.util.FSUtils.HFileFilter;
 import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
 /**
  * This class marches through all of the region's hfiles and verifies that
@@ -523,7 +523,7 @@ public class HFileCorruptionChecker {
    * Print a human readable summary of hfile quarantining operations.
    * @param out
    */
-  public void report(ErrorReporter out) {
+  public void report(HbckErrorReporter out) {
     out.print("Checked " + hfilesChecked.get() + " hfile for corruption");
     out.print(" HFiles corrupted: " + corrupted.size());
     if (inQuarantineMode) {


@@ -30,8 +30,7 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerStorage;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
-import org.apache.hadoop.hbase.util.HBaseFsck;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
+import org.apache.hadoop.hbase.util.HbckErrorReporter;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -45,7 +44,7 @@ public class ReplicationChecker {
   private static final Logger LOG = LoggerFactory.getLogger(ReplicationChecker.class);
-  private final ErrorReporter errorReporter;
+  private final HbckErrorReporter errorReporter;
   // replicator with its queueIds for removed peers
   private Map<ServerName, List<String>> undeletedQueueIds = new HashMap<>();
   // replicator with its undeleted queueIds for removed peers in hfile-refs queue
@@ -54,7 +53,7 @@ public class ReplicationChecker {
   private final ReplicationPeerStorage peerStorage;
   private final ReplicationQueueStorage queueStorage;
-  public ReplicationChecker(Configuration conf, ZKWatcher zkw, ErrorReporter errorReporter) {
+  public ReplicationChecker(Configuration conf, ZKWatcher zkw, HbckErrorReporter errorReporter) {
     this.peerStorage = ReplicationStorageFactory.getReplicationPeerStorage(zkw, conf);
     this.queueStorage = ReplicationStorageFactory.getReplicationQueueStorage(zkw, conf);
     this.errorReporter = errorReporter;
@@ -62,7 +61,7 @@ public class ReplicationChecker {
   public boolean hasUnDeletedQueues() {
     return errorReporter.getErrorList()
-        .contains(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE);
+        .contains(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE);
   }
   private Map<ServerName, List<String>> getUnDeletedQueues() throws ReplicationException {
@@ -104,16 +103,14 @@ public class ReplicationChecker {
       String msg = "Undeleted replication queue for removed peer found: " +
         String.format("[removedPeerId=%s, replicator=%s, queueId=%s]", queueInfo.getPeerId(),
           replicator, queueId);
-      errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE,
-        msg);
+      errorReporter.reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg);
     });
   });
   undeletedHFileRefsPeerIds = getUndeletedHFileRefsPeers();
-  undeletedHFileRefsPeerIds.stream()
-    .map(
-      peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found")
+  undeletedHFileRefsPeerIds.stream().map(
+    peerId -> "Undeleted replication hfile-refs queue for removed peer " + peerId + " found")
     .forEach(msg -> errorReporter
-      .reportError(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg));
+      .reportError(HbckErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE, msg));
 }
 public void fixUnDeletedQueues() throws ReplicationException {
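Editor's note: for context, HBaseFsck drives this checker roughly as sketched below. The conf, zkw, errorReporter, and shouldFixReplication names are assumptions, as is checkUnDeletedQueues() as the entry point for the scan shown above.

// Hedged sketch of the wiring, not part of this excerpt.
ReplicationChecker checker = new ReplicationChecker(conf, zkw, errorReporter);
checker.checkUnDeletedQueues();   // assumed entry point that populates the queue maps
if (checker.hasUnDeletedQueues() && shouldFixReplication) {
  checker.fixUnDeletedQueues();   // drop queues left behind by removed peers
}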


@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.util.hbck;
 import java.io.IOException;
 import java.util.Collection;
-import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
-import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
+import org.apache.hadoop.hbase.util.HbckRegionInfo;
+import org.apache.hadoop.hbase.util.HbckTableInfo;
 import org.apache.yetus.audience.InterfaceAudience;
 /**
@@ -33,22 +33,22 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public interface TableIntegrityErrorHandler {
-  TableInfo getTableInfo();
+  HbckTableInfo getTableInfo();
   /**
    * Set the TableInfo used by all HRegionInfos fabricated by other callbacks
    */
-  void setTableInfo(TableInfo ti);
+  void setTableInfo(HbckTableInfo ti);
   /**
    * Callback for handling case where a Table has a first region that does not
    * have an empty start key.
    *
-   * @param hi An HbckInfo of the second region in a table. This should have
+   * @param hi An HbckRegionInfo of the second region in a table. This should have
    *   a non-empty startkey, and can be used to fabricate a first region that
    *   has an empty start key.
    */
-  void handleRegionStartKeyNotEmpty(HbckInfo hi) throws IOException;
+  void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException;
   /**
    * Callback for handling case where a Table has a last region that does not
@@ -62,35 +62,35 @@ public interface TableIntegrityErrorHandler {
   /**
    * Callback for handling a region that has the same start and end key.
    *
-   * @param hi An HbckInfo for a degenerate key.
+   * @param hi An HbckRegionInfo for a degenerate key.
    */
-  void handleDegenerateRegion(HbckInfo hi) throws IOException;
+  void handleDegenerateRegion(HbckRegionInfo hi) throws IOException;
   /**
    * Callback for handling two regions that have the same start key. This is
    * a specific case of a region overlap.
-   * @param hi1 one of the overlapping HbckInfo
-   * @param hi2 the other overlapping HbckInfo
+   * @param hi1 one of the overlapping HbckRegionInfo
+   * @param hi2 the other overlapping HbckRegionInfo
    */
-  void handleDuplicateStartKeys(HbckInfo hi1, HbckInfo hi2) throws IOException;
+  void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException;
   /**
    * Callback for handling two regions that have the same regionID,
    * a specific case of a split.
-   * @param hi1 one of the overlapping HbckInfo
-   * @param hi2 the other overlapping HbckInfo
+   * @param hi1 one of the overlapping HbckRegionInfo
+   * @param hi2 the other overlapping HbckRegionInfo
    */
-  void handleSplit(HbckInfo hi1, HbckInfo hi2) throws IOException;
+  void handleSplit(HbckRegionInfo hi1, HbckRegionInfo hi2) throws IOException;
   /**
    * Callback for handling two regions that overlap in some arbitrary way.
    * This is a specific case of region overlap, and called for each possible
    * pair. If two regions have the same start key, the handleDuplicateStartKeys
    * method is called.
-   * @param hi1 one of the overlapping HbckInfo
-   * @param hi2 the other overlapping HbckInfo
+   * @param hi1 one of the overlapping HbckRegionInfo
+   * @param hi2 the other overlapping HbckRegionInfo
    */
-  void handleOverlapInRegionChain(HbckInfo hi1, HbckInfo hi2)
+  void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2)
     throws IOException;
   /**
@@ -106,5 +106,5 @@ public interface TableIntegrityErrorHandler {
    * Callback for handling a group of regions that overlap.
    * @param overlap Collection of overlapping regions.
    */
-  void handleOverlapGroup(Collection<HbckInfo> overlap) throws IOException;
+  void handleOverlapGroup(Collection<HbckRegionInfo> overlap) throws IOException;
 }


@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.util.hbck;
import java.io.IOException; import java.io.IOException;
import java.util.Collection; import java.util.Collection;
import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo; import org.apache.hadoop.hbase.util.HbckRegionInfo;
import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo; import org.apache.hadoop.hbase.util.HbckTableInfo;
import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceAudience;
/** /**
@ -30,13 +30,13 @@ import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private @InterfaceAudience.Private
abstract public class TableIntegrityErrorHandlerImpl implements abstract public class TableIntegrityErrorHandlerImpl implements
TableIntegrityErrorHandler { TableIntegrityErrorHandler {
TableInfo ti; HbckTableInfo ti;
/** /**
* {@inheritDoc} * {@inheritDoc}
*/ */
@Override @Override
public TableInfo getTableInfo() { public HbckTableInfo getTableInfo() {
return ti; return ti;
} }
@ -44,7 +44,7 @@ abstract public class TableIntegrityErrorHandlerImpl implements
* {@inheritDoc} * {@inheritDoc}
*/ */
@Override @Override
public void setTableInfo(TableInfo ti2) { public void setTableInfo(HbckTableInfo ti2) {
this.ti = ti2; this.ti = ti2;
} }
@ -52,7 +52,7 @@ abstract public class TableIntegrityErrorHandlerImpl implements
* {@inheritDoc} * {@inheritDoc}
*/ */
@Override @Override
public void handleRegionStartKeyNotEmpty(HbckInfo hi) throws IOException { public void handleRegionStartKeyNotEmpty(HbckRegionInfo hi) throws IOException {
} }
/** /**
@@ -66,14 +66,14 @@ abstract public class TableIntegrityErrorHandlerImpl implements
    * {@inheritDoc}
    */
   @Override
-  public void handleDegenerateRegion(HbckInfo hi) throws IOException {
+  public void handleDegenerateRegion(HbckRegionInfo hi) throws IOException {
   }
 
   /**
    * {@inheritDoc}
    */
   @Override
-  public void handleDuplicateStartKeys(HbckInfo hi1, HbckInfo hi2)
+  public void handleDuplicateStartKeys(HbckRegionInfo hi1, HbckRegionInfo hi2)
       throws IOException {
   }
@@ -81,7 +81,7 @@ abstract public class TableIntegrityErrorHandlerImpl implements
    * {@inheritDoc}
    */
   @Override
-  public void handleOverlapInRegionChain(HbckInfo hi1, HbckInfo hi2)
+  public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2)
       throws IOException {
   }
@@ -97,7 +97,7 @@ abstract public class TableIntegrityErrorHandlerImpl implements
    * {@inheritDoc}
    */
   @Override
-  public void handleOverlapGroup(Collection<HbckInfo> overlap)
+  public void handleOverlapGroup(Collection<HbckRegionInfo> overlap)
       throws IOException {
   }
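
Because the abstract base keeps every callback as a no-op, a concrete handler only overrides the cases it wants to act on. A minimal sketch, assuming the post-refactor types from this patch; the subclass name and the logging are illustrative, not part of the commit.

import java.io.IOException;
import org.apache.hadoop.hbase.util.HbckRegionInfo;

// Hypothetical subclass: reacts only to overlaps, inherits no-ops for the rest.
public class LoggingIntegrityHandler extends TableIntegrityErrorHandlerImpl {
  @Override
  public void handleOverlapInRegionChain(HbckRegionInfo hi1, HbckRegionInfo hi2)
      throws IOException {
    // getTableInfo() returns the HbckTableInfo set earlier via setTableInfo(...).
    System.out.println("Overlap in " + getTableInfo() + ": " + hi1 + " / " + hi2);
  }
}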


@@ -54,7 +54,7 @@ import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HBaseFsck;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
+import org.apache.hadoop.hbase.util.HbckErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.HBaseFsckRepair;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;


@@ -65,9 +65,6 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.mob.MobFileName;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
-import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
-import org.apache.hadoop.hbase.util.HBaseFsck.TableInfo;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.zookeeper.KeeperException;
 import org.junit.rules.TestName;
@@ -462,7 +459,7 @@ public class BaseTestHBaseFsck {
   }
 
-  static class MockErrorReporter implements ErrorReporter {
+  static class MockErrorReporter implements HbckErrorReporter {
     static int calledCount = 0;
 
     @Override
@@ -486,19 +483,19 @@ public class BaseTestHBaseFsck {
     }
 
     @Override
-    public void reportError(ERROR_CODE errorCode, String message, TableInfo table) {
+    public void reportError(ERROR_CODE errorCode, String message, HbckTableInfo table) {
       calledCount++;
     }
 
     @Override
     public void reportError(ERROR_CODE errorCode,
-        String message, TableInfo table, HbckInfo info) {
+        String message, HbckTableInfo table, HbckRegionInfo info) {
       calledCount++;
     }
 
     @Override
     public void reportError(ERROR_CODE errorCode, String message,
-        TableInfo table, HbckInfo info1, HbckInfo info2) {
+        HbckTableInfo table, HbckRegionInfo info1, HbckRegionInfo info2) {
       calledCount++;
     }
@@ -534,7 +531,7 @@ public class BaseTestHBaseFsck {
     }
 
     @Override
-    public boolean tableHasErrors(TableInfo table) {
+    public boolean tableHasErrors(HbckTableInfo table) {
       calledCount++;
       return false;
     }
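
As a usage note, MockErrorReporter only bumps a shared counter, so a test can assert that fsck surfaced an error without inspecting messages. A hedged sketch: the specific ERROR_CODE constant and the tableInfo/region1/region2 fixtures are assumptions, while MockErrorReporter, its reportError overloads, and HbckErrorReporter come from the diff above; assertEquals is the usual JUnit assertion.

// Sketch: every reportError(...) overload increments calledCount.
MockErrorReporter reporter = new MockErrorReporter();
int before = MockErrorReporter.calledCount;
reporter.reportError(ERROR_CODE.DUPE_STARTKEYS, "regions share a start key",
    tableInfo, region1, region2);
assertEquals(before + 1, MockErrorReporter.calledCount);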


@@ -25,8 +25,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
-import org.apache.hadoop.hbase.util.HBaseFsck.MetaEntry;
+import org.apache.hadoop.hbase.util.HbckRegionInfo.MetaEntry;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -51,58 +50,57 @@ public class TestHBaseFsckComparator {
   byte[] keyC = Bytes.toBytes("C");
   byte[] keyEnd = Bytes.toBytes("");
 
-  static HbckInfo genHbckInfo(TableName table, byte[] start, byte[] end, int time) {
-    return new HbckInfo(new MetaEntry(new HRegionInfo(table, start, end), null,
+  static HbckRegionInfo genHbckInfo(TableName table, byte[] start, byte[] end, int time) {
+    return new HbckRegionInfo(new MetaEntry(new HRegionInfo(table, start, end), null,
         time));
   }
 
   @Test
   public void testEquals() {
-    HbckInfo hi1 = genHbckInfo(table, keyA, keyB, 0);
-    HbckInfo hi2 = genHbckInfo(table, keyA, keyB, 0);
-    assertEquals(0, HBaseFsck.cmp.compare(hi1, hi2));
-    assertEquals(0, HBaseFsck.cmp.compare(hi2, hi1));
+    HbckRegionInfo hi1 = genHbckInfo(table, keyA, keyB, 0);
+    HbckRegionInfo hi2 = genHbckInfo(table, keyA, keyB, 0);
+    assertEquals(0, HbckRegionInfo.COMPARATOR.compare(hi1, hi2));
+    assertEquals(0, HbckRegionInfo.COMPARATOR.compare(hi2, hi1));
   }
 
   @Test
   public void testEqualsInstance() {
-    HbckInfo hi1 = genHbckInfo(table, keyA, keyB, 0);
-    HbckInfo hi2 = hi1;
-    assertEquals(0, HBaseFsck.cmp.compare(hi1, hi2));
-    assertEquals(0, HBaseFsck.cmp.compare(hi2, hi1));
+    HbckRegionInfo hi1 = genHbckInfo(table, keyA, keyB, 0);
+    HbckRegionInfo hi2 = hi1;
+    assertEquals(0, HbckRegionInfo.COMPARATOR.compare(hi1, hi2));
+    assertEquals(0, HbckRegionInfo.COMPARATOR.compare(hi2, hi1));
   }
 
   @Test
   public void testDiffTable() {
-    HbckInfo hi1 = genHbckInfo(table, keyA, keyC, 0);
-    HbckInfo hi2 = genHbckInfo(table2, keyA, keyC, 0);
-    assertTrue(HBaseFsck.cmp.compare(hi1, hi2) < 0);
-    assertTrue(HBaseFsck.cmp.compare(hi2, hi1) > 0);
+    HbckRegionInfo hi1 = genHbckInfo(table, keyA, keyC, 0);
+    HbckRegionInfo hi2 = genHbckInfo(table2, keyA, keyC, 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi1, hi2) < 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi2, hi1) > 0);
   }
 
   @Test
   public void testDiffStartKey() {
-    HbckInfo hi1 = genHbckInfo(table, keyStart, keyC, 0);
-    HbckInfo hi2 = genHbckInfo(table, keyA, keyC, 0);
-    assertTrue(HBaseFsck.cmp.compare(hi1, hi2) < 0);
-    assertTrue(HBaseFsck.cmp.compare(hi2, hi1) > 0);
+    HbckRegionInfo hi1 = genHbckInfo(table, keyStart, keyC, 0);
+    HbckRegionInfo hi2 = genHbckInfo(table, keyA, keyC, 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi1, hi2) < 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi2, hi1) > 0);
  }
 
   @Test
   public void testDiffEndKey() {
-    HbckInfo hi1 = genHbckInfo(table, keyA, keyB, 0);
-    HbckInfo hi2 = genHbckInfo(table, keyA, keyC, 0);
-    assertTrue(HBaseFsck.cmp.compare(hi1, hi2) < 0);
-    assertTrue(HBaseFsck.cmp.compare(hi2, hi1) > 0);
+    HbckRegionInfo hi1 = genHbckInfo(table, keyA, keyB, 0);
+    HbckRegionInfo hi2 = genHbckInfo(table, keyA, keyC, 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi1, hi2) < 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi2, hi1) > 0);
   }
 
   @Test
   public void testAbsEndKey() {
-    HbckInfo hi1 = genHbckInfo(table, keyA, keyC, 0);
-    HbckInfo hi2 = genHbckInfo(table, keyA, keyEnd, 0);
-    assertTrue(HBaseFsck.cmp.compare(hi1, hi2) < 0);
-    assertTrue(HBaseFsck.cmp.compare(hi2, hi1) > 0);
+    HbckRegionInfo hi1 = genHbckInfo(table, keyA, keyC, 0);
+    HbckRegionInfo hi2 = genHbckInfo(table, keyA, keyEnd, 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi1, hi2) < 0);
+    assertTrue(HbckRegionInfo.COMPARATOR.compare(hi2, hi1) > 0);
   }
 }
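
Read together, these tests pin down the relocated comparator's contract: order by table name first, then start key, then end key, with the empty end key (the table's last region) sorting after any concrete key. A small sketch of that ordering, reusing the genHbckInfo fixture and keys from the test above; the surrounding imports are the only additions.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.util.HbckRegionInfo;

// Sorting with HbckRegionInfo.COMPARATOR; expected order follows the
// assertions above: ["" -> C] before [A -> B] before [A -> ""].
List<HbckRegionInfo> regions = new ArrayList<>(Arrays.asList(
    genHbckInfo(table, keyA, keyEnd, 0),     // A  -> "" (open-ended, sorts last)
    genHbckInfo(table, keyA, keyB, 0),       // A  -> B
    genHbckInfo(table, keyStart, keyC, 0))); // "" -> C  (first region)
regions.sort(HbckRegionInfo.COMPARATOR);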


@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
+import org.apache.hadoop.hbase.util.HbckErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;


@@ -30,7 +30,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.HBaseFsck;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
+import org.apache.hadoop.hbase.util.HbckErrorReporter.ERROR_CODE;
 
 public class HbckTestingUtil {
   private static ExecutorService exec = new ScheduledThreadPoolExecutor(10);