HBASE-17224 Fix lots of spelling errors in HBase logging and exception messages (Grant Sohn)
commit eeaea4aea3
parent 540ede376b
@@ -1249,7 +1249,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   public HColumnDescriptor setDFSReplication(short replication) {
     if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) {
       throw new IllegalArgumentException(
-          "DFS replication factor cannot be less than 1 if explictly set.");
+          "DFS replication factor cannot be less than 1 if explicitly set.");
     }
     setValue(DFS_REPLICATION, Short.toString(replication));
     return this;
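For context, the guard in the hunk above rejects only values below 1 that differ from the DEFAULT_DFS_REPLICATION sentinel. A minimal caller-side sketch, assuming only the HBase client API on the classpath (the class name below is illustrative):

import org.apache.hadoop.hbase.HColumnDescriptor;

public class DfsReplicationSketch {
  public static void main(String[] args) {
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setDFSReplication((short) 3);    // accepted: a positive replication factor
    try {
      family.setDFSReplication((short) -1); // a negative value trips the guard above
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());   // the message corrected by this commit
    }
  }
}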
@@ -551,7 +551,7 @@ public class ReplicationAdmin implements Closeable {
       } else if (!peerHtd.equals(htd)) {
         throw new IllegalArgumentException("Table " + tableName.getNameAsString()
             + " exists in peer cluster " + repPeer.getId()
-            + ", but the table descriptors are not same when comapred with source cluster."
+            + ", but the table descriptors are not same when compared with source cluster."
             + " Thus can not enable the table's replication switch.");
       }
     }
@@ -182,7 +182,7 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase

   @Override
   public void abort(String why, Throwable e) {
-    LOG.fatal("The ReplicationPeer coresponding to peer " + peerConfig
+    LOG.fatal("The ReplicationPeer corresponding to peer " + peerConfig
         + " was aborted for the following reason(s):" + why, e);
   }

@@ -138,7 +138,7 @@ public class BoundedByteBufferPool {

     if (LOG.isTraceEnabled()) {
       long allocations = allocationsRef.incrementAndGet();
-      LOG.trace("runningAverage=" + runningAverage + ", alloctions=" + allocations);
+      LOG.trace("runningAverage=" + runningAverage + ", allocations=" + allocations);
     }
     return bb;
   }
@@ -113,7 +113,7 @@ public class HDFSBlocksDistribution {
    */
   @Override
   public synchronized String toString() {
-    return "number of unique hosts in the disribution=" +
+    return "number of unique hosts in the distribution=" +
       this.hostAndWeights.size();
   }

@@ -138,7 +138,7 @@ public class ZNodeClearer {
       String[] rsZnodeParts = rsZnodePath.split("/");
       masterServerName = rsZnodeParts[rsZnodeParts.length -1];
     } catch (IndexOutOfBoundsException e) {
-      LOG.warn("String " + rsZnodePath + " has wrong fromat", e);
+      LOG.warn("String " + rsZnodePath + " has wrong format", e);
     }
     return masterServerName;
   }
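The catch above exists because String.split drops trailing empty tokens: splitting a bare "/" yields an empty array, so indexing length - 1 throws an IndexOutOfBoundsException. A standalone illustration (not HBase code):

public class SplitEdgeCase {
  public static void main(String[] args) {
    // A well-formed znode path splits into a leading empty token plus segments.
    System.out.println("/hbase/rs/server,1234".split("/").length); // 4
    // A bare "/" splits into zero tokens, so [length - 1] would throw.
    System.out.println("/".split("/").length);                     // 0
  }
}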
@@ -1598,7 +1598,7 @@ public class HFileBlock implements Cacheable {
         blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread,
           doVerificationThruHBaseChecksum);
         if (blk != null) {
-          HFile.LOG.warn("HDFS checksum verification suceeded for file " +
+          HFile.LOG.warn("HDFS checksum verification succeeded for file " +
             pathName + " at offset " +
             offset + " filesize " + fileSize);
         }
@@ -1024,7 +1024,7 @@ public class BucketCache implements BlockCache, HeapSize {
     if (this.ioErrorStartTime > 0) {
       if (cacheEnabled && (now - ioErrorStartTime) > this.ioErrorsTolerationDuration) {
         LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration +
-          "ms, disabing cache, please check your IOEngine");
+          "ms, disabling cache, please check your IOEngine");
         disableCache();
       }
     } else {
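The BucketCache hunk above shows its IO-error toleration scheme: remember when an error streak began and disable the cache only once the streak has outlasted a configured budget. A minimal self-contained sketch of that pattern, with illustrative names and duration rather than the real BucketCache internals:

public class IoErrorToleration {
  private long ioErrorStartTime = -1;        // -1 marks "no ongoing error streak"
  private final long tolerationMs = 60_000;  // hypothetical one-minute budget
  private volatile boolean cacheEnabled = true;

  synchronized void onIoError() {
    long now = System.currentTimeMillis();
    if (ioErrorStartTime > 0) {
      if (cacheEnabled && (now - ioErrorStartTime) > tolerationMs) {
        cacheEnabled = false;                // stand-in for disableCache()
      }
    } else {
      ioErrorStartTime = now;                // the first error starts the clock
    }
  }

  synchronized void onIoSuccess() {
    ioErrorStartTime = -1;                   // any success resets the streak
  }
}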
@@ -473,7 +473,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
       // need to reload split keys each iteration.
       final Pair<byte[][], byte[][]> startEndKeys = regionLocator.getStartEndKeys();
       if (count != 0) {
-        LOG.info("Split occured while grouping HFiles, retry attempt " +
+        LOG.info("Split occurred while grouping HFiles, retry attempt " +
           + count + " with " + queue.size() + " files remaining to group or split");
       }

@@ -903,12 +903,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     if (indexForCallable < 0) {
       throw new IOException("The first region info for table "
           + table.getName()
-          + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+          + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
     } else if ((indexForCallable == startEndKeys.getFirst().length - 1)
         && !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
       throw new IOException("The last region info for table "
           + table.getName()
-          + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+          + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
     } else if (indexForCallable + 1 < startEndKeys.getFirst().length
         && !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
           startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {
@@ -298,7 +298,7 @@ public class VerifyReplication extends Configured implements Tool {
         return pair;
       } catch (ReplicationException e) {
         throw new IOException(
-            "An error occured while trying to connect to the remove peer cluster", e);
+            "An error occurred while trying to connect to the remove peer cluster", e);
       } finally {
         if (peer != null) {
           peer.close();
@@ -460,7 +460,7 @@ public class AssignmentVerificationReport {

   public void print(boolean isDetailMode) {
     if (!isFilledUp) {
-      System.err.println("[Error] Region assignment verfication report" +
+      System.err.println("[Error] Region assignment verification report" +
           "hasn't been filled up");
     }
     DecimalFormat df = new java.text.DecimalFormat( "#.##");
@@ -89,7 +89,7 @@ public class MasterMobCompactionThread {
         try {
           master.reportMobCompactionEnd(tableName);
         } catch (IOException e1) {
-          LOG.error("Failed to mark end of mob compation", e1);
+          LOG.error("Failed to mark end of mob compaction", e1);
         }
         throw e;
       }
@@ -131,7 +131,7 @@ public class MasterMobCompactionThread {
         try {
           master.reportMobCompactionEnd(tableName);
         } catch (IOException e) {
-          LOG.error("Failed to mark end of mob compation", e);
+          LOG.error("Failed to mark end of mob compaction", e);
         }
       }
     }
@@ -976,8 +976,8 @@ public class RegionPlacementMaintainer {
     opt.addOption("tables", true,
         "The list of table names splitted by ',' ;" +
         "For example: -tables: t1,t2,...,tn");
-    opt.addOption("l", "locality", true, "enforce the maxium locality");
-    opt.addOption("m", "min-move", true, "enforce minium assignment move");
+    opt.addOption("l", "locality", true, "enforce the maximum locality");
+    opt.addOption("m", "min-move", true, "enforce minimum assignment move");
     opt.addOption("diff", false, "calculate difference between assignment plans");
     opt.addOption("munkres", false,
         "use munkres to place secondaries and tertiaries");
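The options in the hunk above are registered through Apache Commons CLI. A short parse sketch under that assumption (commons-cli on the classpath; DefaultParser is the commons-cli 1.3+ entry point, older parsers differ):

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class CliSketch {
  public static void main(String[] args) throws ParseException {
    Options opt = new Options();
    opt.addOption("l", "locality", true, "enforce the maximum locality");
    opt.addOption("m", "min-move", true, "enforce minimum assignment move");
    CommandLine cmd = new DefaultParser().parse(opt, args);
    if (cmd.hasOption("l")) {
      System.out.println("locality threshold: " + cmd.getOptionValue("l"));
    }
  }
}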
@@ -1091,7 +1091,7 @@ public class RegionStates {
       }
       return hri;
     } catch (IOException e) {
-      server.abort("Aborting because error occoured while reading "
+      server.abort("Aborting because error occurred while reading "
           + Bytes.toStringBinary(regionName) + " from hbase:meta", e);
       return null;
     }
@@ -460,7 +460,7 @@ public class FavoredNodeAssignmentHelper {
         getOneRandomServer(primaryRack, serverSkipSet);

     if (secondaryRS == null || tertiaryRS == null) {
-      LOG.error("Cannot place the secondary and terinary" +
+      LOG.error("Cannot place the secondary and ternary" +
           "region server for region " +
           regionInfo.getRegionNameAsString());
     }
@@ -498,7 +498,7 @@ public class FavoredNodeAssignmentHelper {
     ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet);

     if (secondaryRS == null || tertiaryRS == null) {
-      LOG.error("Cannot place the secondary and terinary" +
+      LOG.error("Cannot place the secondary and ternary" +
           "region server for region " +
           regionInfo.getRegionNameAsString());
     }
@@ -349,7 +349,7 @@ public class DeleteTableProcedure
       }
     }
     if (!deletes.isEmpty()) {
-      LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + tableName +
+      LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName +
         " from " + TableName.META_TABLE_NAME);
       metaTable.delete(deletes);
     }
@@ -416,14 +416,14 @@ public class DispatchMergingRegionsProcedure
           forcible,
           getUser());
         LOG.info("Sent merge to server " + getServerName(env) + " for region " +
-          getRegionsToMergeListEncodedNameString() + ", focible=" + forcible);
+          getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible);
         return;
       } catch (RegionOpeningException roe) {
         // Do a retry since region should be online on RS immediately
         LOG.warn("Failed mergering regions in " + getServerName(env) + ", retrying...", roe);
       } catch (Exception ie) {
         LOG.warn("Failed sending merge to " + getServerName(env) + " for regions " +
-          getRegionsToMergeListEncodedNameString() + ", focible=" + forcible, ie);
+          getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible, ie);
         return;
       }
     } while ((duration = EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
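The merge dispatch above retries inside a time-bounded do/while driven by EnvironmentEdgeManager.currentTime(). A standalone sketch of the same pattern using plain system time and illustrative names:

public class BoundedRetry {
  interface Attempt {
    boolean run() throws Exception; // true on success
  }

  static boolean retryFor(long timeoutMs, Attempt attempt) {
    long start = System.currentTimeMillis();
    do {
      try {
        if (attempt.run()) {
          return true;              // success: stop retrying
        }
      } catch (Exception e) {
        // treat as transient, like the RegionOpeningException branch above
      }
    } while (System.currentTimeMillis() - start <= timeoutMs);
    return false;                   // deadline exhausted
  }
}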
@@ -697,7 +697,7 @@ public class SplitTableRegionProcedure
         HRegionInfo.parseRegionName(p.getRow());
       }
     } catch (IOException e) {
-      LOG.error("Row key of mutation from coprossor is not parsable as region name."
+      LOG.error("Row key of mutation from coprocessor is not parsable as region name."
           + "Mutations from coprocessor should only for hbase:meta table.");
       throw e;
     }
@@ -199,7 +199,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
     try {
       byte[] data = ZKUtil.getData(zkController.getWatcher(), path);
       if (!ProtobufUtil.isPBMagicPrefix(data)) {
-        String msg = "Data in for starting procuedure " + opName +
+        String msg = "Data in for starting procedure " + opName +
           " is illegally formatted (no pb magic). " +
           "Killing the procedure: " + Bytes.toString(data);
         LOG.error(msg);
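The guard above rejects znode payloads that lack the protobuf magic prefix HBase writes ahead of serialized data. A generic version of that kind of check; the 4-byte marker below is an assumption shown for illustration only, the real check is ProtobufUtil.isPBMagicPrefix:

import java.util.Arrays;

public class MagicPrefixCheck {
  private static final byte[] MAGIC = { 'P', 'B', 'U', 'F' }; // assumed marker

  static boolean hasMagicPrefix(byte[] data) {
    return data != null
        && data.length >= MAGIC.length
        && Arrays.equals(Arrays.copyOfRange(data, 0, MAGIC.length), MAGIC);
  }
}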
@@ -100,7 +100,7 @@ public class CompactedHFilesDischarger extends ScheduledChore {
             + region.getRegionInfo() + " under the store " + store.getColumnFamilyName());
         }
       } catch (Exception e) {
-        LOG.error("Exception while trying to close and archive the comapcted store "
+        LOG.error("Exception while trying to close and archive the compacted store "
             + "files of the store " + store.getColumnFamilyName() + " in the" + " region "
             + region.getRegionInfo(), e);
       }
@@ -206,7 +206,7 @@ public class HeapMemoryManager {

   public void stop() {
     // The thread is Daemon. Just interrupting the ongoing process.
-    LOG.info("Stoping HeapMemoryTuner chore.");
+    LOG.info("Stopping HeapMemoryTuner chore.");
     this.heapMemTunerChore.cancel(true);
   }

@@ -257,7 +257,7 @@ public class SplitTransactionImpl implements SplitTransaction {
         HRegionInfo.parseRegionName(p.getRow());
       }
     } catch (IOException e) {
-      LOG.error("Row key of mutation from coprossor is not parsable as region name."
+      LOG.error("Row key of mutation from coprocessor is not parsable as region name."
           + "Mutations from coprocessor should only for hbase:meta table.");
       throw e;
     }
@@ -77,7 +77,7 @@ public class WALSplitterHandler extends EventHandler {
         break;
       case PREEMPTED:
         SplitLogCounters.tot_wkr_preempt_task.incrementAndGet();
-        LOG.warn("task execution prempted " + splitTaskDetails.getWALFile());
+        LOG.warn("task execution preempted " + splitTaskDetails.getWALFile());
         break;
       case ERR:
         if (server != null && !server.isStopped()) {
@@ -177,7 +177,7 @@ public class HFileReplicator {
       // need to reload split keys each iteration.
       startEndKeys = locator.getStartEndKeys();
       if (count != 0) {
-        LOG.warn("Error occured while replicating HFiles, retry attempt " + count + " with "
+        LOG.warn("Error occurred while replicating HFiles, retry attempt " + count + " with "
            + queue.size() + " files still remaining to replicate.");
       }

@@ -225,7 +225,7 @@ public class TableAuthManager implements Closeable {
       mtime.incrementAndGet();
     } catch (IOException e) {
       // Never happens
-      LOG.error("Error occured while updating the global cache", e);
+      LOG.error("Error occurred while updating the global cache", e);
     }
   }

@@ -152,7 +152,7 @@ public class AuthenticationTokenSecretManager
     synchronized (this) {
       if (!leaderElector.isAlive() || leaderElector.isStopped()) {
         LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":"
-            + leaderElector.getId() + "] is stoped or not alive");
+            + leaderElector.getId() + "] is stopped or not alive");
         leaderElector.start();
         LOG.info("Thread leaderElector [" + leaderElector.getName() + ":"
             + leaderElector.getId() + "] is started");
@@ -899,7 +899,7 @@ public class HBaseFsck extends Configured implements Closeable {
       if (!valid) {
         errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
             tablesInfo.get(regionInfo.getTable()));
-        LOG.warn("Region's boundaries not alligned between stores and META for:");
+        LOG.warn("Region's boundaries not aligned between stores and META for:");
         LOG.warn(currentRegionBoundariesInformation);
       }
     }
@@ -933,7 +933,7 @@ public class HBaseFsck extends Configured implements Closeable {
     FileSystem fs = p.getFileSystem(getConf());
     FileStatus[] dirs = fs.listStatus(p);
     if (dirs == null) {
-      LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " +
+      LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " +
           p + ". This dir could probably be deleted.");
       return ;
     }
@@ -1103,7 +1103,7 @@ public class HBaseFsck extends Configured implements Closeable {
     Path rootDir = getSidelineDir();
     Path dst = new Path(rootDir, pathStr.substring(index + 1));
     fs.mkdirs(dst.getParent());
-    LOG.info("Trying to sildeline reference file "
+    LOG.info("Trying to sideline reference file "
       + path + " to " + dst);
     setShouldRerun();

@@ -368,7 +368,7 @@ public class HFileCorruptionChecker {
       try {
         f.get();
       } catch (ExecutionException e) {
-        LOG.warn("Failed to quaratine an HFile in regiondir "
+        LOG.warn("Failed to quarantine an HFile in regiondir "
            + rdcs.get(i).regionDir, e.getCause());
         // rethrow IOExceptions
         if (e.getCause() instanceof IOException) {
@@ -1001,7 +1001,7 @@ public class WALSplitter {
         try {
           controller.dataAvailable.wait(2000);
         } catch (InterruptedException e) {
-          LOG.warn("Got intrerrupted while waiting for EntryBuffers is drained");
+          LOG.warn("Got interrupted while waiting for EntryBuffers is drained");
           Thread.interrupted();
           break;
         }
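The WALSplitter hunk above waits with a timeout and turns an interrupt into an orderly exit; the code shown clears the interrupt flag with Thread.interrupted(), whereas the common alternative restores it. A minimal guarded-wait sketch of that idiom, with illustrative names rather than the WALSplitter internals:

public class GuardedWait {
  private final Object dataAvailable = new Object();
  private boolean drained = false;

  void awaitDrain() {
    synchronized (dataAvailable) {
      while (!drained) {
        try {
          dataAvailable.wait(2000);             // bounded wait, condition rechecked in a loop
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();   // restore the flag, then stop waiting
          break;
        }
      }
    }
  }

  void markDrained() {
    synchronized (dataAvailable) {
      drained = true;
      dataAvailable.notifyAll();                // wake any waiter
    }
  }
}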