diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index d940509f037..3d7b145d9f7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -1249,7 +1249,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   public HColumnDescriptor setDFSReplication(short replication) {
     if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) {
       throw new IllegalArgumentException(
-          "DFS replication factor cannot be less than 1 if explictly set.");
+          "DFS replication factor cannot be less than 1 if explicitly set.");
     }
     setValue(DFS_REPLICATION, Short.toString(replication));
     return this;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index dc1a7ad77d9..25590c51086 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -551,7 +551,7 @@ public class ReplicationAdmin implements Closeable {
     } else if (!peerHtd.equals(htd)) {
       throw new IllegalArgumentException("Table " + tableName.getNameAsString()
           + " exists in peer cluster " + repPeer.getId()
-          + ", but the table descriptors are not same when comapred with source cluster."
+          + ", but the table descriptors are not the same when compared with the source cluster."
           + " Thus can not enable the table's replication switch.");
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
index ce20e615c29..5302b1bd007 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
@@ -182,7 +182,7 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase
   @Override
   public void abort(String why, Throwable e) {
-    LOG.fatal("The ReplicationPeer coresponding to peer " + peerConfig
+    LOG.fatal("The ReplicationPeer corresponding to peer " + peerConfig
         + " was aborted for the following reason(s):" + why, e);
   }
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
index deddb5192d1..939d12d1477 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java
@@ -138,7 +138,7 @@ public class BoundedByteBufferPool {
     if (LOG.isTraceEnabled()) {
       long allocations = allocationsRef.incrementAndGet();
-      LOG.trace("runningAverage=" + runningAverage + ", alloctions=" + allocations);
+      LOG.trace("runningAverage=" + runningAverage + ", allocations=" + allocations);
     }
     return bb;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
index 06319583ec3..37738633377 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
@@ -113,7 +113,7 @@ public class HDFSBlocksDistribution {
    */
   @Override
   public synchronized String toString() {
-    return "number of unique hosts in the disribution=" +
+    return "number of unique hosts in the distribution=" +
       this.hostAndWeights.size();
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
index 8cd23185be0..bd8a58e467c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
@@ -138,7 +138,7 @@ public class ZNodeClearer {
       String[] rsZnodeParts = rsZnodePath.split("/");
       masterServerName = rsZnodeParts[rsZnodeParts.length -1];
     } catch (IndexOutOfBoundsException e) {
-      LOG.warn("String " + rsZnodePath + " has wrong fromat", e);
+      LOG.warn("String " + rsZnodePath + " has wrong format", e);
     }
     return masterServerName;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 72f144df2d9..1970aded364 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1598,7 +1598,7 @@ public class HFileBlock implements Cacheable {
         blk = readBlockDataInternal(is, offset, onDiskSizeWithHeaderL, pread,
             doVerificationThruHBaseChecksum);
         if (blk != null) {
-          HFile.LOG.warn("HDFS checksum verification suceeded for file " +
+          HFile.LOG.warn("HDFS checksum verification succeeded for file " +
             pathName + " at offset " + offset + " filesize " + fileSize);
         }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index a36423e8628..7979fe25c31 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -1024,7 +1024,7 @@ public class BucketCache implements BlockCache, HeapSize {
     if (this.ioErrorStartTime > 0) {
       if (cacheEnabled && (now - ioErrorStartTime) > this.ioErrorsTolerationDuration) {
         LOG.error("IO errors duration time has exceeded " + ioErrorsTolerationDuration +
-          "ms, disabing cache, please check your IOEngine");
+          "ms, disabling cache, please check your IOEngine");
         disableCache();
       }
     } else {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 0d9aae862cb..980dcb1c3ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -473,7 +473,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
       // need to reload split keys each iteration.
       final Pair<byte[][], byte[][]> startEndKeys = regionLocator.getStartEndKeys();
       if (count != 0) {
-        LOG.info("Split occured while grouping HFiles, retry attempt " +
+        LOG.info("Split occurred while grouping HFiles, retry attempt " +
           count + " with " + queue.size() + " files remaining to group or split");
       }
@@ -903,12 +903,12 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     if (indexForCallable < 0) {
       throw new IOException("The first region info for table " + table.getName()
-          + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+          + " can't be found in hbase:meta. Please use hbck tool to fix it first.");
     } else if ((indexForCallable == startEndKeys.getFirst().length - 1)
         && !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
       throw new IOException("The last region info for table " + table.getName()
-          + " cann't be found in hbase:meta.Please use hbck tool to fix it first.");
+          + " can't be found in hbase:meta. Please use hbck tool to fix it first.");
     } else if (indexForCallable + 1 < startEndKeys.getFirst().length
         && !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
           startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 09d1cc8d4d7..6801a617401 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -298,7 +298,7 @@ public class VerifyReplication extends Configured implements Tool {
       return pair;
     } catch (ReplicationException e) {
       throw new IOException(
-          "An error occured while trying to connect to the remove peer cluster", e);
+          "An error occurred while trying to connect to the remote peer cluster", e);
     } finally {
       if (peer != null) {
         peer.close();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
index 53de19dabb6..1ea57b4830e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentVerificationReport.java
@@ -460,7 +460,7 @@ public class AssignmentVerificationReport {
   public void print(boolean isDetailMode) {
     if (!isFilledUp) {
-      System.err.println("[Error] Region assignment verfication report" +
+      System.err.println("[Error] Region assignment verification report " +
         "hasn't been filled up");
     }
     DecimalFormat df = new java.text.DecimalFormat( "#.##");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
index 47f52f549e2..c0a915b6983 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
@@ -89,7 +89,7 @@ public class MasterMobCompactionThread {
       try {
         master.reportMobCompactionEnd(tableName);
       } catch (IOException e1) {
-        LOG.error("Failed to mark end of mob compation", e1);
+        LOG.error("Failed to mark end of mob compaction", e1);
       }
       throw e;
     }
@@ -131,7 +131,7 @@ public class MasterMobCompactionThread {
       try {
         master.reportMobCompactionEnd(tableName);
       } catch (IOException e) {
-        LOG.error("Failed to mark end of mob compation", e);
+        LOG.error("Failed to mark end of mob compaction", e);
       }
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index a9ed0250fa4..01a698e7176 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -976,8 +976,8 @@ public class RegionPlacementMaintainer {
     opt.addOption("tables", true, "The list of table names splitted by ',' ;" +
         "For example: -tables: t1,t2,...,tn");
-    opt.addOption("l", "locality", true, "enforce the maxium locality");
-    opt.addOption("m", "min-move", true, "enforce minium assignment move");
+    opt.addOption("l", "locality", true, "enforce the maximum locality");
+    opt.addOption("m", "min-move", true, "enforce minimum assignment move");
     opt.addOption("diff", false, "calculate difference between assignment plans");
     opt.addOption("munkres", false,
         "use munkres to place secondaries and tertiaries");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 39932858b6c..6079ed69d6c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -1091,7 +1091,7 @@ public class RegionStates {
       }
       return hri;
     } catch (IOException e) {
-      server.abort("Aborting because error occoured while reading "
+      server.abort("Aborting because error occurred while reading "
          + Bytes.toStringBinary(regionName) + " from hbase:meta", e);
       return null;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
index 355339e5e4b..b4d69090ea7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java
@@ -460,7 +460,7 @@ public class FavoredNodeAssignmentHelper {
           getOneRandomServer(primaryRack, serverSkipSet);
       if (secondaryRS == null || tertiaryRS == null) {
-        LOG.error("Cannot place the secondary and terinary" +
+        LOG.error("Cannot place the secondary and tertiary " +
            "region server for region " + regionInfo.getRegionNameAsString());
       }
@@ -498,7 +498,7 @@ public class FavoredNodeAssignmentHelper {
       ServerName tertiaryRS = getOneRandomServer(secondaryRack, skipServerSet);
       if (secondaryRS == null || tertiaryRS == null) {
-        LOG.error("Cannot place the secondary and terinary" +
+        LOG.error("Cannot place the secondary and tertiary " +
            "region server for region " + regionInfo.getRegionNameAsString());
       }
@@ -603,4 +603,4 @@ public class FavoredNodeAssignmentHelper {
     }
     return strBuf.toString();
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index ca449023de5..ffe86317051 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -349,7 +349,7 @@ public class DeleteTableProcedure
       }
     }
     if (!deletes.isEmpty()) {
-      LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + tableName +
+      LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName +
         " from " + TableName.META_TABLE_NAME);
       metaTable.delete(deletes);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
index b1fd7eae528..ee9293204ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
@@ -416,14 +416,14 @@ public class DispatchMergingRegionsProcedure
         forcible,
         getUser());
       LOG.info("Sent merge to server " + getServerName(env) + " for region " +
-        getRegionsToMergeListEncodedNameString() + ", focible=" + forcible);
+        getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible);
       return;
     } catch (RegionOpeningException roe) {
       // Do a retry since region should be online on RS immediately
       LOG.warn("Failed mergering regions in " + getServerName(env) + ", retrying...", roe);
     } catch (Exception ie) {
       LOG.warn("Failed sending merge to " + getServerName(env) + " for regions " +
-        getRegionsToMergeListEncodedNameString() + ", focible=" + forcible, ie);
+        getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible, ie);
       return;
     }
   } while ((duration = EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
index 19637bbee59..883ac9aa911 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
@@ -697,7 +697,7 @@ public class SplitTableRegionProcedure
           HRegionInfo.parseRegionName(p.getRow());
         }
       } catch (IOException e) {
-        LOG.error("Row key of mutation from coprossor is not parsable as region name."
+        LOG.error("Row key of mutation from coprocessor is not parsable as region name."
            + "Mutations from coprocessor should only for hbase:meta table.");
         throw e;
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
index 5c67ce39b5f..2c2b4afba5c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
@@ -199,7 +199,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
     try {
       byte[] data = ZKUtil.getData(zkController.getWatcher(), path);
       if (!ProtobufUtil.isPBMagicPrefix(data)) {
-        String msg = "Data in for starting procuedure " + opName +
+        String msg = "Data in for starting procedure " + opName +
          " is illegally formatted (no pb magic). " +
" + "Killing the procedure: " + Bytes.toString(data); LOG.error(msg); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java index 18cf35d09d6..2773e00aa2a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java @@ -100,7 +100,7 @@ public class CompactedHFilesDischarger extends ScheduledChore { + region.getRegionInfo() + " under the store " + store.getColumnFamilyName()); } } catch (Exception e) { - LOG.error("Exception while trying to close and archive the comapcted store " + LOG.error("Exception while trying to close and archive the compacted store " + "files of the store " + store.getColumnFamilyName() + " in the" + " region " + region.getRegionInfo(), e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index a2f546a8495..ad3978ef73a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -206,7 +206,7 @@ public class HeapMemoryManager { public void stop() { // The thread is Daemon. Just interrupting the ongoing process. - LOG.info("Stoping HeapMemoryTuner chore."); + LOG.info("Stopping HeapMemoryTuner chore."); this.heapMemTunerChore.cancel(true); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java index 487dbd88b0e..96d7bc45a1f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java @@ -257,7 +257,7 @@ public class SplitTransactionImpl implements SplitTransaction { HRegionInfo.parseRegionName(p.getRow()); } } catch (IOException e) { - LOG.error("Row key of mutation from coprossor is not parsable as region name." + LOG.error("Row key of mutation from coprocessor is not parsable as region name." 
+ "Mutations from coprocessor should only for hbase:meta table."); throw e; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java index 45ab5078981..8ad150b195f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/WALSplitterHandler.java @@ -77,7 +77,7 @@ public class WALSplitterHandler extends EventHandler { break; case PREEMPTED: SplitLogCounters.tot_wkr_preempt_task.incrementAndGet(); - LOG.warn("task execution prempted " + splitTaskDetails.getWALFile()); + LOG.warn("task execution preempted " + splitTaskDetails.getWALFile()); break; case ERR: if (server != null && !server.isStopped()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java index 9eda7e4ec2f..256c24caa60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java @@ -177,7 +177,7 @@ public class HFileReplicator { // need to reload split keys each iteration. startEndKeys = locator.getStartEndKeys(); if (count != 0) { - LOG.warn("Error occured while replicating HFiles, retry attempt " + count + " with " + LOG.warn("Error occurred while replicating HFiles, retry attempt " + count + " with " + queue.size() + " files still remaining to replicate."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java index 5032d968d66..eae9e4e47cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java @@ -225,7 +225,7 @@ public class TableAuthManager implements Closeable { mtime.incrementAndGet(); } catch (IOException e) { // Never happens - LOG.error("Error occured while updating the global cache", e); + LOG.error("Error occurred while updating the global cache", e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java index 5b488a99de4..26448b1a875 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSecretManager.java @@ -152,7 +152,7 @@ public class AuthenticationTokenSecretManager synchronized (this) { if (!leaderElector.isAlive() || leaderElector.isStopped()) { LOG.warn("Thread leaderElector[" + leaderElector.getName() + ":" - + leaderElector.getId() + "] is stoped or not alive"); + + leaderElector.getId() + "] is stopped or not alive"); leaderElector.start(); LOG.info("Thread leaderElector [" + leaderElector.getName() + ":" + leaderElector.getId() + "] is started"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 
index a31b7d1b85a..defffe351f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -899,7 +899,7 @@ public class HBaseFsck extends Configured implements Closeable {
       if (!valid) {
         errors.reportError(ERROR_CODE.BOUNDARIES_ERROR, "Found issues with regions boundaries",
             tablesInfo.get(regionInfo.getTable()));
-        LOG.warn("Region's boundaries not alligned between stores and META for:");
+        LOG.warn("Region's boundaries not aligned between stores and META for:");
         LOG.warn(currentRegionBoundariesInformation);
       }
     }
@@ -933,7 +933,7 @@ public class HBaseFsck extends Configured implements Closeable {
     FileSystem fs = p.getFileSystem(getConf());
     FileStatus[] dirs = fs.listStatus(p);
     if (dirs == null) {
-      LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " +
+      LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " +
          p + ". This dir could probably be deleted.");
       return ;
     }
@@ -1103,7 +1103,7 @@ public class HBaseFsck extends Configured implements Closeable {
       Path rootDir = getSidelineDir();
       Path dst = new Path(rootDir, pathStr.substring(index + 1));
       fs.mkdirs(dst.getParent());
-      LOG.info("Trying to sildeline reference file "
+      LOG.info("Trying to sideline reference file "
        + path + " to " + dst);
       setShouldRerun();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index f7b2c51cc4c..c0ce6393772 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -368,7 +368,7 @@ public class HFileCorruptionChecker {
       try {
         f.get();
       } catch (ExecutionException e) {
-        LOG.warn("Failed to quaratine an HFile in regiondir "
+        LOG.warn("Failed to quarantine an HFile in regiondir "
            + rdcs.get(i).regionDir, e.getCause());
         // rethrow IOExceptions
         if (e.getCause() instanceof IOException) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 1c6ab07a7a3..42d70f4bae1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -1001,7 +1001,7 @@ public class WALSplitter {
         try {
           controller.dataAvailable.wait(2000);
         } catch (InterruptedException e) {
-          LOG.warn("Got intrerrupted while waiting for EntryBuffers is drained");
+          LOG.warn("Got interrupted while waiting for EntryBuffers to drain");
           Thread.interrupted();
           break;
         }