diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 5783f9074a3..aabd6fd231f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1052,7 +1052,7 @@ public class DFSInputStream extends FSInputStream
           reader.getNetworkDistance(), nread);
       if (nread != len) {
         throw new IOException("truncated return from reader.read(): " +
-            "excpected " + len + ", got " + nread);
+            "expected " + len + ", got " + nread);
       }
       DFSClientFaultInjector.get().readFromDatanodeDelay();
       return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08fc956..db064e4019c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -57,11 +57,11 @@ public class LongBitFormat implements Serializable {
   public long combine(long value, long record) {
     if (value < MIN) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+          "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
     }
     if (value > MAX) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+          "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
     }
     return (record & ~MASK) | (value << OFFSET);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index e3759ce7875..fdca64e5dbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -38,7 +38,7 @@ public class ServerException extends XException {
     S04("Service [{0}] does not implement declared interface [{1}]"),
     S05("[{0}] is not a file"),
     S06("Could not load file [{0}], {1}"),
-    S07("Could not instanciate service class [{0}], {1}"),
+    S07("Could not instantiate service class [{0}], {1}"),
     S08("Could not load service classes, {0}"),
     S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
     S10("Service [{0}] requires service [{1}]"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index e26fac5ab8b..2d8f676f2d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -130,7 +130,7 @@ class OpenFileCtxCache {
         }
         toEvict = openFileMap.remove(pairs.getKey());
         Preconditions.checkState(toEvict == pairs.getValue(),
-            "The deleted entry is not the same as odlest found.");
+            "The deleted entry is not the same as oldest found.");
       }
     }
     openFileMap.put(h, context);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 23166e26f1b..46646997d41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1364,7 +1364,7 @@ public class DFSUtil {
         DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
-      throw new HadoopIllegalArgumentException("Unregonized value '"
+      throw new HadoopIllegalArgumentException("Unrecognized value '"
           + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e60703be4af..05d538aa86b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2772,7 +2772,7 @@ public class BlockManager implements BlockStatsMXBean {
       throws IOException {
     for (ReportedBlockInfo rbi : rbis) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing previouly queued message " + rbi);
+        LOG.debug("Processing previously queued message " + rbi);
       }
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 8323140e4d6..69dc9f9d86f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -836,7 +836,7 @@ class BlockPoolSlice {
     } catch (Exception e) {
       // Any exception we need to revert back to read from disk
       // Log the error and return false
-      LOG.info("Exception occured while reading the replicas cache file: "
+      LOG.info("Exception occurred while reading the replicas cache file: "
           + replicaFile.getPath(), e );
       return false;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 08564de6898..d41f9c379d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -1292,7 +1292,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     try {
       fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occured while compiling report: ", ioe);
+      LOG.warn("Exception occurred while compiling report: ", ioe);
       // Initiate a check on disk failure.
       dataset.datanode.checkDiskErrorAsync();
       // Ignore this directory and proceed.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index a8adcbd5621..9e60e48bdce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -97,7 +97,7 @@ public class QueryCommand extends Command {
     String header = "Query Plan queries a given data node about the " +
         "current state of disk balancer execution.\n\n";
 
-    String footer = "\nQuery command retrievs the plan ID and the current " +
+    String footer = "\nQuery command retrieves the plan ID and the current " +
         "running state. ";
 
     HelpFormatter helpFormatter = new HelpFormatter();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index f2a1ee52546..41ec8e9513a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -134,7 +134,7 @@ final class FSDirTruncateOp {
       if (!onBlockBoundary) {
         // Open file for write, but don't log into edits
         long lastBlockDelta = file.computeFileSize() - newLength;
-        assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
+        assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
         truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
             clientMachine, lastBlockDelta, null);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 29903449585..11cdbc63e7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1082,7 +1082,7 @@ public class FSEditLogLoader {
       boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
           fsDir, path, iip, file, oldBlock);
       if (!removed && !(op instanceof UpdateBlocksOp)) {
-        throw new IOException("Trying to delete non-existant block " + oldBlock);
+        throw new IOException("Trying to delete non-existent block " + oldBlock);
      }
     } else if (newBlocks.length > oldBlocks.length) {
       final boolean isStriped = ecPolicy != null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index d6dd8eead7a..f79130db7a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -1025,7 +1025,7 @@ public class NNStorage extends Storage implements Closeable,
 
     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
       throw new InconsistentFSStateException(storage,
-          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
+          "Unexpected blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
     setBlockPoolID(bpid);
   }
@@ -1154,4 +1154,4 @@ public class NNStorage extends Storage implements Closeable,
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 107d4ed081d..2c31cd9e997 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -219,7 +219,7 @@ public class NamenodeWebHdfsMethods {
       final String remoteAddr) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
-      throw new IOException("Namesystem has not been intialized yet.");
+      throw new IOException("Namesystem has not been initialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();
 