diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index aabd6fd231f..5783f9074a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1052,7 +1052,7 @@ public class DFSInputStream extends FSInputStream
             reader.getNetworkDistance(), nread);
         if (nread != len) {
           throw new IOException("truncated return from reader.read(): " +
-              "excpected " + len + ", got " + nread);
+              "expected " + len + ", got " + nread);
         }
         DFSClientFaultInjector.get().readFromDatanodeDelay();
         return;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index db064e4019c..51ad08fc956 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -57,11 +57,11 @@ public class LongBitFormat implements Serializable {
   public long combine(long value, long record) {
     if (value < MIN) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+          "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
     }
     if (value > MAX) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+          "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
     }
     return (record & ~MASK) | (value << OFFSET);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index fdca64e5dbc..e3759ce7875 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -38,7 +38,7 @@ public class ServerException extends XException {
     S04("Service [{0}] does not implement declared interface [{1}]"),
     S05("[{0}] is not a file"),
     S06("Could not load file [{0}], {1}"),
-    S07("Could not instanciate service class [{0}], {1}"),
+    S07("Could not instantiate service class [{0}], {1}"),
     S08("Could not load service classes, {0}"),
     S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
     S10("Service [{0}] requires service [{1}]"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index 2d8f676f2d4..e26fac5ab8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -130,7 +130,7 @@ class OpenFileCtxCache {
       }
       toEvict = openFileMap.remove(pairs.getKey());
       Preconditions.checkState(toEvict == pairs.getValue(),
-          "The deleted entry is not the same as odlest found.");
+          "The deleted entry is not the same as oldest found.");
     }
   }
   openFileMap.put(h, context);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 46646997d41..23166e26f1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1364,7 +1364,7 @@ public class DFSUtil {
         DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
-      throw new HadoopIllegalArgumentException("Unregonized value '"
+      throw new HadoopIllegalArgumentException("Unrecognized value '"
           + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 05d538aa86b..e60703be4af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2772,7 +2772,7 @@ public class BlockManager implements BlockStatsMXBean {
       throws IOException {
     for (ReportedBlockInfo rbi : rbis) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing previouly queued message " + rbi);
+        LOG.debug("Processing previously queued message " + rbi);
       }
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 69dc9f9d86f..8323140e4d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -836,7 +836,7 @@ class BlockPoolSlice {
     } catch (Exception e) {
       // Any exception we need to revert back to read from disk
       // Log the error and return false
-      LOG.info("Exception occured while reading the replicas cache file: "
+      LOG.info("Exception occurred while reading the replicas cache file: "
           + replicaFile.getPath(), e );
       return false;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index d41f9c379d0..08564de6898 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -1292,7 +1292,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     try {
       fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occured while compiling report: ", ioe);
+      LOG.warn("Exception occurred while compiling report: ", ioe);
       // Initiate a check on disk failure.
      dataset.datanode.checkDiskErrorAsync();
       // Ignore this directory and proceed.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 9e60e48bdce..a8adcbd5621 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -97,7 +97,7 @@ public class QueryCommand extends Command {
     String header = "Query Plan queries a given data node about the " +
         "current state of disk balancer execution.\n\n";

-    String footer = "\nQuery command retrievs the plan ID and the current " +
+    String footer = "\nQuery command retrieves the plan ID and the current " +
         "running state. ";

     HelpFormatter helpFormatter = new HelpFormatter();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 41ec8e9513a..f2a1ee52546 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -134,7 +134,7 @@ final class FSDirTruncateOp {
       if (!onBlockBoundary) {
         // Open file for write, but don't log into edits
         long lastBlockDelta = file.computeFileSize() - newLength;
-        assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
+        assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
         truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
             clientMachine, lastBlockDelta, null);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 11cdbc63e7a..29903449585 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1082,7 +1082,7 @@ public class FSEditLogLoader {
       boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
           fsDir, path, iip, file, oldBlock);
       if (!removed && !(op instanceof UpdateBlocksOp)) {
-        throw new IOException("Trying to delete non-existant block " + oldBlock);
+        throw new IOException("Trying to delete non-existent block " + oldBlock);
       }
     } else if (newBlocks.length > oldBlocks.length) {
       final boolean isStriped = ecPolicy != null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index f79130db7a7..d6dd8eead7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -1025,7 +1025,7 @@ public class NNStorage extends Storage implements Closeable,
     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
       throw new InconsistentFSStateException(storage,
-          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
+          "Unexpected blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
     setBlockPoolID(bpid);
   }
@@ -1154,4 +1154,4 @@ public class NNStorage extends Storage implements Closeable,
     }
   }
 }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 2c31cd9e997..107d4ed081d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -219,7 +219,7 @@ public class NamenodeWebHdfsMethods {
       final String remoteAddr) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
-      throw new IOException("Namesystem has not been intialized yet.");
+      throw new IOException("Namesystem has not been initialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();