HDFS-11201. Spelling errors in the logging, help, assertions and exception messages. Contributed by Grant Sohn.
Parent: 291df5c7fb
Commit: b9522e86a5
@@ -1052,7 +1052,7 @@ public class DFSInputStream extends FSInputStream
             reader.getNetworkDistance(), nread);
         if (nread != len) {
           throw new IOException("truncated return from reader.read(): " +
-              "excpected " + len + ", got " + nread);
+              "expected " + len + ", got " + nread);
         }
         DFSClientFaultInjector.get().readFromDatanodeDelay();
         return;
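Note: the corrected message belongs to a short-read guard. The block reader is expected to return exactly len bytes, and anything less is surfaced as an IOException rather than returned silently. A minimal, self-contained sketch of the same pattern (readExactly and its names are illustrative, not the DFSInputStream code):

import java.io.IOException;
import java.io.InputStream;

final class ShortReadCheck {
  // Keep reading until exactly len bytes are buffered, or fail loudly,
  // mirroring the "expected N, got M" guard in the hunk above.
  static byte[] readExactly(InputStream in, int len) throws IOException {
    byte[] buf = new byte[len];
    int off = 0;
    while (off < len) {
      int n = in.read(buf, off, len - off);
      if (n < 0) {
        throw new IOException(
            "truncated read: expected " + len + ", got " + off);
      }
      off += n;
    }
    return buf;
  }
}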
@@ -57,11 +57,11 @@ public class LongBitFormat implements Serializable {
   public long combine(long value, long record) {
     if (value < MIN) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " < MIN = " + MIN);
+          "Illegal value: " + NAME + " = " + value + " < MIN = " + MIN);
     }
     if (value > MAX) {
       throw new IllegalArgumentException(
-          "Illagal value: " + NAME + " = " + value + " > MAX = " + MAX);
+          "Illegal value: " + NAME + " = " + value + " > MAX = " + MAX);
     }
     return (record & ~MASK) | (value << OFFSET);
   }
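Note: combine packs a bounded value into one bit field of a wider record: record & ~MASK clears the field's old bits, and value << OFFSET shifts the new value into position. A self-contained sketch with hypothetical OFFSET and LENGTH values, assuming the same packing scheme as the hunk above:

final class BitFieldDemo {
  // Hypothetical 8-bit field occupying bits 16..23 of a long record.
  static final int OFFSET = 16;
  static final int LENGTH = 8;
  static final long MAX = (1L << LENGTH) - 1;  // 255
  static final long MASK = MAX << OFFSET;      // 0xFF0000

  static long combine(long value, long record) {
    if (value < 0 || value > MAX) {
      throw new IllegalArgumentException("Illegal value: " + value);
    }
    // Clear the field, then drop the new value into it.
    return (record & ~MASK) | (value << OFFSET);
  }

  public static void main(String[] args) {
    // Bits 16..23 become 0xAB; every other bit is left untouched.
    System.out.printf("%x%n", combine(0xAB, 0xFFFFFFFFL));  // ffabffff
  }
}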
@@ -38,7 +38,7 @@ public class ServerException extends XException {
     S04("Service [{0}] does not implement declared interface [{1}]"),
     S05("[{0}] is not a file"),
     S06("Could not load file [{0}], {1}"),
-    S07("Could not instanciate service class [{0}], {1}"),
+    S07("Could not instantiate service class [{0}], {1}"),
     S08("Could not load service classes, {0}"),
     S09("Could not set service [{0}] programmatically -server shutting down-, {1}"),
     S10("Service [{0}] requires service [{1}]"),
@@ -130,7 +130,7 @@ class OpenFileCtxCache {
         }
         toEvict = openFileMap.remove(pairs.getKey());
         Preconditions.checkState(toEvict == pairs.getValue(),
-            "The deleted entry is not the same as odlest found.");
+            "The deleted entry is not the same as oldest found.");
       }
     }
     openFileMap.put(h, context);
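Note: the corrected string is a Preconditions.checkState message. After picking the oldest entry, the cache removes it by key and asserts that the value handed back is the very object it chose, catching a concurrent change in between. A minimal sketch of that evict-and-verify step with stand-in types (not the NFS gateway classes):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import com.google.common.base.Preconditions;

final class EvictOldestDemo {
  // Remove the entry chosen as oldest and verify the map still held it.
  static <K, V> V evict(Map<K, V> map, Map.Entry<K, V> oldest) {
    V toEvict = map.remove(oldest.getKey());
    Preconditions.checkState(toEvict == oldest.getValue(),
        "The deleted entry is not the same as oldest found.");
    return toEvict;
  }

  public static void main(String[] args) {
    Map<String, String> openFileMap = new ConcurrentHashMap<>();
    openFileMap.put("handle-1", "ctx-1");
    Map.Entry<String, String> oldest =
        openFileMap.entrySet().iterator().next();
    System.out.println(evict(openFileMap, oldest));  // ctx-1
  }
}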
@@ -1364,7 +1364,7 @@ public class DFSUtil {
         DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
-      throw new HadoopIllegalArgumentException("Unregonized value '"
+      throw new HadoopIllegalArgumentException("Unrecognized value '"
           + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
     }

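Note: HttpConfig.Policy.fromString returns null for a string that names no policy, and the caller turns that null into an exception quoting both the offending value and the configuration key. A small sketch of the same parse-or-reject pattern with a stand-in enum (the enum values and key below are illustrative assumptions, not the Hadoop API):

final class PolicyParseDemo {
  enum Policy {
    HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS;

    // Null on no match, mirroring the behavior relied on in the hunk above.
    static Policy fromString(String s) {
      for (Policy p : values()) {
        if (p.name().equalsIgnoreCase(s)) {
          return p;
        }
      }
      return null;
    }
  }

  static Policy parse(String policyStr, String key) {
    Policy policy = Policy.fromString(policyStr);
    if (policy == null) {
      throw new IllegalArgumentException(
          "Unrecognized value '" + policyStr + "' for " + key);
    }
    return policy;
  }

  public static void main(String[] args) {
    System.out.println(parse("HTTPS_ONLY", "dfs.http.policy"));  // HTTPS_ONLY
  }
}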
@@ -2772,7 +2772,7 @@ public class BlockManager implements BlockStatsMXBean {
       throws IOException {
     for (ReportedBlockInfo rbi : rbis) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Processing previouly queued message " + rbi);
+        LOG.debug("Processing previously queued message " + rbi);
       }
       if (rbi.getReportedState() == null) {
         // This is a DELETE_BLOCK request
@@ -836,7 +836,7 @@ class BlockPoolSlice {
     } catch (Exception e) {
       // Any exception we need to revert back to read from disk
       // Log the error and return false
-      LOG.info("Exception occured while reading the replicas cache file: "
+      LOG.info("Exception occurred while reading the replicas cache file: "
           + replicaFile.getPath(), e );
       return false;
     }
@@ -1292,7 +1292,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
       try {
         fileNames = IOUtils.listDirectory(dir, BlockDirFilter.INSTANCE);
       } catch (IOException ioe) {
-        LOG.warn("Exception occured while compiling report: ", ioe);
+        LOG.warn("Exception occurred while compiling report: ", ioe);
         // Initiate a check on disk failure.
         dataset.datanode.checkDiskErrorAsync();
         // Ignore this directory and proceed.
@@ -97,7 +97,7 @@ public class QueryCommand extends Command {
     String header = "Query Plan queries a given data node about the " +
         "current state of disk balancer execution.\n\n";

-    String footer = "\nQuery command retrievs the plan ID and the current " +
+    String footer = "\nQuery command retrieves the plan ID and the current " +
         "running state. ";

     HelpFormatter helpFormatter = new HelpFormatter();
@@ -134,7 +134,7 @@ final class FSDirTruncateOp {
       if (!onBlockBoundary) {
         // Open file for write, but don't log into edits
         long lastBlockDelta = file.computeFileSize() - newLength;
-        assert lastBlockDelta > 0 : "delta is 0 only if on block bounday";
+        assert lastBlockDelta > 0 : "delta is 0 only if on block boundary";
         truncateBlock = prepareFileForTruncate(fsn, iip, clientName,
             clientMachine, lastBlockDelta, null);
       }
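Note: the assertion restates the branch condition. A worked example, assuming the truncation point falls inside the file's current last block: with 128 MB blocks and a 256 MB file, truncating to 200 MB gives lastBlockDelta = 256 MB - 200 MB = 56 MB, a positive number of bytes to shave off the last block; truncating to exactly 128 MB would land on a block boundary, onBlockBoundary would be true, and this branch would not run.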
@@ -1082,7 +1082,7 @@ public class FSEditLogLoader {
         boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
             fsDir, path, iip, file, oldBlock);
         if (!removed && !(op instanceof UpdateBlocksOp)) {
-          throw new IOException("Trying to delete non-existant block " + oldBlock);
+          throw new IOException("Trying to delete non-existent block " + oldBlock);
         }
       } else if (newBlocks.length > oldBlocks.length) {
         final boolean isStriped = ecPolicy != null;
@@ -1025,7 +1025,7 @@ public class NNStorage extends Storage implements Closeable,

     if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
       throw new InconsistentFSStateException(storage,
-          "Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
+          "Unexpected blockpoolID " + bpid + " . Expected " + blockpoolID);
     }
     setBlockPoolID(bpid);
   }
@@ -219,7 +219,7 @@ public class NamenodeWebHdfsMethods {
       final String remoteAddr) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
-      throw new IOException("Namesystem has not been intialized yet.");
+      throw new IOException("Namesystem has not been initialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();
