From 786b43c7a33f4a89fb5fcf65d6745c56253b1dac Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Tue, 2 Sep 2014 16:22:18 -0700
Subject: [PATCH] HDFS-6942. Fix typos in log messages. Contributed by Ray Chiang.

---
 .../java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   | 6 +++---
 .../org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java     | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                 | 2 ++
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java    | 2 +-
 .../test/java/org/apache/hadoop/hdfs/TestFileAppend4.java   | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/TestStartup.java | 2 +-
 6 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index 0c6be98bc28..707900b59b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -423,7 +423,7 @@ class OpenFileCtx {
     if (existantWriteCtx != null) {
       if (!existantWriteCtx.getReplied()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Repeated write request which hasn't be served: xid="
+          LOG.debug("Repeated write request which hasn't been served: xid="
               + xid + ", drop it.");
         }
       } else {
@@ -581,7 +581,7 @@ class OpenFileCtx {
    * writing, and there is no other threads writing (i.e., asyncStatus is
    * false), start the writing and set asyncStatus to true.
    *
-   * @return True if the new write is sequencial and we can start writing
+   * @return True if the new write is sequential and we can start writing
    *         (including the case that there is already a thread writing).
    */
   private synchronized boolean checkAndStartWrite(
@@ -906,7 +906,7 @@ class OpenFileCtx {
       long offset = nextOffset.get();
       if (range.getMin() > offset) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("The next sequencial write has not arrived yet");
+          LOG.debug("The next sequential write has not arrived yet");
         }
         processCommits(nextOffset.get()); // handle race
         this.asyncStatus = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 33dc3a3d5a1..0d591d63963 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -1423,7 +1423,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         throw io;
       }
       // This happens when startAfter was just deleted
-      LOG.info("Cookie cound't be found: " + new String(startAfter)
+      LOG.info("Cookie couldn't be found: " + new String(startAfter)
           + ", do listing from beginning");
       dlisting = dfsClient
           .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ecda4b3896..204df268192 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,6 +321,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6972. TestRefreshUserMappings.testRefreshSuperUserGroupsConfiguration
     doesn't decode url correctly. (Yongjun Zhang via wang)
 
+    HDFS-6942. Fix typos in log messages. (Ray Chiang via wheat9)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 441a65967c1..bae363f1d92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1752,7 +1752,7 @@ public class DataNode extends Configured
           + b + " (numBytes=" + b.getNumBytes() + ")"
           + ", stage=" + stage
           + ", clientname=" + clientname
-          + ", targests=" + Arrays.asList(targets));
+          + ", targets=" + Arrays.asList(targets));
     }
     this.targets = targets;
     this.targetStorageTypes = targetStorageTypes;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index b685a895f2b..32a41966dc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -125,7 +125,7 @@ public class TestFileAppend4 {
     while (!recovered && tries-- > 0) {
       try {
         out = fs.append(file1);
-        LOG.info("Successfully opened for appends");
+        LOG.info("Successfully opened for append");
         recovered = true;
       } catch (IOException e) {
         LOG.info("Failed open for append, waiting on lease recovery");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 5b9a1f864df..08fde3e6ba1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -456,7 +456,7 @@ public class TestStartup {
     checkNameSpace(conf);
 
     // read an image compressed in Gzip and store it uncompressed
-    LOG.info("Read an compressed iamge and store it as uncompressed.");
+    LOG.info("Read a compressed image and store it as uncompressed.");
     conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
     checkNameSpace(conf);