From a3cbaf0c046285104d805fa9430f86eccc290e3b Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Wed, 19 Oct 2016 17:20:07 -0700
Subject: [PATCH] HDFS-10752. Several log refactoring/improvement suggestion
 in HDFS. Contributed by Hanisha Koneru.

---
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java     | 16 ++++++++--------
 .../blockmanagement/CorruptReplicasMap.java        |  4 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java      | 10 +++++-----
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
index 7bf93ada901..e26fac5ab8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
@@ -69,7 +69,7 @@ class OpenFileCtxCache {
     Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
         .iterator();
     if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
+      LOG.trace("openFileMap size:" + size());
     }
 
     Entry<FileHandle, OpenFileCtx> idlest = null;
@@ -117,10 +117,10 @@ class OpenFileCtxCache {
   boolean put(FileHandle h, OpenFileCtx context) {
     OpenFileCtx toEvict = null;
     synchronized (this) {
-      Preconditions.checkState(openFileMap.size() <= this.maxStreams,
-          "stream cache size " + openFileMap.size()
-              + " is larger than maximum" + this.maxStreams);
-      if (openFileMap.size() == this.maxStreams) {
+      Preconditions.checkState(size() <= this.maxStreams,
+          "stream cache size " + size() + " is larger than maximum" + this
+              .maxStreams);
+      if (size() == this.maxStreams) {
         Entry<FileHandle, OpenFileCtx> pairs = getEntryToEvict();
         if (pairs ==null) {
           return false;
         }
@@ -149,7 +149,7 @@ class OpenFileCtxCache {
     Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
         .iterator();
     if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
+      LOG.trace("openFileMap size:" + size());
     }
 
     while (it.hasNext()) {
@@ -168,7 +168,7 @@ class OpenFileCtxCache {
           openFileMap.remove(handle);
           if (LOG.isDebugEnabled()) {
             LOG.debug("After remove stream " + handle.getFileId()
-                + ", the stream number:" + openFileMap.size());
+                + ", the stream number:" + size());
           }
           ctxToRemove.add(ctx2);
         }
@@ -201,7 +201,7 @@ class OpenFileCtxCache {
     Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
         .iterator();
     if (LOG.isTraceEnabled()) {
-      LOG.trace("openFileMap size:" + openFileMap.size());
+      LOG.trace("openFileMap size:" + size());
     }
 
     while (it.hasNext()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 35468da4dc7..8a097a542c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -84,12 +84,12 @@ public class CorruptReplicasMap{
     if (!nodes.keySet().contains(dn)) {
       NameNode.blockStateChangeLog.debug(
           "BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on "
-              + "{} by {} {}", blk.getBlockName(), dn, Server.getRemoteIp(),
+              + "{} by {} {}", blk, dn, Server.getRemoteIp(),
           reasonText);
     } else {
       NameNode.blockStateChangeLog.debug(
           "BLOCK NameSystem.addToCorruptReplicasMap: duplicate requested for" +
-          " {} to add as corrupt on {} by {} {}", blk.getBlockName(), dn,
+          " {} to add as corrupt on {} by {} {}", blk, dn,
           Server.getRemoteIp(), reasonText);
     }
     // Add the node or update the reason.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 14646c12005..652171216ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -667,7 +667,7 @@ public class NameNode extends ReconfigurableBase implements
   NamenodeRegistration setRegistration() {
     nodeRegistration = new NamenodeRegistration(
-        NetUtils.getHostPortString(rpcServer.getRpcAddress()),
+        NetUtils.getHostPortString(getNameNodeAddress()),
         NetUtils.getHostPortString(getHttpAddress()),
         getFSImage().getStorage(), getRole());
     return nodeRegistration;
   }
@@ -730,7 +730,7 @@ public class NameNode extends ReconfigurableBase implements
       // This is expected for MiniDFSCluster. Set it now using
       // the RPC server's bind address.
       clientNamenodeAddress =
-          NetUtils.getHostPortString(rpcServer.getRpcAddress());
+          NetUtils.getHostPortString(getNameNodeAddress());
       LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
           + " this namenode/service.");
     }
@@ -817,7 +817,7 @@ public class NameNode extends ReconfigurableBase implements
         LOG.warn("ServicePlugin " + p + " could not be started", t);
       }
     }
-    LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
+    LOG.info(getRole() + " RPC up at: " + getNameNodeAddress());
     if (rpcServer.getServiceRpcAddress() != null) {
       LOG.info(getRole() + " service RPC up at: "
           + rpcServer.getServiceRpcAddress());
@@ -1050,7 +1050,7 @@ public class NameNode extends ReconfigurableBase implements
    * @return NameNode RPC address in "host:port" string form
    */
   public String getNameNodeAddressHostPortString() {
-    return NetUtils.getHostPortString(rpcServer.getRpcAddress());
+    return NetUtils.getHostPortString(getNameNodeAddress());
  }
 
   /**
@@ -1059,7 +1059,7 @@ public class NameNode extends ReconfigurableBase implements
    */
   public InetSocketAddress getServiceRpcAddress() {
     final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress();
-    return serviceAddr == null ? rpcServer.getRpcAddress() : serviceAddr;
+    return serviceAddr == null ? getNameNodeAddress() : serviceAddr;
   }
 
   /**