diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 3555add7aa4..2ef40d20349 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -371,9 +371,11 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long c = getCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
+    float usedPercent = getDfsUsedPercent();
     long cc = getCacheCapacity();
     long cr = getCacheRemaining();
     long cu = getCacheUsed();
+    float cacheUsedPercent = getCacheUsedPercent();
     buffer.append(getName());
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
@@ -387,11 +389,11 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
     buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
-    buffer.append(" " + percent2String(u/(double)c));
+    buffer.append(" " + percent2String(usedPercent));
     buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
     buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
     buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
-    buffer.append(" " + percent2String(cu/(double)cc));
+    buffer.append(" " + percent2String(cacheUsedPercent));
     buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
     buffer.append(" " + new Date(lastUpdate));
     return buffer.toString();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 14666dd9795..a07fca2bb49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1358,6 +1358,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
     (Kihwal Lee via yliu)
 
+    HDFS-9033. dfsadmin -metasave prints "NaN" for cache used%.
+    (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index f1d310439d3..f8189876233 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -149,6 +150,16 @@ public class TestMetaSave {
       assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
       line = reader.readLine();
       assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks 2 waiting deletion from 1 datanodes."));
+      //skip 2 lines to reach HDFS-9033 scenario.
+      line = reader.readLine();
+      line = reader.readLine();
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Number of datanodes: 2"));
+      line = reader.readLine();
+      assertFalse(line.contains("NaN"));
+
     } finally {
       if (reader != null)
         reader.close();
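
For context on the patch above: the pre-fix code built the used% strings with raw floating-point division, and cu/(double)cc evaluates to NaN when the cache capacity is zero (a datanode with no cache configured), which is what dfsadmin -metasave then printed. The fix delegates to the existing getDfsUsedPercent()/getCacheUsedPercent() accessors, which guard the zero-capacity case. The following is a minimal standalone sketch of the two behaviors, not part of the patch; the class and method names are illustrative, and the guard is assumed to mirror DFSUtilClient.getPercentUsed.

// Hypothetical demo class illustrating the NaN behavior fixed by HDFS-9033.
public class PercentUsedDemo {

  // Pre-patch style: plain floating-point division. In Java, 0 / 0.0 is NaN,
  // so a datanode with zero cache capacity reports "NaN" in the metasave output.
  static double naivePercent(long used, long capacity) {
    return used / (double) capacity;
  }

  // Post-patch style: guard the zero-capacity case before dividing
  // (assumed to match what the accessors used in the patch do).
  static float guardedPercent(long used, long capacity) {
    return capacity <= 0 ? 100 : (used * 100.0f) / capacity;
  }

  public static void main(String[] args) {
    System.out.println(naivePercent(0, 0));   // prints NaN
    System.out.println(guardedPercent(0, 0)); // prints 100.0
  }
}

The new test lines assert exactly this: after walking the metasave report down to the per-datanode section, the line that carries the cache used% must no longer contain the string "NaN".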