diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index ec1a6703ce8..b2dafbfa6bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2018,10 +2018,11 @@ public void setOwner(String src, String username, String groupname) } } - private long[] callGetStats() throws IOException { + private long getStateByIndex(int stateIndex) throws IOException { checkOpen(); try (TraceScope ignored = tracer.newScope("getStats")) { - return namenode.getStats(); + long[] states = namenode.getStats(); + return states.length > stateIndex ? states[stateIndex] : -1; } } @@ -2029,8 +2030,8 @@ private long[] callGetStats() throws IOException { * @see ClientProtocol#getStats() */ public FsStatus getDiskStatus() throws IOException { - long rawNums[] = callGetStats(); - return new FsStatus(rawNums[0], rawNums[1], rawNums[2]); + return new FsStatus(getStateByIndex(0), + getStateByIndex(1), getStateByIndex(2)); } /** @@ -2039,7 +2040,8 @@ public FsStatus getDiskStatus() throws IOException { * @throws IOException */ public long getMissingBlocksCount() throws IOException { - return callGetStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]; + return getStateByIndex(ClientProtocol. + GET_STATS_MISSING_BLOCKS_IDX); } /** @@ -2048,8 +2050,17 @@ public long getMissingBlocksCount() throws IOException { * @throws IOException */ public long getMissingReplOneBlocksCount() throws IOException { - return callGetStats()[ClientProtocol. - GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX]; + return getStateByIndex(ClientProtocol. + GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX); + } + + /** + * Returns count of blocks pending on deletion. 
+ * @throws IOException + */ + public long getPendingDeletionBlocksCount() throws IOException { + return getStateByIndex(ClientProtocol. + GET_STATS_PENDING_DELETION_BLOCKS_IDX); } /** @@ -2057,7 +2068,8 @@ public long getMissingReplOneBlocksCount() throws IOException { * @throws IOException */ public long getUnderReplicatedBlocksCount() throws IOException { - return callGetStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]; + return getStateByIndex(ClientProtocol. + GET_STATS_UNDER_REPLICATED_IDX); } /** @@ -2065,7 +2077,8 @@ public long getUnderReplicatedBlocksCount() throws IOException { * @throws IOException */ public long getCorruptBlocksCount() throws IOException { - return callGetStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]; + return getStateByIndex(ClientProtocol. + GET_STATS_CORRUPT_BLOCKS_IDX); } /** @@ -2075,7 +2088,8 @@ public long getCorruptBlocksCount() throws IOException { * @throws IOException */ public long getBytesInFutureBlocks() throws IOException { - return callGetStats()[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX]; + return getStateByIndex(ClientProtocol. + GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 32e700d9490..7d63b9d5b5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1194,6 +1194,15 @@ public long getMissingBlocksCount() throws IOException { return dfs.getMissingBlocksCount(); } + /** + * Returns count of blocks pending on deletion. 
+ * + * @throws IOException + */ + public long getPendingDeletionBlocksCount() throws IOException { + return dfs.getPendingDeletionBlocksCount(); + } + /** * Returns count of blocks with replication factor 1 and have * lost the only replica. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index dcf7c7bb632..5fa80257699 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -714,7 +714,8 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing() int GET_STATS_MISSING_BLOCKS_IDX = 5; int GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX = 6; int GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX = 7; - int STATS_ARRAY_LENGTH = 8; + int GET_STATS_PENDING_DELETION_BLOCKS_IDX = 8; + int STATS_ARRAY_LENGTH = 9; /** * Get a set of statistics about the filesystem. @@ -729,6 +730,7 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing() *
 * <li>[6] contains number of blocks which have replication factor
 * 1 and have lost the only replica.</li>
 * <li>[7] contains number of bytes that are at risk for deletion.</li>
+ * <li>[8] contains number of pending deletion blocks.</li>
 *
  • * * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of * actual numbers to index into the array. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index ff6f8925d38..5d81c1acbe1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -1509,6 +1509,8 @@ public static long[] convert(GetFsStatsResponseProto res) { res.getMissingReplOneBlocks(); result[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] = res.hasBlocksInFuture() ? res.getBlocksInFuture() : 0; + result[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX] = + res.getPendingDeletionBlocks(); return result; } @@ -1869,6 +1871,11 @@ public static GetFsStatsResponseProto convert(long[] fsStats) { result.setBlocksInFuture( fsStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX]); } + if (fsStats.length >= + ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX + 1) { + result.setPendingDeletionBlocks( + fsStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX]); + } return result.build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 075229725cd..9925d1d3bbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -306,6 +306,7 @@ message GetFsStatsResponseProto { required uint64 missing_blocks = 6; optional uint64 missing_repl_one_blocks = 7; optional uint64 blocks_in_future = 8; + optional uint64 pending_deletion_blocks = 9; } enum 
DatanodeReportTypeProto { // type of the datanode report diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8ffe1c75b59..8d33532e4b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -82,6 +82,9 @@ Release 2.8.0 - UNRELEASED IMPROVEMENTS + HDFS-9653. Added blocks pending deletion report to dfsadmin. + (Weiwei Yang via eyang) + HDFS-9257. improve error message for "Absolute path required" in INode.java to contain the rejected path (Marcell Szabo via harsh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java index 7546b1ac3ab..b8d30437729 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java @@ -184,6 +184,7 @@ public synchronized long[] getStats() { -1L, -1L, -1L, + -1L, -1L}; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 9a44776ece4..225d55941a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3772,6 +3772,8 @@ long[] getStats() { getMissingReplOneBlocksCount(); stats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] = blockManager.getBytesInFuture(); + stats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX] = + blockManager.getPendingDeletionBlocksCount(); return stats; } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index fe2a497fd9e..59fe1a7910c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -503,6 +503,8 @@ public void report(String[] argv, int i) throws IOException { dfs.getMissingBlocksCount()); System.out.println("Missing blocks (with replication factor 1): " + dfs.getMissingReplOneBlocksCount()); + System.out.println("Pending deletion blocks: " + + dfs.getPendingDeletionBlocksCount()); System.out.println(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java index a588a73150e..75005440319 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java @@ -17,16 +17,19 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; +import java.lang.reflect.Method; import java.text.SimpleDateFormat; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; 
import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; @@ -88,15 +91,31 @@ public void testPendingDeletion() throws Exception { Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal()); Assert.assertEquals(REPLICATION, cluster.getNamesystem() .getPendingDeletionBlocks()); + Assert.assertEquals(REPLICATION, + dfs.getPendingDeletionBlocksCount()); Thread.sleep(6000); Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal()); Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks()); + Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount()); long nnStarted = cluster.getNamesystem().getNNStartedTimeInMillis(); long blockDeletionStartTime = cluster.getNamesystem() .getBlockDeletionStartTime(); Assert.assertTrue(String.format( "Expect blockDeletionStartTime = %d > nnStarted = %d.", blockDeletionStartTime, nnStarted), blockDeletionStartTime > nnStarted); + + // test client protocol compatibility + Method method = DFSClient.class. + getDeclaredMethod("getStateByIndex", int.class); + method.setAccessible(true); + // get number of pending deletion blocks by its index + long validState = (Long) method.invoke(dfs.getClient(), + ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX); + // get an out of index value + long invalidState = (Long) method.invoke(dfs.getClient(), + ClientProtocol.STATS_ARRAY_LENGTH); + Assert.assertEquals(0, validState); + Assert.assertEquals(-1, invalidState); } /**