HDFS-9653. Added blocks pending deletion report to dfsadmin.

(Weiwei Yang via eyang)
Eric Yang 2016-01-24 14:19:49 -08:00
parent 10dc2c0493
commit 10a2bc0dff
10 changed files with 71 additions and 11 deletions

View File

@@ -1957,10 +1957,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  private long[] callGetStats() throws IOException {
+  private long getStateByIndex(int stateIndex) throws IOException {
     checkOpen();
     try (TraceScope ignored = tracer.newScope("getStats")) {
-      return namenode.getStats();
+      long[] states = namenode.getStats();
+      return states.length > stateIndex ? states[stateIndex] : -1;
     }
   }
@@ -1968,8 +1969,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#getStats()
    */
   public FsStatus getDiskStatus() throws IOException {
-    long rawNums[] = callGetStats();
-    return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
+    return new FsStatus(getStateByIndex(0),
+        getStateByIndex(1), getStateByIndex(2));
   }
 
   /**
@@ -1978,7 +1979,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getMissingBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_MISSING_BLOCKS_IDX);
   }
 
   /**
@@ -1987,8 +1989,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getMissingReplOneBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.
-        GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX);
+  }
+
+  /**
+   * Returns count of blocks pending on deletion.
+   * @throws IOException
+   */
+  public long getPendingDeletionBlocksCount() throws IOException {
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_PENDING_DELETION_BLOCKS_IDX);
   }
 
   /**
@@ -1996,7 +2007,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getUnderReplicatedBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_UNDER_REPLICATED_IDX);
   }
 
   /**
@@ -2004,7 +2016,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getCorruptBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_CORRUPT_BLOCKS_IDX);
   }
 
   /**
@@ -2014,7 +2027,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getBytesInFutureBlocks() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX);
   }
 
   /**

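Note on compatibility: the refactor above replaces callGetStats() with getStateByIndex(int), which bounds-checks the array returned by the NameNode, so a client talking to an older NameNode (whose getStats() array is shorter) sees -1 instead of an ArrayIndexOutOfBoundsException. A minimal standalone sketch of that idiom; StatIndexDemo and statOrMissing are hypothetical names, not part of the patch:

    // Hypothetical demo of the defensive-indexing idiom used by getStateByIndex().
    public class StatIndexDemo {
      // Return the stat at index, or -1 if the (older) server did not report it.
      static long statOrMissing(long[] stats, int index) {
        return (stats != null && stats.length > index) ? stats[index] : -1L;
      }

      public static void main(String[] args) {
        long[] oldNameNodeStats = new long[8];  // pre-patch array: no slot 8
        long[] newNameNodeStats = new long[9];  // patched array: includes slot 8
        System.out.println(statOrMissing(oldNameNodeStats, 8));  // -1 (not reported)
        System.out.println(statOrMissing(newNameNodeStats, 8));  // 0 (reported, empty queue)
      }
    }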
View File

@@ -1114,6 +1114,15 @@ public class DistributedFileSystem extends FileSystem {
     return dfs.getMissingBlocksCount();
   }
 
+  /**
+   * Returns count of blocks pending on deletion.
+   *
+   * @throws IOException
+   */
+  public long getPendingDeletionBlocksCount() throws IOException {
+    return dfs.getPendingDeletionBlocksCount();
+  }
+
   /**
    * Returns count of blocks with replication factor 1 and have
    * lost the only replica.

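For callers, the public entry point is the DistributedFileSystem method added above. A minimal usage sketch, assuming fs.defaultFS in the Configuration points at an HDFS cluster (so FileSystem.get() can be cast to DistributedFileSystem); the class name PendingDeletionReport is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class PendingDeletionReport {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // New in this patch: number of blocks queued for deletion on the NameNode.
          System.out.println("Pending deletion blocks: "
              + dfs.getPendingDeletionBlocksCount());
        }
      }
    }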
View File

@@ -714,7 +714,8 @@ public interface ClientProtocol {
   int GET_STATS_MISSING_BLOCKS_IDX = 5;
   int GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX = 6;
   int GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX = 7;
-  int STATS_ARRAY_LENGTH = 8;
+  int GET_STATS_PENDING_DELETION_BLOCKS_IDX = 8;
+  int STATS_ARRAY_LENGTH = 9;
 
   /**
    * Get a set of statistics about the filesystem.
@@ -729,6 +730,7 @@ public interface ClientProtocol {
    * <li> [6] contains number of blocks which have replication factor
    * 1 and have lost the only replica. </li>
    * <li> [7] contains number of bytes that are at risk for deletion. </li>
+   * <li> [8] contains number of pending deletion blocks. </li>
    * </ul>
    * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
    * actual numbers to index into the array.

View File

@@ -1562,6 +1562,8 @@ public class PBHelperClient {
         res.getMissingReplOneBlocks();
     result[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] =
         res.hasBlocksInFuture() ? res.getBlocksInFuture() : 0;
+    result[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX] =
+        res.getPendingDeletionBlocks();
     return result;
   }
 
@@ -1931,6 +1933,11 @@ public class PBHelperClient {
       result.setBlocksInFuture(
           fsStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX]);
     }
+    if (fsStats.length >=
+        ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX + 1) {
+      result.setPendingDeletionBlocks(
+          fsStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX]);
+    }
     return result.build();
   }

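The decode path above reads the new field without a hasPendingDeletionBlocks() guard, relying on proto2 semantics: an optional uint64 the sender never set reads back as its default, 0. A small sketch of that behaviour, assuming the regenerated ClientNamenodeProtocolProtos outer class; OptionalFieldDefaultDemo is an illustrative name:

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;

    public class OptionalFieldDefaultDemo {
      public static void main(String[] args) {
        // Default instance stands in for a response from a server that
        // predates the new field.
        GetFsStatsResponseProto unset = GetFsStatsResponseProto.getDefaultInstance();
        System.out.println(unset.hasPendingDeletionBlocks());  // false: never set
        System.out.println(unset.getPendingDeletionBlocks());  // 0: proto2 default for uint64
      }
    }

The encode path, by contrast, length-checks fsStats before calling setPendingDeletionBlocks(), since the serializing side may still be handed the shorter pre-HDFS-9653 array.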
View File

@@ -307,6 +307,7 @@ message GetFsStatsResponseProto {
   required uint64 missing_blocks = 6;
   optional uint64 missing_repl_one_blocks = 7;
   optional uint64 blocks_in_future = 8;
+  optional uint64 pending_deletion_blocks = 9;
 }
 
 enum DatanodeReportTypeProto { // type of the datanode report

View File

@@ -60,6 +60,9 @@ Trunk (Unreleased)
 
   IMPROVEMENTS
 
+    HDFS-9653. Added blocks pending deletion report to dfsadmin.
+    (Weiwei Yang via eyang)
+
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
     (Junping Du via llu)

View File

@@ -184,6 +184,7 @@ class HeartbeatManager implements DatanodeStatistics {
                        -1L,
                        -1L,
                        -1L,
+                       -1L,
                        -1L};
   }

View File

@@ -3809,6 +3809,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         getMissingReplOneBlocksCount();
     stats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] =
         blockManager.getBytesInFuture();
+    stats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX] =
+        blockManager.getPendingDeletionBlocksCount();
     return stats;
   }

View File

@@ -502,6 +502,8 @@ public class DFSAdmin extends FsShell {
         dfs.getMissingBlocksCount());
     System.out.println("Missing blocks (with replication factor 1): " +
         dfs.getMissingReplOneBlocksCount());
+    System.out.println("Pending deletion blocks: " +
+        dfs.getPendingDeletionBlocksCount());
     System.out.println();

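With the DFSAdmin change above, the counter appears as a new "Pending deletion blocks:" line in the summary printed by hdfs dfsadmin -report. A hedged sketch of driving the same code path programmatically; ReportDriver is an illustrative name, and the Configuration is assumed to resolve to an HDFS cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class ReportDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Same code path as "hdfs dfsadmin -report"; the printed summary now
        // includes the "Pending deletion blocks: <n>" line added in this patch.
        int exitCode = ToolRunner.run(conf, new DFSAdmin(), new String[] {"-report"});
        System.exit(exitCode);
      }
    }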
View File

@@ -17,16 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.lang.reflect.Method;
 import java.text.SimpleDateFormat;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -88,15 +91,31 @@ public class TestPendingInvalidateBlock {
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(REPLICATION, cluster.getNamesystem()
         .getPendingDeletionBlocks());
+    Assert.assertEquals(REPLICATION,
+        dfs.getPendingDeletionBlocksCount());
 
     Thread.sleep(6000);
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
+    Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
     long nnStarted = cluster.getNamesystem().getNNStartedTimeInMillis();
     long blockDeletionStartTime = cluster.getNamesystem()
         .getBlockDeletionStartTime();
     Assert.assertTrue(String.format(
         "Expect blockDeletionStartTime = %d > nnStarted = %d.",
         blockDeletionStartTime, nnStarted), blockDeletionStartTime > nnStarted);
+
+    // test client protocol compatibility
+    Method method = DFSClient.class.
+        getDeclaredMethod("getStateByIndex", int.class);
+    method.setAccessible(true);
+    // get number of pending deletion blocks by its index
+    long validState = (Long) method.invoke(dfs.getClient(),
+        ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX);
+    // get an out of index value
+    long invalidState = (Long) method.invoke(dfs.getClient(),
+        ClientProtocol.STATS_ARRAY_LENGTH);
+    Assert.assertEquals(0, validState);
+    Assert.assertEquals(-1, invalidState);
   }
 
   /**