HDFS-9653. Added blocks pending deletion report to dfsadmin.

(Weiwei Yang via eyang)
Eric Yang 2016-01-24 14:19:49 -08:00
parent 80928af293
commit 6ba6f993f8
10 changed files with 71 additions and 11 deletions
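
For context, this commit plumbs the NameNode's pending-deletion-block counter through ClientProtocol, DFSClient, and DistributedFileSystem, and prints it in the `hdfs dfsadmin -report` output. A minimal client-side sketch of reading the new counter follows; it is illustrative only and not part of the patch, the class name PendingDeletionBlocksExample is made up, and it assumes fs.defaultFS points at an HDFS cluster built with this change.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class PendingDeletionBlocksExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // New accessor added by this commit; it delegates to
      // DFSClient#getPendingDeletionBlocksCount().
      System.out.println("Pending deletion blocks: "
          + dfs.getPendingDeletionBlocksCount());
    }
  }
}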


@@ -2018,10 +2018,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  private long[] callGetStats() throws IOException {
+  private long getStateByIndex(int stateIndex) throws IOException {
     checkOpen();
     try (TraceScope ignored = tracer.newScope("getStats")) {
-      return namenode.getStats();
+      long[] states = namenode.getStats();
+      return states.length > stateIndex ? states[stateIndex] : -1;
     }
   }
 
@@ -2029,8 +2030,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#getStats()
    */
   public FsStatus getDiskStatus() throws IOException {
-    long rawNums[] = callGetStats();
-    return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
+    return new FsStatus(getStateByIndex(0),
+        getStateByIndex(1), getStateByIndex(2));
   }
 
   /**
@@ -2039,7 +2040,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getMissingBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_MISSING_BLOCKS_IDX);
   }
 
   /**
@@ -2048,8 +2050,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getMissingReplOneBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.
-        GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX);
+  }
+
+  /**
+   * Returns count of blocks pending on deletion.
+   * @throws IOException
+   */
+  public long getPendingDeletionBlocksCount() throws IOException {
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_PENDING_DELETION_BLOCKS_IDX);
   }
 
   /**
@@ -2057,7 +2068,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getUnderReplicatedBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_UNDER_REPLICATED_IDX);
   }
 
   /**
@@ -2065,7 +2077,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getCorruptBlocksCount() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_CORRUPT_BLOCKS_IDX);
   }
 
   /**
@@ -2075,7 +2088,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @throws IOException
    */
   public long getBytesInFutureBlocks() throws IOException {
-    return callGetStats()[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX];
+    return getStateByIndex(ClientProtocol.
+        GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX);
   }
 
   /**
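
The refactoring above is what keeps the new statistic backward compatible: getStateByIndex returns -1 when the requested index lies beyond the stats array returned by an older NameNode, rather than throwing ArrayIndexOutOfBoundsException. A standalone sketch of that contract follows; it is illustrative only, the class name and the array values are made up, and only the ternary mirrors the patched code.

public class StatsIndexFallbackSketch {
  // Mirrors the ternary used in DFSClient#getStateByIndex.
  static long getStateByIndex(long[] states, int stateIndex) {
    return states.length > stateIndex ? states[stateIndex] : -1;
  }

  public static void main(String[] args) {
    long[] preChangeStats = new long[8];   // old layout, no index 8
    long[] postChangeStats = new long[9];  // layout after this change
    postChangeStats[8] = 42;               // example pending deletion count

    System.out.println(getStateByIndex(preChangeStats, 8));   // prints -1
    System.out.println(getStateByIndex(postChangeStats, 8));  // prints 42
  }
}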


@@ -1194,6 +1194,15 @@ public class DistributedFileSystem extends FileSystem {
     return dfs.getMissingBlocksCount();
   }
 
+  /**
+   * Returns count of blocks pending on deletion.
+   *
+   * @throws IOException
+   */
+  public long getPendingDeletionBlocksCount() throws IOException {
+    return dfs.getPendingDeletionBlocksCount();
+  }
+
   /**
    * Returns count of blocks with replication factor 1 and have
    * lost the only replica.


@@ -714,7 +714,8 @@ public interface ClientProtocol {
   int GET_STATS_MISSING_BLOCKS_IDX = 5;
   int GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX = 6;
   int GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX = 7;
-  int STATS_ARRAY_LENGTH = 8;
+  int GET_STATS_PENDING_DELETION_BLOCKS_IDX = 8;
+  int STATS_ARRAY_LENGTH = 9;
 
   /**
    * Get a set of statistics about the filesystem.
@@ -729,6 +730,7 @@ public interface ClientProtocol {
    * <li> [6] contains number of blocks which have replication factor
    *          1 and have lost the only replica. </li>
    * <li> [7] contains number of bytes that are at risk for deletion. </li>
+   * <li> [8] contains number of pending deletion blocks. </li>
    * </ul>
    * Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
    * actual numbers to index into the array.


@@ -1509,6 +1509,8 @@ public class PBHelperClient {
         res.getMissingReplOneBlocks();
     result[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] =
         res.hasBlocksInFuture() ? res.getBlocksInFuture() : 0;
+    result[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX] =
+        res.getPendingDeletionBlocks();
     return result;
   }
 
@@ -1869,6 +1871,11 @@ public class PBHelperClient {
       result.setBlocksInFuture(
           fsStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX]);
     }
+    if (fsStats.length >=
+        ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX + 1) {
+      result.setPendingDeletionBlocks(
+          fsStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX]);
+    }
     return result.build();
   }


@@ -306,6 +306,7 @@ message GetFsStatsResponseProto {
   required uint64 missing_blocks = 6;
   optional uint64 missing_repl_one_blocks = 7;
   optional uint64 blocks_in_future = 8;
+  optional uint64 pending_deletion_blocks = 9;
 }
 
 enum DatanodeReportTypeProto { // type of the datanode report


@@ -21,6 +21,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-9653. Added blocks pending deletion report to dfsadmin.
+    (Weiwei Yang via eyang)
+
     HDFS-9257. improve error message for "Absolute path required" in INode.java
     to contain the rejected path (Marcell Szabo via harsh)
 


@@ -182,6 +182,7 @@ class HeartbeatManager implements DatanodeStatistics {
                        -1L,
                        -1L,
                        -1L,
+                       -1L,
                        -1L};
   }


@@ -3866,6 +3866,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         getMissingReplOneBlocksCount();
     stats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] =
         blockManager.getBytesInFuture();
+    stats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX] =
+        blockManager.getPendingDeletionBlocksCount();
     return stats;
   }


@@ -503,6 +503,8 @@ public class DFSAdmin extends FsShell {
         dfs.getMissingBlocksCount());
     System.out.println("Missing blocks (with replication factor 1): " +
         dfs.getMissingReplOneBlocksCount());
+    System.out.println("Pending deletion blocks: " +
+        dfs.getPendingDeletionBlocksCount());
 
     System.out.println();


@@ -17,16 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import java.lang.reflect.Method;
 import java.text.SimpleDateFormat;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
@@ -88,15 +91,31 @@ public class TestPendingInvalidateBlock {
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(REPLICATION, cluster.getNamesystem()
         .getPendingDeletionBlocks());
+    Assert.assertEquals(REPLICATION,
+        dfs.getPendingDeletionBlocksCount());
     Thread.sleep(6000);
     Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
     Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
+    Assert.assertEquals(0, dfs.getPendingDeletionBlocksCount());
     long nnStarted = cluster.getNamesystem().getNNStartedTimeInMillis();
     long blockDeletionStartTime = cluster.getNamesystem()
         .getBlockDeletionStartTime();
     Assert.assertTrue(String.format(
         "Expect blockDeletionStartTime = %d > nnStarted = %d.",
         blockDeletionStartTime, nnStarted), blockDeletionStartTime > nnStarted);
+
+    // test client protocol compatibility
+    Method method = DFSClient.class.
+        getDeclaredMethod("getStateByIndex", int.class);
+    method.setAccessible(true);
+    // get number of pending deletion blocks by its index
+    long validState = (Long) method.invoke(dfs.getClient(),
+        ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX);
+    // get an out of index value
+    long invalidState = (Long) method.invoke(dfs.getClient(),
+        ClientProtocol.STATS_ARRAY_LENGTH);
+    Assert.assertEquals(0, validState);
+    Assert.assertEquals(-1, invalidState);
   }
 
   /**