From 4e50dc976a92a9560630c87cfc4e4513916e5735 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Thu, 7 Sep 2017 16:57:19 -0700
Subject: [PATCH] HDFS-12218. Addendum. Rename split EC / replicated block
 metrics in BlockManager.

---
 .../hadoop/hdfs/protocol/ClientProtocol.java  |  4 +-
 .../hdfs/protocol/ECBlockGroupStats.java      | 60 ++++++++--------
 .../hdfs/protocol/ReplicatedBlockStats.java   | 72 +++++++++----------
 .../ClientNamenodeProtocolTranslatorPB.java   | 28 ++++----
 .../hdfs/protocolPB/PBHelperClient.java       | 42 +++++------
 .../main/proto/ClientNamenodeProtocol.proto   | 16 ++---
 ...amenodeProtocolServerSideTranslatorPB.java | 20 +++---
 .../server/blockmanagement/BlockManager.java  | 22 +++---
 .../blockmanagement/CorruptReplicasMap.java   |  6 +-
 .../blockmanagement/InvalidateBlocks.java     | 52 +++++++-------
 .../blockmanagement/LowRedundancyBlocks.java  | 16 ++---
 .../hdfs/server/namenode/FSNamesystem.java    | 19 +++--
 .../server/namenode/NameNodeRpcServer.java    |  8 +--
 .../namenode/metrics/ECBlockGroupsMBean.java  |  4 +-
 .../apache/hadoop/hdfs/tools/DFSAdmin.java    | 25 +++----
 .../org/apache/hadoop/hdfs/DFSTestUtil.java   | 30 ++++----
 .../TestComputeInvalidateWork.java            |  4 +-
 .../TestCorruptReplicaInfo.java               |  8 +--
 .../TestLowRedundancyBlockQueues.java         | 10 +--
 .../namenode/metrics/TestNameNodeMetrics.java |  2 +-
 .../hadoop/hdfs/tools/TestDFSAdmin.java       |  4 +-
 21 files changed, 226 insertions(+), 226 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 6626e3ba7ba..8d5503f9abb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -778,14 +778,14 @@ public interface ClientProtocol {
    * in the filesystem.
    */
   @Idempotent
-  ReplicatedBlockStats getBlocksStats() throws IOException;
+  ReplicatedBlockStats getReplicatedBlockStats() throws IOException;
 
   /**
    * Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
    * in the filesystem.
    */
   @Idempotent
-  ECBlockGroupStats getECBlockGroupsStats() throws IOException;
+  ECBlockGroupStats getECBlockGroupStats() throws IOException;
 
   /**
    * Get a report on the system's current datanodes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
index 7258c43b1ec..9a8ad8cdb13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ECBlockGroupStats.java
@@ -24,45 +24,45 @@ import org.apache.hadoop.classification.InterfaceStability;
  * Get statistics pertaining to blocks of type {@link BlockType#STRIPED}
  * in the filesystem.
  * <p>
- * @see ClientProtocol#getECBlockGroupsStats()
+ * @see ClientProtocol#getECBlockGroupStats()
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public final class ECBlockGroupStats {
-  private final long lowRedundancyBlockGroupsStat;
-  private final long corruptBlockGroupsStat;
-  private final long missingBlockGroupsStat;
-  private final long bytesInFutureBlockGroupsStat;
-  private final long pendingDeletionBlockGroupsStat;
+  private final long lowRedundancyBlockGroups;
+  private final long corruptBlockGroups;
+  private final long missingBlockGroups;
+  private final long bytesInFutureBlockGroups;
+  private final long pendingDeletionBlocks;
 
-  public ECBlockGroupStats(long lowRedundancyBlockGroupsStat, long
-      corruptBlockGroupsStat, long missingBlockGroupsStat, long
-      bytesInFutureBlockGroupsStat, long pendingDeletionBlockGroupsStat) {
-    this.lowRedundancyBlockGroupsStat = lowRedundancyBlockGroupsStat;
-    this.corruptBlockGroupsStat = corruptBlockGroupsStat;
-    this.missingBlockGroupsStat = missingBlockGroupsStat;
-    this.bytesInFutureBlockGroupsStat = bytesInFutureBlockGroupsStat;
-    this.pendingDeletionBlockGroupsStat = pendingDeletionBlockGroupsStat;
+  public ECBlockGroupStats(long lowRedundancyBlockGroups,
+      long corruptBlockGroups, long missingBlockGroups,
+      long bytesInFutureBlockGroups, long pendingDeletionBlocks) {
+    this.lowRedundancyBlockGroups = lowRedundancyBlockGroups;
+    this.corruptBlockGroups = corruptBlockGroups;
+    this.missingBlockGroups = missingBlockGroups;
+    this.bytesInFutureBlockGroups = bytesInFutureBlockGroups;
+    this.pendingDeletionBlocks = pendingDeletionBlocks;
   }
 
-  public long getBytesInFutureBlockGroupsStat() {
-    return bytesInFutureBlockGroupsStat;
+  public long getBytesInFutureBlockGroups() {
+    return bytesInFutureBlockGroups;
   }
 
-  public long getCorruptBlockGroupsStat() {
-    return corruptBlockGroupsStat;
+  public long getCorruptBlockGroups() {
+    return corruptBlockGroups;
   }
 
-  public long getLowRedundancyBlockGroupsStat() {
-    return lowRedundancyBlockGroupsStat;
+  public long getLowRedundancyBlockGroups() {
+    return lowRedundancyBlockGroups;
   }
 
-  public long getMissingBlockGroupsStat() {
-    return missingBlockGroupsStat;
+  public long getMissingBlockGroups() {
+    return missingBlockGroups;
   }
 
-  public long getPendingDeletionBlockGroupsStat() {
-    return pendingDeletionBlockGroupsStat;
+  public long getPendingDeletionBlocks() {
+    return pendingDeletionBlocks;
   }
 
   @Override
@@ -70,13 +70,13 @@ public final class ECBlockGroupStats {
     StringBuilder statsBuilder = new StringBuilder();
     statsBuilder.append("ECBlockGroupStats=[")
         .append("LowRedundancyBlockGroups=").append(
-            getLowRedundancyBlockGroupsStat())
-        .append(", CorruptBlockGroups=").append(getCorruptBlockGroupsStat())
-        .append(", MissingBlockGroups=").append(getMissingBlockGroupsStat())
+            getLowRedundancyBlockGroups())
+        .append(", CorruptBlockGroups=").append(getCorruptBlockGroups())
+        .append(", MissingBlockGroups=").append(getMissingBlockGroups())
         .append(", BytesInFutureBlockGroups=").append(
-            getBytesInFutureBlockGroupsStat())
-        .append(", PendingDeletionBlockGroups=").append(
-            getPendingDeletionBlockGroupsStat())
+            getBytesInFutureBlockGroups())
+        .append(", PendingDeletionBlocks=").append(
+            getPendingDeletionBlocks())
         .append("]");
     return statsBuilder.toString();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java
index c92dbc73c6b..49aadedcdec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReplicatedBlockStats.java @@ -24,66 +24,66 @@ import org.apache.hadoop.classification.InterfaceStability; * Get statistics pertaining to blocks of type {@link BlockType#CONTIGUOUS} * in the filesystem. *

- * @see ClientProtocol#getBlocksStats() + * @see ClientProtocol#getReplicatedBlockStats() */ @InterfaceAudience.Public @InterfaceStability.Evolving public final class ReplicatedBlockStats { - private final long lowRedundancyBlocksStat; - private final long corruptBlocksStat; - private final long missingBlocksStat; - private final long missingReplicationOneBlocksStat; - private final long bytesInFutureBlocksStat; - private final long pendingDeletionBlocksStat; + private final long lowRedundancyBlocks; + private final long corruptBlocks; + private final long missingBlocks; + private final long missingReplicationOneBlocks; + private final long bytesInFutureBlocks; + private final long pendingDeletionBlocks; - public ReplicatedBlockStats(long lowRedundancyBlocksStat, - long corruptBlocksStat, long missingBlocksStat, - long missingReplicationOneBlocksStat, long bytesInFutureBlocksStat, - long pendingDeletionBlocksStat) { - this.lowRedundancyBlocksStat = lowRedundancyBlocksStat; - this.corruptBlocksStat = corruptBlocksStat; - this.missingBlocksStat = missingBlocksStat; - this.missingReplicationOneBlocksStat = missingReplicationOneBlocksStat; - this.bytesInFutureBlocksStat = bytesInFutureBlocksStat; - this.pendingDeletionBlocksStat = pendingDeletionBlocksStat; + public ReplicatedBlockStats(long lowRedundancyBlocks, + long corruptBlocks, long missingBlocks, + long missingReplicationOneBlocks, long bytesInFutureBlocks, + long pendingDeletionBlocks) { + this.lowRedundancyBlocks = lowRedundancyBlocks; + this.corruptBlocks = corruptBlocks; + this.missingBlocks = missingBlocks; + this.missingReplicationOneBlocks = missingReplicationOneBlocks; + this.bytesInFutureBlocks = bytesInFutureBlocks; + this.pendingDeletionBlocks = pendingDeletionBlocks; } - public long getLowRedundancyBlocksStat() { - return lowRedundancyBlocksStat; + public long getLowRedundancyBlocks() { + return lowRedundancyBlocks; } - public long getCorruptBlocksStat() { - return corruptBlocksStat; + public long getCorruptBlocks() { + return corruptBlocks; } - public long getMissingReplicaBlocksStat() { - return missingBlocksStat; + public long getMissingReplicaBlocks() { + return missingBlocks; } - public long getMissingReplicationOneBlocksStat() { - return missingReplicationOneBlocksStat; + public long getMissingReplicationOneBlocks() { + return missingReplicationOneBlocks; } - public long getBytesInFutureBlocksStat() { - return bytesInFutureBlocksStat; + public long getBytesInFutureBlocks() { + return bytesInFutureBlocks; } - public long getPendingDeletionBlocksStat() { - return pendingDeletionBlocksStat; + public long getPendingDeletionBlocks() { + return pendingDeletionBlocks; } @Override public String toString() { StringBuilder statsBuilder = new StringBuilder(); - statsBuilder.append("ReplicatedBlocksStats=[") - .append("LowRedundancyBlocks=").append(getLowRedundancyBlocksStat()) - .append(", CorruptBlocks=").append(getCorruptBlocksStat()) - .append(", MissingReplicaBlocks=").append(getMissingReplicaBlocksStat()) + statsBuilder.append("ReplicatedBlockStats=[") + .append("LowRedundancyBlocks=").append(getLowRedundancyBlocks()) + .append(", CorruptBlocks=").append(getCorruptBlocks()) + .append(", MissingReplicaBlocks=").append(getMissingReplicaBlocks()) .append(", MissingReplicationOneBlocks=").append( - getMissingReplicationOneBlocksStat()) - .append(", BytesInFutureBlocks=").append(getBytesInFutureBlocksStat()) + getMissingReplicationOneBlocks()) + .append(", BytesInFutureBlocks=").append(getBytesInFutureBlocks()) .append(", 
PendingDeletionBlocks=").append( - getPendingDeletionBlocksStat()) + getPendingDeletionBlocks()) .append("]"); return statsBuilder.toString(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 53d88045762..209eee7b501 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -120,8 +120,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFil import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto; @@ -246,13 +246,13 @@ public class ClientNamenodeProtocolTranslatorPB implements private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST = GetFsStatusRequestProto.newBuilder().build(); - private final static GetFsBlocksStatsRequestProto - VOID_GET_FS_REPLICABLOCKS_STATS_REQUEST = - GetFsBlocksStatsRequestProto.newBuilder().build(); + private final static GetFsReplicatedBlockStatsRequestProto + VOID_GET_FS_REPLICATED_BLOCK_STATS_REQUEST = + GetFsReplicatedBlockStatsRequestProto.newBuilder().build(); - private final static GetFsECBlockGroupsStatsRequestProto - VOID_GET_FS_ECBLOCKGROUPS_STATS_REQUEST = - GetFsECBlockGroupsStatsRequestProto.newBuilder().build(); + private final static GetFsECBlockGroupStatsRequestProto + VOID_GET_FS_ECBLOCKGROUP_STATS_REQUEST = + GetFsECBlockGroupStatsRequestProto.newBuilder().build(); private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST = RollEditsRequestProto.getDefaultInstance(); @@ -695,20 +695,20 @@ public class ClientNamenodeProtocolTranslatorPB implements } @Override - public ReplicatedBlockStats getBlocksStats() throws IOException { + public ReplicatedBlockStats getReplicatedBlockStats() throws IOException { try { - return PBHelperClient.convert(rpcProxy.getFsBlocksStats(null, - VOID_GET_FS_REPLICABLOCKS_STATS_REQUEST)); + return PBHelperClient.convert(rpcProxy.getFsReplicatedBlockStats(null, + VOID_GET_FS_REPLICATED_BLOCK_STATS_REQUEST)); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } } @Override - public ECBlockGroupStats getECBlockGroupsStats() throws IOException { + public ECBlockGroupStats getECBlockGroupStats() throws IOException { try { - return 
PBHelperClient.convert(rpcProxy.getFsECBlockGroupsStats(null, - VOID_GET_FS_ECBLOCKGROUPS_STATS_REQUEST)); + return PBHelperClient.convert(rpcProxy.getFsECBlockGroupStats(null, + VOID_GET_FS_ECBLOCKGROUP_STATS_REQUEST)); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 684ad70d554..d92d91ea2fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -122,8 +122,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto; @@ -1811,7 +1811,7 @@ public class PBHelperClient { } public static ReplicatedBlockStats convert( - GetFsBlocksStatsResponseProto res) { + GetFsReplicatedBlockStatsResponseProto res) { return new ReplicatedBlockStats(res.getLowRedundancy(), res.getCorruptBlocks(), res.getMissingBlocks(), res.getMissingReplOneBlocks(), res.getBlocksInFuture(), @@ -1819,7 +1819,7 @@ public class PBHelperClient { } public static ECBlockGroupStats convert( - GetFsECBlockGroupsStatsResponseProto res) { + GetFsECBlockGroupStatsResponseProto res) { return new ECBlockGroupStats(res.getLowRedundancy(), res.getCorruptBlocks(), res.getMissingBlocks(), res.getBlocksInFuture(), res.getPendingDeletionBlocks()); @@ -2236,37 +2236,37 @@ public class PBHelperClient { return result.build(); } - public static GetFsBlocksStatsResponseProto convert( + public static GetFsReplicatedBlockStatsResponseProto convert( ReplicatedBlockStats replicatedBlockStats) { - GetFsBlocksStatsResponseProto.Builder result = - GetFsBlocksStatsResponseProto.newBuilder(); + GetFsReplicatedBlockStatsResponseProto.Builder result = + GetFsReplicatedBlockStatsResponseProto.newBuilder(); result.setLowRedundancy( - replicatedBlockStats.getLowRedundancyBlocksStat()); + replicatedBlockStats.getLowRedundancyBlocks()); result.setCorruptBlocks( - replicatedBlockStats.getCorruptBlocksStat()); + replicatedBlockStats.getCorruptBlocks()); result.setMissingBlocks( - replicatedBlockStats.getMissingReplicaBlocksStat()); + replicatedBlockStats.getMissingReplicaBlocks()); result.setMissingReplOneBlocks( - 
replicatedBlockStats.getMissingReplicationOneBlocksStat()); + replicatedBlockStats.getMissingReplicationOneBlocks()); result.setBlocksInFuture( - replicatedBlockStats.getBytesInFutureBlocksStat()); + replicatedBlockStats.getBytesInFutureBlocks()); result.setPendingDeletionBlocks( - replicatedBlockStats.getPendingDeletionBlocksStat()); + replicatedBlockStats.getPendingDeletionBlocks()); return result.build(); } - public static GetFsECBlockGroupsStatsResponseProto convert( + public static GetFsECBlockGroupStatsResponseProto convert( ECBlockGroupStats ecBlockGroupStats) { - GetFsECBlockGroupsStatsResponseProto.Builder result = - GetFsECBlockGroupsStatsResponseProto.newBuilder(); + GetFsECBlockGroupStatsResponseProto.Builder result = + GetFsECBlockGroupStatsResponseProto.newBuilder(); result.setLowRedundancy( - ecBlockGroupStats.getLowRedundancyBlockGroupsStat()); - result.setCorruptBlocks(ecBlockGroupStats.getCorruptBlockGroupsStat()); - result.setMissingBlocks(ecBlockGroupStats.getMissingBlockGroupsStat()); + ecBlockGroupStats.getLowRedundancyBlockGroups()); + result.setCorruptBlocks(ecBlockGroupStats.getCorruptBlockGroups()); + result.setMissingBlocks(ecBlockGroupStats.getMissingBlockGroups()); result.setBlocksInFuture( - ecBlockGroupStats.getBytesInFutureBlockGroupsStat()); + ecBlockGroupStats.getBytesInFutureBlockGroups()); result.setPendingDeletionBlocks( - ecBlockGroupStats.getPendingDeletionBlockGroupsStat()); + ecBlockGroupStats.getPendingDeletionBlocks()); return result.build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 3f108fa718b..6db6ad0804c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -327,10 +327,10 @@ message GetFsStatsResponseProto { optional uint64 pending_deletion_blocks = 9; } -message GetFsBlocksStatsRequestProto { // no input paramters +message GetFsReplicatedBlockStatsRequestProto { // no input paramters } -message GetFsBlocksStatsResponseProto { +message GetFsReplicatedBlockStatsResponseProto { required uint64 low_redundancy = 1; required uint64 corrupt_blocks = 2; required uint64 missing_blocks = 3; @@ -339,10 +339,10 @@ message GetFsBlocksStatsResponseProto { required uint64 pending_deletion_blocks = 6; } -message GetFsECBlockGroupsStatsRequestProto { // no input paramters +message GetFsECBlockGroupStatsRequestProto { // no input paramters } -message GetFsECBlockGroupsStatsResponseProto { +message GetFsECBlockGroupStatsResponseProto { required uint64 low_redundancy = 1; required uint64 corrupt_blocks = 2; required uint64 missing_blocks = 3; @@ -831,10 +831,10 @@ service ClientNamenodeProtocol { rpc recoverLease(RecoverLeaseRequestProto) returns(RecoverLeaseResponseProto); rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto); - rpc getFsBlocksStats(GetFsBlocksStatsRequestProto) - returns (GetFsBlocksStatsResponseProto); - rpc getFsECBlockGroupsStats(GetFsECBlockGroupsStatsRequestProto) - returns (GetFsECBlockGroupsStatsResponseProto); + rpc getFsReplicatedBlockStats(GetFsReplicatedBlockStatsRequestProto) + returns (GetFsReplicatedBlockStatsResponseProto); + rpc getFsECBlockGroupStats(GetFsECBlockGroupStatsRequestProto) + returns (GetFsECBlockGroupStatsResponseProto); rpc getDatanodeReport(GetDatanodeReportRequestProto) returns(GetDatanodeReportResponseProto); rpc 
getDatanodeStorageReport(GetDatanodeStorageReportRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 44d5216a267..a79e75f39d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -124,12 +124,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFil import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupStatsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsReplicatedBlockStatsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsBlocksStatsRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsECBlockGroupsStatsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto; @@ -763,22 +763,22 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } @Override - public GetFsBlocksStatsResponseProto getFsBlocksStats( - RpcController controller, GetFsBlocksStatsRequestProto request) + public GetFsReplicatedBlockStatsResponseProto getFsReplicatedBlockStats( + RpcController controller, GetFsReplicatedBlockStatsRequestProto request) throws ServiceException { try { - return PBHelperClient.convert(server.getBlocksStats()); + return PBHelperClient.convert(server.getReplicatedBlockStats()); } catch (IOException e) { throw new ServiceException(e); } } @Override - public GetFsECBlockGroupsStatsResponseProto getFsECBlockGroupsStats( - RpcController controller, GetFsECBlockGroupsStatsRequestProto request) + public GetFsECBlockGroupStatsResponseProto getFsECBlockGroupStats( + RpcController controller, GetFsECBlockGroupStatsRequestProto request) throws ServiceException { try { - return PBHelperClient.convert(server.getECBlockGroupsStats()); + return PBHelperClient.convert(server.getECBlockGroupStats()); } catch (IOException e) { throw new ServiceException(e); } 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e83cbc6ef4a..f4e5cb46563 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -233,47 +233,47 @@ public class BlockManager implements BlockStatsMXBean {
 
   /** Used by metrics. */
   public long getLowRedundancyBlocks() {
-    return neededReconstruction.getLowRedundancyBlocksStat();
+    return neededReconstruction.getLowRedundancyBlocks();
   }
 
   /** Used by metrics. */
   public long getCorruptBlocks() {
-    return corruptReplicas.getCorruptBlocksStat();
+    return corruptReplicas.getCorruptBlocks();
   }
 
   /** Used by metrics. */
   public long getMissingBlocks() {
-    return neededReconstruction.getCorruptBlocksStat();
+    return neededReconstruction.getCorruptBlocks();
   }
 
   /** Used by metrics. */
   public long getMissingReplicationOneBlocks() {
-    return neededReconstruction.getCorruptReplicationOneBlocksStat();
+    return neededReconstruction.getCorruptReplicationOneBlocks();
   }
 
   /** Used by metrics. */
   public long getPendingDeletionReplicatedBlocks() {
-    return invalidateBlocks.getBlocksStat();
+    return invalidateBlocks.getBlocks();
   }
 
   /** Used by metrics. */
   public long getLowRedundancyECBlockGroups() {
-    return neededReconstruction.getLowRedundancyECBlockGroupsStat();
+    return neededReconstruction.getLowRedundancyECBlockGroups();
   }
 
   /** Used by metrics. */
   public long getCorruptECBlockGroups() {
-    return corruptReplicas.getCorruptECBlockGroupsStat();
+    return corruptReplicas.getCorruptECBlockGroups();
   }
 
   /** Used by metrics. */
   public long getMissingECBlockGroups() {
-    return neededReconstruction.getCorruptECBlockGroupsStat();
+    return neededReconstruction.getCorruptECBlockGroups();
   }
 
   /** Used by metrics. */
-  public long getPendingDeletionECBlockGroups() {
-    return invalidateBlocks.getECBlockGroupsStat();
+  public long getPendingDeletionECBlocks() {
+    return invalidateBlocks.getECBlocks();
   }
 
   /**
@@ -748,7 +748,7 @@ public class BlockManager implements BlockStatsMXBean {
     invalidateBlocks.dump(out);
 
     //Dump corrupt blocks and their storageIDs
-    Set<Block> corruptBlocks = corruptReplicas.getCorruptBlocks();
+    Set<Block> corruptBlocks = corruptReplicas.getCorruptBlocksSet();
     out.println("Corrupt Blocks:");
     for(Block block : corruptBlocks) {
       Collection<DatanodeDescriptor> corruptNodes =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index d158b640142..7a576bb9b84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -240,7 +240,7 @@ public class CorruptReplicasMap{
   * method to get the set of corrupt blocks in corruptReplicasMap.
* @return Set of Block objects */ - Set getCorruptBlocks() { + Set getCorruptBlocksSet() { Set corruptBlocks = new HashSet(); corruptBlocks.addAll(corruptReplicasMap.keySet()); return corruptBlocks; @@ -267,11 +267,11 @@ public class CorruptReplicasMap{ } } - long getCorruptBlocksStat() { + long getCorruptBlocks() { return totalCorruptBlocks.longValue(); } - long getCorruptECBlockGroupsStat() { + long getCorruptECBlockGroups() { return totalCorruptECBlockGroups.longValue(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index 7b6b8a924ca..75561caabac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -53,9 +53,9 @@ class InvalidateBlocks { private final Map> nodeToBlocks = new HashMap<>(); private final Map> - nodeToECBlockGroups = new HashMap<>(); + nodeToECBlocks = new HashMap<>(); private final LongAdder numBlocks = new LongAdder(); - private final LongAdder numECBlockGroups = new LongAdder(); + private final LongAdder numECBlocks = new LongAdder(); private final int blockInvalidateLimit; /** @@ -87,7 +87,7 @@ class InvalidateBlocks { * @return The total number of blocks to be invalidated. */ long numBlocks() { - return getECBlockGroupsStat() + getBlocksStat(); + return getECBlocks() + getBlocks(); } /** @@ -95,7 +95,7 @@ class InvalidateBlocks { * {@link org.apache.hadoop.hdfs.protocol.BlockType#CONTIGUOUS} * to be invalidated. */ - long getBlocksStat() { + long getBlocks() { return numBlocks.longValue(); } @@ -104,8 +104,8 @@ class InvalidateBlocks { * {@link org.apache.hadoop.hdfs.protocol.BlockType#STRIPED} * to be invalidated. */ - long getECBlockGroupsStat() { - return numECBlockGroups.longValue(); + long getECBlocks() { + return numECBlocks.longValue(); } private LightWeightHashSet getBlocksSet(final DatanodeInfo dn) { @@ -115,9 +115,9 @@ class InvalidateBlocks { return null; } - private LightWeightHashSet getECBlockGroupsSet(final DatanodeInfo dn) { - if (nodeToECBlockGroups.containsKey(dn)) { - return nodeToECBlockGroups.get(dn); + private LightWeightHashSet getECBlocksSet(final DatanodeInfo dn) { + if (nodeToECBlocks.containsKey(dn)) { + return nodeToECBlocks.get(dn); } return null; } @@ -125,7 +125,7 @@ class InvalidateBlocks { private LightWeightHashSet getBlocksSet(final DatanodeInfo dn, final Block block) { if (BlockIdManager.isStripedBlockID(block.getBlockId())) { - return getECBlockGroupsSet(dn); + return getECBlocksSet(dn); } else { return getBlocksSet(dn); } @@ -134,8 +134,8 @@ class InvalidateBlocks { private void putBlocksSet(final DatanodeInfo dn, final Block block, final LightWeightHashSet set) { if (BlockIdManager.isStripedBlockID(block.getBlockId())) { - assert getECBlockGroupsSet(dn) == null; - nodeToECBlockGroups.put(dn, set); + assert getECBlocksSet(dn) == null; + nodeToECBlocks.put(dn, set); } else { assert getBlocksSet(dn) == null; nodeToBlocks.put(dn, set); @@ -144,7 +144,7 @@ class InvalidateBlocks { private long getBlockSetsSize(final DatanodeInfo dn) { LightWeightHashSet replicaBlocks = getBlocksSet(dn); - LightWeightHashSet stripedBlocks = getECBlockGroupsSet(dn); + LightWeightHashSet stripedBlocks = getECBlocksSet(dn); return ((replicaBlocks == null ? 
0 : replicaBlocks.size()) + (stripedBlocks == null ? 0 : stripedBlocks.size())); } @@ -179,7 +179,7 @@ class InvalidateBlocks { } if (set.add(block)) { if (BlockIdManager.isStripedBlockID(block.getBlockId())) { - numECBlockGroups.increment(); + numECBlocks.increment(); } else { numBlocks.increment(); } @@ -196,9 +196,9 @@ class InvalidateBlocks { if (replicaBlockSets != null) { numBlocks.add(replicaBlockSets.size() * -1); } - LightWeightHashSet blockGroupSets = nodeToECBlockGroups.remove(dn); - if (blockGroupSets != null) { - numECBlockGroups.add(blockGroupSets.size() * -1); + LightWeightHashSet ecBlocksSet = nodeToECBlocks.remove(dn); + if (ecBlocksSet != null) { + numECBlocks.add(ecBlocksSet.size() * -1); } } @@ -207,7 +207,7 @@ class InvalidateBlocks { final LightWeightHashSet v = getBlocksSet(dn, block); if (v != null && v.remove(block)) { if (BlockIdManager.isStripedBlockID(block.getBlockId())) { - numECBlockGroups.decrement(); + numECBlocks.decrement(); } else { numBlocks.decrement(); } @@ -231,21 +231,21 @@ class InvalidateBlocks { /** Print the contents to out. */ synchronized void dump(final PrintWriter out) { final int size = nodeToBlocks.values().size() + - nodeToECBlockGroups.values().size(); + nodeToECBlocks.values().size(); out.println("Metasave: Blocks " + numBlocks() + " waiting deletion from " + size + " datanodes."); if (size == 0) { return; } dumpBlockSet(nodeToBlocks, out); - dumpBlockSet(nodeToECBlockGroups, out); + dumpBlockSet(nodeToECBlocks, out); } /** @return a list of the storage IDs. */ synchronized List getDatanodes() { HashSet set = new HashSet<>(); set.addAll(nodeToBlocks.keySet()); - set.addAll(nodeToECBlockGroups.keySet()); + set.addAll(nodeToECBlocks.keySet()); return new ArrayList<>(set); } @@ -289,9 +289,9 @@ class InvalidateBlocks { remainingLimit = getBlocksToInvalidateByLimit(nodeToBlocks.get(dn), toInvalidate, numBlocks, remainingLimit); } - if ((remainingLimit > 0) && (nodeToECBlockGroups.get(dn) != null)) { - getBlocksToInvalidateByLimit(nodeToECBlockGroups.get(dn), - toInvalidate, numECBlockGroups, remainingLimit); + if ((remainingLimit > 0) && (nodeToECBlocks.get(dn) != null)) { + getBlocksToInvalidateByLimit(nodeToECBlocks.get(dn), + toInvalidate, numECBlocks, remainingLimit); } if (toInvalidate.size() > 0 && getBlockSetsSize(dn) == 0) { remove(dn); @@ -302,8 +302,8 @@ class InvalidateBlocks { synchronized void clear() { nodeToBlocks.clear(); - nodeToECBlockGroups.clear(); + nodeToECBlocks.clear(); numBlocks.reset(); - numECBlockGroups.reset(); + numECBlocks.reset(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java index af2cb7ef037..347d606a04e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java @@ -144,33 +144,33 @@ class LowRedundancyBlocks implements Iterable { /** Return the number of corrupt blocks with replication factor 1 */ long getCorruptReplicationOneBlockSize() { - return getCorruptReplicationOneBlocksStat(); + return getCorruptReplicationOneBlocks(); } /** * Return under replicated block count excluding corrupt replicas. 
*/ - long getLowRedundancyBlocksStat() { - return lowRedundancyBlocks.longValue() - getCorruptBlocksStat(); + long getLowRedundancyBlocks() { + return lowRedundancyBlocks.longValue() - getCorruptBlocks(); } - long getCorruptBlocksStat() { + long getCorruptBlocks() { return corruptBlocks.longValue(); } - long getCorruptReplicationOneBlocksStat() { + long getCorruptReplicationOneBlocks() { return corruptReplicationOneBlocks.longValue(); } /** * Return low redundancy striped blocks excluding corrupt blocks. */ - long getLowRedundancyECBlockGroupsStat() { + long getLowRedundancyECBlockGroups() { return lowRedundancyECBlockGroups.longValue() - - getCorruptECBlockGroupsStat(); + getCorruptECBlockGroups(); } - long getCorruptECBlockGroupsStat() { + long getCorruptECBlockGroups() { return corruptECBlockGroups.longValue(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index aada5bfbc01..08c8562dc9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -89,7 +89,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*; -import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats; import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats; import org.apache.hadoop.hdfs.protocol.OpenFileEntry; @@ -4081,9 +4080,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * Get statistics pertaining to blocks of type {@link BlockType#CONTIGUOUS} * in the filesystem. *

- * @see ClientProtocol#getBlocksStats() + * @see ClientProtocol#getReplicatedBlockStats() */ - ReplicatedBlockStats getBlocksStats() { + ReplicatedBlockStats getReplicatedBlockStats() { return new ReplicatedBlockStats(getLowRedundancyReplicatedBlocks(), getCorruptReplicatedBlocks(), getMissingReplicatedBlocks(), getMissingReplicationOneBlocks(), getBytesInFutureReplicatedBlocks(), @@ -4094,12 +4093,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, * Get statistics pertaining to blocks of type {@link BlockType#STRIPED} * in the filesystem. *

- * @see ClientProtocol#getECBlockGroupsStats() + * @see ClientProtocol#getECBlockGroupStats() */ - ECBlockGroupStats getECBlockGroupsStats() { + ECBlockGroupStats getECBlockGroupStats() { return new ECBlockGroupStats(getLowRedundancyECBlockGroups(), getCorruptECBlockGroups(), getMissingECBlockGroups(), - getBytesInFutureECBlockGroups(), getPendingDeletionECBlockGroups()); + getBytesInFutureECBlockGroups(), getPendingDeletionECBlocks()); } @Override // FSNamesystemMBean @@ -4712,10 +4711,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } @Override // ECBlockGroupsMBean - @Metric({"PendingDeletionECBlockGroups", "Number of erasure coded block " + - "groups that are pending deletion"}) - public long getPendingDeletionECBlockGroups() { - return blockManager.getPendingDeletionECBlockGroups(); + @Metric({"PendingDeletionECBlocks", "Number of erasure coded blocks " + + "that are pending deletion"}) + public long getPendingDeletionECBlocks() { + return blockManager.getPendingDeletionECBlocks(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 7b14226fcb8..1ef3f55b053 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1163,17 +1163,17 @@ public class NameNodeRpcServer implements NamenodeProtocols { } @Override // ClientProtocol - public ReplicatedBlockStats getBlocksStats() throws IOException { + public ReplicatedBlockStats getReplicatedBlockStats() throws IOException { checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); - return namesystem.getBlocksStats(); + return namesystem.getReplicatedBlockStats(); } @Override // ClientProtocol - public ECBlockGroupStats getECBlockGroupsStats() throws IOException { + public ECBlockGroupStats getECBlockGroupStats() throws IOException { checkNNStartup(); namesystem.checkOperation(OperationCategory.READ); - return namesystem.getECBlockGroupsStats(); + return namesystem.getECBlockGroupStats(); } @Override // ClientProtocol diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java index 5fa646a6c44..474f3edb975 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/ECBlockGroupsMBean.java @@ -53,7 +53,7 @@ public interface ECBlockGroupsMBean { long getBytesInFutureECBlockGroups(); /** - * Return count of erasure coded block groups that are pending deletion. + * Return count of erasure coded blocks that are pending deletion. 
*/ - long getPendingDeletionECBlockGroups(); + long getPendingDeletionECBlocks(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index a2bb2c053f3..cc7eb1b06d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -534,30 +534,31 @@ public class DFSAdmin extends FsShell { * minutes. Use "-metaSave" to list of all such blocks and accurate * counts. */ - ReplicatedBlockStats replicatedBlockStats = dfs.getClient().getNamenode().getBlocksStats(); + ReplicatedBlockStats replicatedBlockStats = + dfs.getClient().getNamenode().getReplicatedBlockStats(); System.out.println("Replicated Blocks:"); System.out.println("\tUnder replicated blocks: " + - replicatedBlockStats.getLowRedundancyBlocksStat()); + replicatedBlockStats.getLowRedundancyBlocks()); System.out.println("\tBlocks with corrupt replicas: " + - replicatedBlockStats.getCorruptBlocksStat()); + replicatedBlockStats.getCorruptBlocks()); System.out.println("\tMissing blocks: " + - replicatedBlockStats.getMissingReplicaBlocksStat()); + replicatedBlockStats.getMissingReplicaBlocks()); System.out.println("\tMissing blocks (with replication factor 1): " + - replicatedBlockStats.getMissingReplicationOneBlocksStat()); + replicatedBlockStats.getMissingReplicationOneBlocks()); System.out.println("\tPending deletion blocks: " + - replicatedBlockStats.getPendingDeletionBlocksStat()); + replicatedBlockStats.getPendingDeletionBlocks()); ECBlockGroupStats ecBlockGroupStats = - dfs.getClient().getNamenode().getECBlockGroupsStats(); + dfs.getClient().getNamenode().getECBlockGroupStats(); System.out.println("Erasure Coded Block Groups: "); System.out.println("\tLow redundancy block groups: " + - ecBlockGroupStats.getLowRedundancyBlockGroupsStat()); + ecBlockGroupStats.getLowRedundancyBlockGroups()); System.out.println("\tBlock groups with corrupt internal blocks: " + - ecBlockGroupStats.getCorruptBlockGroupsStat()); + ecBlockGroupStats.getCorruptBlockGroups()); System.out.println("\tMissing block groups: " + - ecBlockGroupStats.getMissingBlockGroupsStat()); - System.out.println("\tPending deletion block groups: " + - ecBlockGroupStats.getPendingDeletionBlockGroupsStat()); + ecBlockGroupStats.getMissingBlockGroups()); + System.out.println("\tPending deletion blocks: " + + ecBlockGroupStats.getPendingDeletionBlocks()); System.out.println(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 0926b44335c..b00eff2e184 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1657,8 +1657,8 @@ public class DFSTestUtil { /** * Verify the aggregated {@link ClientProtocol#getStats()} block counts equal - * the sum of {@link ClientProtocol#getBlocksStats()} and - * {@link ClientProtocol#getECBlockGroupsStats()}. + * the sum of {@link ClientProtocol#getReplicatedBlockStats()} and + * {@link ClientProtocol#getECBlockGroupStats()}. 
* @throws Exception */ public static void verifyClientStats(Configuration conf, @@ -1668,35 +1668,35 @@ public class DFSTestUtil { ClientProtocol.class).getProxy(); long[] aggregatedStats = cluster.getNameNode().getRpcServer().getStats(); ReplicatedBlockStats replicatedBlockStats = - client.getBlocksStats(); - ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupsStats(); + client.getReplicatedBlockStats(); + ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupStats(); assertEquals("Under replicated stats not matching!", aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX], aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]); assertEquals("Low redundancy stats not matching!", aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX], - replicatedBlockStats.getLowRedundancyBlocksStat() + - ecBlockGroupStats.getLowRedundancyBlockGroupsStat()); + replicatedBlockStats.getLowRedundancyBlocks() + + ecBlockGroupStats.getLowRedundancyBlockGroups()); assertEquals("Corrupt blocks stats not matching!", aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX], - replicatedBlockStats.getCorruptBlocksStat() + - ecBlockGroupStats.getCorruptBlockGroupsStat()); + replicatedBlockStats.getCorruptBlocks() + + ecBlockGroupStats.getCorruptBlockGroups()); assertEquals("Missing blocks stats not matching!", aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX], - replicatedBlockStats.getMissingReplicaBlocksStat() + - ecBlockGroupStats.getMissingBlockGroupsStat()); + replicatedBlockStats.getMissingReplicaBlocks() + + ecBlockGroupStats.getMissingBlockGroups()); assertEquals("Missing blocks with replication factor one not matching!", aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX], - replicatedBlockStats.getMissingReplicationOneBlocksStat()); + replicatedBlockStats.getMissingReplicationOneBlocks()); assertEquals("Bytes in future blocks stats not matching!", aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX], - replicatedBlockStats.getBytesInFutureBlocksStat() + - ecBlockGroupStats.getBytesInFutureBlockGroupsStat()); + replicatedBlockStats.getBytesInFutureBlocks() + + ecBlockGroupStats.getBytesInFutureBlockGroups()); assertEquals("Pending deletion blocks stats not matching!", aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX], - replicatedBlockStats.getPendingDeletionBlocksStat() + - ecBlockGroupStats.getPendingDeletionBlockGroupsStat()); + replicatedBlockStats.getPendingDeletionBlocks() + + ecBlockGroupStats.getPendingDeletionBlocks()); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java index 241391821fb..e7bd3d231d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java @@ -268,9 +268,9 @@ public class TestComputeInvalidateWork { "Striped BlockGroups!", (long) expected, invalidateBlocks.numBlocks()); assertEquals("Unexpected invalidate count for replicas!", - totalReplicas, invalidateBlocks.getBlocksStat()); + totalReplicas, invalidateBlocks.getBlocks()); assertEquals("Unexpected invalidate count for striped block groups!", - totalStripedDataBlocks, invalidateBlocks.getECBlockGroupsStat()); + 
totalStripedDataBlocks, invalidateBlocks.getECBlocks()); } finally { namesystem.writeUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java index 3f8a5cd4845..3510bc3d769 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java @@ -78,10 +78,10 @@ public class TestCorruptReplicaInfo { assertEquals("Unexpected total corrupt blocks count!", totalExpectedCorruptBlocks, corruptReplicasMap.size()); assertEquals("Unexpected replica blocks count!", - expectedReplicaCount, corruptReplicasMap.getCorruptBlocksStat()); + expectedReplicaCount, corruptReplicasMap.getCorruptBlocks()); assertEquals("Unexpected striped blocks count!", expectedStripedBlockCount, - corruptReplicasMap.getCorruptECBlockGroupsStat()); + corruptReplicasMap.getCorruptECBlockGroups()); } @Test @@ -93,9 +93,9 @@ public class TestCorruptReplicaInfo { assertEquals("Total number of corrupt blocks must initially be 0!", 0, crm.size()); assertEquals("Number of corrupt replicas must initially be 0!", - 0, crm.getCorruptBlocksStat()); + 0, crm.getCorruptBlocks()); assertEquals("Number of corrupt striped block groups must initially be 0!", - 0, crm.getCorruptECBlockGroupsStat()); + 0, crm.getCorruptECBlockGroups()); assertNull("Param n cannot be less than 0", crm.getCorruptBlockIdsForTesting(BlockType.CONTIGUOUS, -1, null)); assertNull("Param n cannot be greater than 100", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java index c65fc6495f8..2b28f1ef3ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestLowRedundancyBlockQueues.java @@ -50,16 +50,16 @@ public class TestLowRedundancyBlockQueues { int corruptReplicationOneCount, int lowRedundancyStripedCount, int corruptStripedCount) { assertEquals("Low redundancy replica count incorrect!", - lowRedundancyReplicaCount, queues.getLowRedundancyBlocksStat()); + lowRedundancyReplicaCount, queues.getLowRedundancyBlocks()); assertEquals("Corrupt replica count incorrect!", - corruptReplicaCount, queues.getCorruptBlocksStat()); + corruptReplicaCount, queues.getCorruptBlocks()); assertEquals("Corrupt replica one count incorrect!", corruptReplicationOneCount, - queues.getCorruptReplicationOneBlocksStat()); + queues.getCorruptReplicationOneBlocks()); assertEquals("Low redundancy striped blocks count incorrect!", - lowRedundancyStripedCount, queues.getLowRedundancyECBlockGroupsStat()); + lowRedundancyStripedCount, queues.getLowRedundancyECBlockGroups()); assertEquals("Corrupt striped blocks count incorrect!", - corruptStripedCount, queues.getCorruptECBlockGroupsStat()); + corruptStripedCount, queues.getCorruptECBlockGroups()); assertEquals("Low Redundancy count incorrect!", lowRedundancyReplicaCount + lowRedundancyStripedCount, queues.getLowRedundancyBlockCount()); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java index b983fd16262..e046b50d6a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java @@ -449,7 +449,7 @@ public class TestNameNodeMetrics { assertEquals("Pending deletion blocks metrics not matching!", namesystem.getPendingDeletionBlocks(), namesystem.getPendingDeletionReplicatedBlocks() + - namesystem.getPendingDeletionECBlockGroups()); + namesystem.getPendingDeletionECBlocks()); } /** Corrupt a block and ensure metrics reflects it */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 2d38f2fbb80..c515df36cad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -778,9 +778,9 @@ public class TestDFSAdmin { assertEquals(numCorruptBlocks + numCorruptECBlockGroups, client.getCorruptBlocksCount()); assertEquals(numCorruptBlocks, client.getNamenode() - .getBlocksStats().getCorruptBlocksStat()); + .getReplicatedBlockStats().getCorruptBlocks()); assertEquals(numCorruptECBlockGroups, client.getNamenode() - .getECBlockGroupsStats().getCorruptBlockGroupsStat()); + .getECBlockGroupStats().getCorruptBlockGroups()); } @Test
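
For context on how the renamed surface is consumed, the sketch below mirrors the DFSAdmin -report path touched in this patch. It is illustrative only and not part of the change: the Configuration/FileSystem setup is assumed (fs.defaultFS pointing at a running HDFS cluster, and the example class name is hypothetical), while the getReplicatedBlockStats()/getECBlockGroupStats() calls and the getters on ReplicatedBlockStats/ECBlockGroupStats are exactly the ones introduced above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;

public class BlockStatsReportExample {
  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS resolves to an HDFS cluster; otherwise the cast below fails.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      ClientProtocol namenode = dfs.getClient().getNamenode();

      // Replicated (CONTIGUOUS) block statistics; formerly getBlocksStats().
      ReplicatedBlockStats replicated = namenode.getReplicatedBlockStats();
      System.out.println("Under replicated blocks: "
          + replicated.getLowRedundancyBlocks());
      System.out.println("Blocks with corrupt replicas: "
          + replicated.getCorruptBlocks());
      System.out.println("Pending deletion blocks: "
          + replicated.getPendingDeletionBlocks());

      // Erasure coded (STRIPED) block group statistics; formerly
      // getECBlockGroupsStats().
      ECBlockGroupStats ecStats = namenode.getECBlockGroupStats();
      System.out.println("Low redundancy block groups: "
          + ecStats.getLowRedundancyBlockGroups());
      System.out.println("Block groups with corrupt internal blocks: "
          + ecStats.getCorruptBlockGroups());
      System.out.println("Pending deletion EC blocks: "
          + ecStats.getPendingDeletionBlocks());
    }
  }
}

As in the patched DFSAdmin#report, the two calls complement the aggregate getStats() counters (DFSTestUtil#verifyClientStats above checks that the split values sum to the aggregate), so replicated and erasure-coded block health can be reported separately.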