From 90d1b1b6a49c28d35c60c9ed6fd2f777faaebd81 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Sat, 9 Nov 2013 00:19:48 +0000
Subject: [PATCH] HDFS-5481. Fix TestDataNodeVolumeFailure in branch
 HDFS-2832. (Contributed by Junping Du)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1540228 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES_HDFS-2832.txt         |  5 +++-
 .../datanode/TestDataNodeVolumeFailure.java   | 27 ++++++++++++++-----
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
index 87f89547584..50be9340354 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
@@ -85,6 +85,9 @@ IMPROVEMENTS:
     HDFS-5472. Fix TestDatanodeManager, TestSafeMode and
     TestNNThroughputBenchmark (Contributed by szetszwo)
 
-    HDFS-5475. NN incorrectly tracks more than one replica per DN. (Arpit
+    HDFS-5475. NN incorrectly tracks more than one replica per DN. (Arpit
     Agarwal)
 
+    HDFS-5481. Fix TestDataNodeVolumeFailure in branch HDFS-2832. (Contributed
+    by Junping Du)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 706916d8705..30e23420ac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -42,11 +42,13 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -151,13 +153,24 @@ public class TestDataNodeVolumeFailure {
     DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
-    final StorageBlockReport[] report = {
-        new StorageBlockReport(
-            new DatanodeStorage(dnR.getDatanodeUuid()),
-            DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid
-                ).getBlockListAsLongs())
-    };
-    cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
+
+    Map<String, BlockListAsLongs> perVolumeBlockLists =
+        dn.getFSDataset().getBlockReports(bpid);
+
+    // Send block report
+    StorageBlockReport[] reports =
+        new StorageBlockReport[perVolumeBlockLists.size()];
+
+    int reportIndex = 0;
+    for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+      String storageID = kvPair.getKey();
+      BlockListAsLongs blockList = kvPair.getValue();
+      DatanodeStorage dnStorage = new DatanodeStorage(storageID);
+      reports[reportIndex++] =
+          new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
+    }
+
+    cluster.getNameNodeRpc().blockReport(dnR, bpid, reports);
 
     // verify number of blocks and files...
     verify(filename, filesize);
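
Note: the loop added above is the heart of the change. Instead of one aggregate
block report keyed off the datanode UUID, the test now sends one
StorageBlockReport per storage volume, matching the per-storage reporting
introduced on the HDFS-2832 branch. The following is a minimal standalone
sketch of that pattern; the class and method names are illustrative, not part
of the patch, and it assumes the branch API where getBlockReports(bpid)
returns a Map<String, BlockListAsLongs> keyed by storage ID, as the loop
in the diff implies:

import java.util.Map;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

/** Illustrative helper, not part of the patch. */
class PerStorageBlockReports {
  /**
   * Build one StorageBlockReport per storage, mirroring the loop the
   * patch adds to TestDataNodeVolumeFailure.
   */
  static StorageBlockReport[] buildReports(
      Map<String, BlockListAsLongs> perVolumeBlockLists) {
    StorageBlockReport[] reports =
        new StorageBlockReport[perVolumeBlockLists.size()];
    int reportIndex = 0;
    for (Map.Entry<String, BlockListAsLongs> kvPair :
        perVolumeBlockLists.entrySet()) {
      // Each map key is a storage ID; wrap it in a DatanodeStorage and
      // pair it with the long[] encoding of that volume's block list.
      reports[reportIndex++] = new StorageBlockReport(
          new DatanodeStorage(kvPair.getKey()),
          kvPair.getValue().getBlockListAsLongs());
    }
    return reports;
  }
}

The resulting array is what the test hands to
cluster.getNameNodeRpc().blockReport(dnR, bpid, reports), letting the
NameNode account for replicas per storage rather than per datanode.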