From a1aa1836fb6831c25efe326cdfdc014370cf5957 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Tue, 3 Dec 2013 16:30:29 +0000
Subject: [PATCH] HDFS-5484. StorageType and State in DatanodeStorageInfo in
 NameNode is not accurate. (Contributed by Eric Sirianni)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1547462 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt     | 3 +++
 .../hadoop/hdfs/server/datanode/BPServiceActor.java       | 8 +++-----
 .../hdfs/server/datanode/fsdataset/FsDatasetSpi.java      | 5 +++--
 .../server/datanode/fsdataset/impl/FsDatasetImpl.java     | 9 +++++----
 .../server/datanode/fsdataset/impl/FsVolumeImpl.java      | 6 ++++++
 .../test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java  | 9 +++++----
 .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java    | 7 ++++---
 .../hadoop/hdfs/TestInjectionForSimulatedStorage.java     | 5 +++--
 .../hadoop/hdfs/server/datanode/SimulatedFSDataset.java   | 8 +++-----
 .../hadoop/hdfs/server/datanode/TestBlockReport.java      | 8 +++-----
 .../hdfs/server/datanode/TestDataNodeVolumeFailure.java   | 7 +++----
 11 files changed, 41 insertions(+), 34 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
index cb2b7bd01dc..f822f9cf33f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
@@ -120,3 +120,6 @@ IMPROVEMENTS:
 
     HDFS-5559. Fix TestDatanodeConfig in HDFS-2832. (Contributed by szetszwo)
 
+    HDFS-5484. StorageType and State in DatanodeStorageInfo in NameNode is
+    not accurate. (Eric Sirianni via Arpit Agarwal)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 6f15c4a6be0..c9317b35db4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -454,7 +454,7 @@ DatanodeCommand blockReport() throws IOException {
     long brCreateStartTime = now();
     long totalBlockCount = 0;
 
-    Map<String, BlockListAsLongs> perVolumeBlockLists =
+    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
         dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());
 
     // Send block report
@@ -463,13 +463,11 @@ DatanodeCommand blockReport() throws IOException {
         new StorageBlockReport[perVolumeBlockLists.size()];
 
     int i = 0;
-    for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
-      String storageID = kvPair.getKey();
+    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+      DatanodeStorage dnStorage = kvPair.getKey();
       BlockListAsLongs blockList = kvPair.getValue();
       totalBlockCount += blockList.getNumberOfBlocks();
 
-      // Dummy DatanodeStorage object just for sending the block report.
-      DatanodeStorage dnStorage = new DatanodeStorage(storageID);
       reports[i++] = new StorageBlockReport(
           dnStorage, blockList.getBlockListAsLongs());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index b9d6c5bd14a..415c6a985ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -268,9 +269,9 @@ public String recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen
   /**
    * Returns one block report per volume.
    * @param bpid Block Pool Id
-   * @return - a map of StorageID to block report for the volume.
+   * @return - a map of DatanodeStorage to block report for the volume.
    */
-  public Map<String, BlockListAsLongs> getBlockReports(String bpid);
+  public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
 
   /**
    * Returns the cache report - the full list of cached block IDs of a
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 79c30738bd8..806e9f70402 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -1089,14 +1090,14 @@ public List<Long> getCacheReport(String bpid) {
   }
 
   @Override
-  public Map<String, BlockListAsLongs> getBlockReports(String bpid) {
-    Map<String, BlockListAsLongs> blockReportMap =
-        new HashMap<String, BlockListAsLongs>();
+  public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
+    Map<DatanodeStorage, BlockListAsLongs> blockReportMap =
+        new HashMap<DatanodeStorage, BlockListAsLongs>();
 
     for (FsVolumeImpl v : getVolumes()) {
       ReplicaMap rMap = perVolumeReplicaMap.get(v.getStorageID());
       BlockListAsLongs blockList = getBlockReportWithReplicaMap(bpid, rMap);
-      blockReportMap.put(v.getStorageID(), blockList);
+      blockReportMap.put(v.toDatanodeStorage(), blockList);
     }
 
     return blockReportMap;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index b7e2ccd10db..9e5b0ebee4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -332,5 +333,10 @@ public String getStorageID() {
   public StorageType getStorageType() {
     return storageType;
   }
+
+  DatanodeStorage toDatanodeStorage() {
+    return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
+  }
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index fa891ec4bac..abec6f01518 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -88,6 +88,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.web.HftpFileSystem;
@@ -1970,7 +1971,7 @@ public void formatDataNodeDirs() throws IOException {
    * @param dataNodeIndex - data node whose block report is desired - the index is same as for getDataNodes()
    * @return the block report for the specified data node
    */
-  public Map<String, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
+  public Map<DatanodeStorage, BlockListAsLongs> getBlockReport(String bpid, int dataNodeIndex) {
     if (dataNodeIndex < 0 || dataNodeIndex > dataNodes.size()) {
       throw new IndexOutOfBoundsException();
     }
@@ -1984,10 +1985,10 @@ public Map<String, BlockListAsLongs> getBlockReport(String bpid, int dataNodeInd
    * @return block reports from all data nodes
    * BlockListAsLongs is indexed in the same order as the list of datanodes returned by getDataNodes()
    */
-  public List<Map<String, BlockListAsLongs>> getAllBlockReports(String bpid) {
+  public List<Map<DatanodeStorage, BlockListAsLongs>> getAllBlockReports(String bpid) {
     int numDataNodes = dataNodes.size();
-    final List<Map<String, BlockListAsLongs>> result
-        = new ArrayList<Map<String, BlockListAsLongs>>(numDataNodes);
+    final List<Map<DatanodeStorage, BlockListAsLongs>> result
+        = new ArrayList<Map<DatanodeStorage, BlockListAsLongs>>(numDataNodes);
     for (int i = 0; i < numDataNodes; ++i) {
       result.add(getBlockReport(bpid, i));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index f065dc923af..dd453644a0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
@@ -1394,11 +1395,11 @@ static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
     List<File> files = new ArrayList<File>();
     List<DataNode> datanodes = cluster.getDataNodes();
     String poolId = cluster.getNamesystem().getBlockPoolId();
-    List<Map<String, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
+    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
     for(int i = 0; i < blocks.size(); i++) {
       DataNode dn = datanodes.get(i);
-      Map<String, BlockListAsLongs> map = blocks.get(i);
-      for(Map.Entry<String, BlockListAsLongs> e : map.entrySet()) {
+      Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
+      for(Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
         for(Block b : e.getValue()) {
           files.add(DataNodeTestUtils.getFile(dn, poolId, b.getBlockId()));
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
index da2b8bbfe80..1f2fbbd8e4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
@@ -136,7 +137,7 @@ public void testInjection() throws IOException {
     DFSTestUtil.createFile(cluster.getFileSystem(), testPath, filesize,
         filesize, blockSize, (short) numDataNodes, 0L);
     waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
-    List<Map<String, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
+    List<Map<DatanodeStorage, BlockListAsLongs>> blocksList = cluster.getAllBlockReports(bpid);
 
     cluster.shutdown();
     cluster = null;
@@ -157,7 +158,7 @@ public void testInjection() throws IOException {
         .build();
     cluster.waitActive();
     Set<Block> uniqueBlocks = new HashSet<Block>();
-    for(Map<String, BlockListAsLongs> map : blocksList) {
+    for(Map<DatanodeStorage, BlockListAsLongs> map : blocksList) {
       for(BlockListAsLongs blockList : map.values()) {
         for(Block b : blockList) {
           uniqueBlocks.add(new Block(b));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index eebc5459655..f5b291e0ae6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -22,6 +22,7 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -484,12 +485,9 @@ synchronized BlockListAsLongs getBlockReport(String bpid) {
   }
 
   @Override
-  public synchronized Map<String, BlockListAsLongs> getBlockReports(
+  public synchronized Map<DatanodeStorage, BlockListAsLongs> getBlockReports(
       String bpid) {
-    Map<String, BlockListAsLongs> reports =
-        new HashMap<String, BlockListAsLongs>();
-    reports.put(storage.storageUuid, getBlockReport(bpid));
-    return reports;
+    return Collections.singletonMap(new DatanodeStorage(storage.storageUuid), getBlockReport(bpid));
   }
 
   @Override // FsDatasetSpi
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
index 53b47e7d840..bd54edd0cee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
@@ -120,7 +120,7 @@ public void shutDownCluster() throws IOException {
   private static StorageBlockReport[] getBlockReports(
       DataNode dn, String bpid, boolean corruptOneBlockGs,
       boolean corruptOneBlockLen) {
-    Map<String, BlockListAsLongs> perVolumeBlockLists =
+    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
         dn.getFSDataset().getBlockReports(bpid);
 
     // Send block report
@@ -130,8 +130,8 @@ private static StorageBlockReport[] getBlockReports(
     boolean corruptedLen = false;
 
     int reportIndex = 0;
-    for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
-      String storageID = kvPair.getKey();
+    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+      DatanodeStorage dnStorage = kvPair.getKey();
       BlockListAsLongs blockList = kvPair.getValue();
 
       // Walk the list of blocks until we find one each to corrupt the
@@ -150,8 +150,6 @@ private static StorageBlockReport[] getBlockReports(
         }
       }
 
-      // Dummy DatanodeStorage object just for sending the block report.
-      DatanodeStorage dnStorage = new DatanodeStorage(storageID);
       reports[reportIndex++] =
           new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 30e23420ac9..646d33d13b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -154,7 +154,7 @@ public void testVolumeFailure() throws Exception {
     String bpid = cluster.getNamesystem().getBlockPoolId();
     DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
 
-    Map<String, BlockListAsLongs> perVolumeBlockLists =
+    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
         dn.getFSDataset().getBlockReports(bpid);
 
     // Send block report
@@ -162,10 +162,9 @@ public void testVolumeFailure() throws Exception {
         new StorageBlockReport[perVolumeBlockLists.size()];
 
     int reportIndex = 0;
-    for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
-      String storageID = kvPair.getKey();
+    for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+      DatanodeStorage dnStorage = kvPair.getKey();
       BlockListAsLongs blockList = kvPair.getValue();
-      DatanodeStorage dnStorage = new DatanodeStorage(storageID);
       reports[reportIndex++] =
           new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
     }
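
For context, a minimal sketch (not part of the patch) of the block-report path this change sets up, written only against the signatures visible in the diff above; the class and method names BlockReportSketch and buildReports are hypothetical:

// Illustrative only: mirrors BPServiceActor.blockReport() after this patch.
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

class BlockReportSketch {
  // Hypothetical helper: builds one StorageBlockReport per volume from the
  // DatanodeStorage-keyed map that getBlockReports() now returns.
  static StorageBlockReport[] buildReports(FsDatasetSpi<?> dataset, String bpid) {
    Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
        dataset.getBlockReports(bpid);
    StorageBlockReport[] reports =
        new StorageBlockReport[perVolumeBlockLists.size()];
    int i = 0;
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair :
        perVolumeBlockLists.entrySet()) {
      // The map key already carries the storage ID, state, and StorageType,
      // so no dummy DatanodeStorage needs to be constructed here.
      reports[i++] = new StorageBlockReport(
          kvPair.getKey(), kvPair.getValue().getBlockListAsLongs());
    }
    return reports;
  }
}

Keying the per-volume map by DatanodeStorage lets FsVolumeImpl.toDatanodeStorage() carry the volume's real state and StorageType into the report, rather than the dummy DatanodeStorage the sender previously built from the storage ID alone, which is the inaccuracy HDFS-5484 addresses.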