HDFS-5481. Fix TestDataNodeVolumeFailure in branch HDFS-2832. (Contributed by Junping Du)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1540228 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2013-11-09 00:19:48 +00:00
parent 6b0611ed24
commit 90d1b1b6a4
2 changed files with 24 additions and 8 deletions


@@ -85,6 +85,9 @@ IMPROVEMENTS:
HDFS-5472. Fix TestDatanodeManager, TestSafeMode and
TestNNThroughputBenchmark (Contributed by szetszwo)
HDFS-5475. NN incorrectly tracks more than one replica per DN. (Arpit
Agarwal)
+ HDFS-5481. Fix TestDataNodeVolumeFailure in branch HDFS-2832. (Contributed
+ by Junping Du)


@@ -42,11 +42,13 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -151,13 +153,24 @@ public class TestDataNodeVolumeFailure {
DataNode dn = cluster.getDataNodes().get(1); //corresponds to dir data3
String bpid = cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR = dn.getDNRegistrationForBP(bpid);
- final StorageBlockReport[] report = {
-     new StorageBlockReport(
-         new DatanodeStorage(dnR.getDatanodeUuid()),
-         DataNodeTestUtils.getFSDataset(dn).getBlockReport(bpid
-         ).getBlockListAsLongs())
- };
- cluster.getNameNodeRpc().blockReport(dnR, bpid, report);
+ Map<String, BlockListAsLongs> perVolumeBlockLists =
+     dn.getFSDataset().getBlockReports(bpid);
+ // Send block report
+ StorageBlockReport[] reports =
+     new StorageBlockReport[perVolumeBlockLists.size()];
+ int reportIndex = 0;
+ for(Map.Entry<String, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
+   String storageID = kvPair.getKey();
+   BlockListAsLongs blockList = kvPair.getValue();
+   DatanodeStorage dnStorage = new DatanodeStorage(storageID);
+   reports[reportIndex++] =
+       new StorageBlockReport(dnStorage, blockList.getBlockListAsLongs());
+ }
+ cluster.getNameNodeRpc().blockReport(dnR, bpid, reports);
// verify number of blocks and files...
verify(filename, filesize);
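
The hunk above is the whole fix: on the HDFS-2832 branch a DataNode reports each storage (volume) separately, so the test can no longer send a single aggregate report keyed by the DataNode UUID. It instead asks the dataset for its per-volume block lists and wraps each one in its own StorageBlockReport before making the blockReport RPC. Below is a minimal self-contained sketch of that pattern, using only the HDFS types and calls that appear in the diff; the sketch's class and method names are illustrative, not part of the commit.

// Illustrative sketch (not part of this commit): build one
// StorageBlockReport per storage, mirroring the loop added to
// TestDataNodeVolumeFailure above.
import java.util.Map;

import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

class PerStorageReportSketch {
  static StorageBlockReport[] buildReports(
      Map<String, BlockListAsLongs> perVolumeBlockLists) {
    StorageBlockReport[] reports =
        new StorageBlockReport[perVolumeBlockLists.size()];
    int i = 0;
    for (Map.Entry<String, BlockListAsLongs> e : perVolumeBlockLists.entrySet()) {
      // The map key is the storage ID; each storage gets its own report
      // instead of one report for the whole DataNode.
      reports[i++] = new StorageBlockReport(
          new DatanodeStorage(e.getKey()),
          e.getValue().getBlockListAsLongs());
    }
    return reports;
  }
}

The resulting array is what the single blockReport call at the end of the hunk sends; the RPC itself is unchanged apart from carrying one entry per storage rather than one for the whole node.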