From e02bbeb8862ee5bca572a0252e8ff3a3699eff5a Mon Sep 17 00:00:00 2001
From: "Colin P. McCabe"
Date: Fri, 4 Dec 2015 12:15:53 -0800
Subject: [PATCH] HDFS-9267. TestDiskError should get stored replicas through
 FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  2 ++
 .../server/datanode/FsDatasetTestUtils.java    |  6 ++++-
 .../hdfs/server/datanode/TestDiskError.java    |  9 +++-----
 .../impl/FsDatasetImplTestUtils.java           | 23 +++++++++++++++++++
 4 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9e8b8a942ce..99aa719c3ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -883,6 +883,8 @@ Release 2.9.0 - UNRELEASED
   NEW FEATURES
 
   IMPROVEMENTS
 
+    HDFS-9267. TestDiskError should get stored replicas through
+    FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)
 
   OPTIMIZATIONS
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index fd47705bfef..e89e1f2ac5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Iterator;
 
 /**
  * Provide block access for FsDataset white box tests.
@@ -251,4 +252,7 @@ public interface FsDatasetTestUtils {
    */
   void changeStoredGenerationStamp(ExtendedBlock block, long newGenStamp)
       throws IOException;
-}
+
+  /** Get all stored replicas in the specified block pool. */
+  Iterator<Replica> getStoredReplicas(String bpid) throws IOException;
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index cc8566c83b1..55a668bc61d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -140,7 +140,8 @@ public class TestDiskError {
     cluster.waitActive();
     final int sndNode = 1;
     DataNode datanode = cluster.getDataNodes().get(sndNode);
-
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
+
     // replicate the block to the second datanode
     InetSocketAddress target = datanode.getXferAddress();
     Socket s = new Socket(target.getAddress(), target.getPort());
@@ -161,11 +162,7 @@ public class TestDiskError {
 
     // the temporary block & meta files should be deleted
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
-    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    storageDir = cluster.getInstanceStorageDir(sndNode, 1);
-    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
+    while (utils.getStoredReplicas(bpid).hasNext()) {
       Thread.sleep(100);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index 320ae9f580a..f67eeb84488 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -47,6 +47,9 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
 import java.nio.file.Files;
 import java.nio.file.StandardCopyOption;
 import java.util.Random;
@@ -377,4 +380,24 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     Files.move(metaFile.toPath(), newMetaFile.toPath(),
         StandardCopyOption.ATOMIC_MOVE);
   }
+
+  @Override
+  public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
+    // Reload replicas from the disk.
+    ReplicaMap replicaMap = new ReplicaMap(dataset);
+    try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
+      for (FsVolumeSpi vol : refs) {
+        FsVolumeImpl volume = (FsVolumeImpl) vol;
+        volume.getVolumeMap(bpid, replicaMap, dataset.ramDiskReplicaTracker);
+      }
+    }
+
+    // Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
+    // FsVolumeSpi implementation.
+    List<Replica> ret = new ArrayList<>();
+    if (replicaMap.replicas(bpid) != null) {
+      ret.addAll(replicaMap.replicas(bpid));
+    }
+    return ret.iterator();
+  }
 }
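Usage sketch (not part of the patch): the snippet below shows how a MiniDFSCluster-based test can enumerate a datanode's stored replicas through the new FsDatasetTestUtils#getStoredReplicas call instead of listing rbw directories directly, mirroring the polling loop in the TestDiskError hunk above. The class name StoredReplicasExample is made up for illustration; the cluster, utils, and namesystem calls are the same ones used in the patch.

import java.util.Iterator;

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
import org.apache.hadoop.hdfs.server.datanode.Replica;

public class StoredReplicasExample {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DataNode datanode = cluster.getDataNodes().get(0);

      // The test-utils facade hides whether replicas live in rbw/finalized
      // directories on disk or in some other FsDatasetSpi implementation.
      FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
      String bpid = cluster.getNamesystem().getBlockPoolId();

      // Enumerate whatever replicas the datanode currently stores.
      for (Iterator<Replica> it = utils.getStoredReplicas(bpid); it.hasNext();) {
        Replica replica = it.next();
        System.out.println("block " + replica.getBlockId()
            + " state " + replica.getState());
      }

      // Same polling pattern as the updated TestDiskError: wait until the
      // block pool holds no stored replicas at all.
      while (utils.getStoredReplicas(bpid).hasNext()) {
        Thread.sleep(100);
      }
    } finally {
      cluster.shutdown();
    }
  }
}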