HDFS-9267. TestDiskError should get stored replicas through FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)

(cherry picked from commit e02bbeb886)
Authored by Colin P. McCabe on 2015-12-04 12:15:53 -08:00; committed by Colin Patrick Mccabe
parent 58714f3b73
commit 3beedc177d
4 changed files with 33 additions and 7 deletions

CHANGES.txt

@@ -7,6 +7,8 @@ Release 2.9.0 - UNRELEASED
 
   NEW FEATURES
 
   IMPROVEMENTS
 
+    HDFS-9267. TestDiskError should get stored replicas through
+    FsDatasetTestUtils. (Lei (Eddy) Xu via Colin P. McCabe)
   OPTIMIZATIONS

FsDatasetTestUtils.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Iterator;
 
 /**
  * Provide block access for FsDataset white box tests.
@@ -251,4 +252,7 @@ public interface FsDatasetTestUtils {
    */
   void changeStoredGenerationStamp(ExtendedBlock block, long newGenStamp)
       throws IOException;
+
+  /** Get all stored replicas in the specified block pool. */
+  Iterator<Replica> getStoredReplicas(String bpid) throws IOException;
 }
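
For context, a minimal sketch (not part of this patch) of how a test could program against the new interface method. The class name, file path, and sizes below are illustrative assumptions; only getFsDatasetTestUtils, getStoredReplicas, and the Replica accessors come from this patch or existing Hadoop test APIs.

// Sketch only: exercise FsDatasetTestUtils#getStoredReplicas from a test.
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetTestUtils;
import org.junit.Test;

public class TestStoredReplicasSketch {
  @Test
  public void testListStoredReplicas() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Write a small file so the single DataNode stores at least one replica.
      DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/sketch"),
          1024L, (short) 1, 0L);

      DataNode dn = cluster.getDataNodes().get(0);
      FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
      String bpid = cluster.getNamesystem().getBlockPoolId();

      // Enumerate what the DataNode actually has on disk for this block pool,
      // without reaching into FsDatasetImpl internals or storage directories.
      Iterator<Replica> it = utils.getStoredReplicas(bpid);
      while (it.hasNext()) {
        Replica r = it.next();
        System.out.println("block " + r.getBlockId()
            + " genstamp " + r.getGenerationStamp()
            + " state " + r.getState());
      }
    } finally {
      cluster.shutdown();
    }
  }
}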

TestDiskError.java

@@ -140,6 +140,7 @@ public class TestDiskError {
     cluster.waitActive();
     final int sndNode = 1;
     DataNode datanode = cluster.getDataNodes().get(sndNode);
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
 
     // replicate the block to the second datanode
     InetSocketAddress target = datanode.getXferAddress();
@@ -161,11 +162,7 @@ public class TestDiskError {
     // the temporary block & meta files should be deleted
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
-    File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    storageDir = cluster.getInstanceStorageDir(sndNode, 1);
-    File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
+    while (utils.getStoredReplicas(bpid).hasNext()) {
       Thread.sleep(100);
     }
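
The new poll loop above retries indefinitely. As a hedged aside, not part of the patch, a test that prefers a bounded wait could wrap the same check with a deadline, for example (assuming org.apache.hadoop.util.Time and org.junit.Assert are imported, and an arbitrarily chosen 30-second timeout):

    // Illustrative bounded variant of the wait above; not part of this commit.
    // Assumes the same "utils" and "bpid" locals as in TestDiskError.
    long deadline = Time.monotonicNow() + 30000;
    while (utils.getStoredReplicas(bpid).hasNext()) {
      if (Time.monotonicNow() > deadline) {
        Assert.fail("temporary replicas were not cleaned up within 30 seconds");
      }
      Thread.sleep(100);
    }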

FsDatasetImplTestUtils.java

@@ -47,6 +47,9 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
 import java.nio.file.Files;
 import java.nio.file.StandardCopyOption;
 import java.util.Random;
@@ -377,4 +380,24 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     Files.move(metaFile.toPath(), newMetaFile.toPath(),
         StandardCopyOption.ATOMIC_MOVE);
   }
+
+  @Override
+  public Iterator<Replica> getStoredReplicas(String bpid) throws IOException {
+    // Reload replicas from the disk.
+    ReplicaMap replicaMap = new ReplicaMap(dataset);
+    try (FsVolumeReferences refs = dataset.getFsVolumeReferences()) {
+      for (FsVolumeSpi vol : refs) {
+        FsVolumeImpl volume = (FsVolumeImpl) vol;
+        volume.getVolumeMap(bpid, replicaMap, dataset.ramDiskReplicaTracker);
+      }
+    }
+
+    // Cast ReplicaInfo to Replica, because ReplicaInfo assumes a file-based
+    // FsVolumeSpi implementation.
+    List<Replica> ret = new ArrayList<>();
+    if (replicaMap.replicas(bpid) != null) {
+      ret.addAll(replicaMap.replicas(bpid));
+    }
+    return ret.iterator();
+  }
 }
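
Note on the implementation: each call rebuilds a fresh ReplicaMap by rescanning the volumes (getVolumeMap reads the block pool slice from disk), so the returned iterator reflects the on-disk state at call time rather than the dataset's in-memory map; that is why TestDiskError re-invokes getStoredReplicas on every poll. As a hedged illustration, not part of the patch, a test could build assertions on top of it like the tally below (assuming the same "utils" and "bpid" locals as in TestDiskError and an import of HdfsServerConstants.ReplicaState):

    // Illustrative only: count on-disk replicas by state using the new helper.
    int rbw = 0;
    int finalized = 0;
    for (Iterator<Replica> it = utils.getStoredReplicas(bpid); it.hasNext();) {
      Replica r = it.next();
      if (r.getState() == HdfsServerConstants.ReplicaState.RBW) {
        rbw++;
      } else if (r.getState() == HdfsServerConstants.ReplicaState.FINALIZED) {
        finalized++;
      }
    }
    System.out.println(finalized + " finalized and " + rbw + " rbw replicas");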