HDFS-12319. DirectoryScanner will throw IllegalStateException when Multiple BP's are present. Contributed by Brahma Reddy Battula.
commit 2cae387402
parent 82957ce0d0
@@ -813,13 +813,12 @@ public class DirectoryScanner implements Runnable {
     public ScanInfoPerBlockPool call() throws IOException {
       String[] bpList = volume.getBlockPoolList();
       ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
+      perfTimer.start();
+      throttleTimer.start();
       for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<>();
         File bpFinalizedDir = volume.getFinalizedDir(bpid);
 
-        perfTimer.start();
-        throttleTimer.start();
-
         try {
           result.put(bpid,
               compileReport(volume, bpFinalizedDir, bpFinalizedDir, report));
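Note on the fix above: perfTimer and throttleTimer are, by all appearances, org.apache.hadoop.util.StopWatch fields, and StopWatch.start() throws IllegalStateException when the watch is already running. With a single block pool the old in-loop placement never re-hit a running watch; with multiple block pools the second iteration did, which is the crash named in the subject line. A minimal standalone sketch of the failure mode (class name hypothetical, assuming the StopWatch behavior just described):

    import org.apache.hadoop.util.StopWatch;

    public class StopWatchRestartDemo {
      public static void main(String[] args) {
        StopWatch perfTimer = new StopWatch();
        // Simulates the old per-block-pool loop in call():
        for (String bpid : new String[] {"BP-1", "BP-2"}) {
          perfTimer.start(); // 2nd iteration: IllegalStateException
        }
      }
    }

Hoisting the two start() calls ahead of the loop means each watch is started exactly once per call(), however many block pools the volume serves.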
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -309,15 +310,22 @@ public class TestDirectoryScanner {
         missingMemoryBlocks, mismatchBlocks, 0);
   }
 
-  private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
-      long missingMemoryBlocks, long mismatchBlocks, long duplicateBlocks) throws IOException {
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) throws IOException {
     scanner.reconcile();
+    verifyStats(totalBlocks, diffsize, missingMetaFile, missingBlockFile,
+        missingMemoryBlocks, mismatchBlocks, duplicateBlocks);
+  }
+
+  private void verifyStats(long totalBlocks, int diffsize, long missingMetaFile,
+      long missingBlockFile, long missingMemoryBlocks, long mismatchBlocks,
+      long duplicateBlocks) {
     assertTrue(scanner.diffs.containsKey(bpid));
     LinkedList<DirectoryScanner.ScanInfo> diff = scanner.diffs.get(bpid);
     assertTrue(scanner.stats.containsKey(bpid));
     DirectoryScanner.Stats stats = scanner.stats.get(bpid);
 
     assertEquals(diffsize, diff.size());
     assertEquals(totalBlocks, stats.totalBlocks);
     assertEquals(missingMetaFile, stats.missingMetaFile);
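The refactor above splits the old scan() into reconcile() plus the new verifyStats(), so a test can run the scanner once and then assert the per-block-pool stats separately by repointing the bpid field. Usage sketch (names taken from this diff; the real pattern appears in the new test below):

    scanner.reconcile();                      // one scan covering every BP
    bpid = cluster.getNamesystem(1).getBlockPoolId();
    verifyStats(1, 0, 0, 0, 0, 0, 0);         // assert stats for the first BP
    bpid = cluster.getNamesystem(3).getBlockPoolId();
    verifyStats(2, 0, 0, 0, 0, 0, 0);         // then the second, without rescanning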
@@ -1009,4 +1017,48 @@ public class TestDirectoryScanner {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDirectoryScannerInFederatedCluster() throws Exception {
+    //Create Federated cluster with two nameservices and one DN
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
+        .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
+        .numDataNodes(1).build()) {
+      cluster.waitActive();
+      cluster.transitionToActive(1);
+      cluster.transitionToActive(3);
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
+      //Create one block in first nameservice
+      FileSystem fs = cluster.getFileSystem(1);
+      int bp1Files = 1;
+      writeFile(fs, bp1Files);
+      //Create two blocks in second nameservice
+      FileSystem fs2 = cluster.getFileSystem(3);
+      int bp2Files = 2;
+      writeFile(fs2, bp2Files);
+      //Call the Directory scanner
+      scanner = new DirectoryScanner(dataNode, fds, CONF);
+      scanner.setRetainDiffs(true);
+      scanner.reconcile();
+      //Check blocks in corresponding BP
+      bpid = cluster.getNamesystem(1).getBlockPoolId();
+      verifyStats(bp1Files, 0, 0, 0, 0, 0, 0);
+      bpid = cluster.getNamesystem(3).getBlockPoolId();
+      verifyStats(bp2Files, 0, 0, 0, 0, 0, 0);
+    } finally {
+      if (scanner != null) {
+        scanner.shutdown();
+        scanner = null;
+      }
+    }
+  }
+
+  private void writeFile(FileSystem fs, int numFiles) throws IOException {
+    final String fileName = "/" + GenericTestUtils.getMethodName();
+    final Path filePath = new Path(fileName);
+    for (int i = 0; i < numFiles; i++) {
+      DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
+    }
+  }
 }
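On the namenode indices in the new test: MiniDFSNNTopology.simpleHAFederatedTopology(2) builds two HA nameservices with two namenodes each, and MiniDFSCluster numbers namenodes globally across nameservices. Assuming that layout:

    // nameservice 0 -> namenode indices 0 and 1
    // nameservice 1 -> namenode indices 2 and 3
    cluster.transitionToActive(1);              // an active NN for nameservice 0
    cluster.transitionToActive(3);              // an active NN for nameservice 1
    FileSystem fs = cluster.getFileSystem(1);   // client for nameservice 0
    FileSystem fs2 = cluster.getFileSystem(3);  // client for nameservice 1

so the one block written through fs and the two written through fs2 land in different block pools on the single datanode, exercising the multi-BP path that used to throw.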