HDFS-9292. Make TestFileCorruption independent of underlying FsDataset implementation. (lei)
parent d8736eb9ca
commit 399ad00915
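TestFileCorruption previously located replicas by listing block files under the DataNode storage directories (MiniDFSCluster.getFinalizedDir plus FileUtils.listFiles), which only works when the FsDataset is backed by local files. With this change the test enumerates replicas through FsDatasetSpi#getBlockReports and deletes replica data through MiniDFSCluster#getFsDatasetTestUtils, so it runs against any FsDataset implementation.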
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1581,6 +1581,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8945. Update the description about replica placement in HDFS
     Architecture documentation. (Masatake Iwasaki via wang)
 
+    HDFS-9292. Make TestFileCorruption independent of underlying FsDataset
+    implementation. (lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -24,20 +24,16 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
-import java.io.File;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
+import java.util.Map;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.filefilter.DirectoryFileFilter;
-import org.apache.commons.io.filefilter.PrefixFileFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -45,6 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -74,17 +71,17 @@ public class TestFileCorruption {
     FileSystem fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     // Now deliberately remove the blocks
-    File storageDir = cluster.getInstanceStorageDir(2, 0);
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-    assertTrue("data directory does not exist", data_dir.exists());
-    Collection<File> blocks = FileUtils.listFiles(data_dir,
-        new PrefixFileFilter(Block.BLOCK_FILE_PREFIX),
-        DirectoryFileFilter.DIRECTORY);
-    assertTrue("Blocks do not exist in data-dir", blocks.size() > 0);
-    for (File block : blocks) {
-      System.out.println("Deliberately removing file " + block.getName());
-      assertTrue("Cannot remove file.", block.delete());
+    DataNode dn = cluster.getDataNodes().get(2);
+    Map<DatanodeStorage, BlockListAsLongs> blockReports =
+        dn.getFSDataset().getBlockReports(bpid);
+    assertTrue("Blocks do not exist on data-dir", !blockReports.isEmpty());
+    for (BlockListAsLongs report : blockReports.values()) {
+      for (BlockReportReplica brr : report) {
+        LOG.info("Deliberately removing block {}", brr.getBlockName());
+        cluster.getFsDatasetTestUtils(2).getMaterializedReplica(
+            new ExtendedBlock(bpid, brr)).deleteData();
+      }
     }
     assertTrue("Corrupted replicas not handled properly.",
         util.checkFiles(fs, "/srcdat"));
@@ -110,7 +107,7 @@ public class TestFileCorruption {
     // Now attempt to read the file
     DataInputStream dis = fs.open(file, 512);
     try {
-      System.out.println("A ChecksumException is expected to be logged.");
+      LOG.info("A ChecksumException is expected to be logged.");
       dis.readByte();
     } catch (ChecksumException ignore) {
       //expect this exception but let any NPE get thrown
@@ -137,15 +134,7 @@ public class TestFileCorruption {
 
     // get the block
     final String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = cluster.getInstanceStorageDir(0, 0);
-    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-    assertTrue("Data directory does not exist", dataDir.exists());
-    ExtendedBlock blk = getBlock(bpid, dataDir);
-    if (blk == null) {
-      storageDir = cluster.getInstanceStorageDir(0, 1);
-      dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-      blk = getBlock(bpid, dataDir);
-    }
+    ExtendedBlock blk = getFirstBlock(cluster.getDataNodes().get(0), bpid);
     assertFalse("Data directory does not contain any blocks or there was an "
         + "IO error", blk==null);
@@ -173,20 +162,20 @@ public class TestFileCorruption {
       //clean up
       fs.delete(FILE_PATH, false);
     } finally {
-      if (cluster != null) { cluster.shutdown(); }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
-  public static ExtendedBlock getBlock(String bpid, File dataDir) {
-    List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
-    if (metadataFiles == null || metadataFiles.isEmpty()) {
-      return null;
-    }
-    File metadataFile = metadataFiles.get(0);
-    File blockFile = Block.metaToBlockFile(metadataFile);
-    return new ExtendedBlock(bpid, Block.getBlockId(blockFile.getName()),
-        blockFile.length(), Block.getGenerationStamp(metadataFile.getName()));
+  private static ExtendedBlock getFirstBlock(DataNode dn, String bpid) {
+    Map<DatanodeStorage, BlockListAsLongs> blockReports =
+        dn.getFSDataset().getBlockReports(bpid);
+    for (BlockListAsLongs blockLongs : blockReports.values()) {
+      for (BlockReportReplica block : blockLongs) {
+        return new ExtendedBlock(bpid, block);
+      }
+    }
+    return null;
   }
 
 }
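For context, here is a minimal, self-contained sketch of the pattern the patch adopts: enumerate a DataNode's replicas through the dataset's block reports and mutate them through the FsDatasetTestUtils abstraction, rather than deleting block files on disk. The class name, file path, and cluster sizing below are illustrative only; the APIs are the same ones the patch itself calls.

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

public class ReplicaCorruptionSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative single-DataNode mini cluster; not part of the commit.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      DistributedFileSystem fs = cluster.getFileSystem();
      Path file = new Path("/sketch/file.dat");
      DFSTestUtil.createFile(fs, file, 1024L, (short) 1, 0L);

      // Enumerate replicas through the dataset's block reports instead of
      // listing block files under the DataNode storage directories.
      String bpid = cluster.getNamesystem().getBlockPoolId();
      DataNode dn = cluster.getDataNodes().get(0);
      Map<DatanodeStorage, BlockListAsLongs> reports =
          dn.getFSDataset().getBlockReports(bpid);
      for (BlockListAsLongs report : reports.values()) {
        for (BlockReportReplica replica : report) {
          // Delete the replica's data through FsDatasetTestUtils rather
          // than File#delete, so no file-based layout is assumed.
          cluster.getFsDatasetTestUtils(0)
              .getMaterializedReplica(new ExtendedBlock(bpid, replica))
              .deleteData();
        }
      }
    } finally {
      cluster.shutdown();
    }
  }
}

The design point is that the MaterializedReplica handle hides where and how a replica is stored; a dataset implementation that does not keep replicas as local files can still satisfy deleteData(), so the test no longer silently depends on the default file-based FsDatasetImpl.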