HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)

(cherry picked from commit 7632409482)
Lei Xu 2015-11-02 17:09:39 -08:00
parent bd2867cd39
commit 41faf438e1
4 changed files with 64 additions and 85 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -812,6 +812,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
 
+    HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
+
   BUG FIXES
 
     HDFS-8091: ACLStatus and XAttributes should be presented to

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java

@@ -20,22 +20,14 @@
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import com.google.common.base.Supplier;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Random;
@@ -52,7 +44,6 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -64,6 +55,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -376,7 +368,7 @@ public void testPendingReplicationRetry() throws IOException {
     for (int i=0; i<buffer.length; i++) {
       buffer[i] = '1';
     }
-    
+
     try {
       Configuration conf = new HdfsConfiguration();
       conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, Integer.toString(numDataNodes));
@@ -396,41 +388,29 @@ public void testPendingReplicationRetry() throws IOException {
       // get first block of the file.
       ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(testFile,
           0, Long.MAX_VALUE).get(0).getBlock();
-      cluster.shutdown();
-
-      for (int i=0; i<25; i++) {
-        buffer[i] = '0';
-      }
+
+      List<MaterializedReplica> replicas = new ArrayList<>();
+      for (int dnIndex=0; dnIndex<3; dnIndex++) {
+        replicas.add(cluster.getMaterializedReplica(dnIndex, block));
+      }
+      assertEquals(3, replicas.size());
+
+      cluster.shutdown();
 
       int fileCount = 0;
       // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
-      for (int dnIndex=0; dnIndex<3; dnIndex++) {
-        File blockFile = cluster.getBlockFile(dnIndex, block);
-        LOG.info("Checking for file " + blockFile);
-
-        if (blockFile != null && blockFile.exists()) {
-          if (fileCount == 0) {
-            LOG.info("Deleting file " + blockFile);
-            assertTrue(blockFile.delete());
-          } else {
-            // corrupt it.
-            LOG.info("Corrupting file " + blockFile);
-            long len = blockFile.length();
-            assertTrue(len > 50);
-            RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
-            try {
-              blockOut.seek(len/3);
-              blockOut.write(buffer, 0, 25);
-            } finally {
-              blockOut.close();
-            }
-          }
-          fileCount++;
-        }
-      }
+      for (MaterializedReplica replica : replicas) {
+        if (fileCount == 0) {
+          LOG.info("Deleting block " + replica);
+          replica.deleteData();
+        } else {
+          // corrupt it.
+          LOG.info("Corrupting file " + replica);
+          replica.corruptData();
+        }
+        fileCount++;
+      }
       assertEquals(3, fileCount);
 
       /* Start the MiniDFSCluster with more datanodes since once a writeBlock
        * to a datanode node fails, same block can not be written to it
        * immediately. In our case some replication attempts will fail.
@@ -539,63 +519,28 @@ public void testReplicationWhenBlockCorruption() throws Exception {
           DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      FSDataOutputStream create = fs.create(new Path("/test"));
-      fs.setReplication(new Path("/test"), (short) 1);
+      Path filePath = new Path("/test");
+      FSDataOutputStream create = fs.create(filePath);
+      fs.setReplication(filePath, (short) 1);
       create.write(new byte[1024]);
       create.close();
-
-      List<File> nonParticipatedNodeDirs = new ArrayList<File>();
-      File participatedNodeDirs = null;
-      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
-        File storageDir = cluster.getInstanceStorageDir(i, 0);
-        String bpid = cluster.getNamesystem().getBlockPoolId();
-        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        if (data_dir.listFiles().length == 0) {
-          nonParticipatedNodeDirs.add(data_dir);
-        } else {
-          assertNull("participatedNodeDirs has already been set.",
-              participatedNodeDirs);
-          participatedNodeDirs = data_dir;
-        }
-      }
-      assertEquals(2, nonParticipatedNodeDirs.size());
-
-      String blockFile = null;
-      final List<File> listFiles = new ArrayList<>();
-      Files.walkFileTree(participatedNodeDirs.toPath(),
-          new SimpleFileVisitor<java.nio.file.Path>() {
-            @Override
-            public FileVisitResult visitFile(
-                java.nio.file.Path file, BasicFileAttributes attrs)
-                throws IOException {
-              listFiles.add(file.toFile());
-              return FileVisitResult.CONTINUE;
-            }
-          }
-      );
-      assertFalse(listFiles.isEmpty());
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
       int numReplicaCreated = 0;
-      for (File file : listFiles) {
-        if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
-            && !file.getName().endsWith("meta")) {
-          blockFile = file.getName();
-          for (File file1 : nonParticipatedNodeDirs) {
-            file1.mkdirs();
-            new File(file1, blockFile).createNewFile();
-            new File(file1, blockFile + "_1000.meta").createNewFile();
-            numReplicaCreated++;
-          }
-          break;
-        }
-      }
+      for (final DataNode dn : cluster.getDataNodes()) {
+        if (!dn.getFSDataset().contains(block)) {
+          cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
+          numReplicaCreated++;
+        }
+      }
       assertEquals(2, numReplicaCreated);
-      fs.setReplication(new Path("/test"), (short) 3);
+
+      fs.setReplication(filePath, (short) 3);
       cluster.restartDataNodes(); // Lets detect all DNs about dummy copied
       // blocks
       cluster.waitActive();
       cluster.triggerBlockReports();
-      DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
+      DFSTestUtil.waitReplication(fs, filePath, (short) 3);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
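
Worth noting about the testPendingReplicationRetry hunk above: the replica handles are captured with getMaterializedReplica() while the DataNodes are still running, and only deleted or corrupted after cluster.shutdown(). A minimal sketch of that pattern, reusing cluster and block from the test; this is an editorial illustration, not part of the patch:

    // Capture dataset-agnostic replica handles while the DNs are up...
    List<MaterializedReplica> replicas = new ArrayList<>();
    for (int dnIndex = 0; dnIndex < 3; dnIndex++) {
      replicas.add(cluster.getMaterializedReplica(dnIndex, block));
    }
    cluster.shutdown();
    // ...then mutate the stored replicas directly, with no assumptions
    // about how the underlying FsDataset lays blocks out on disk.
    replicas.get(0).deleteData();   // drop one copy entirely
    replicas.get(1).corruptData();  // corrupt the remaining two
    replicas.get(2).corruptData();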

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java

@@ -199,4 +199,11 @@ Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
    * @throws IOException
    */
   void checkStoredReplica(final Replica replica) throws IOException;
+
+  /**
+   * Create dummy replica files on disk for the block's data and metadata.
+   * @param block the block for which a replica is to be created.
+   * @throws IOException on I/O error.
+   */
+  void injectCorruptReplica(ExtendedBlock block) throws IOException;
 }
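
A hedged sketch of how a test drives this new hook; it mirrors the testReplicationWhenBlockCorruption hunk above, and cluster, dn, and block come from that test rather than from this file:

    // Plant a corrupt replica on every DataNode that does not hold the block,
    // without touching the dataset's on-disk layout directly.
    for (DataNode dn : cluster.getDataNodes()) {
      if (!dn.getFSDataset().contains(block)) {
        cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
      }
    }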

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.FileExistsException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -292,4 +293,28 @@ public void checkStoredReplica(Replica replica) throws IOException {
     ReplicaInfo r = (ReplicaInfo) replica;
     FsDatasetImpl.checkReplicaFiles(r);
   }
+
+  @Override
+  public void injectCorruptReplica(ExtendedBlock block) throws IOException {
+    Preconditions.checkState(!dataset.contains(block),
+        "Block " + block + " already exists on dataset.");
+    try (FsVolumeReferences volRef = dataset.getFsVolumeReferences()) {
+      FsVolumeImpl volume = (FsVolumeImpl) volRef.get(0);
+      FinalizedReplica finalized = new FinalizedReplica(
+          block.getLocalBlock(),
+          volume,
+          volume.getFinalizedDir(block.getBlockPoolId()));
+      File blockFile = finalized.getBlockFile();
+      if (!blockFile.createNewFile()) {
+        throw new FileExistsException(
+            "Block file " + blockFile + " already exists.");
+      }
+      File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
+      if (!metaFile.createNewFile()) {
+        throw new FileExistsException(
+            "Meta file " + metaFile + " already exists."
+        );
+      }
+    }
+  }
 }
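
Why the injected replica reads as corrupt: the implementation creates a zero-length block file plus a zero-length meta file stamped with generation 1000 in the first volume's finalized directory, so the replica can never validate against real data. Assuming the standard HDFS file-naming convention (an editorial note, not part of the patch), the resulting pair looks like this:

    // Hypothetical on-disk result for, say, block ID 1073741825:
    //   blk_1073741825            (empty block file)
    //   blk_1073741825_1000.meta  (empty meta file, genstamp 1000)
    String blockName = "blk_" + block.getBlockId();
    String metaName = blockName + "_1000.meta";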