diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 18f9f103be4..86ff6c4a0f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -812,6 +812,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
 
+    HDFS-9312. Fix TestReplication to be FsDataset-agnostic. (lei)
+
   BUG FIXES
 
     HDFS-8091: ACLStatus and XAttributes should be presented to
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 3297c67f55c..df8c394bdbc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -20,22 +20,14 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import com.google.common.base.Supplier;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
-import java.nio.file.FileVisitResult;
-import java.nio.file.Files;
-import java.nio.file.SimpleFileVisitor;
-import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Random;
@@ -52,7 +44,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -64,6 +55,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -376,7 +368,7 @@ public class TestReplication {
     for (int i=0; i<buffer.length; i++) {
+      List<MaterializedReplica> replicas = new ArrayList<>();
+      for (int dnIndex=0; dnIndex<3; dnIndex++) {
+        replicas.add(cluster.getMaterializedReplica(dnIndex, block));
       }
-
+      assertEquals(3, replicas.size());
+
+      cluster.shutdown();
+
       int fileCount = 0;
       // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
-      for (int dnIndex=0; dnIndex<3; dnIndex++) {
-        File blockFile = cluster.getBlockFile(dnIndex, block);
-        LOG.info("Checking for file " + blockFile);
-
-        if (blockFile != null && blockFile.exists()) {
-          if (fileCount == 0) {
-            LOG.info("Deleting file " + blockFile);
-            assertTrue(blockFile.delete());
-          } else {
-            // corrupt it.
-            LOG.info("Corrupting file " + blockFile);
-            long len = blockFile.length();
-            assertTrue(len > 50);
-            RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
-            try {
-              blockOut.seek(len/3);
-              blockOut.write(buffer, 0, 25);
-            } finally {
-              blockOut.close();
-            }
-          }
-          fileCount++;
+      for (MaterializedReplica replica : replicas) {
+        if (fileCount == 0) {
+          LOG.info("Deleting block " + replica);
+          replica.deleteData();
+        } else {
+          // corrupt it.
+          LOG.info("Corrupting file " + replica);
+          replica.corruptData();
         }
+        fileCount++;
       }
-      assertEquals(3, fileCount);
-
+
       /* Start the MiniDFSCluster with more datanodes since once a writeBlock
        * to a datanode node fails, same block can not be written to it
        * immediately. In our case some replication attempts will fail.
@@ -539,63 +519,28 @@ public class TestReplication {
           DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      FSDataOutputStream create = fs.create(new Path("/test"));
-      fs.setReplication(new Path("/test"), (short) 1);
+      Path filePath = new Path("/test");
+      FSDataOutputStream create = fs.create(filePath);
+      fs.setReplication(filePath, (short) 1);
       create.write(new byte[1024]);
       create.close();
-      List<File> nonParticipatedNodeDirs = new ArrayList<File>();
-      File participatedNodeDirs = null;
-      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
-        File storageDir = cluster.getInstanceStorageDir(i, 0);
-        String bpid = cluster.getNamesystem().getBlockPoolId();
-        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
-        if (data_dir.listFiles().length == 0) {
-          nonParticipatedNodeDirs.add(data_dir);
-        } else {
-          assertNull("participatedNodeDirs has already been set.",
-              participatedNodeDirs);
-          participatedNodeDirs = data_dir;
-        }
-      }
-      assertEquals(2, nonParticipatedNodeDirs.size());
-
-      String blockFile = null;
-      final List<File> listFiles = new ArrayList<>();
-      Files.walkFileTree(participatedNodeDirs.toPath(),
-          new SimpleFileVisitor<java.nio.file.Path>() {
-            @Override
-            public FileVisitResult visitFile(
-                java.nio.file.Path file, BasicFileAttributes attrs)
-                throws IOException {
-              listFiles.add(file.toFile());
-              return FileVisitResult.CONTINUE;
-            }
-          }
-      );
-      assertFalse(listFiles.isEmpty());
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
 
       int numReplicaCreated = 0;
-      for (File file : listFiles) {
-        if (file.getName().startsWith(Block.BLOCK_FILE_PREFIX)
-            && !file.getName().endsWith("meta")) {
-          blockFile = file.getName();
-          for (File file1 : nonParticipatedNodeDirs) {
-            file1.mkdirs();
-            new File(file1, blockFile).createNewFile();
-            new File(file1, blockFile + "_1000.meta").createNewFile();
-            numReplicaCreated++;
-          }
-          break;
+      for (final DataNode dn : cluster.getDataNodes()) {
+        if (!dn.getFSDataset().contains(block)) {
+          cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
+          numReplicaCreated++;
         }
       }
       assertEquals(2, numReplicaCreated);
 
-      fs.setReplication(new Path("/test"), (short) 3);
+      fs.setReplication(filePath, (short) 3);
       cluster.restartDataNodes(); // Lets detect all DNs about dummy copied
       // blocks
       cluster.waitActive();
       cluster.triggerBlockReports();
-      DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
+      DFSTestUtil.waitReplication(fs, filePath, (short) 3);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index eb986ff107f..40c4438c41d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -199,4 +199,11 @@
    * @throws IOException
    */
   void checkStoredReplica(final Replica replica) throws IOException;
+
+  /**
+   * Create dummy replicas for block data and metadata.
+   * @param block the block of which replica to be created.
+   * @throws IOException on I/O error.
+   */
+  void injectCorruptReplica(ExtendedBlock block) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index ed32faec792..e8e45323c0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.io.FileExistsException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -292,4 +293,28 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
     ReplicaInfo r = (ReplicaInfo) replica;
     FsDatasetImpl.checkReplicaFiles(r);
   }
+
+  @Override
+  public void injectCorruptReplica(ExtendedBlock block) throws IOException {
+    Preconditions.checkState(!dataset.contains(block),
+        "Block " + block + " already exists on dataset.");
+    try (FsVolumeReferences volRef = dataset.getFsVolumeReferences()) {
+      FsVolumeImpl volume = (FsVolumeImpl) volRef.get(0);
+      FinalizedReplica finalized = new FinalizedReplica(
+          block.getLocalBlock(),
+          volume,
+          volume.getFinalizedDir(block.getBlockPoolId()));
+      File blockFile = finalized.getBlockFile();
+      if (!blockFile.createNewFile()) {
+        throw new FileExistsException(
+            "Block file " + blockFile + " already exists.");
+      }
+      File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
+      if (!metaFile.createNewFile()) {
+        throw new FileExistsException(
+            "Meta file " + metaFile + " already exists."
+        );
+      }
+    }
+  }
 }