From c0c788aafc892373b23ae0c7905d913fd788c3a6 Mon Sep 17 00:00:00 2001
From: Inigo Goiri
Date: Sat, 28 Apr 2018 09:07:56 -0700
Subject: [PATCH] HDFS-13509. Bug fix for breakHardlinks() of
 ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows.
 Contributed by Xiao Liang.

---
 .../hdfs/server/datanode/ReplicaInfo.java  | 21 ++++---
 .../apache/hadoop/hdfs/TestFileAppend.java | 61 +++++++++++++------
 2 files changed, 53 insertions(+), 29 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 9817f97d96c..f3f6db11ccd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -240,19 +240,22 @@ private void breakHardlinks(File file, Block b) throws IOException {
     final FileIoProvider fileIoProvider = getFileIoProvider();
     final File tmpFile = DatanodeUtil.createFileWithExistsCheck(
         getVolume(), b, DatanodeUtil.getUnlinkTmpFile(file), fileIoProvider);
-    try (FileInputStream in = fileIoProvider.getFileInputStream(
+    try {
+      try (FileInputStream in = fileIoProvider.getFileInputStream(
         getVolume(), file)) {
-      try (FileOutputStream out = fileIoProvider.getFileOutputStream(
-          getVolume(), tmpFile)) {
-        IOUtils.copyBytes(in, out, 16 * 1024);
-      }
-      if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-            " into file " + tmpFile +
-            " resulted in a size of " + tmpFile.length());
+        try (FileOutputStream out = fileIoProvider.getFileOutputStream(
+            getVolume(), tmpFile)) {
+          IOUtils.copyBytes(in, out, 16 * 1024);
+        }
+        if (file.length() != tmpFile.length()) {
+          throw new IOException("Copy of file " + file + " size "
+              + file.length() + " into file " + tmpFile
+              + " resulted in a size of " + tmpFile.length());
+        }
       }
       fileIoProvider.replaceFile(getVolume(), tmpFile, file);
     } catch (IOException e) {
+      DataNode.LOG.error("Cannot breakHardlinks for file " + file, e);
       if (!fileIoProvider.delete(getVolume(), tmpFile)) {
         DataNode.LOG.info("detachFile failed to delete temporary file " +
             tmpFile);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index fbf09fbef02..59cc31a483c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -120,7 +121,9 @@ private void checkFile(DistributedFileSystem fileSys, Path name, int repl)
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
         cluster.getNameNodePort());
@@ -186,7 +189,9 @@ public void testBreakHardlinksIfNeeded() throws IOException {
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();

     try {
@@ -239,7 +244,9 @@ public void testSimpleFlush() throws IOException {
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();

     try {
@@ -286,7 +293,9 @@ public void testComplexFlush() throws IOException {
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     try {
       Path file1 = new Path("/nonexistingfile.dat");
@@ -301,7 +310,9 @@ public void testFileNotFound() throws IOException {
   @Test
   public void testAppendTwice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final FileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -340,7 +351,9 @@ public void testAppendTwice() throws Exception {
   @Test
   public void testAppend2Twice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final DistributedFileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -386,8 +399,9 @@ public void testMultipleAppends() throws Exception {
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
         false);

-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        builderBaseDir).numDataNodes(4).build();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       final Path p = new Path("/testMultipleAppend/foo");
@@ -439,8 +453,9 @@ public void testAppendAfterSoftLimit()
     final long softLimit = 1L;
     final long hardLimit = 9999999L;

-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();

@@ -479,8 +494,9 @@ public void testAppend2AfterSoftLimit() throws Exception {
     final long softLimit = 1L;
     final long hardLimit = 9999999L;

-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();

@@ -526,8 +542,9 @@ public void testFailedAppendBlockRejection() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     try {
       fs = cluster.getFileSystem();
@@ -541,7 +558,7 @@ public void testFailedAppendBlockRejection() throws Exception {
       String dnAddress = dnProp.datanode.getXferAddress().toString();
       if (dnAddress.startsWith("/")) {
         dnAddress = dnAddress.substring(1);
-}
+      }

       // append again to bump genstamps
       for (int i = 0; i < 2; i++) {
@@ -579,8 +596,9 @@ public void testMultiAppend2() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     final String hello = "hello\n";
     try {
@@ -651,8 +669,9 @@ public void testAppendCorruptedBlock() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     try {
       DistributedFileSystem fs = cluster.getFileSystem();
       Path fileName = new Path("/appendCorruptBlock");
@@ -677,7 +696,9 @@ public void testConcurrentAppendRead()
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);

-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
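
Note on the ReplicaInfo change: in the old code, fileIoProvider.replaceFile()
ran inside the try-with-resources block, while the FileInputStream on the
source replica was still open. Windows refuses to replace a file that has an
open handle, so breakHardlinks() failed there. The patch closes the input
stream first and only then swaps the files. A minimal standalone sketch of
the same ordering using plain JDK I/O (illustrative only; the class and file
names are invented here, and this is not the HDFS FileIoProvider API):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.StandardCopyOption;

    public class ReplaceAfterClose {
      public static void main(String[] args) throws IOException {
        File src = File.createTempFile("replica", ".dat");
        File tmp = File.createTempFile("replica", ".tmp");
        Files.write(tmp.toPath(), "copied contents".getBytes());

        // Read the source inside try-with-resources so the handle is
        // closed as soon as the block exits ...
        try (FileInputStream in = new FileInputStream(src)) {
          while (in.read() != -1) {
            // consume the stream
          }
        }
        // ... and only then replace the file. Performing this move while
        // the stream above is still open is exactly what fails on Windows.
        Files.move(tmp.toPath(), src.toPath(),
            StandardCopyOption.REPLACE_EXISTING);
      }
    }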
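
Note on the TestFileAppend changes: every test previously built its
MiniDFSCluster on the shared default base directory, so on Windows a data
directory still held open by a previous test's DataNode could not be reused
or deleted, and later tests failed. The patch gives each cluster its own
path via GenericTestUtils.getRandomizedTempPath() and the
MiniDFSCluster.Builder(Configuration, File) constructor, both of which
appear in the diff above. A minimal sketch of the pattern in isolation (the
class name and single-DataNode setup are arbitrary choices for illustration):

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.test.GenericTestUtils;

    public class RandomizedBaseDirExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // A unique temp dir per run, so no two clusters (or reruns of the
        // same test) collide on the same data directories.
        File baseDir = new File(GenericTestUtils.getRandomizedTempPath());
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf, baseDir).numDataNodes(1).build();
        try {
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }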