HDFS-13509. Bug fix for breakHardlinks() of ReplicaInfo/LocalReplica, and fix TestFileAppend failures on Windows. Contributed by Xiao Liang.

Inigo Goiri 2018-04-28 09:07:56 -07:00
parent c8441811fd
commit c0c788aafc
2 changed files with 53 additions and 29 deletions

LocalReplica.java

@@ -240,19 +240,22 @@ private void breakHardlinks(File file, Block b) throws IOException {
     final FileIoProvider fileIoProvider = getFileIoProvider();
     final File tmpFile = DatanodeUtil.createFileWithExistsCheck(
         getVolume(), b, DatanodeUtil.getUnlinkTmpFile(file), fileIoProvider);
-    try (FileInputStream in = fileIoProvider.getFileInputStream(
-        getVolume(), file)) {
-      try (FileOutputStream out = fileIoProvider.getFileOutputStream(
-          getVolume(), tmpFile)) {
-        IOUtils.copyBytes(in, out, 16 * 1024);
-      }
-      if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-            " into file " + tmpFile +
-            " resulted in a size of " + tmpFile.length());
-      }
+    try {
+      try (FileInputStream in = fileIoProvider.getFileInputStream(
+          getVolume(), file)) {
+        try (FileOutputStream out = fileIoProvider.getFileOutputStream(
+            getVolume(), tmpFile)) {
+          IOUtils.copyBytes(in, out, 16 * 1024);
+        }
+        if (file.length() != tmpFile.length()) {
+          throw new IOException("Copy of file " + file + " size "
+              + file.length() + " into file " + tmpFile
+              + " resulted in a size of " + tmpFile.length());
+        }
+      }
       fileIoProvider.replaceFile(getVolume(), tmpFile, file);
     } catch (IOException e) {
+      DataNode.LOG.error("Cannot breakHardlinks for file " + file, e);
       if (!fileIoProvider.delete(getVolume(), tmpFile)) {
         DataNode.LOG.info("detachFile failed to delete temporary file " +
             tmpFile);
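
Why this hunk fixes the Windows failure: in the old code, replaceFile() ran inside the try-with-resources that still held an open stream on the source file, and Windows refuses to replace a file while a handle to it is open (POSIX filesystems allow it, which is why the bug never surfaced on Linux). The new code closes the stream first, then replaces the file, and also logs the error before cleaning up the temporary file. Below is a minimal standalone sketch of that platform difference; it is illustrative only, with an invented class name and temp files, not code from the patch:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class ReplaceWhileOpenDemo {
  public static void main(String[] args) throws IOException {
    File target = File.createTempFile("block", ".data");
    File replacement = File.createTempFile("block", ".unlinked");
    try (FileInputStream in = new FileInputStream(target)) {
      // Replace 'target' while 'in' still holds it open. On Linux this
      // succeeds; on Windows it typically fails (AccessDeniedException)
      // because the open handle blocks the replacement. That is the trap
      // breakHardlinks() hit until the patch moved replaceFile() outside
      // the try-with-resources block.
      Files.move(replacement.toPath(), target.toPath(),
          StandardCopyOption.REPLACE_EXISTING);
    }
  }
}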

TestFileAppend.java

@@ -55,6 +55,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -120,7 +121,9 @@ private void checkFile(DistributedFileSystem fileSys, Path name, int repl)
   @Test
   public void testBreakHardlinksIfNeeded() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
         cluster.getNameNodePort());
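
Every test hunk below repeats this same change: instead of letting MiniDFSCluster default to the shared test data directory, each test builds its cluster on a freshly randomized base directory, so repeated or concurrent runs cannot collide on the same on-disk paths (stale directories and held file locks being a likely cause of the Windows failures this commit targets). A minimal sketch of the pattern as a standalone snippet, with an invented class name and a single DataNode purely for illustration:

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;

public class RandomizedBaseDirDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Each run gets its own randomized base directory, so no run ever
    // reuses a dirty or still-locked directory from a previous run.
    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // ... exercise cluster.getFileSystem() as the tests below do ...
    } finally {
      cluster.shutdown();
    }
  }
}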
@@ -186,7 +189,9 @@ public void testBreakHardlinksIfNeeded() throws IOException {
   public void testSimpleFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
@@ -239,7 +244,9 @@ public void testSimpleFlush() throws IOException {
   public void testComplexFlush() throws IOException {
     Configuration conf = new HdfsConfiguration();
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
@@ -286,7 +293,9 @@ public void testComplexFlush() throws IOException {
   @Test(expected = FileNotFoundException.class)
   public void testFileNotFound() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     FileSystem fs = cluster.getFileSystem();
     try {
       Path file1 = new Path("/nonexistingfile.dat");
@@ -301,7 +310,9 @@ public void testFileNotFound() throws IOException {
   @Test
   public void testAppendTwice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final FileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -340,7 +351,9 @@ public void testAppendTwice() throws Exception {
   @Test
   public void testAppend2Twice() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     final DistributedFileSystem fs1 = cluster.getFileSystem();
     final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
     try {
@@ -386,8 +399,9 @@ public void testMultipleAppends() throws Exception {
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
         false);
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        builderBaseDir).numDataNodes(4).build();
     final DistributedFileSystem fs = cluster.getFileSystem();
     try {
       final Path p = new Path("/testMultipleAppend/foo");
@@ -439,8 +453,9 @@ public void testAppendAfterSoftLimit()
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
@@ -479,8 +494,9 @@ public void testAppend2AfterSoftLimit() throws Exception {
     final long softLimit = 1L;
     final long hardLimit = 9999999L;
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.setLeasePeriod(softLimit, hardLimit);
     cluster.waitActive();
@@ -526,8 +542,9 @@ public void testFailedAppendBlockRejection() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     try {
       fs = cluster.getFileSystem();
@@ -541,7 +558,7 @@ public void testFailedAppendBlockRejection() throws Exception {
       String dnAddress = dnProp.datanode.getXferAddress().toString();
       if (dnAddress.startsWith("/")) {
        dnAddress = dnAddress.substring(1);
-      } 
+      }
 
       // append again to bump genstamps
       for (int i = 0; i < 2; i++) {
@@ -579,8 +596,9 @@ public void testMultiAppend2() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
         "false");
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(3).build();
     DistributedFileSystem fs = null;
     final String hello = "hello\n";
     try {
@@ -651,8 +669,9 @@ public void testAppendCorruptedBlock() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     try {
       DistributedFileSystem fs = cluster.getFileSystem();
       Path fileName = new Path("/appendCorruptBlock");
@@ -677,7 +696,9 @@ public void testConcurrentAppendRead()
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
     conf.setInt("dfs.min.replication", 1);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .build();
     try {
       cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);