From 6bb27f0ff6a52d01c423d2c9fa9647f188c941b8 Mon Sep 17 00:00:00 2001
From: arp
Date: Wed, 24 Sep 2014 20:13:30 -0700
Subject: [PATCH] HDFS-7143. Fix findbugs warnings in HDFS-6581 branch.
 (Contributed by Tsz Wo Nicholas Sze)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
---
 .../fsdataset/impl/BlockPoolSlice.java | 50 ++++++++++++++-----
 1 file changed, 38 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index bfa1772dd42..3f58d384b7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -29,6 +29,8 @@
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
@@ -43,6 +45,7 @@
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -57,6 +60,8 @@
  * This class is synchronized by {@link FsVolumeImpl}.
  */
 class BlockPoolSlice {
+  static final Log LOG = LogFactory.getLog(BlockPoolSlice.class);
+
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
   private final File currentDir; // StorageDirectory/current/bpid/current
@@ -375,22 +380,36 @@ private int moveLazyPersistReplicasToFinalized(File source)
       File targetDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
 
       if (blockFile.exists()) {
-        File targetBlockFile = new File(targetDir, blockFile.getName());
-        File targetMetaFile = new File(targetDir, metaFile.getName());
 
         if (!targetDir.exists() && !targetDir.mkdirs()) {
-          FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + targetDir);
+          LOG.warn("Failed to mkdirs " + targetDir);
           continue;
         }
 
-        metaFile.renameTo(targetMetaFile);
-        blockFile.renameTo(targetBlockFile);
+        final File targetMetaFile = new File(targetDir, metaFile.getName());
+        try {
+          NativeIO.renameTo(metaFile, targetMetaFile);
+        } catch (IOException e) {
+          LOG.warn("Failed to move meta file from "
+              + metaFile + " to " + targetMetaFile, e);
+          continue;
+
+        }
+
+        final File targetBlockFile = new File(targetDir, blockFile.getName());
+        try {
+          NativeIO.renameTo(blockFile, targetBlockFile);
+        } catch (IOException e) {
+          LOG.warn("Failed to move block file from "
+              + blockFile + " to " + targetBlockFile, e);
+          continue;
+        }
 
         if (targetBlockFile.exists() && targetMetaFile.exists()) {
           ++numRecovered;
         } else {
           // Failure should be rare.
-          FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + targetDir);
+          LOG.warn("Failed to move " + blockFile + " to " + targetDir);
         }
       }
     }
@@ -544,16 +563,23 @@ ReplicaInfo resolveDuplicateReplicas(
       replicaToDelete = (replicaToKeep == replica1) ?
           replica2 : replica1;
 
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
+            + ". Will try to delete " + replicaToDelete);
+      }
+
       // Update volumeMap.
       volumeMap.add(bpid, replicaToKeep);
 
       // Delete the files on disk. Failure here is okay.
-      replicaToDelete.getBlockFile().delete();
-      replicaToDelete.getMetaFile().delete();
-
-      FsDatasetImpl.LOG.info(
-          "resolveDuplicateReplicas keeping " + replicaToKeep.getBlockFile() +
-          ", deleting " + replicaToDelete.getBlockFile());
+      final File blockFile = replicaToDelete.getBlockFile();
+      if (!blockFile.delete()) {
+        LOG.warn("Failed to delete block file " + blockFile);
+      }
+      final File metaFile = replicaToDelete.getMetaFile();
+      if (!metaFile.delete()) {
+        LOG.warn("Failed to delete meta file " + metaFile);
+      }
 
       return replicaToKeep;
     }