HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (Contributed by Tsz Wo Nicholas Sze)

commit feda4733a8
parent b1000fbba4
Author: arp
Date: 2014-09-24 20:13:30 -07:00

2 changed files with 40 additions and 12 deletions

File: CHANGES.txt

@@ -71,4 +71,6 @@
     HDFS-6990. Add unit test for evict/delete RAM_DISK block with open
     handle. (Xiaoyu Yao via Arpit Agarwal)
 
+    HDFS-7143. Fix findbugs warnings in HDFS-6581 branch. (szetszwo via
+    Arpit Agarwal)

File: BlockPoolSlice.java

@@ -29,6 +29,8 @@ import java.io.RandomAccessFile;
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -57,6 +60,8 @@ import org.apache.hadoop.util.Time;
  * This class is synchronized by {@link FsVolumeImpl}.
  */
 class BlockPoolSlice {
+  static final Log LOG = LogFactory.getLog(BlockPoolSlice.class);
+
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
   private final File currentDir; // StorageDirectory/current/bpid/current
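
The two import hunks and the new field above give BlockPoolSlice its own commons-logging logger instead of borrowing FsDatasetImpl.LOG. A minimal sketch of the same pattern, where the class name Example is a hypothetical stand-in:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class Example {
  // One static logger per class, as the hunk above adds to BlockPoolSlice.
  static final Log LOG = LogFactory.getLog(Example.class);

  void run() {
    // Guard debug output so the message string is only built when
    // debug logging is enabled (the same guard a later hunk uses).
    if (LOG.isDebugEnabled()) {
      LOG.debug("run() invoked");
    }
  }
}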
@@ -369,22 +374,36 @@ class BlockPoolSlice {
       File targetDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
 
       if (blockFile.exists()) {
-        File targetBlockFile = new File(targetDir, blockFile.getName());
-        File targetMetaFile = new File(targetDir, metaFile.getName());
 
         if (!targetDir.exists() && !targetDir.mkdirs()) {
-          FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + targetDir);
+          LOG.warn("Failed to mkdirs " + targetDir);
           continue;
         }
 
-        metaFile.renameTo(targetMetaFile);
-        blockFile.renameTo(targetBlockFile);
+        final File targetMetaFile = new File(targetDir, metaFile.getName());
+        try {
+          NativeIO.renameTo(metaFile, targetMetaFile);
+        } catch (IOException e) {
+          LOG.warn("Failed to move meta file from "
+              + metaFile + " to " + targetMetaFile, e);
+          continue;
+        }
+
+        final File targetBlockFile = new File(targetDir, blockFile.getName());
+        try {
+          NativeIO.renameTo(blockFile, targetBlockFile);
+        } catch (IOException e) {
+          LOG.warn("Failed to move block file from "
+              + blockFile + " to " + targetBlockFile, e);
+          continue;
+        }
 
         if (targetBlockFile.exists() && targetMetaFile.exists()) {
           ++numRecovered;
         } else {
           // Failure should be rare.
-          FsDatasetImpl.LOG.warn("Failed to move " + blockFile + " to " + targetDir);
+          LOG.warn("Failed to move " + blockFile + " to " + targetDir);
         }
       }
     }
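
This hunk addresses the ignored-return-value pattern findbugs flags: File#renameTo reports failure only through its boolean result, which the old code discarded, whereas NativeIO.renameTo throws IOException on failure and so cannot fail silently. A self-contained sketch of the same idea, using the standard library's Files.move as a stand-in for NativeIO.renameTo (the class and helper names here are hypothetical):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

class RenameExample {
  // Prefer a rename that throws on failure over File#renameTo,
  // whose boolean result is easy to drop by accident.
  static boolean tryMove(File src, File dst) {
    try {
      Files.move(src.toPath(), dst.toPath()); // throws IOException on failure
      return true;
    } catch (IOException e) {
      System.err.println("Failed to move " + src + " to " + dst + ": " + e);
      return false; // lets the caller skip this block, like the diff's continue
    }
  }
}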
@@ -538,16 +557,23 @@ class BlockPoolSlice {
     replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
 
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
+          + ". Will try to delete " + replicaToDelete);
+    }
+
     // Update volumeMap.
     volumeMap.add(bpid, replicaToKeep);
 
     // Delete the files on disk. Failure here is okay.
-    replicaToDelete.getBlockFile().delete();
-    replicaToDelete.getMetaFile().delete();
-
-    FsDatasetImpl.LOG.info(
-        "resolveDuplicateReplicas keeping " + replicaToKeep.getBlockFile() +
-        ", deleting " + replicaToDelete.getBlockFile());
+    final File blockFile = replicaToDelete.getBlockFile();
+    if (!blockFile.delete()) {
+      LOG.warn("Failed to delete block file " + blockFile);
+    }
+    final File metaFile = replicaToDelete.getMetaFile();
+    if (!metaFile.delete()) {
+      LOG.warn("Failed to delete meta file " + metaFile);
+    }
 
     return replicaToKeep;
   }
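
The final hunk fixes the same class of warning for File#delete, whose boolean result the old code ignored. A short sketch of the check-and-warn pattern (class and method names hypothetical):

import java.io.File;

class DeleteExample {
  // File#delete returns false instead of throwing, so the result
  // must be checked explicitly, as the hunk above now does.
  static void deleteWithWarning(File f) {
    if (!f.delete()) {
      System.err.println("Failed to delete " + f);
    }
  }
}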