diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index 7693981a730..e23808de48b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -77,4 +77,7 @@
 
     HDFS-6932. Balancer and Mover tools should ignore replicas on RAM_DISK.
     (Xiaoyu Yao via Arpit Agarwal)
 
+    HDFS-7144. Fix findbugs warnings in RamDiskReplicaTracker. (szetszwo via
+    Arpit Agarwal)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
index 0899e703a96..78080034ae8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
@@ -40,6 +40,16 @@ public class RamDiskReplicaLruTracker extends RamDiskReplicaTracker {
     private RamDiskReplicaLru(String bpid, long blockId,
                               FsVolumeImpl ramDiskVolume) {
       super(bpid, blockId, ramDiskVolume);
     }
+
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object other) {
+      return super.equals(other);
+    }
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
index 03fc0680274..24014247377 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
@@ -19,6 +19,9 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Preconditions;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +34,7 @@ import java.io.File;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public abstract class RamDiskReplicaTracker {
+  static final Log LOG = LogFactory.getLog(RamDiskReplicaTracker.class);
 
   FsDatasetImpl fsDataset;
@@ -117,18 +121,18 @@ public abstract class RamDiskReplicaTracker {
     // Delete the saved meta and block files. Failure to delete can be
     // ignored, the directory scanner will retry the deletion later.
     void deleteSavedFiles() {
-      try {
-        if (savedBlockFile != null) {
-          savedBlockFile.delete();
-          savedBlockFile = null;
+      if (savedBlockFile != null) {
+        if (!savedBlockFile.delete()) {
+          LOG.warn("Failed to delete block file " + savedBlockFile);
         }
+        savedBlockFile = null;
+      }
 
-        if (savedMetaFile != null) {
-          savedMetaFile.delete();
-          savedMetaFile = null;
+      if (savedMetaFile != null) {
+        if (!savedMetaFile.delete()) {
+          LOG.warn("Failed to delete meta file " + savedMetaFile);
         }
-      } catch (Throwable t) {
-        // Ignore any exceptions.
+        savedMetaFile = null;
       }
     }
 