From 9cb6d291ea137a02aa899fe3dd022e96be71f77e Mon Sep 17 00:00:00 2001
From: Lei Xu <lei@apache.org>
Date: Mon, 8 Aug 2016 15:54:12 -0700
Subject: [PATCH] HDFS-10457. DataNode should not auto-format block pool
 directory if VERSION is missing. (Wei-Chiu Chuang via lei)

(cherry picked from commit bb3bcb9397593fc8a2fa63a48eba126609f72c42)
---
 .../datanode/BlockPoolSliceStorage.java       |  2 +-
 .../TestDataNodeVolumeFailureReporting.java   | 20 +++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 90a46698a86..fd90ae921aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -151,7 +151,7 @@ public class BlockPoolSliceStorage extends Storage {
       throws IOException {
     StorageDirectory sd = new StorageDirectory(dataDir, null, true);
     try {
-      StorageState curState = sd.analyzeStorage(startOpt, this);
+      StorageState curState = sd.analyzeStorage(startOpt, this, true);
       // sd is locked but not opened
       switch (curState) {
       case NORMAL:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index c76fa2cdffa..2a2fc4a4524 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
@@ -480,6 +481,25 @@ public class TestDataNodeVolumeFailureReporting {
     checkFailuresAtNameNode(dm, dns.get(0), false, dn1Vol1.getAbsolutePath());
   }
 
+  @Test
+  public void testAutoFormatEmptyBlockPoolDirectory() throws Exception {
+    // remove the version file
+    DataNode dn = cluster.getDataNodes().get(0);
+    String bpid = cluster.getNamesystem().getBlockPoolId();
+    BlockPoolSliceStorage bps = dn.getStorage().getBPStorage(bpid);
+    Storage.StorageDirectory dir = bps.getStorageDir(0);
+    File current = dir.getCurrentDir();
+
+    File currentVersion = new File(current, "VERSION");
+    currentVersion.delete();
+    // restart the data node
+    assertTrue(cluster.restartDataNodes(true));
+    // the DN should tolerate one volume failure.
+    cluster.waitActive();
+    assertFalse("DataNode should not reformat if VERSION is missing",
+        currentVersion.exists());
+  }
+
   /**
    * Checks the NameNode for correct values of aggregate counters tracking failed
    * volumes across all DataNodes.