HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad VERSION file. Contributed by Vinay and Andrew Wang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1430038 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Aaron Myers 2013-01-07 21:50:28 +00:00
parent 314e79bc2d
commit 4e9f524ee3
3 changed files with 48 additions and 1 deletions

View File

@ -357,6 +357,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-4302. Fix fatal exception when starting NameNode with DEBUG logs
(Eugene Koontz via todd)
HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad
VERSION file. (Vinay and Andrew Wang via atm)
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.

View File

@ -78,6 +78,10 @@ public class BlockPoolSliceStorage extends Storage {
this.clusterID = clusterId;
}
/**
 * No-arg constructor used internally when this class is instantiated purely
 * to read storage properties (e.g. the VERSION file of a "previous" directory
 * during rollback) rather than to manage a live block-pool slice.
 * Private so external callers must go through the public constructor that
 * supplies namespace/cluster identifiers.
 */
private BlockPoolSliceStorage() {
super(NodeType.DATA_NODE); // registers this storage as DataNode-type storage
}
/**
* Analyze storage directories. Recover from previous transitions if required.
*
@ -378,7 +382,7 @@ public class BlockPoolSliceStorage extends Storage {
if (!prevDir.exists())
return;
// read attributes out of the VERSION file of previous directory
DataStorage prevInfo = new DataStorage();
BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
prevInfo.readPreviousVersionProperties(bpSd);
// We allow rollback to a state, which is either consistent with

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
@ -31,6 +32,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
@ -176,6 +178,44 @@ public class TestDFSRollback {
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("Normal BlockPool rollback", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.ROLLBACK)
.build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current",
UpgradeUtilities.getCurrentBlockPoolID(cluster));
// Create a previous snapshot for the blockpool
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
UpgradeUtilities.getCurrentBlockPoolID(cluster));
// Older LayoutVersion to make it rollback
storageInfo = new StorageInfo(
UpgradeUtilities.getCurrentLayoutVersion()+1,
UpgradeUtilities.getCurrentNamespaceID(cluster),
UpgradeUtilities.getCurrentClusterID(cluster),
UpgradeUtilities.getCurrentFsscTime(cluster));
// Create old VERSION file for each data dir
for (int i=0; i<dataNodeDirs.length; i++) {
Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/"
+ UpgradeUtilities.getCurrentBlockPoolID(cluster));
UpgradeUtilities.createBlockPoolVersionFile(
new File(bpPrevPath.toString()),
storageInfo,
UpgradeUtilities.getCurrentBlockPoolID(cluster));
}
cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
assertTrue(cluster.isDataNodeUp());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode rollback without existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");