HDFS-6023. Test whether the standby NN continues to checkpoint after the prepare stage. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1572337 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-02-26 23:17:53 +00:00
parent 9cc0d5d497
commit ccf0744243
3 changed files with 55 additions and 3 deletions

@@ -108,3 +108,6 @@ HDFS-5535 subtasks:
 
     HDFS-6019. Standby NN might not checkpoint when processing the rolling
     upgrade marker. (Haohui Mai via jing9)
+
+    HDFS-6023. Test whether the standby NN continues to checkpoint after the
+    prepare stage. (Haohui Mai via jing9)

@@ -676,7 +676,6 @@ public static String getRollbackImageFileName(long txid) {
     return getNameNodeFileName(NameNodeFile.IMAGE_ROLLBACK, txid);
   }
 
-  @VisibleForTesting
   private static String getNameNodeFileName(NameNodeFile nnf, long txid) {
     return String.format("%s_%019d", nnf.getName(), txid);
   }

@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.junit.Assert;
 import org.junit.Test;
@@ -467,17 +468,66 @@ public void testQuery() throws Exception {
     }
   }
 
+  @Test(timeout = 300000)
+  public void testCheckpoint() throws IOException, InterruptedException {
+    final Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
+
+    MiniQJMHACluster cluster = null;
+    final Path foo = new Path("/foo");
+    try {
+      cluster = new MiniQJMHACluster.Builder(conf).build();
+      MiniDFSCluster dfsCluster = cluster.getDfsCluster();
+      dfsCluster.waitActive();
+
+      dfsCluster.transitionToActive(0);
+      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
+
+      // start rolling upgrade
+      RollingUpgradeInfo info = dfs
+          .rollingUpgrade(RollingUpgradeAction.PREPARE);
+      Assert.assertTrue(info.isStarted());
+
+      queryForPreparation(dfs);
+
+      dfs.mkdirs(foo);
+      long txid = dfs.rollEdits();
+      Assert.assertTrue(txid > 0);
+
+      int retries = 0;
+      while (++retries < 5) {
+        NNStorage storage = dfsCluster.getNamesystem(1).getFSImage()
+            .getStorage();
+        if (storage.getFsImageName(txid - 1) != null) {
+          return;
+        }
+        Thread.sleep(1000);
+      }
+
+      Assert.fail("new checkpoint does not exist");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   static void queryForPreparation(DistributedFileSystem dfs) throws IOException,
       InterruptedException {
     RollingUpgradeInfo info;
     int retries = 0;
-    while (retries < 10) {
+    while (++retries < 10) {
       info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
       if (info.createdRollbackImages()) {
         break;
       }
       Thread.sleep(1000);
-      ++retries;
     }
+
+    if (retries >= 10) {
+      Assert.fail("Query return false");
+    }
   }
 }
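The heart of the new test is the retry loop that polls the standby NameNode's storage until an fsimage covering the rolled transaction id appears. Below is a minimal standalone sketch of that pattern, reusing only APIs that already appear in the diff above (MiniDFSCluster.getNamesystem, FSImage.getStorage, NNStorage.getFsImageName); the class and method names StandbyCheckpointWait and waitForCheckpoint are illustrative and not part of this commit.

import java.io.File;

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.junit.Assert;

/** Illustrative helper only; not part of this commit. */
final class StandbyCheckpointWait {
  private StandbyCheckpointWait() {}

  /**
   * Poll the storage of the NameNode at {@code nnIndex} until an fsimage
   * named for {@code txid} shows up, mirroring the loop in testCheckpoint.
   */
  static void waitForCheckpoint(MiniDFSCluster dfsCluster, int nnIndex,
      long txid) throws InterruptedException {
    NNStorage storage = dfsCluster.getNamesystem(nnIndex).getFSImage()
        .getStorage();
    for (int retries = 0; retries < 5; retries++) {
      File image = storage.getFsImageName(txid);
      if (image != null) {
        return;             // the standby has checkpointed up to txid
      }
      Thread.sleep(1000);   // give the standby checkpointer time to run
    }
    Assert.fail("new checkpoint does not exist");
  }
}

In testCheckpoint above, the equivalent call would pass dfsCluster, 1 (the standby NN index), and txid - 1.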