HDFS-5963. TestRollingUpgrade#testSecondaryNameNode causes subsequent tests to fail. (Contributed by szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1569993 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2014-02-19 23:56:02 +00:00
parent d69985d90b
commit e891c55f8b
10 changed files with 102 additions and 158 deletions

View File

@@ -52,3 +52,7 @@ HDFS-5535 subtasks:
HDFS-5974. Fix compilation error, NameNodeLayoutVersion and
DataNodeLayoutVersion after merge from trunk. (szetszwo)
HDFS-5963. TestRollingUpgrade#testSecondaryNameNode causes subsequent
tests to fail. (szetszwo via Arpit Agarwal)

View File

@@ -278,7 +278,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
@Override
public DoUpgradeResponseProto doUpgrade(RpcController controller,
DoUpgradeRequestProto request) throws ServiceException {
StorageInfo si = PBHelper.convert(request.getSInfo(), NodeType.NAME_NODE);
StorageInfo si = PBHelper.convert(request.getSInfo(), NodeType.JOURNAL_NODE);
try {
impl.doUpgrade(convert(request.getJid()), si);
return DoUpgradeResponseProto.getDefaultInstance();
@@ -302,9 +302,9 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
public CanRollBackResponseProto canRollBack(RpcController controller,
CanRollBackRequestProto request) throws ServiceException {
try {
StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.NAME_NODE);
StorageInfo si = PBHelper.convert(request.getStorage(), NodeType.JOURNAL_NODE);
Boolean result = impl.canRollBack(convert(request.getJid()), si,
PBHelper.convert(request.getPrevStorage(), NodeType.NAME_NODE),
PBHelper.convert(request.getPrevStorage(), NodeType.JOURNAL_NODE),
request.getTargetLayoutVersion());
return CanRollBackResponseProto.newBuilder()
.setCanRollBack(result)

View File

@@ -49,7 +49,7 @@ public final class HdfsServerConstants {
/** Startup options for rolling upgrade. */
public static enum RollingUpgradeStartupOption{
ROLLBACK, DOWNGRADE, STARTED;
ROLLBACK, DOWNGRADE;
private static final RollingUpgradeStartupOption[] VALUES = values();
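For orientation, the hunk above drops STARTED from RollingUpgradeStartupOption, leaving "rollback" and "downgrade" as the only valid values for the -rollingUpgrade startup argument. The sketch below is not part of the patch; the parse helper and its use of valueOf are assumptions, only the two remaining constants come from the hunk.

import java.util.Locale;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;

/** Hypothetical helper: map a "-rollingUpgrade <value>" argument to the enum. */
class RollingUpgradeOptionSketch {
  static RollingUpgradeStartupOption parse(String value) {
    // "rollback" and "downgrade" resolve to the remaining constants;
    // "started" is no longer a constant, so valueOf() throws
    // IllegalArgumentException for it after this change.
    return RollingUpgradeStartupOption.valueOf(value.toUpperCase(Locale.ENGLISH));
  }
}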

View File

@@ -163,10 +163,14 @@ public class StorageInfo {
/** Validate and set storage type from {@link Properties}*/
protected void checkStorageType(Properties props, StorageDirectory sd)
throws InconsistentFSStateException {
if (storageType == null) { //don't care about storage type
return;
}
NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType"));
if (!storageType.equals(type)) {
throw new InconsistentFSStateException(sd.root,
"node type is incompatible with others.");
"Incompatible node types: storageType=" + storageType
+ " but StorageDirectory type=" + type);
}
}
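For context, the null guard added above lets a caller skip the type check entirely. Below is a minimal sketch mirroring the FileJournalManager change later in this diff (reading only the cTime); the wrapper class and the import paths are assumptions added here.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;

/** Sketch: read storage properties without enforcing a node type. */
class JournalCTimeSketch {
  static long readCTime(StorageDirectory sd) throws IOException {
    // A null NodeType makes checkStorageType() return early (see the hunk above),
    // so a directory written by any node type can be read here.
    StorageInfo sInfo = new StorageInfo((NodeType) null);
    sInfo.readProperties(sd);
    return sInfo.getCTime();
  }
}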

View File

@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
@@ -718,7 +717,6 @@ public class FSEditLogLoader {
break;
}
case OP_ROLLING_UPGRADE_START: {
boolean started = false;
if (startOpt == StartupOption.ROLLINGUPGRADE) {
final RollingUpgradeStartupOption rollingUpgradeOpt
= startOpt.getRollingUpgradeStartupOption();
@@ -727,33 +725,19 @@
} else if (rollingUpgradeOpt == RollingUpgradeStartupOption.DOWNGRADE) {
//ignore upgrade marker
break;
} else if (rollingUpgradeOpt == RollingUpgradeStartupOption.STARTED) {
started = true;
}
}
if (started || fsNamesys.isInStandbyState()) {
if (totalEdits > 1) {
// save namespace if this is not the second edit transaction
// (the first must be OP_START_LOG_SEGMENT)
fsNamesys.getFSImage().saveNamespace(fsNamesys,
NameNodeFile.IMAGE_ROLLBACK, null);
}
//rolling upgrade is already started, set info
final RollingUpgradeOp upgradeOp = (RollingUpgradeOp)op;
fsNamesys.setRollingUpgradeInfo(upgradeOp.getTime());
break;
}
throw new RollingUpgradeException(
"Unexpected OP_ROLLING_UPGRADE_START in edit log: op=" + op);
// save namespace if this is not the second edit transaction
// (the first must be OP_START_LOG_SEGMENT)
final boolean saveNamespace = totalEdits > 1;
final long startTime = ((RollingUpgradeOp) op).getTime();
fsNamesys.startRollingUpgradeInternal(startTime, saveNamespace);
break;
}
case OP_ROLLING_UPGRADE_FINALIZE: {
if (!fsNamesys.isRollingUpgrade()) {
throw new RollingUpgradeException(
"Unexpected OP_ROLLING_UPGRADE_FINALIZE "
+ " since there is no rolling upgrade in progress.");
}
final long finalizeTime = ((RollingUpgradeOp) op).getTime();
fsNamesys.finalizeRollingUpgradeInternal(finalizeTime);
fsNamesys.getFSImage().purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
break;
}

View File

@@ -7146,19 +7146,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeLock();
try {
checkOperation(OperationCategory.WRITE);
final String action = "start rolling upgrade";
checkNameNodeSafeMode("Failed to " + action);
checkRollingUpgrade(action);
getFSImage().checkUpgrade(this);
getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
LOG.info("Successfully saved namespace for preparing rolling upgrade.");
setRollingUpgradeInfo(now());
checkNameNodeSafeMode("Failed to start rolling upgrade");
startRollingUpgradeInternal(now(), true);
getEditLog().logStartRollingUpgrade(rollingUpgradeInfo.getStartTime());
} finally {
writeUnlock();
}
getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
@@ -7167,6 +7161,25 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return rollingUpgradeInfo;
}
/**
* Update internal state to indicate that a rolling upgrade is in progress.
* Optionally create a checkpoint before starting the rolling upgrade.
* @param startTime rolling upgrade start time
* @param saveNamespace If true then a checkpoint is created before initiating
* the rolling upgrade.
*/
void startRollingUpgradeInternal(long startTime, boolean saveNamespace)
throws IOException {
checkRollingUpgrade("start rolling upgrade");
getFSImage().checkUpgrade(this);
if (saveNamespace) {
getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null);
LOG.info("Successfully saved namespace for preparing rolling upgrade.");
}
setRollingUpgradeInfo(startTime);
}
void setRollingUpgradeInfo(long startTime) {
rollingUpgradeInfo = new RollingUpgradeInfo(blockPoolId, startTime);
}
@@ -7187,7 +7200,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
+ " Existing rolling upgrade info:\n" + rollingUpgradeInfo);
}
}
RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
checkSuperuserPrivilege();
checkOperation(OperationCategory.WRITE);
@@ -7195,17 +7208,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final RollingUpgradeInfo returnInfo;
try {
checkOperation(OperationCategory.WRITE);
final String err = "Failed to finalize rolling upgrade";
checkNameNodeSafeMode(err);
checkNameNodeSafeMode("Failed to finalize rolling upgrade");
if (!isRollingUpgrade()) {
throw new RollingUpgradeException(err
+ " since there is no rolling upgrade in progress.");
}
returnInfo = new RollingUpgradeInfo(blockPoolId,
rollingUpgradeInfo.getStartTime(), now());
rollingUpgradeInfo = null;
returnInfo = finalizeRollingUpgradeInternal(now());
getEditLog().logFinalizeRollingUpgrade(returnInfo.getFinalizeTime());
getFSImage().saveNamespace(this);
getFSImage().purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
@@ -7213,12 +7218,26 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
writeUnlock();
}
// getEditLog().logSync() is not needed since saveNamespace() is called above
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
}
return returnInfo;
}
RollingUpgradeInfo finalizeRollingUpgradeInternal(long finalizeTime)
throws RollingUpgradeException {
if (!isRollingUpgrade()) {
throw new RollingUpgradeException(
"Failed to finalize rolling upgrade since there is no rolling upgrade in progress.");
}
final long startTime = rollingUpgradeInfo.getStartTime();
rollingUpgradeInfo = null;
return new RollingUpgradeInfo(blockPoolId, startTime, finalizeTime);
}
long addCacheDirective(CacheDirectiveInfo directive, EnumSet<CacheFlag> flags)
throws IOException {
checkOperation(OperationCategory.WRITE);
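For context, a minimal sketch of the client-side calls that reach the refactored startRollingUpgrade()/finalizeRollingUpgrade() paths above. START and QUERY appear in the test changes below; the FINALIZE action, the return type of rollingUpgrade(), and the wrapper class are assumptions.

import java.io.IOException;

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

/** Sketch: client calls that exercise the start/finalize paths on the NameNode. */
class RollingUpgradeClientSketch {
  static void run(DistributedFileSystem dfs) throws IOException {
    // NameNode side: startRollingUpgrade() -> startRollingUpgradeInternal(now(), true)
    RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.START);
    System.out.println("rolling upgrade started at " + info.getStartTime());

    // ... upgrade the daemons, then finalize ...
    // NameNode side: finalizeRollingUpgrade() -> finalizeRollingUpgradeInternal(now())
    dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);  // FINALIZE assumed to exist alongside START/QUERY
  }
}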

View File

@@ -568,7 +568,7 @@ public class FileJournalManager implements JournalManager {
@Override
public long getJournalCTime() throws IOException {
StorageInfo sInfo = new StorageInfo(NodeType.NAME_NODE);
StorageInfo sInfo = new StorageInfo((NodeType)null);
sInfo.readProperties(sd);
return sInfo.getCTime();
}

View File

@@ -26,17 +26,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.Assert;
import org.junit.Test;
@@ -105,7 +101,7 @@ public class TestRollingUpgrade {
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode("-rollingupgrade", "started");
cluster.restartNameNode();
{
final DistributedFileSystem dfs = cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
@@ -189,13 +185,10 @@ public class TestRollingUpgrade {
// cluster2 takes over QJM
final Configuration conf2 = setConf(new Configuration(), nn2Dir, mjc);
// use "started" option for ignoring upgrade marker in editlog
StartupOption.ROLLINGUPGRADE.setRollingUpgradeStartupOption("started");
cluster2 = new MiniDFSCluster.Builder(conf2)
.numDataNodes(0)
.format(false)
.manageNameDfsDirs(false)
.startupOption(StartupOption.ROLLINGUPGRADE)
.build();
final DistributedFileSystem dfs2 = cluster2.getFileSystem();
@@ -209,7 +202,7 @@ public class TestRollingUpgrade {
dfs2.mkdirs(baz);
LOG.info("RESTART cluster 2 with -rollingupgrade started");
LOG.info("RESTART cluster 2");
cluster2.restartNameNode();
Assert.assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
@@ -223,8 +216,8 @@ public class TestRollingUpgrade {
LOG.info("The exception is expected.", e);
}
LOG.info("RESTART cluster 2 with -rollingupgrade started again");
cluster2.restartNameNode("-rollingupgrade", "started");
LOG.info("RESTART cluster 2 again");
cluster2.restartNameNode();
Assert.assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
@@ -246,14 +239,43 @@ public class TestRollingUpgrade {
}
}
@Test(expected=ExitException.class)
public void testSecondaryNameNode() throws Exception {
ExitUtil.disableSystemExit();
@Test
public void testRollback() throws IOException {
// start a cluster
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final String[] args = {
StartupOption.ROLLINGUPGRADE.getName(),
RollingUpgradeStartupOption.STARTED.name()};
SecondaryNameNode.main(args);
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
{
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.mkdirs(foo);
//start rolling upgrade
dfs.rollingUpgrade(RollingUpgradeAction.START);
dfs.mkdirs(bar);
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
}
// Restart should succeed!
// cluster.restartNameNode();
cluster.restartNameNode("-rollingUpgrade", "rollback");
{
final DistributedFileSystem dfs = cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
}
} finally {
if(cluster != null) cluster.shutdown();
}
}
@Test

View File

@@ -86,9 +86,6 @@ public class TestHdfsServerConstants {
verifyStartupOptionResult("ROLLINGUPGRADE(DOWNGRADE)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.DOWNGRADE);
verifyStartupOptionResult("ROLLINGUPGRADE(STARTED)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.STARTED);
try {
verifyStartupOptionResult("ROLLINGUPGRADE(UNKNOWNOPTION)", StartupOption.ROLLINGUPGRADE, null);

View File

@@ -1,86 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.junit.Assert;
import org.junit.Test;
/**
* This class tests the edit log upgrade marker.
*/
public class TestEditLogUpgradeMarker {
private static final Log LOG = LogFactory.getLog(TestEditLogUpgradeMarker.class);
/**
* Test edit log upgrade marker.
*/
@Test
public void testUpgradeMarker() throws IOException {
// start a cluster
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
final Path foo = new Path("/foo");
final Path bar = new Path("/bar");
{
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.mkdirs(foo);
//add marker
namesystem.startRollingUpgrade();
dfs.mkdirs(bar);
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
}
try {
cluster.restartNameNode();
Assert.fail();
} catch(RollingUpgradeException e) {
LOG.info("The exception is expected: ", e);
}
cluster.restartNameNode("-rollingUpgrade", "rollback");
{
final DistributedFileSystem dfs = cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
}
} finally {
if(cluster != null) cluster.shutdown();
}
}
}