HDFS-2952. NN should not start with upgrade option or with a pending, unfinalized upgrade. Contributed by Aaron T. Myers.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1245875 13f79535-47bb-0310-9956-ffa450edef68
parent ef5d7156db
commit 41e56dfece
@@ -216,3 +216,5 @@ HDFS-2955. IllegalStateException during standby startup in getCurSegmentTxId. (H
 HDFS-2937. TestDFSHAAdmin needs tests with MiniDFSCluster. (Brandon Li via suresh)
 
 HDFS-2586. Add protobuf service and implementation for HAServiceProtocol. (suresh via atm)
+
+HDFS-2952. NN should not start with upgrade option or with a pending an unfinalized upgrade. (atm)
@@ -340,8 +340,8 @@ public class FSImage implements Closeable {
     File prevDir = sd.getPreviousDir();
     File tmpDir = sd.getPreviousTmp();
     assert curDir.exists() : "Current directory must exist.";
-    assert !prevDir.exists() : "prvious directory must not exist.";
-    assert !tmpDir.exists() : "prvious.tmp directory must not exist.";
+    assert !prevDir.exists() : "previous directory must not exist.";
+    assert !tmpDir.exists() : "previous.tmp directory must not exist.";
     assert !editLog.isSegmentOpen() : "Edits log must not be open.";
 
     // rename current to tmp
@@ -539,7 +539,7 @@ public class NameNode {
     if (!haEnabled) {
       state = ACTIVE_STATE;
     } else {
-      state = STANDBY_STATE;;
+      state = STANDBY_STATE;
     }
     this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
     this.haContext = createHAContext();
@@ -814,6 +814,14 @@ public class NameNode {
       return null;
     }
     setStartupOption(conf, startOpt);
 
+    if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf)) &&
+        (startOpt == StartupOption.UPGRADE ||
+         startOpt == StartupOption.ROLLBACK ||
+         startOpt == StartupOption.FINALIZE)) {
+      throw new HadoopIllegalArgumentException("Invalid startup option. " +
+          "Cannot perform DFS upgrade with HA enabled.");
+    }
+
     switch (startOpt) {
       case FORMAT:
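For orientation (not part of the patch): the guard above fires in createNameNode() before any storage directories are touched, and since HadoopIllegalArgumentException extends java.lang.IllegalArgumentException, callers can catch it generically. A minimal, hypothetical sketch of what a caller sees; simpleHATopology() and the builder calls are taken from the new test further down:

    // Hypothetical illustration, not part of the commit: bringing up an
    // HA-topology minicluster with the UPGRADE option now fails fast.
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(new Configuration())
          .nnTopology(MiniDFSNNTopology.simpleHATopology())
          .startupOption(StartupOption.UPGRADE)
          .numDataNodes(0)
          .build();
    } catch (IllegalArgumentException e) {
      // HadoopIllegalArgumentException extends IllegalArgumentException, so
      // this generic catch works; the message is "Invalid startup option. "
      // + "Cannot perform DFS upgrade with HA enabled."
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }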
@@ -1252,6 +1252,15 @@ public class MiniDFSCluster {
     }
   }
 
+  /**
+   * Restart all namenodes.
+   */
+  public synchronized void restartNameNodes() throws IOException {
+    for (int i = 0; i < nameNodes.length; i++) {
+      restartNameNode(i);
+    }
+  }
+
   /**
    * Restart the namenode.
    */
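A quick usage sketch (illustrative, not from the patch) for the new restartNameNodes() helper, which simply restarts each NameNode in index order:

    // Illustrative use of the new helper: bounce every NN in an HA minicluster.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    try {
      cluster.restartNameNodes(); // equivalent to restartNameNode(i) for each i
    } finally {
      cluster.shutdown();
    }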
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import static org.junit.Assert.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Tests for upgrading with HA enabled.
+ */
+public class TestDFSUpgradeWithHA {
+
+  private static final Log LOG = LogFactory.getLog(TestDFSUpgradeWithHA.class);
+
+  /**
+   * Make sure that an HA NN refuses to start if given an upgrade-related
+   * startup option.
+   */
+  @Test
+  public void testStartingWithUpgradeOptionsFails() throws IOException {
+    for (StartupOption startOpt : Lists.newArrayList(new StartupOption[] {
+        StartupOption.UPGRADE, StartupOption.FINALIZE,
+        StartupOption.ROLLBACK })) {
+      MiniDFSCluster cluster = null;
+      try {
+        cluster = new MiniDFSCluster.Builder(new Configuration())
+            .nnTopology(MiniDFSNNTopology.simpleHATopology())
+            .startupOption(startOpt)
+            .numDataNodes(0)
+            .build();
+        fail("Should not have been able to start an HA NN in upgrade mode");
+      } catch (IllegalArgumentException iae) {
+        GenericTestUtils.assertExceptionContains(
+            "Cannot perform DFS upgrade with HA enabled.", iae);
+        LOG.info("Got expected exception", iae);
+      } finally {
+        if (cluster != null) {
+          cluster.shutdown();
+        }
+      }
+    }
+  }
+
+  /**
+   * Make sure that an HA NN won't start if a previous upgrade was in progress.
+   */
+  @Test
+  public void testStartingWithUpgradeInProgressFails() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(new Configuration())
+          .nnTopology(MiniDFSNNTopology.simpleHATopology())
+          .numDataNodes(0)
+          .build();
+
+      // Simulate an upgrade having started.
+      for (int i = 0; i < 2; i++) {
+        for (URI uri : cluster.getNameDirs(i)) {
+          File prevTmp = new File(new File(uri), Storage.STORAGE_TMP_PREVIOUS);
+          LOG.info("creating previous tmp dir: " + prevTmp);
+          assertTrue(prevTmp.mkdirs());
+        }
+      }
+
+      cluster.restartNameNodes();
+      fail("Should not have been able to start an HA NN with an in-progress upgrade");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot start an HA namenode with name dirs that need recovery.",
+          ioe);
+      LOG.info("Got expected exception", ioe);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}