HDFS-7237. The command "hdfs namenode -rollingUpgrade" throws ArrayIndexOutOfBoundsException.

Author: Tsz-Wo Nicholas Sze
Date:   2014-10-13 18:28:34 -07:00
parent 0e57aa3bf6
commit f6d0b8892a
5 changed files with 75 additions and 12 deletions
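In essence, NameNode.parseArguments consumes the "-rollingUpgrade" flag, advances its index, and then reads the next argument as the rolling upgrade option. Before this patch there was no bounds check, so running "hdfs namenode -rollingUpgrade" with no option indexed past the end of the argument array and threw ArrayIndexOutOfBoundsException; the fix makes parsing fail cleanly by returning null. The following is a minimal, self-contained sketch of that failure mode and of the guard added here; it is illustrative only (class and method names are invented, not the Hadoop sources).

// Illustrative sketch only -- a simplified stand-in for NameNode.parseArguments.
public class RollingUpgradeParsingSketch {

  /** Returns the rolling upgrade option, or null if the command line is invalid. */
  static String parseRollingUpgrade(String[] args) {
    final int argsLen = (args == null) ? 0 : args.length;
    for (int i = 0; i < argsLen; i++) {
      if ("-rollingUpgrade".equalsIgnoreCase(args[i])) {
        ++i;
        // The fix: without this bounds check, args[i] below throws
        // ArrayIndexOutOfBoundsException when no option follows the flag.
        if (i >= argsLen) {
          System.err.println("Must specify a rolling upgrade startup option "
              + "<downgrade|rollback|started>");
          return null;
        }
        return args[i];
      }
    }
    return null;
  }

  public static void main(String[] unused) {
    System.out.println(parseRollingUpgrade(new String[] {"-rollingUpgrade", "started"})); // started
    System.out.println(parseRollingUpgrade(new String[] {"-rollingUpgrade"}));            // null, with an error message
  }
}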


@@ -945,6 +945,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6544. Broken Link for GFS in package.html. (Suraj Nayak M via wheat9)
HDFS-7237. The command "hdfs namenode -rollingUpgrade" throws
ArrayIndexOutOfBoundsException. (szetszwo)
BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
HDFS-6387. HDFS CLI admin tool for creating & deleting an


@@ -72,6 +72,15 @@ public final class HdfsServerConstants {
      throw new IllegalArgumentException("Failed to convert \"" + s
          + "\" to " + RollingUpgradeStartupOption.class.getSimpleName());
    }

    public static String getAllOptionString() {
      final StringBuilder b = new StringBuilder("<");
      for(RollingUpgradeStartupOption opt : VALUES) {
        b.append(opt.name().toLowerCase()).append("|");
      }
      b.setCharAt(b.length() - 1, '>');
      return b.toString();
    }
  }

  /** Startup options */
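The new getAllOptionString() helper builds the option list for usage and error messages directly from the enum values: it appends each name in lower case followed by "|", then overwrites the trailing "|" with ">". A standalone illustration of that StringBuilder trick follows; the enum here is a stand-in, and the real values and their order come from RollingUpgradeStartupOption.

// Standalone illustration of the getAllOptionString() logic; the enum is a stand-in.
public class OptionStringSketch {
  enum RollingUpgradeOpt { DOWNGRADE, ROLLBACK, STARTED }

  static String getAllOptionString() {
    final StringBuilder b = new StringBuilder("<");
    for (RollingUpgradeOpt opt : RollingUpgradeOpt.values()) {
      b.append(opt.name().toLowerCase()).append("|");
    }
    // The loop leaves a trailing '|'; overwrite it with the closing '>'.
    b.setCharAt(b.length() - 1, '>');
    return b.toString();
  }

  public static void main(String[] args) {
    // Prints the options in enum order, e.g. <downgrade|rollback|started>.
    System.out.println(getAllOptionString());
  }
}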


@@ -216,10 +216,8 @@ public class NameNode implements NameNodeStatusMXBean {
" [" + StartupOption.CLUSTERID.getName() + " cid]" +
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+ StartupOption.ROLLBACK.getName() + "] | \n\t["
+ StartupOption.ROLLINGUPGRADE.getName() + " <"
+ RollingUpgradeStartupOption.DOWNGRADE.name().toLowerCase() + "|"
+ RollingUpgradeStartupOption.ROLLBACK.name().toLowerCase() + "|"
+ RollingUpgradeStartupOption.STARTED.name().toLowerCase() + "> ] | \n\t["
+ StartupOption.ROLLINGUPGRADE.getName() + " "
+ RollingUpgradeStartupOption.getAllOptionString() + " ] | \n\t["
+ StartupOption.FINALIZE.getName() + "] | \n\t["
+ StartupOption.IMPORT.getName() + "] | \n\t["
+ StartupOption.INITIALIZESHAREDEDITS.getName() + "] | \n\t["
@@ -1250,6 +1248,11 @@ public class NameNode implements NameNodeStatusMXBean {
      } else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.ROLLINGUPGRADE;
        ++i;
        if (i >= argsLen) {
          LOG.fatal("Must specify a rolling upgrade startup option "
              + RollingUpgradeStartupOption.getAllOptionString());
          return null;
        }
        startOpt.setRollingUpgradeStartupOption(args[i]);
      } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
        startOpt = StartupOption.ROLLBACK;
@@ -1508,7 +1511,7 @@
        namenode.join();
      }
    } catch (Throwable e) {
      LOG.fatal("Exception in namenode join", e);
      LOG.fatal("Failed to start namenode.", e);
      terminate(1, e);
    }
  }
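For context, the two NameNode changes work together: the usage text is now assembled from getAllOptionString() instead of a hand-written "<downgrade|rollback|started>" literal, and a missing rolling upgrade option makes parseArguments return null so the caller can print usage, rather than letting an ArrayIndexOutOfBoundsException escape to the catch block in main, whose catch-all message is accordingly generalized to "Failed to start namenode." A hedged, simplified sketch of that caller-side flow follows; the names and exit handling are invented for illustration and do not mirror the real createNameNode path.

// Simplified, hypothetical sketch of the caller-side contract: a null result from
// argument parsing means "bad command line", handled with a usage message rather
// than an exception reaching the outer catch.
public class StartupFlowSketch {

  /** Stand-in for NameNode.parseArguments: null signals an invalid command line. */
  static String parseOrNull(String[] args) {
    if (args.length != 2 || !"-rollingUpgrade".equalsIgnoreCase(args[0])) {
      return null;
    }
    return args[1];
  }

  public static void main(String[] argv) {
    try {
      final String opt = parseOrNull(argv);
      if (opt == null) {
        // Bad arguments: report usage and stop; no stack trace needed.
        System.err.println(
            "Usage: hdfs namenode [-rollingUpgrade <downgrade|rollback|started>] ...");
        System.exit(-1);
      }
      System.out.println("starting with rolling upgrade option: " + opt);
    } catch (Throwable e) {
      // With the bounds check in place, argument mistakes never land here; this
      // branch is for genuine startup failures, hence the more general message.
      System.err.println("Failed to start namenode.");
      e.printStackTrace();
      System.exit(1);
    }
  }
}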


@@ -17,12 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.*;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Test;
/**
@@ -43,10 +43,10 @@ public class TestHdfsServerConstants {
      RollingUpgradeStartupOption expectedRollupOption) {
    StartupOption option = StartupOption.getEnum(value);
    assertThat(option, is(expectedOption));
    assertEquals(expectedOption, option);
    if (expectedRollupOption != null) {
      assertThat(option.getRollingUpgradeStartupOption(), is(expectedRollupOption));
      assertEquals(expectedRollupOption, option.getRollingUpgradeStartupOption());
    }
  }
@@ -86,6 +86,9 @@ public class TestHdfsServerConstants {
    verifyStartupOptionResult("ROLLINGUPGRADE(DOWNGRADE)",
        StartupOption.ROLLINGUPGRADE,
        RollingUpgradeStartupOption.DOWNGRADE);
    verifyStartupOptionResult("ROLLINGUPGRADE(STARTED)",
        StartupOption.ROLLINGUPGRADE,
        RollingUpgradeStartupOption.STARTED);
    try {
      verifyStartupOptionResult("ROLLINGUPGRADE(UNKNOWNOPTION)", StartupOption.ROLLINGUPGRADE, null);


@@ -23,7 +23,9 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Assert;
import org.junit.Test;
public class TestNameNodeOptionParsing {
@@ -102,4 +104,47 @@
    assertNull(opt);
  }

  @Test(timeout = 10000)
  public void testRollingUpgrade() {
    {
      final String[] args = {"-rollingUpgrade"};
      final StartupOption opt = NameNode.parseArguments(args);
      assertNull(opt);
    }

    {
      final String[] args = {"-rollingUpgrade", "started"};
      final StartupOption opt = NameNode.parseArguments(args);
      assertEquals(StartupOption.ROLLINGUPGRADE, opt);
      assertEquals(RollingUpgradeStartupOption.STARTED, opt.getRollingUpgradeStartupOption());
      assertTrue(RollingUpgradeStartupOption.STARTED.matches(opt));
    }

    {
      final String[] args = {"-rollingUpgrade", "downgrade"};
      final StartupOption opt = NameNode.parseArguments(args);
      assertEquals(StartupOption.ROLLINGUPGRADE, opt);
      assertEquals(RollingUpgradeStartupOption.DOWNGRADE, opt.getRollingUpgradeStartupOption());
      assertTrue(RollingUpgradeStartupOption.DOWNGRADE.matches(opt));
    }

    {
      final String[] args = {"-rollingUpgrade", "rollback"};
      final StartupOption opt = NameNode.parseArguments(args);
      assertEquals(StartupOption.ROLLINGUPGRADE, opt);
      assertEquals(RollingUpgradeStartupOption.ROLLBACK, opt.getRollingUpgradeStartupOption());
      assertTrue(RollingUpgradeStartupOption.ROLLBACK.matches(opt));
    }

    {
      final String[] args = {"-rollingUpgrade", "foo"};
      try {
        NameNode.parseArguments(args);
        Assert.fail();
      } catch(IllegalArgumentException iae) {
        // the exception is expected.
      }
    }
  }
}