HDFS-8656. Preserve compatibility of ClientProtocol#rollingUpgrade after finalization.

(cherry picked from commit 60b858bfa6)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
Kihwal Lee 2015-10-13 10:46:20 -05:00
parent 3a5b9f49df
commit 61388b41e7
6 changed files with 71 additions and 21 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -31,6 +31,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-8806. Inconsistent metrics: number of missing blocks with replication
     factor 1 not properly cleared. (Zhe Zhang via aajisaka)
 
+    HDFS-8656. Preserve compatibility of ClientProtocol#rollingUpgrade after
+    finalization. (wang)
+
     HDFS-8852. HDFS architecture documentation of version 2.x is outdated
     about append write support. (Ajith S via aajisaka)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -847,8 +847,9 @@ public interface ClientProtocol {
   /**
    * Rolling upgrade operations.
-   * @param action either query, start or finailze.
-   * @return rolling upgrade information.
+   * @param action either query, prepare or finalize.
+   * @return rolling upgrade information. On query, if no upgrade is in
+   * progress, returns null.
    */
   @Idempotent
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
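
A note on the revised contract (illustration only, not part of the patch): clients that previously assumed rollingUpgrade(QUERY) always returns a non-null RollingUpgradeInfo now need a null check once the upgrade has been finalized. A minimal caller-side sketch, assuming fs.defaultFS points at an HDFS NameNode; the class and variable names are hypothetical:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class RollingUpgradeQueryExample {
  public static void main(String[] args) throws IOException {
    // Assumes the default FileSystem in the configuration is HDFS.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
    if (info == null) {
      // With this change, a null result means no rolling upgrade is in
      // progress, including the case where one was already finalized.
      System.out.println("No rolling upgrade in progress");
    } else {
      System.out.println("Rolling upgrade in progress: " + info);
    }
  }
}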

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -7485,10 +7485,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     checkOperation(OperationCategory.READ);
     readLock();
     try {
-      if (rollingUpgradeInfo != null) {
-        boolean hasRollbackImage = this.getFSImage().hasRollbackFSImage();
-        rollingUpgradeInfo.setCreatedRollbackImages(hasRollbackImage);
+      if (!isRollingUpgrade()) {
+        return null;
       }
+      Preconditions.checkNotNull(rollingUpgradeInfo);
+      boolean hasRollbackImage = this.getFSImage().hasRollbackFSImage();
+      rollingUpgradeInfo.setCreatedRollbackImages(hasRollbackImage);
       return rollingUpgradeInfo;
     } finally {
       readUnlock();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java

@@ -81,9 +81,10 @@ public interface NameNodeMXBean {
   public boolean isUpgradeFinalized();
 
   /**
-   * Gets the RollingUpgrade information
+   * Gets the RollingUpgrade information.
    *
-   * @return Rolling upgrade information
+   * @return Rolling upgrade information if an upgrade is in progress. Else
+   * (e.g. if there is no upgrade or the upgrade is finalized), returns null.
    */
   public RollingUpgradeInfo.Bean getRollingUpgradeStatus();
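
The same convention applies to JMX consumers: the NameNodeInfo bean's RollingUpgradeStatus attribute is null unless an upgrade is in progress. A minimal in-process sketch (it mirrors the getBean() helper added to TestRollingUpgrade below and assumes it runs in the NameNode's own JVM; a remote monitoring client would go through a JMX connector instead, which is not shown):

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;

public class RollingUpgradeStatusProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    // Null when no rolling upgrade is in progress or it has been finalized.
    CompositeData status =
        (CompositeData) mbs.getAttribute(name, "RollingUpgradeStatus");
    if (status == null) {
      System.out.println("No rolling upgrade in progress");
    } else {
      System.out.println("startTime=" + status.get("startTime")
          + ", finalizeTime=" + status.get("finalizeTime"));
    }
  }
}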

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java

@@ -19,6 +19,16 @@ package org.apache.hadoop.hdfs;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+
+import javax.management.AttributeNotFoundException;
+import javax.management.InstanceNotFoundException;
+import javax.management.MBeanException;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.ReflectionException;
+import javax.management.openmbean.CompositeDataSupport;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,6 +54,9 @@ import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNull;
 
 /**
  * This class tests rolling upgrade.
@@ -54,7 +67,7 @@ public class TestRollingUpgrade {
   public static void runCmd(DFSAdmin dfsadmin, boolean success,
       String... args) throws Exception {
     if (success) {
-      Assert.assertEquals(0, dfsadmin.run(args));
+      assertEquals(0, dfsadmin.run(args));
     } else {
       Assert.assertTrue(dfsadmin.run(args) != 0);
     }
@@ -84,6 +97,7 @@ public class TestRollingUpgrade {
       //illegal argument "abc" to rollingUpgrade option
       runCmd(dfsadmin, false, "-rollingUpgrade", "abc");
 
+      checkMxBeanIsNull();
       //query rolling upgrade
       runCmd(dfsadmin, true, "-rollingUpgrade");
@@ -94,11 +108,16 @@ public class TestRollingUpgrade {
 
       //query rolling upgrade
       runCmd(dfsadmin, true, "-rollingUpgrade", "query");
+      checkMxBean();
 
       dfs.mkdirs(bar);
 
       //finalize rolling upgrade
       runCmd(dfsadmin, true, "-rollingUpgrade", "finalize");
+      // RollingUpgradeInfo should be null after finalization, both via
+      // Java API and in JMX
+      assertNull(dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
+      checkMxBeanIsNull();
 
       dfs.mkdirs(baz);
@@ -195,8 +214,8 @@ public class TestRollingUpgrade {
       LOG.info("START\n" + info1);
 
       //query rolling upgrade
-      Assert.assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
+      assertEquals(info1, dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
 
       dfs.mkdirs(bar);
       cluster.shutdown();
     }
@@ -216,13 +235,13 @@ public class TestRollingUpgrade {
       Assert.assertFalse(dfs2.exists(baz));
 
       //query rolling upgrade in cluster2
-      Assert.assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
+      assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
 
       dfs2.mkdirs(baz);
 
       LOG.info("RESTART cluster 2");
       cluster2.restartNameNode();
-      Assert.assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
+      assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
       Assert.assertTrue(dfs2.exists(foo));
       Assert.assertTrue(dfs2.exists(bar));
       Assert.assertTrue(dfs2.exists(baz));
@@ -236,7 +255,7 @@ public class TestRollingUpgrade {
 
       LOG.info("RESTART cluster 2 again");
       cluster2.restartNameNode();
-      Assert.assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
+      assertEquals(info1, dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
       Assert.assertTrue(dfs2.exists(foo));
       Assert.assertTrue(dfs2.exists(bar));
       Assert.assertTrue(dfs2.exists(baz));
@@ -257,9 +276,31 @@ public class TestRollingUpgrade {
     }
   }
 
+  private static CompositeDataSupport getBean()
+      throws MalformedObjectNameException, MBeanException,
+      AttributeNotFoundException, InstanceNotFoundException,
+      ReflectionException {
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName mxbeanName =
+        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
+    return (CompositeDataSupport)mbs.getAttribute(mxbeanName,
+        "RollingUpgradeStatus");
+  }
+
+  private static void checkMxBeanIsNull() throws Exception {
+    CompositeDataSupport ruBean = getBean();
+    assertNull(ruBean);
+  }
+
+  private static void checkMxBean() throws Exception {
+    CompositeDataSupport ruBean = getBean();
+    assertNotEquals(0l, ruBean.get("startTime"));
+    assertEquals(0l, ruBean.get("finalizeTime"));
+  }
+
   @Test
-  public void testRollback() throws IOException {
+  public void testRollback() throws Exception {
     // start a cluster
     final Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     try {
@@ -277,10 +318,13 @@ public class TestRollingUpgrade {
       out.write(data, 0, data.length);
       out.close();
 
+      checkMxBeanIsNull();
       startRollingUpgrade(foo, bar, file, data, cluster);
+      checkMxBean();
       cluster.getFileSystem().rollEdits();
       cluster.getFileSystem().rollEdits();
       rollbackRollingUpgrade(foo, bar, file, data, cluster);
+      checkMxBeanIsNull();
 
       startRollingUpgrade(foo, bar, file, data, cluster);
       cluster.getFileSystem().rollEdits();
@@ -353,18 +397,18 @@ public class TestRollingUpgrade {
       // check the datanode
       final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
       final String[] args1 = {"-getDatanodeInfo", dnAddr};
-      Assert.assertEquals(0, dfsadmin.run(args1));
+      runCmd(dfsadmin, true, args1);
 
       // issue shutdown to the datanode.
       final String[] args2 = {"-shutdownDatanode", dnAddr, "upgrade" };
-      Assert.assertEquals(0, dfsadmin.run(args2));
+      runCmd(dfsadmin, true, args2);
 
       // the datanode should be down.
       Thread.sleep(2000);
       Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());
 
       // ping should fail.
-      Assert.assertEquals(-1, dfsadmin.run(args1));
+      assertEquals(-1, dfsadmin.run(args1));
     } finally {
       if (cluster != null) cluster.shutdown();
     }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
-import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.util.VersionInfo;
@@ -45,8 +44,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
-import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -196,6 +193,8 @@ public class TestNameNodeMXBean {
       assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
           cluster.getDataNodes().size(),
           mbs.getAttribute(mxbeanName, "CacheCapacity"));
+      assertNull("RollingUpgradeInfo should be null when there is no rolling"
+          + " upgrade", mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"));
     } finally {
       if (cluster != null) {
         for (URI dir : cluster.getNameDirs(0)) {