From dc951e606f40bb779632a8a3e3a46aeccc4a446a Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Thu, 17 Mar 2016 10:37:42 -0700
Subject: [PATCH] HDFS-9949. Add a test case to ensure that the DataNode does
 not regenerate its UUID when a storage directory is cleared (Harsh J via
 cmccabe)

---
 .../server/datanode/TestDataNodeUUID.java          | 52 +++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
index 34e53a34504..ebf7c35841e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeUUID.java
@@ -19,17 +19,21 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 
+import java.io.File;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
 
 public class TestDataNodeUUID {
 
@@ -62,4 +66,52 @@ public class TestDataNodeUUID {
     // Make sure that we have a valid DataNodeUUID at that point of time.
     assertNotEquals(dn.getDatanodeUuid(), nullString);
   }
+
+  @Test(timeout = 10000)
+  public void testUUIDRegeneration() throws Exception {
+    File baseDir = new File(System.getProperty("test.build.data"));
+    File disk1 = new File(baseDir, "disk1");
+    File disk2 = new File(baseDir, "disk2");
+
+    // Ensure the configured disks do not pre-exist
+    FileUtils.deleteDirectory(disk1);
+    FileUtils.deleteDirectory(disk2);
+
+    MiniDFSCluster cluster = null;
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        disk1.toURI().toString(),
+        disk2.toURI().toString());
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .manageDataDfsDirs(false)
+          .build();
+      cluster.waitActive();
+
+      // Grab the new-cluster UUID as the original one to test against
+      String originalUUID = cluster.getDataNodes().get(0).getDatanodeUuid();
+      // Stop and simulate a DN wipe or unmount-but-root-path condition
+      // on the second disk
+      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
+      FileUtils.deleteDirectory(disk2);
+      assertTrue("Failed to recreate the data directory: " + disk2,
+          disk2.mkdirs());
+
+      // Restart and check if the UUID changed
+      assertTrue("DataNode failed to start up: " + dn,
+          cluster.restartDataNode(dn));
+      // We need to wait until the DN has completed registration
+      while (!cluster.getDataNodes().get(0).isDatanodeFullyStarted()) {
+        Thread.sleep(50);
+      }
+      assertEquals(
+          "DN generated a new UUID despite disk1 having it intact",
+          originalUUID, cluster.getDataNodes().get(0).getDatanodeUuid());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }