HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. Contributed by Aaron T. Myers.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1326012 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 2012-04-13 23:30:52 +00:00
parent 68f136337d
commit ca3ed5e320
2 changed files with 112 additions and 82 deletions
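
What "swallowing IOE" means here: the old testNameEditsConfigsFailure() wrapped cluster startups that were expected to fail in a catch (IOException) block that merely printed a message, and signaled unexpected success with assertTrue(false). An IOException thrown for the wrong reason was therefore invisible to JUnit. Below is a minimal sketch of the anti-pattern and of the corrected pattern the diff switches to; startMisconfiguredCluster() is a hypothetical stand-in for the real MiniDFSCluster setup, not a method from the patch.

    import static org.junit.Assert.fail;

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.junit.Test;

    public class SwallowedIOEExample {
      private static final Log LOG = LogFactory.getLog(SwallowedIOEExample.class);

      // Hypothetical stand-in for a NameNode startup that is expected to
      // throw an IOException because of a bad storage-directory config.
      private void startMisconfiguredCluster() throws IOException {
        throw new IOException("missing latest edits dir");
      }

      // Anti-pattern (what the old test did): every IOException is caught
      // and printed, so the test passes whether startup failed for the
      // expected reason, failed for an unrelated reason, or succeeded.
      @Test
      public void badExpectedFailure() {
        try {
          startMisconfiguredCluster();
          // Nothing here fails the test if startup unexpectedly succeeds.
        } catch (IOException e) { // expect to fail
          System.out.println("cluster start failed");
        }
      }

      // Corrected pattern (what the patch moves to): unexpected success is
      // reported via fail(), and the expected exception is logged with its
      // stack trace so the reason for the failure remains inspectable.
      @Test
      public void goodExpectedFailure() {
        try {
          startMisconfiguredCluster();
          fail("Successfully started cluster but should not have been able to.");
        } catch (IOException e) {
          LOG.info("EXPECTED: cluster start failed", e);
        }
      }
    }

Code paths that are expected to succeed, by contrast, now simply let the IOException propagate out of the test method (which declares throws IOException), so JUnit reports it as an error instead of it vanishing into stdout.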

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -391,6 +391,8 @@ Release 2.0.0 - UNRELEASED

     HDFS-2799. Trim fs.checkpoint.dir values. (Amith D K via eli)

+    HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS

     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java

@@ -17,20 +17,30 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

-import junit.framework.TestCase;
-import java.io.*;
-import java.util.Random;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
 import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.junit.Before;
+import org.junit.Test;

 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
@@ -39,7 +49,10 @@ import com.google.common.collect.ImmutableSet;
  * This class tests various combinations of dfs.namenode.name.dir
  * and dfs.namenode.edits.dir configurations.
  */
-public class TestNameEditsConfigs extends TestCase {
+public class TestNameEditsConfigs {
+
+  private static final Log LOG = LogFactory.getLog(FSEditLog.class);
+
   static final long SEED = 0xDEADBEEFL;
   static final int BLOCK_SIZE = 4096;
   static final int FILE_SIZE = 8192;
@@ -51,9 +64,9 @@ public class TestNameEditsConfigs extends TestCase {
   private File base_dir = new File(
       System.getProperty("test.build.data", "build/test/data"), "dfs/");

-  protected void setUp() throws java.lang.Exception {
-    if(base_dir.exists()) {
-      if (!FileUtil.fullyDelete(base_dir))
-        throw new IOException("Cannot remove directory " + base_dir);
-    }
+  @Before
+  public void setUp() throws IOException {
+    if(base_dir.exists() && !FileUtil.fullyDelete(base_dir)) {
+      throw new IOException("Cannot remove directory " + base_dir);
+    }
   }
@@ -126,6 +139,7 @@ public class TestNameEditsConfigs {
    * sure we are reading proper edits and image.
    * @throws Exception
    */
+  @Test
   public void testNameEditsConfigs() throws Exception {
     Path file1 = new Path("TestNameEditsConfigs1");
     Path file2 = new Path("TestNameEditsConfigs2");
@@ -310,12 +324,14 @@ public class TestNameEditsConfigs {
    * This test tries to simulate failure scenarios.
    * 1. Start cluster with shared name and edits dir
    * 2. Restart cluster by adding separate name and edits dirs
    * 3. Restart cluster by removing shared name and edits dir
    * 4. Restart cluster with old shared name and edits dir, but only latest
-   *    name dir. This should fail since we dont have latest edits dir
+   *    name dir. This should fail since we don't have latest edits dir
    * 5. Restart cluster with old shared name and edits dir, but only latest
-   *    edits dir. This should fail since we dont have latest name dir
+   *    edits dir. This should succeed since the latest edits will have
+   *    segments leading all the way from the image in name_and_edits.
    */
+  @Test
   public void testNameEditsConfigsFailure() throws IOException {
     Path file1 = new Path("TestNameEditsConfigs1");
     Path file2 = new Path("TestNameEditsConfigs2");
@@ -323,15 +339,18 @@ public class TestNameEditsConfigs {
     MiniDFSCluster cluster = null;
     Configuration conf = null;
     FileSystem fileSys = null;
-    File newNameDir = new File(base_dir, "name");
-    File newEditsDir = new File(base_dir, "edits");
-    File nameAndEdits = new File(base_dir, "name_and_edits");
+    File nameOnlyDir = new File(base_dir, "name");
+    File editsOnlyDir = new File(base_dir, "edits");
+    File nameAndEditsDir = new File(base_dir, "name_and_edits");

+    // 1
     // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
     replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
+
+    try {
     // Manage our own dfs directories
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(NUM_DATA_NODES)
@@ -340,11 +359,10 @@ public class TestNameEditsConfigs {
       cluster.waitActive();

       // Check that the dir has a VERSION file
-      assertTrue(new File(nameAndEdits, "current/VERSION").exists());
+      assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());

       fileSys = cluster.getFileSystem();

-      try {
       assertTrue(!fileSys.exists(file1));
       writeFile(fileSys, file1, replication);
       checkFile(fileSys, file1, replication);
@@ -353,16 +371,19 @@ public class TestNameEditsConfigs {
       cluster.shutdown();
     }

+    // 2
     // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
     conf = new HdfsConfiguration();
-    assertTrue(newNameDir.mkdir());
-    assertTrue(newEditsDir.mkdir());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() +
-        "," + newNameDir.getPath());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() +
-        "," + newEditsDir.getPath());
+    assertTrue(nameOnlyDir.mkdir());
+    assertTrue(editsOnlyDir.mkdir());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath() +
+        "," + nameOnlyDir.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath() +
+        "," + editsOnlyDir.getPath());
     replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);

+    try {
+
     // Manage our own dfs directories. Do not format.
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(NUM_DATA_NODES)
@@ -372,13 +393,12 @@ public class TestNameEditsConfigs {
       cluster.waitActive();

       // Check that the dirs have a VERSION file
-      assertTrue(new File(nameAndEdits, "current/VERSION").exists());
-      assertTrue(new File(newNameDir, "current/VERSION").exists());
-      assertTrue(new File(newEditsDir, "current/VERSION").exists());
+      assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
+      assertTrue(new File(nameOnlyDir, "current/VERSION").exists());
+      assertTrue(new File(editsOnlyDir, "current/VERSION").exists());

       fileSys = cluster.getFileSystem();

-      try {
       assertTrue(fileSys.exists(file1));
       checkFile(fileSys, file1, replication);
       cleanupFile(fileSys, file1);
@@ -389,11 +409,13 @@ public class TestNameEditsConfigs {
       cluster.shutdown();
     }

+    // 3
     // Now remove common directory both have and start namenode with
     // separate name and edits dirs
+    try {
     conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath());
     replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(NUM_DATA_NODES)
@@ -403,8 +425,7 @@ public class TestNameEditsConfigs {
       cluster.waitActive();
       fileSys = cluster.getFileSystem();

-      try {
-      assertTrue(!fileSys.exists(file1));
+      assertFalse(fileSys.exists(file1));
       assertTrue(fileSys.exists(file2));
       checkFile(fileSys, file2, replication);
       cleanupFile(fileSys, file2);
@@ -415,11 +436,12 @@ public class TestNameEditsConfigs {
       cluster.shutdown();
     }

+    // 4
     // Add old shared directory for name and edits along with latest name
     conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath() + "," +
-        nameAndEdits.getPath());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath() + "," +
+        nameAndEditsDir.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
     replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
     try {
       cluster = new MiniDFSCluster.Builder(conf)
@@ -427,21 +449,25 @@ public class TestNameEditsConfigs {
         .format(false)
         .manageNameDfsDirs(false)
         .build();
-      assertTrue(false);
+      fail("Successfully started cluster but should not have been able to.");
     } catch (IOException e) { // expect to fail
-      System.out.println("cluster start failed due to missing " +
-          "latest edits dir");
+      LOG.info("EXPECTED: cluster start failed due to missing " +
+          "latest edits dir", e);
     } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
       cluster = null;
     }

+    // 5
     // Add old shared directory for name and edits along with latest edits.
     // This is OK, since the latest edits will have segments leading all
     // the way from the image in name_and_edits.
     conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath() +
-        "," + nameAndEdits.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath() +
+        "," + nameAndEditsDir.getPath());
     replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
     try {
       cluster = new MiniDFSCluster.Builder(conf)
@@ -449,14 +475,16 @@ public class TestNameEditsConfigs {
         .format(false)
         .manageNameDfsDirs(false)
         .build();
-      assertTrue(!fileSys.exists(file1));
-      assertTrue(fileSys.exists(file2));
-      checkFile(fileSys, file2, replication);
-      cleanupFile(fileSys, file2);
+
+      fileSys = cluster.getFileSystem();
+
+      assertFalse(fileSys.exists(file1));
+      assertFalse(fileSys.exists(file2));
+      assertTrue(fileSys.exists(file3));
+      checkFile(fileSys, file3, replication);
+      cleanupFile(fileSys, file3);
       writeFile(fileSys, file3, replication);
       checkFile(fileSys, file3, replication);
-    } catch (IOException e) { // expect to fail
-      System.out.println("cluster start failed due to missing latest name dir");
     } finally {
       fileSys.close();
       cluster.shutdown();
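
A related cleanup visible in the hunks above: each try block now opens before the MiniDFSCluster is built, and the expected-failure case null-checks and shuts down any cluster that was unexpectedly created. A minimal sketch of that lifecycle discipline follows; the runAgainstCluster() wrapper and the single-DataNode sizing are illustrative assumptions, not from the patch.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ClusterLifecycleExample {
      // Open the try before build() so that a failure at any point after
      // construction still reaches the finally block; null-check before
      // shutdown so a failed build() cannot turn into an NPE that masks
      // the original exception.
      public void runAgainstCluster() throws IOException {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = null;
        try {
          cluster = new MiniDFSCluster.Builder(conf)
              .numDataNodes(1)
              .build();
          cluster.waitActive();
          // ... exercise the file system here ...
        } finally {
          if (cluster != null) {
            cluster.shutdown();
          }
        }
      }
    }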