HDFS-2414. Fix TestDFSRollback to avoid spurious failures. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1180540 13f79535-47bb-0310-9956-ffa450edef68
Author: Todd Lipcon
Date:   2011-10-09 03:58:57 +00:00
Commit: 9cea634f7d
Parent: 9993ecab87

5 changed files with 73 additions and 15 deletions

File: CHANGES.txt

@@ -1032,6 +1032,8 @@ Release 0.23.0 - Unreleased
     HDFS-2412. Add backwards-compatibility layer for renamed FSConstants
     class (todd)
 
+    HDFS-2414. Fix TestDFSRollback to avoid spurious failures. (todd)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.

File: TestDFSRollback.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 
 /**
@@ -263,10 +264,14 @@ public class TestDFSRollback extends TestCase {
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
       for (File f : baseDirs) {
-        UpgradeUtilities.corruptFile(new File(f,"VERSION"));
+        UpgradeUtilities.corruptFile(
+            new File(f,"VERSION"),
+            "layoutVersion".getBytes(Charsets.UTF_8),
+            "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
       }
       startNameNodeShouldFail(StartupOption.ROLLBACK,
           "file VERSION has layoutVersion missing");
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
 
       log("NameNode rollback with old layout version in previous", numDirs);

File: TestDFSUpgrade.java

@@ -39,6 +39,7 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 
 import static org.junit.Assert.*;
@@ -303,7 +304,10 @@ public class TestDFSUpgrade {
     log("NameNode upgrade with corrupt version file", numDirs);
     baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
     for (File f : baseDirs) {
-      UpgradeUtilities.corruptFile(new File (f,"VERSION"));
+      UpgradeUtilities.corruptFile(
+          new File(f,"VERSION"),
+          "layoutVersion".getBytes(Charsets.UTF_8),
+          "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
     }
     startNameNodeShouldFail(StartupOption.UPGRADE);
     UpgradeUtilities.createEmptyDirs(nameNodeDirs);

File: UpgradeUtilities.java

@@ -24,10 +24,8 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.URI;
 import java.util.Arrays;
-import java.util.Random;
 import java.util.Collections;
 import java.util.zip.CRC32;
 import org.apache.hadoop.conf.Configuration;
@@ -53,6 +51,10 @@ import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 
+import com.google.common.base.Preconditions;
+import com.google.common.io.Files;
+import com.google.common.primitives.Bytes;
+
 /**
  * This class defines a number of static helper methods used by the
  * DFS Upgrade unit tests. By default, a singleton master populated storage
@@ -483,20 +485,26 @@ public class UpgradeUtilities {
    * @throws IllegalArgumentException if the given file is not a file
    * @throws IOException if an IOException occurs while reading or writing the file
    */
-  public static void corruptFile(File file) throws IOException {
+  public static void corruptFile(File file,
+      byte[] stringToCorrupt,
+      byte[] replacement) throws IOException {
+    Preconditions.checkArgument(replacement.length == stringToCorrupt.length);
     if (!file.isFile()) {
       throw new IllegalArgumentException(
           "Given argument is not a file:" + file);
     }
-    RandomAccessFile raf = new RandomAccessFile(file,"rws");
-    Random random = new Random();
-    for (long i = 0; i < raf.length(); i++) {
-      raf.seek(i);
-      if (random.nextBoolean()) {
-        raf.writeByte(random.nextInt());
-      }
-    }
-    raf.close();
+    byte[] data = Files.toByteArray(file);
+    int index = Bytes.indexOf(data, stringToCorrupt);
+    if (index == -1) {
+      throw new IOException(
+          "File " + file + " does not contain string " +
+          new String(stringToCorrupt));
+    }
+
+    for (int i = 0; i < stringToCorrupt.length; i++) {
+      data[index + i] = replacement[i];
+    }
+    Files.write(data, file);
   }
 
   /**
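For reference, Files.toByteArray, Bytes.indexOf, and Files.write above are Guava utilities; the Preconditions guard is what keeps the corruption predictable, since a same-length replacement preserves the file size and every other byte offset. A hypothetical call site (the storage path below is made up for illustration):

// Hypothetical usage of the new helper; the path is illustrative only.
File version = new File("/tmp/dfs/name1/previous/VERSION");
UpgradeUtilities.corruptFile(
    version,
    "layoutVersion".getBytes(Charsets.UTF_8),   // 13 bytes: the key to clobber
    "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));  // 13 bytes: same-length garbage
// A replacement of a different length would trip Preconditions.checkArgument
// before the file is touched.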

File: FSImageTestUtil.java

@@ -29,6 +29,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
@@ -233,7 +234,45 @@ public abstract class FSImageTestUtil {
       // recurse
       assertParallelFilesAreIdentical(sameNameList, ignoredFileNames);
     } else {
-      assertFileContentsSame(sameNameList.toArray(new File[0]));
+      if ("VERSION".equals(sameNameList.get(0).getName())) {
+        assertPropertiesFilesSame(sameNameList.toArray(new File[0]));
+      } else {
+        assertFileContentsSame(sameNameList.toArray(new File[0]));
+      }
+    }
+  }
+
+  /**
+   * Assert that a set of properties files all contain the same data.
+   * We cannot simply check the md5sums here, since Properties files
+   * contain timestamps -- thus, two properties files from the same
+   * saveNamespace operation may actually differ in md5sum.
+   * @param propFiles the files to compare
+   * @throws IOException if the files cannot be opened or read
+   * @throws AssertionError if the files differ
+   */
+  public static void assertPropertiesFilesSame(File[] propFiles)
+      throws IOException {
+    Set<Map.Entry<Object, Object>> prevProps = null;
+
+    for (File f : propFiles) {
+      Properties props;
+      FileInputStream is = new FileInputStream(f);
+      try {
+        props = new Properties();
+        props.load(is);
+      } finally {
+        IOUtils.closeStream(is);
+      }
+      if (prevProps == null) {
+        prevProps = props.entrySet();
+      } else {
+        Set<Entry<Object,Object>> diff =
+            Sets.symmetricDifference(prevProps, props.entrySet());
+        if (!diff.isEmpty()) {
+          fail("Properties file " + f + " differs from " + propFiles[0]);
+        }
       }
     }
   }
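The javadoc's md5sum caveat is easy to reproduce: Properties.store() prepends a timestamp comment, so two byte-for-byte dumps of the same map can differ even though the parsed entries are identical, which is exactly what the Sets.symmetricDifference check above relies on. A self-contained demo (a sketch, not part of the patch; the layoutVersion value is made up):

// PropsCompareDemo.java -- illustrative sketch of why byte comparison of
// Properties files is unreliable while entry-set comparison is not.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Arrays;
import java.util.Properties;

public class PropsCompareDemo {
  public static void main(String[] args) throws Exception {
    Properties p = new Properties();
    p.setProperty("layoutVersion", "-38");

    ByteArrayOutputStream a = new ByteArrayOutputStream();
    ByteArrayOutputStream b = new ByteArrayOutputStream();
    p.store(a, null);
    Thread.sleep(1000);  // let the timestamp comment advance by a second
    p.store(b, null);

    // false: the raw bytes differ because of the timestamp comment line
    System.out.println(Arrays.equals(a.toByteArray(), b.toByteArray()));

    Properties pa = new Properties();
    Properties pb = new Properties();
    pa.load(new ByteArrayInputStream(a.toByteArray()));
    pb.load(new ByteArrayInputStream(b.toByteArray()));

    // true: the parsed entries are identical, as assertPropertiesFilesSame expects
    System.out.println(pa.entrySet().equals(pb.entrySet()));
  }
}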