HDFS-2414. Fix TestDFSRollback to avoid spurious failures. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1180541 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Todd Lipcon 2011-10-09 03:59:21 +00:00
parent 3ab2e79ad7
commit 4cf6bc415f
5 changed files with 73 additions and 15 deletions

View File

@ -1128,6 +1128,8 @@ Release 0.23.0 - Unreleased
HDFS-2412. Add backwards-compatibility layer for renamed FSConstants
class (todd)
HDFS-2414. Fix TestDFSRollback to avoid spurious failures. (todd)
BREAKDOWN OF HDFS-1073 SUBTASKS
HDFS-1521. Persist transaction ID on disk between NN restarts.

View File

@ -37,6 +37,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
/**
@ -263,10 +264,14 @@ public void testRollback() throws Exception {
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
for (File f : baseDirs) {
UpgradeUtilities.corruptFile(new File(f,"VERSION"));
UpgradeUtilities.corruptFile(
new File(f,"VERSION"),
"layoutVersion".getBytes(Charsets.UTF_8),
"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail(StartupOption.ROLLBACK,
"file VERSION has layoutVersion missing");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with old layout version in previous", numDirs);

View File

@ -39,6 +39,7 @@
import org.junit.Ignore;
import org.junit.Test;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import static org.junit.Assert.*;
@ -303,7 +304,10 @@ public void testUpgrade() throws Exception {
log("NameNode upgrade with corrupt version file", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
for (File f : baseDirs) {
UpgradeUtilities.corruptFile(new File (f,"VERSION"));
UpgradeUtilities.corruptFile(
new File(f,"VERSION"),
"layoutVersion".getBytes(Charsets.UTF_8),
"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);

View File

@ -24,10 +24,8 @@
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.URI;
import java.util.Arrays;
import java.util.Random;
import java.util.Collections;
import java.util.zip.CRC32;
import org.apache.hadoop.conf.Configuration;
@ -53,6 +51,10 @@
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import com.google.common.base.Preconditions;
import com.google.common.io.Files;
import com.google.common.primitives.Bytes;
/**
* This class defines a number of static helper methods used by the
* DFS Upgrade unit tests. By default, a singleton master populated storage
@ -483,20 +485,26 @@ public static void createBlockPoolVersionFile(File bpDir,
* @throws IllegalArgumentException if the given file is not a file
* @throws IOException if an IOException occurs while reading or writing the file
*/
public static void corruptFile(File file) throws IOException {
public static void corruptFile(File file,
byte[] stringToCorrupt,
byte[] replacement) throws IOException {
Preconditions.checkArgument(replacement.length == stringToCorrupt.length);
if (!file.isFile()) {
throw new IllegalArgumentException(
"Given argument is not a file:" + file);
}
RandomAccessFile raf = new RandomAccessFile(file,"rws");
Random random = new Random();
for (long i = 0; i < raf.length(); i++) {
raf.seek(i);
if (random.nextBoolean()) {
raf.writeByte(random.nextInt());
byte[] data = Files.toByteArray(file);
int index = Bytes.indexOf(data, stringToCorrupt);
if (index == -1) {
throw new IOException(
"File " + file + " does not contain string " +
new String(stringToCorrupt));
}
for (int i = 0; i < stringToCorrupt.length; i++) {
data[index + i] = replacement[i];
}
raf.close();
Files.write(data, file);
}
/**

View File

@ -29,6 +29,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
@ -232,11 +233,49 @@ public static void assertParallelFilesAreIdentical(List<File> dirs,
if (sameNameList.get(0).isDirectory()) {
// recurse
assertParallelFilesAreIdentical(sameNameList, ignoredFileNames);
} else {
if ("VERSION".equals(sameNameList.get(0).getName())) {
assertPropertiesFilesSame(sameNameList.toArray(new File[0]));
} else {
assertFileContentsSame(sameNameList.toArray(new File[0]));
}
}
}
}
/**
* Assert that a set of properties files all contain the same data.
* We cannot simply check the md5sums here, since Properties files
* contain timestamps -- thus, two properties files from the same
* saveNamespace operation may actually differ in md5sum.
* @param propFiles the files to compare
* @throws IOException if the files cannot be opened or read
* @throws AssertionError if the files differ
*/
/**
 * Assert that every given properties file contains exactly the same set
 * of key/value entries as the first one. Checksum comparison is not
 * usable here because properties files embed a timestamp comment, so two
 * files written by the same saveNamespace can differ byte-wise while
 * being semantically identical.
 *
 * @param propFiles the files to compare; the first is the baseline
 * @throws IOException if any file cannot be opened or read
 * @throws AssertionError if any file's entries differ from the baseline
 */
public static void assertPropertiesFilesSame(File[] propFiles)
    throws IOException {
  Set<Map.Entry<Object, Object>> baseline = null;
  for (File propFile : propFiles) {
    Properties loaded = new Properties();
    FileInputStream in = new FileInputStream(propFile);
    try {
      loaded.load(in);
    } finally {
      IOUtils.closeStream(in);
    }
    if (baseline == null) {
      // First file establishes the expected entry set.
      baseline = loaded.entrySet();
    } else if (!Sets.symmetricDifference(baseline, loaded.entrySet())
        .isEmpty()) {
      fail("Properties file " + propFile + " differs from " + propFiles[0]);
    }
  }
}
/**
* Assert that all of the given paths have the exact same