From 4cf6bc415f8112001acac9e27af6c3cf3161634b Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Sun, 9 Oct 2011 03:59:21 +0000
Subject: [PATCH] HDFS-2414. Fix TestDFSRollback to avoid spurious failures.
 Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1180541 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 +
 .../apache/hadoop/hdfs/TestDFSRollback.java   |  7 +++-
 .../apache/hadoop/hdfs/TestDFSUpgrade.java    |  6 ++-
 .../apache/hadoop/hdfs/UpgradeUtilities.java  | 32 +++++++++------
 .../hdfs/server/namenode/FSImageTestUtil.java | 41 ++++++++++++++++++-
 5 files changed, 73 insertions(+), 15 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3aa6b1daa3b..090c9d9f3fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1128,6 +1128,8 @@ Release 0.23.0 - Unreleased
     HDFS-2412. Add backwards-compatibility layer for renamed FSConstants
     class (todd)
 
+    HDFS-2414. Fix TestDFSRollback to avoid spurious failures. (todd)
+
     BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index 95bf47f97c3..a8f814b6526 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 
 /**
@@ -263,10 +264,14 @@ public class TestDFSRollback extends TestCase {
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(
           nameNodeDirs, "previous");
       for (File f : baseDirs) {
-        UpgradeUtilities.corruptFile(new File(f,"VERSION"));
+        UpgradeUtilities.corruptFile(
+            new File(f,"VERSION"),
+            "layoutVersion".getBytes(Charsets.UTF_8),
+            "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
       }
       startNameNodeShouldFail(StartupOption.ROLLBACK,
           "file VERSION has layoutVersion missing");
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
 
       log("NameNode rollback with old layout version in previous", numDirs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 251f23dee70..a308c230cb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -39,6 +39,7 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
+import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 
 import static org.junit.Assert.*;
@@ -303,7 +304,10 @@ public class TestDFSUpgrade {
       log("NameNode upgrade with corrupt version file", numDirs);
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       for (File f : baseDirs) {
-        UpgradeUtilities.corruptFile(new File (f,"VERSION"));
+        UpgradeUtilities.corruptFile(
+            new File(f,"VERSION"),
+            "layoutVersion".getBytes(Charsets.UTF_8),
+            "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
       }
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 337fa8a17c0..0b6bceafafc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -24,10 +24,8 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
 import java.net.URI;
 import java.util.Arrays;
-import java.util.Random;
 import java.util.Collections;
 import java.util.zip.CRC32;
 import org.apache.hadoop.conf.Configuration;
@@ -53,6 +51,10 @@ import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 
+import com.google.common.base.Preconditions;
+import com.google.common.io.Files;
+import com.google.common.primitives.Bytes;
+
 /**
  * This class defines a number of static helper methods used by the
  * DFS Upgrade unit tests. By default, a singleton master populated storage
@@ -483,20 +485,26 @@ public class UpgradeUtilities {
    * @throws IllegalArgumentException if the given file is not a file
    * @throws IOException if an IOException occurs while reading or writing the file
    */
-  public static void corruptFile(File file) throws IOException {
+  public static void corruptFile(File file,
+      byte[] stringToCorrupt,
+      byte[] replacement) throws IOException {
+    Preconditions.checkArgument(replacement.length == stringToCorrupt.length);
     if (!file.isFile()) {
       throw new IllegalArgumentException(
-                                         "Given argument is not a file:" + file);
+          "Given argument is not a file:" + file);
     }
-    RandomAccessFile raf = new RandomAccessFile(file,"rws");
-    Random random = new Random();
-    for (long i = 0; i < raf.length(); i++) {
-      raf.seek(i);
-      if (random.nextBoolean()) {
-        raf.writeByte(random.nextInt());
-      }
+    byte[] data = Files.toByteArray(file);
+    int index = Bytes.indexOf(data, stringToCorrupt);
+    if (index == -1) {
+      throw new IOException(
+          "File " + file + " does not contain string " +
+          new String(stringToCorrupt));
     }
-    raf.close();
+
+    for (int i = 0; i < stringToCorrupt.length; i++) {
+      data[index + i] = replacement[i];
+    }
+    Files.write(data, file);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 39e7db17dc7..49b2caf4d9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -29,6 +29,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
 
@@ -233,11 +234,49 @@ public abstract class FSImageTestUtil {
         // recurse
         assertParallelFilesAreIdentical(sameNameList, ignoredFileNames);
       } else {
-        assertFileContentsSame(sameNameList.toArray(new File[0]));
+        if ("VERSION".equals(sameNameList.get(0).getName())) {
+          assertPropertiesFilesSame(sameNameList.toArray(new File[0]));
+        } else {
+          assertFileContentsSame(sameNameList.toArray(new File[0]));
+        }
       }
     }
   }
 
+  /**
+   * Assert that a set of properties files all contain the same data.
+   * We cannot simply check the md5sums here, since Properties files
+   * contain timestamps -- thus, two properties files from the same
+   * saveNamespace operation may actually differ in md5sum.
+   * @param propFiles the files to compare
+   * @throws IOException if the files cannot be opened or read
+   * @throws AssertionError if the files differ
+   */
+  public static void assertPropertiesFilesSame(File[] propFiles)
+      throws IOException {
+    Set<Entry<Object, Object>> prevProps = null;
+
+    for (File f : propFiles) {
+      Properties props;
+      FileInputStream is = new FileInputStream(f);
+      try {
+        props = new Properties();
+        props.load(is);
+      } finally {
+        IOUtils.closeStream(is);
+      }
+      if (prevProps == null) {
+        prevProps = props.entrySet();
+      } else {
+        Set<Entry<Object, Object>> diff =
+            Sets.symmetricDifference(prevProps, props.entrySet());
+        if (!diff.isEmpty()) {
+          fail("Properties file " + f + " differs from " + propFiles[0]);
+        }
+      }
+    }
+  }
+
   /**
    * Assert that all of the given paths have the exact same
    * contents
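
The two sketches below are editorial additions, not part of the patch. First, a self-contained illustration of the deterministic-corruption technique the new UpgradeUtilities.corruptFile implements: the old version flipped random bytes, which need not break the layoutVersion field the test asserts on; the new version overwrites one known token with same-length junk, so the NameNode always fails the same way. The class name and the hand-rolled indexOf (a stand-in for Guava's Bytes.indexOf) are illustrative, not Hadoop code.

// Editorial sketch -- not part of the patch. JDK-only demonstration of
// deterministic file corruption: replace a known token in place rather
// than flipping random bytes.
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

public class CorruptFileDemo {

  static void corrupt(File file, byte[] target, byte[] replacement)
      throws IOException {
    if (target.length != replacement.length) {
      throw new IllegalArgumentException("replacement must match target length");
    }
    byte[] data = Files.readAllBytes(file.toPath());
    int index = indexOf(data, target);
    if (index == -1) {
      throw new IOException(file + " does not contain the target bytes");
    }
    // Same-length overwrite keeps every other byte intact, so exactly one
    // property key is destroyed and the failure mode is reproducible.
    System.arraycopy(replacement, 0, data, index, replacement.length);
    Files.write(file.toPath(), data);
  }

  // Naive first-occurrence search over byte arrays (stand-in for
  // com.google.common.primitives.Bytes.indexOf).
  static int indexOf(byte[] haystack, byte[] needle) {
    outer:
    for (int i = 0; i + needle.length <= haystack.length; i++) {
      for (int j = 0; j < needle.length; j++) {
        if (haystack[i + j] != needle[j]) {
          continue outer;
        }
      }
      return i;
    }
    return -1;
  }

  public static void main(String[] args) throws IOException {
    File version = File.createTempFile("VERSION", null);
    Files.write(version.toPath(),
        "layoutVersion=-38\nnamespaceID=42\n".getBytes(StandardCharsets.UTF_8));
    corrupt(version,
        "layoutVersion".getBytes(StandardCharsets.UTF_8),
        "xxxxxxxxxxxxx".getBytes(StandardCharsets.UTF_8));
    // Prints "xxxxxxxxxxxxx=-38" on the first line: the file still loads as
    // a Properties file, but the layoutVersion key is reliably gone.
    System.out.print(new String(
        Files.readAllBytes(version.toPath()), StandardCharsets.UTF_8));
  }
}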
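Second, a sketch of why the new assertPropertiesFilesSame compares entry sets instead of raw bytes: java.util.Properties.store() always writes a timestamp comment line, so two dumps of identical key/value data can differ byte-for-byte (and hence in md5sum), exactly as the javadoc above notes. The class name and the one-second sleep are illustrative.

// Editorial sketch -- not part of the patch. Properties.store() embeds a
// timestamp, so byte-level comparison of VERSION-style files is flaky
// while entrySet() comparison is stable.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Properties;

public class PropertiesCompareDemo {
  public static void main(String[] args)
      throws IOException, InterruptedException {
    Properties p = new Properties();
    p.setProperty("layoutVersion", "-38");

    ByteArrayOutputStream first = new ByteArrayOutputStream();
    p.store(first, null);
    Thread.sleep(1100); // the embedded date has one-second granularity
    ByteArrayOutputStream second = new ByteArrayOutputStream();
    p.store(second, null);

    // The raw bytes differ: the first line of each dump is "#<current date>".
    System.out.println("bytes equal:   "
        + Arrays.equals(first.toByteArray(), second.toByteArray()));

    // Reloading and comparing entry sets, as assertPropertiesFilesSame does,
    // ignores the timestamp comment and sees identical contents.
    Properties a = new Properties();
    a.load(new ByteArrayInputStream(first.toByteArray()));
    Properties b = new Properties();
    b.load(new ByteArrayInputStream(second.toByteArray()));
    System.out.println("entries equal: " + a.entrySet().equals(b.entrySet()));
  }
}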