From 5826b0c186f398a7930b89b3cb6c7034dc30fc4d Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Wed, 5 Jun 2013 23:42:11 +0000
Subject: [PATCH] HDFS-4850. Merging change r1490080 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1490084 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  3 +
 .../hdfs/server/namenode/INodesInPath.java          |  2 +-
 .../ImageLoaderCurrent.java                         | 57 +++++++++++------
 .../offlineImageViewer/ImageVisitor.java            |  2 +-
 .../TextWriterImageVisitor.java                     |  1 -
 .../offlineImageViewer/XmlImageVisitor.java         |  2 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java         | 21 +++++++
 .../namenode/snapshot/TestSnapshot.java             | 61 +++++++++++++++++--
 .../TestOfflineImageViewer.java                     | 26 ++------
 9 files changed, 125 insertions(+), 50 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ca3a5ddc06..a71b31c897a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -829,6 +829,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4826. TestNestedSnapshots times out due to repeated slow edit log
     flushes when running on virtualized disk. (Chris Nauroth via szetszwo)
 
+    HDFS-4850. Fix OfflineImageViewer to work on fsimages with empty files or
+    snapshots. (jing9)
+
 Release 2.0.5-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index a1d77687071..accfe4a9a48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -186,7 +186,7 @@ static INodesInPath resolve(final INodeDirectory startingDir,
 
       // check if the next byte[] in components is for ".snapshot"
       if (isDotSnapshotDir(childName)
-          && isDir && dir instanceof INodeDirectoryWithSnapshot) {
+          && isDir && dir instanceof INodeDirectorySnapshottable) {
         // skip the ".snapshot" in components
         count++;
         index++;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
index c931af5be5f..1ee70ffd522 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
@@ -177,7 +177,11 @@ public void loadImage(DataInputStream in, ImageVisitor v,
           imageVersion);
       if (supportSnapshot) {
         v.visit(ImageElement.SNAPSHOT_COUNTER, in.readInt());
-        v.visit(ImageElement.NUM_SNAPSHOTS_TOTAL, in.readInt());
+        int numSnapshots = in.readInt();
+        v.visit(ImageElement.NUM_SNAPSHOTS_TOTAL, numSnapshots);
+        for (int i = 0; i < numSnapshots; i++) {
+          processSnapshot(in, v);
+        }
       }
 
       if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
@@ -335,8 +339,8 @@ private void processBlocks(DataInputStream in, ImageVisitor v,
     v.visitEnclosingElement(ImageElement.BLOCKS,
                             ImageElement.NUM_BLOCKS, numBlocks);
 
-    // directory or symlink, no blocks to process
-    if(numBlocks == -1 || numBlocks == -2) {
+    // directory or symlink or reference node, no blocks to process
+    if(numBlocks < 0) {
       v.leaveEnclosingElement(); // Blocks
       return;
     }
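The two hunks above carry the heart of the fix. In this image format, numBlocks doubles as an inode type tag, and the viewer previously mishandled two of its values, matching the two failure modes in the JIRA title: processBlocks treated only -1 and -2 as "no blocks" and so choked on -3 (reference nodes, introduced with the snapshot feature), while processINode (further down) classified files with numBlocks > 0, skipping empty files whose block count is 0. A sketch of the convention, with a helper name of our own invention:

    // Sketch only, not part of the patch: how numBlocks encodes inode type.
    static String inodeKind(int numBlocks) {
      if (numBlocks >= 0) return "file";        // includes empty files (0 blocks)
      if (numBlocks == -1) return "directory";
      if (numBlocks == -2) return "symlink";
      if (numBlocks == -3) return "reference";  // added by the snapshot feature
      throw new IllegalArgumentException("unexpected numBlocks: " + numBlocks);
    }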
@@ -484,10 +488,6 @@ private void processSnapshots(DataInputStream in, ImageVisitor v,
       // process snapshot
       v.visitEnclosingElement(ImageElement.SNAPSHOT);
       v.visit(ImageElement.SNAPSHOT_ID, in.readInt());
-      // process root of snapshot
-      v.visitEnclosingElement(ImageElement.SNAPSHOT_ROOT);
-      processINode(in, v, true, rootName, false);
-      v.leaveEnclosingElement();
       v.leaveEnclosingElement();
     }
     v.visit(ImageElement.SNAPSHOT_QUOTA, in.readInt());
@@ -495,6 +495,17 @@ private void processSnapshots(DataInputStream in, ImageVisitor v,
     }
   }
 
+  private void processSnapshot(DataInputStream in, ImageVisitor v)
+      throws IOException {
+    v.visitEnclosingElement(ImageElement.SNAPSHOT);
+    v.visit(ImageElement.SNAPSHOT_ID, in.readInt());
+    // process root of snapshot
+    v.visitEnclosingElement(ImageElement.SNAPSHOT_ROOT);
+    processINode(in, v, true, "", false);
+    v.leaveEnclosingElement();
+    v.leaveEnclosingElement();
+  }
+
   private void processDirectoryDiffList(DataInputStream in, ImageVisitor v,
       String currentINodeName) throws IOException {
     final int numDirDiff = in.readInt();
@@ -512,8 +523,8 @@ private void processDirectoryDiffList(DataInputStream in, ImageVisitor v,
   private void processDirectoryDiff(DataInputStream in, ImageVisitor v,
       String currentINodeName) throws IOException {
     v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF);
-    String snapshot = FSImageSerialization.readString(in);
-    v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTROOT, snapshot);
+    int snapshotId = in.readInt();
+    v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
     v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, in.readInt());
 
     // process snapshotINode
@@ -617,7 +628,7 @@ private void processINode(DataInputStream in, ImageVisitor v,
 
     processBlocks(in, v, numBlocks, skipBlocks);
 
-    if (numBlocks > 0) { // File
+    if (numBlocks >= 0) { // File
       if (supportSnapshot) {
         // process file diffs
         processFileDiffList(in, v, parentName);
@@ -631,6 +642,7 @@ private void processINode(DataInputStream in, ImageVisitor v,
           }
         }
       }
+      processPermission(in, v);
     } else if (numBlocks == -1) { // Directory
       if (supportSnapshot && supportInodeId) {
         dirNodeMap.put(inodeId, pathName);
@@ -647,6 +659,7 @@ private void processINode(DataInputStream in, ImageVisitor v,
           v.visit(ImageElement.IS_SNAPSHOTTABLE_DIR, Boolean.toString(snapshottable));
         }
       }
+      processPermission(in, v);
     } else if (numBlocks == -2) {
       v.visit(ImageElement.SYMLINK, Text.readString(in));
     } else if (numBlocks == -3) { // reference node
@@ -668,7 +681,6 @@ private void processINode(DataInputStream in, ImageVisitor v,
       }
     }
 
-    processPermission(in, v);
     v.leaveEnclosingElement(); // INode
   }
 
@@ -678,18 +690,27 @@ private void processFileDiffList(DataInputStream in, ImageVisitor v,
     if (size >= 0) {
       v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFFS,
           ImageElement.NUM_SNAPSHOT_FILE_DIFF, size);
-      String snapshot = FSImageSerialization.readString(in);
-      v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTROOT, snapshot);
-      v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
-      if (in.readBoolean()) {
-        v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE);
-        processINode(in, v, true, currentINodeName, true);
-        v.leaveEnclosingElement();
+      for (int i = 0; i < size; i++) {
+        processFileDiff(in, v, currentINodeName);
       }
       v.leaveEnclosingElement();
     }
   }
 
+  private void processFileDiff(DataInputStream in, ImageVisitor v,
+      String currentINodeName) throws IOException {
+    int snapshotId = in.readInt();
+    v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
+        ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
+    v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
+    if (in.readBoolean()) {
+      v.visitEnclosingElement(ImageElement.SNAPSHOT_DIFF_SNAPSHOTINODE);
+      processINode(in, v, true, currentINodeName, true);
+      v.leaveEnclosingElement();
+    }
+    v.leaveEnclosingElement();
+  }
+
   /**
    * Helper method to format dates during processing.
    * @param date Date as read from image file
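Summing up the loader changes: snapshots are now read inline in loadImage, one record per snapshot (id plus root inode, with an empty parent path); directory and file diffs identify their snapshot by integer id instead of a root-path string; and a file's diff list is actually iterated size times, where the old code read a single diff no matter what size said. A compressed paraphrase of the record shapes this implies, with readINode standing in for the real processINode (this class is ours, not from the patch):

    import java.io.DataInputStream;
    import java.io.IOException;

    class SnapshotRecordShapes {
      void readSnapshotSection(DataInputStream in) throws IOException {
        int snapshotCounter = in.readInt();   // SNAPSHOT_COUNTER
        int numSnapshots = in.readInt();      // NUM_SNAPSHOTS_TOTAL
        for (int i = 0; i < numSnapshots; i++) {
          int snapshotId = in.readInt();      // SNAPSHOT_ID
          readINode(in);                      // root inode of this snapshot
        }
      }

      void readFileDiff(DataInputStream in) throws IOException {
        int snapshotId = in.readInt();        // an id now, not a path string
        long fileSize = in.readLong();        // SNAPSHOT_FILE_SIZE
        if (in.readBoolean()) {               // optional snapshot copy of the inode
          readINode(in);
        }
      }

      void readINode(DataInputStream in) throws IOException {
        // stand-in for ImageLoaderCurrent.processINode(...)
      }
    }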
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
index fe2ea862465..a3a7781b01c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
@@ -95,7 +95,7 @@ public enum ImageElement {
     NUM_SNAPSHOT_DIR_DIFF,
     SNAPSHOT_DIR_DIFFS,
     SNAPSHOT_DIR_DIFF,
-    SNAPSHOT_DIFF_SNAPSHOTROOT,
+    SNAPSHOT_DIFF_SNAPSHOTID,
     SNAPSHOT_DIR_DIFF_CHILDREN_SIZE,
     SNAPSHOT_DIFF_SNAPSHOTINODE,
     SNAPSHOT_DIR_DIFF_CREATEDLIST,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java
index 652d0100485..972701b60c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import java.io.FileOutputStream;
-import java.io.FileWriter;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
index 8efc519048d..939eb0c8654 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
@@ -24,7 +24,7 @@
  * An XmlImageVisitor walks over an fsimage structure and writes out
  * an equivalent XML document that contains the fsimage's components.
  */
-class XmlImageVisitor extends TextWriterImageVisitor {
+public class XmlImageVisitor extends TextWriterImageVisitor {
   final private LinkedList<ImageElement> tagQ = new LinkedList<ImageElement>();
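XmlImageVisitor was package-private before this hunk; it becomes public so that the new test in org.apache.hadoop.hdfs.server.namenode.snapshot (below) can instantiate it directly. Programmatic use then looks roughly like this, with made-up paths and the same constructor arguments the test passes:

    XmlImageVisitor v = new XmlImageVisitor("/tmp/fsimage.xml", true);
    OfflineImageViewer oiv = new OfflineImageViewer("/tmp/fsimage", v, true);
    oiv.go();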
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 20a776594b8..d42956cf08d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -30,9 +30,11 @@
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.Socket;
@@ -844,6 +846,25 @@ public static DatanodeRegistration getLocalDatanodeRegistration() {
     return new DatanodeRegistration(getLocalDatanodeID(), new StorageInfo(),
         new ExportedBlockKeys(), VersionInfo.getVersion());
   }
+
+  /** Copy one file's contents into the other **/
+  public static void copyFile(File src, File dest) throws IOException {
+    InputStream in = null;
+    OutputStream out = null;
+
+    try {
+      in = new FileInputStream(src);
+      out = new FileOutputStream(dest);
+
+      byte [] b = new byte[1024];
+      while( in.read(b) > 0 ) {
+        out.write(b);
+      }
+    } finally {
+      if(in != null) in.close();
+      if(out != null) out.close();
+    }
+  }
 
   public static class Builder {
     private int maxLevels = 3;
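One caveat about the helper just added (its body is lifted verbatim from TestOfflineImageViewer; the matching deletion appears at the bottom of this patch): in.read(b) may return fewer than 1024 bytes, but out.write(b) always writes the whole buffer, so the destination can end up longer than the source with stale buffer bytes at the tail. The image-viewer tests happen to tolerate this. A length-aware variant, our suggestion rather than anything in the patch, would be:

    public static void copyFile(File src, File dest) throws IOException {
      InputStream in = null;
      OutputStream out = null;
      try {
        in = new FileInputStream(src);
        out = new FileOutputStream(dest);
        byte[] b = new byte[1024];
        int n;
        while ((n = in.read(b)) > 0) {
          out.write(b, 0, n);  // write only the bytes actually read
        }
      } finally {
        if (in != null) in.close();
        if (out != null) out.close();
      }
    }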
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index e47b861899b..b5c5fcafcd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -44,10 +45,13 @@
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
+import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer;
+import org.apache.hadoop.hdfs.tools.offlineImageViewer.XmlImageVisitor;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -68,7 +72,13 @@ public class TestSnapshot {
     SnapshotTestHelper.disableLogs();
   }
 
-  private static final long seed = Time.now();
+  private static final long seed;
+  private static final Random random;
+  static {
+    seed = Time.now();
+    random = new Random(seed);
+    System.out.println("Random seed: " + seed);
+  }
   protected static final short REPLICATION = 3;
   protected static final int BLOCKSIZE = 1024;
   /** The number of times snapshots are created for a snapshottable directory */
@@ -81,8 +91,6 @@ public class TestSnapshot {
   protected static FSNamesystem fsn;
   protected static FSDirectory fsdir;
   protected DistributedFileSystem hdfs;
-
-  private static Random random = new Random(seed);
 
   private static String testDir =
       System.getProperty("test.build.data", "build/test/data");
@@ -220,16 +228,57 @@ private void checkFSImage() throws Exception {
   @Test
   public void testSnapshot() throws Throwable {
     try {
-      runTestSnapshot();
+      runTestSnapshot(SNAPSHOT_ITERATION_NUMBER);
     } catch(Throwable t) {
       SnapshotTestHelper.LOG.info("FAILED", t);
       SnapshotTestHelper.dumpTree("FAILED", cluster);
       throw t;
     }
   }
+
+  /**
+   * Test if the OfflineImageViewer can correctly parse a fsimage containing
+   * snapshots
+   */
+  @Test
+  public void testOfflineImageViewer() throws Throwable {
+    runTestSnapshot(SNAPSHOT_ITERATION_NUMBER);
+
+    // retrieve the fsimage. Note that we already save namespace to fsimage at
+    // the end of each iteration of runTestSnapshot.
+    File originalFsimage = FSImageTestUtil.findLatestImageFile(
+        FSImageTestUtil.getFSImage(
+        cluster.getNameNode()).getStorage().getStorageDir(0));
+    assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
+
+    String ROOT = System.getProperty("test.build.data", "build/test/data");
+    File testFile = new File(ROOT, "/image");
+    String xmlImage = ROOT + "/image_xml";
+    boolean success = false;
+
+    try {
+      DFSTestUtil.copyFile(originalFsimage, testFile);
+      XmlImageVisitor v = new XmlImageVisitor(xmlImage, true);
+      OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v,
+          true);
+      oiv.go();
+      success = true;
+    } finally {
+      if (testFile.exists()) {
+        testFile.delete();
+      }
+      // delete the xml file if the parsing is successful
+      if (success) {
+        File xmlImageFile = new File(xmlImage);
+        if (xmlImageFile.exists()) {
+          xmlImageFile.delete();
+        }
+      }
+    }
+  }
 
-  private void runTestSnapshot() throws Exception {
-    for (int i = 0; i < SNAPSHOT_ITERATION_NUMBER; i++) {
+  private void runTestSnapshot(int iteration) throws Exception {
+    for (int i = 0; i < iteration; i++) {
       // create snapshot and check the creation
       cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
       TestDirectoryTree.Node[] ssNodes = createSnapshots();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 91a8b6186df..50e816417d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -173,7 +174,7 @@ public void outputOfLSVisitor() throws IOException {
     File outputFile = new File(ROOT, "/basicCheckOutput");
 
     try {
-      copyFile(originalFsimage, testFile);
+      DFSTestUtil.copyFile(originalFsimage, testFile);
 
       ImageVisitor v = new LsImageVisitor(outputFile.getPath(), true);
       OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, false);
@@ -366,25 +367,6 @@ private void copyPartOfFile(File src, File dest) throws IOException {
       if(out != null) out.close();
     }
   }
-
-  // Copy one file's contents into the other
-  private void copyFile(File src, File dest) throws IOException {
-    InputStream in = null;
-    OutputStream out = null;
-
-    try {
-      in = new FileInputStream(src);
-      out = new FileOutputStream(dest);
-
-      byte [] b = new byte[1024];
-      while( in.read(b) > 0 ) {
-        out.write(b);
-      }
-    } finally {
-      if(in != null) in.close();
-      if(out != null) out.close();
-    }
-  }
 
   @Test
   public void outputOfFileDistributionVisitor() throws IOException {
@@ -394,7 +376,7 @@ public void outputOfFileDistributionVisitor() throws IOException {
     int totalFiles = 0;
     BufferedReader reader = null;
     try {
-      copyFile(originalFsimage, testFile);
+      DFSTestUtil.copyFile(originalFsimage, testFile);
       ImageVisitor v = new FileDistributionVisitor(outputFile.getPath(), 0, 0);
       OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, false);
 
@@ -466,7 +448,7 @@ public void outputOfTestVisitor() throws IOException {
     File testFile = new File(ROOT, "/basicCheck");
 
     try {
-      copyFile(originalFsimage, testFile);
+      DFSTestUtil.copyFile(originalFsimage, testFile);
       TestImageVisitor v = new TestImageVisitor();
       OfflineImageViewer oiv = new OfflineImageViewer(testFile.getPath(), v, true);
       oiv.go();
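For checking an image by hand, the same code path is reachable through the offline image viewer CLI that ships with this branch; assuming the stock entry point and an illustrative path, something like

    bin/hdfs oiv -i /path/to/fsimage -o fsimage.xml -p XML

should now complete without errors on an fsimage that contains empty files or snapshots.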