diff --git a/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm index d3d22c91490..857b7317fa4 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm @@ -381,6 +381,7 @@ Administration Commands *-----------------+-----------------------------------------------------------+ | -metasave filename | Save Namenode's primary data structures to <filename> in | the directory specified by hadoop.log.dir property. + | <filename> is overwritten if it exists. | <filename> will contain one line for each of the following\ | 1. Datanodes heart beating with Namenode\ | 2. Blocks waiting to be replicated\ diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f7d49588407..5598a8f8da1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -449,6 +449,9 @@ Release 2.1.0-beta - 2013-07-02 HDFS-4992. Make balancer's mover thread count and dispatcher thread count configurable. (Max Lapan via szetszwo) + HDFS-4996. ClientProtocol#metaSave can be made idempotent by overwriting the + output file instead of appending to it. (cnauroth) + OPTIMIZATIONS HDFS-4465. Optimize datanode ReplicasMap and ReplicaInfo. 
(atm) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 08264e94d29..2b54007ac7c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1197,7 +1197,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, checkOperation(OperationCategory.UNCHECKED); File file = new File(System.getProperty("hadoop.log.dir"), filename); PrintWriter out = new PrintWriter(new BufferedWriter( - new OutputStreamWriter(new FileOutputStream(file, true), Charsets.UTF_8))); + new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8))); metaSave(out); out.flush(); out.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 2a8801ad9c8..98691df6a57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -628,6 +628,7 @@ public class DFSAdmin extends FsShell { String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" + "\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" + + "\t\t<filename> is overwritten if it exists.\n" + "\t\t<filename> will contain one line for each of the following\n" + "\t\t\t1. Datanodes heart beating with Namenode\n" + "\t\t\t2. 
Blocks waiting to be replicated\n" + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java index 1681bee8403..bb86618b179 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java @@ -18,9 +18,11 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.BufferedReader; import java.io.DataInputStream; +import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; @@ -28,6 +30,7 @@ import java.io.InputStreamReader; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -45,6 +48,7 @@ public class TestMetaSave { static final int blockSize = 8192; private static MiniDFSCluster cluster = null; private static FileSystem fileSys = null; + private static FSNamesystem namesystem = null; @BeforeClass public static void setUp() throws IOException { @@ -59,6 +63,7 @@ public class TestMetaSave { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build(); cluster.waitActive(); fileSys = cluster.getFileSystem(); + namesystem = cluster.getNamesystem(); } /** @@ -66,9 +71,6 @@ public class TestMetaSave { */ @Test public void testMetaSave() throws IOException, InterruptedException { - - final FSNamesystem namesystem = cluster.getNamesystem(); - for (int i = 0; i < 2; i++) { Path file = new Path("/filestatus" + i); DFSTestUtil.createFile(fileSys, file, 1024, 
1024, blockSize, (short) 2, @@ -83,9 +85,8 @@ public class TestMetaSave { namesystem.metaSave("metasave.out.txt"); // Verification - String logFile = System.getProperty("hadoop.log.dir") + "/" - + "metasave.out.txt"; - FileInputStream fstream = new FileInputStream(logFile); + FileInputStream fstream = new FileInputStream(getLogFile( + "metasave.out.txt")); DataInputStream in = new DataInputStream(fstream); BufferedReader reader = null; try { @@ -112,9 +113,6 @@ public class TestMetaSave { @Test public void testMetasaveAfterDelete() throws IOException, InterruptedException { - - final FSNamesystem namesystem = cluster.getNamesystem(); - for (int i = 0; i < 2; i++) { Path file = new Path("/filestatus" + i); DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2, @@ -131,11 +129,10 @@ public class TestMetaSave { namesystem.metaSave("metasaveAfterDelete.out.txt"); // Verification - String logFile = System.getProperty("hadoop.log.dir") + "/" - + "metasaveAfterDelete.out.txt"; BufferedReader reader = null; try { - FileInputStream fstream = new FileInputStream(logFile); + FileInputStream fstream = new FileInputStream(getLogFile( + "metasaveAfterDelete.out.txt")); DataInputStream in = new DataInputStream(fstream); reader = new BufferedReader(new InputStreamReader(in)); reader.readLine(); @@ -155,6 +152,42 @@ public class TestMetaSave { } } + /** + * Tests that metasave overwrites the output file (not append). + */ + @Test + public void testMetaSaveOverwrite() throws Exception { + // metaSave twice. + namesystem.metaSave("metaSaveOverwrite.out.txt"); + namesystem.metaSave("metaSaveOverwrite.out.txt"); + + // Read output file. 
+ FileInputStream fis = null; + InputStreamReader isr = null; + BufferedReader rdr = null; + try { + fis = new FileInputStream(getLogFile("metaSaveOverwrite.out.txt")); + isr = new InputStreamReader(fis); + rdr = new BufferedReader(isr); + + // Validate that file was overwritten (not appended) by checking for + // presence of only one "Live Datanodes" line. + boolean foundLiveDatanodesLine = false; + String line = rdr.readLine(); + while (line != null) { + if (line.startsWith("Live Datanodes")) { + if (foundLiveDatanodesLine) { + fail("multiple Live Datanodes lines, output file not overwritten"); + } + foundLiveDatanodesLine = true; + } + line = rdr.readLine(); + } + } finally { + IOUtils.cleanup(null, rdr, isr, fis); + } + } + @AfterClass public static void tearDown() throws IOException { if (fileSys != null) @@ -162,4 +195,14 @@ public class TestMetaSave { if (cluster != null) cluster.shutdown(); } + + /** + * Returns a File for the given name inside the log directory. + * + * @param name String file name + * @return File for given name inside log directory + */ + private static File getLogFile(String name) { + return new File(System.getProperty("hadoop.log.dir"), name); + } }