diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 04feab0801f..2bd7abd3b11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -387,6 +387,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6232. OfflineEditsViewer throws a NPE on edits containing ACL
     modifications (ajisakaa via cmccabe)
 
+    HDFS-6236. ImageServlet should use Time#monotonicNow to measure latency.
+    (cnauroth)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index fb765680f5c..d406a15118a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.util.Time.now;
+import static org.apache.hadoop.util.Time.monotonicNow;
 
 import java.net.HttpURLConnection;
 import java.security.PrivilegedExceptionAction;
@@ -116,11 +116,11 @@ public Void run() throws Exception {
             throw new IOException(errorMessage);
           }
           CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
-          long start = now();
+          long start = monotonicNow();
           serveFile(imageFile);
 
           if (metrics != null) { // Metrics non-null only when used inside name node
-            long elapsed = now() - start;
+            long elapsed = monotonicNow() - start;
             metrics.addGetImage(elapsed);
           }
         } else if (parsedParams.isGetEdit()) {
@@ -129,11 +129,11 @@ public Void run() throws Exception {
 
           File editFile = nnImage.getStorage()
               .findFinalizedEditsFile(startTxId, endTxId);
-          long start = now();
+          long start = monotonicNow();
           serveFile(editFile);
 
           if (metrics != null) { // Metrics non-null only when used inside name node
-            long elapsed = now() - start;
+            long elapsed = monotonicNow() - start;
             metrics.addGetEdit(elapsed);
           }
         }
@@ -469,7 +469,7 @@ public Void run() throws Exception {
 
             InputStream stream = request.getInputStream();
             try {
-              long start = now();
+              long start = monotonicNow();
               MD5Hash downloadImageDigest = TransferFsImage
                   .handleUploadImageRequest(request, txid,
                       nnImage.getStorage(), stream,
@@ -478,7 +478,7 @@ public Void run() throws Exception {
                   downloadImageDigest);
               // Metrics non-null only when used inside name node
              if (metrics != null) {
-                long elapsed = now() - start;
+                long elapsed = monotonicNow() - start;
                 metrics.addPutImage(elapsed);
               }
               // Now that we have a new checkpoint, we might be able to
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 73e837734a4..14d8edf2027 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1897,7 +1897,12 @@ public void testReformatNNBetweenCheckpoints() throws IOException {
         .format(true).build();
     int origPort = cluster.getNameNodePort();
     int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
-    secondary = startSecondaryNameNode(conf);
+    Configuration snnConf = new Configuration(conf);
+    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
+        "namesecondary");
+    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+        checkpointDir.getAbsolutePath());
+    secondary = startSecondaryNameNode(snnConf);
 
     // secondary checkpoints once
     secondary.doCheckpoint();
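
Note on the core change: the patch swaps the wall-clock Time#now for Time#monotonicNow when computing the elapsed times reported to metrics. Below is a minimal standalone Java sketch (not part of the patch) of why that matters; it assumes only the JDK, and relies on the fact that Hadoop's Time#now delegates to System.currentTimeMillis() while Time#monotonicNow derives from System.nanoTime().

// Contrast of the two clocks used for latency measurement. The wall clock
// can jump backward or forward (NTP sync, manual adjustment), so a
// "now() - start" duration can come out negative or wildly inflated.
// The monotonic clock only moves forward, so durations stay accurate.
public class ClockComparison {
  public static void main(String[] args) throws InterruptedException {
    long wallStart = System.currentTimeMillis();      // what Time.now() returns
    long monoStart = System.nanoTime() / 1_000_000L;  // what Time.monotonicNow() returns

    Thread.sleep(100); // stand-in for serveFile(...) or the image upload

    long wallElapsed = System.currentTimeMillis() - wallStart;
    long monoElapsed = System.nanoTime() / 1_000_000L - monoStart;

    // wallElapsed is corrupted if the system clock was adjusted during the
    // sleep; monoElapsed is a correct, non-negative duration either way.
    System.out.println("wall-clock elapsed: " + wallElapsed + " ms");
    System.out.println("monotonic elapsed:  " + monoElapsed + " ms");
  }
}

The TestCheckpoint change is separate test cleanup: it points the SecondaryNameNode at an explicit checkpoint directory under the MiniDFSCluster base directory, presumably so the test no longer writes to the default checkpoint location.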