From 193f11a7ab539c360ecd9f2015c0f46cd070a875 Mon Sep 17 00:00:00 2001
From: Chris Nauroth
Date: Sat, 12 Apr 2014 21:38:35 +0000
Subject: [PATCH] HDFS-6236. ImageServlet should use Time#monotonicNow to
 measure latency. Contributed by Chris Nauroth.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1586902 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               |  3 +++
 .../hadoop/hdfs/server/namenode/ImageServlet.java         | 14 +++++++-------
 .../hdfs/server/namenode/TestCheckpoint.java              |  7 ++++++-
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 04feab0801f..2bd7abd3b11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -387,6 +387,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6232. OfflineEditsViewer throws a NPE on edits containing ACL
     modifications (ajisakaa via cmccabe)
 
+    HDFS-6236. ImageServlet should use Time#monotonicNow to measure latency.
+    (cnauroth)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index fb765680f5c..d406a15118a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.util.Time.now;
+import static org.apache.hadoop.util.Time.monotonicNow;
 
 import java.net.HttpURLConnection;
 import java.security.PrivilegedExceptionAction;
@@ -116,11 +116,11 @@ public class ImageServlet extends HttpServlet {
                 throw new IOException(errorMessage);
               }
               CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
-              long start = now();
+              long start = monotonicNow();
               serveFile(imageFile);
 
               if (metrics != null) { // Metrics non-null only when used inside name node
-                long elapsed = now() - start;
+                long elapsed = monotonicNow() - start;
                 metrics.addGetImage(elapsed);
               }
             } else if (parsedParams.isGetEdit()) {
@@ -129,11 +129,11 @@ public class ImageServlet extends HttpServlet {
 
               File editFile = nnImage.getStorage()
                   .findFinalizedEditsFile(startTxId, endTxId);
-              long start = now();
+              long start = monotonicNow();
               serveFile(editFile);
 
               if (metrics != null) { // Metrics non-null only when used inside name node
-                long elapsed = now() - start;
+                long elapsed = monotonicNow() - start;
                 metrics.addGetEdit(elapsed);
               }
             }
@@ -469,7 +469,7 @@ public class ImageServlet extends HttpServlet {
 
           InputStream stream = request.getInputStream();
           try {
-            long start = now();
+            long start = monotonicNow();
             MD5Hash downloadImageDigest = TransferFsImage
                 .handleUploadImageRequest(request, txid,
                     nnImage.getStorage(), stream,
@@ -478,7 +478,7 @@ public class ImageServlet extends HttpServlet {
                 downloadImageDigest);
             // Metrics non-null only when used inside name node
             if (metrics != null) {
-              long elapsed = now() - start;
+              long elapsed = monotonicNow() - start;
               metrics.addPutImage(elapsed);
             }
             // Now that we have a new checkpoint, we might be able to
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 73e837734a4..14d8edf2027 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1897,7 +1897,12 @@ public class TestCheckpoint {
           .format(true).build();
       int origPort = cluster.getNameNodePort();
       int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
-      secondary = startSecondaryNameNode(conf);
+      Configuration snnConf = new Configuration(conf);
+      File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
+          "namesecondary");
+      snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+          checkpointDir.getAbsolutePath());
+      secondary = startSecondaryNameNode(snnConf);
 
       // secondary checkpoints once
       secondary.doCheckpoint();
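--
Reviewer note (not part of the patch): the point of the change is that Time#now is backed by System.currentTimeMillis(), a wall-clock source that can be stepped backward or forward (for example, by NTP), so a latency computed as now() - start can come out negative or badly inflated. Time#monotonicNow is backed by System.nanoTime(), which only moves forward and is the right primitive for measuring elapsed time. A minimal standalone sketch of the difference, using only the JDK; the class and method names below are illustrative, not taken from the patch:

import java.util.concurrent.TimeUnit;

public class MonotonicLatencyDemo {

  /** Wall-clock timing: a clock step between the two reads can yield a
   *  negative or badly skewed "elapsed" value. This mirrors the pattern
   *  the patch removes. */
  static long elapsedWallClockMillis(Runnable work) {
    long start = System.currentTimeMillis();
    work.run();
    return System.currentTimeMillis() - start; // unsafe across clock adjustments
  }

  /** Monotonic timing: System.nanoTime() is unaffected by wall-clock
   *  adjustments, so the delta is always a valid duration. Hadoop's
   *  Time#monotonicNow wraps this same primitive. */
  static long elapsedMonotonicMillis(Runnable work) {
    long start = System.nanoTime();
    work.run();
    return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
  }

  public static void main(String[] args) {
    // Stand-in for the timed operations in ImageServlet (serveFile, image upload).
    Runnable work = () -> {
      try {
        Thread.sleep(50);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    };
    System.out.println("wall-clock elapsed ms: " + elapsedWallClockMillis(work));
    System.out.println("monotonic elapsed ms:  " + elapsedMonotonicMillis(work));
  }
}

The TestCheckpoint change is adjacent cleanup in the same commit: it gives the SecondaryNameNode an explicit checkpoint directory under the MiniDFSCluster base directory, which appears intended to keep its storage from colliding with other directories used by the test.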