HDFS-6236. Merging change r1586902 from trunk to branch-2.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1586903 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Chris Nauroth 2014-04-12 21:46:13 +00:00
parent 755e8c684c
commit 3672935a18
3 changed files with 16 additions and 8 deletions

View File

@@ -137,6 +137,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6232. OfflineEditsViewer throws a NPE on edits containing ACL
     modifications (ajisakaa via cmccabe)
+    HDFS-6236. ImageServlet should use Time#monotonicNow to measure latency.
+    (cnauroth)
 Release 2.4.0 - 2014-04-07
   INCOMPATIBLE CHANGES

View File

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
-import static org.apache.hadoop.util.Time.now;
+import static org.apache.hadoop.util.Time.monotonicNow;
 import java.net.HttpURLConnection;
 import java.security.PrivilegedExceptionAction;
@@ -116,11 +116,11 @@ public Void run() throws Exception {
         throw new IOException(errorMessage);
       }
       CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
-      long start = now();
+      long start = monotonicNow();
       serveFile(imageFile);
       if (metrics != null) { // Metrics non-null only when used inside name node
-        long elapsed = now() - start;
+        long elapsed = monotonicNow() - start;
         metrics.addGetImage(elapsed);
       }
     } else if (parsedParams.isGetEdit()) {
@@ -129,11 +129,11 @@ public Void run() throws Exception {
       File editFile = nnImage.getStorage()
           .findFinalizedEditsFile(startTxId, endTxId);
-      long start = now();
+      long start = monotonicNow();
       serveFile(editFile);
       if (metrics != null) { // Metrics non-null only when used inside name node
-        long elapsed = now() - start;
+        long elapsed = monotonicNow() - start;
         metrics.addGetEdit(elapsed);
       }
     }
@@ -469,7 +469,7 @@ public Void run() throws Exception {
       InputStream stream = request.getInputStream();
       try {
-        long start = now();
+        long start = monotonicNow();
         MD5Hash downloadImageDigest = TransferFsImage
             .handleUploadImageRequest(request, txid,
                 nnImage.getStorage(), stream,
@@ -478,7 +478,7 @@ public Void run() throws Exception {
                 downloadImageDigest);
         // Metrics non-null only when used inside name node
         if (metrics != null) {
-          long elapsed = now() - start;
+          long elapsed = monotonicNow() - start;
           metrics.addPutImage(elapsed);
         }
         // Now that we have a new checkpoint, we might be able to

View File

@@ -1900,7 +1900,12 @@ public void testReformatNNBetweenCheckpoints() throws IOException {
         .format(true).build();
     int origPort = cluster.getNameNodePort();
     int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
-    secondary = startSecondaryNameNode(conf);
+    Configuration snnConf = new Configuration(conf);
+    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
+        "namesecondary");
+    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+        checkpointDir.getAbsolutePath());
+    secondary = startSecondaryNameNode(snnConf);
     // secondary checkpoints once
     secondary.doCheckpoint();