From f36a6c55f2383e8faf0fec8a45e1751df90ab077 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Thu, 15 Aug 2013 18:27:55 +0000
Subject: [PATCH] HDFS-5076. Merge change r1514422 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1514429 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |   3 +
 .../hdfs/qjournal/server/JournalNode.java        |  52 ++++++++-
 .../qjournal/server/JournalNodeMXBean.java       |  36 ++++++
 .../hdfs/server/namenode/FSNamesystem.java       |  10 ++
 .../hdfs/server/namenode/NameNodeMXBean.java     |   6 +
 .../server/TestJournalNodeMXBean.java            | 107 ++++++++++++++++++
 .../server/namenode/TestNameNodeMXBean.java      |   5 +
 7 files changed, 218 insertions(+), 1 deletion(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cb279ed9508..02c4b6e4e35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -62,6 +62,9 @@ Release 2.1.1-beta - UNRELEASED
 
   HDFS-4763 Add script changes/utility for starting NFS gateway (brandonli)
 
+  HDFS-5076 Add MXBean methods to query NN's transaction information and
+  JournalNode's journal status. (jing9)
+
   IMPROVEMENTS
 
   HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index 8291b5932eb..4ed4244ac16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.qjournal.server;
 
 import java.io.File;
+import java.io.FileFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -34,11 +36,13 @@ import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.mortbay.util.ajax.JSON;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
@@ -51,7 +55,7 @@ import com.google.common.collect.Maps;
  * in the quorum protocol.
  */
 @InterfaceAudience.Private
-public class JournalNode implements Tool, Configurable {
+public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public static final Log LOG = LogFactory.getLog(JournalNode.class);
   private Configuration conf;
   private JournalNodeRpcServer rpcServer;
@@ -128,6 +132,8 @@ public class JournalNode implements Tool, Configurable {
     SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
 
+    registerJNMXBean();
+
     httpServer = new JournalNodeHttpServer(conf, this);
     httpServer.start();
 
@@ -208,6 +214,50 @@ public class JournalNode implements Tool, Configurable {
     return new File(new File(dir), jid);
   }
 
+  @Override // JournalNodeMXBean
+  public String getJournalsStatus() {
+    // jid:{Formatted:True/False}
+    Map<String, Map<String, String>> status =
+        new HashMap<String, Map<String, String>>();
+    synchronized (this) {
+      for (Map.Entry<String, Journal> entry : journalsById.entrySet()) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted()));
+        status.put(entry.getKey(), jMap);
+      }
+    }
+
+    // It is possible that some journals have been formatted before, while the
+    // corresponding journals are not in journalsById yet (because of restarting
+    // JN, e.g.). For simplicity, let's just assume a journal is formatted if
+    // there is a directory for it. We can also call analyzeStorage method for
+    // these directories if necessary.
+    // Also note that we do not need to check localDir here since
+    // validateAndCreateJournalDir has been called before we register the
+    // MXBean.
+    File[] journalDirs = localDir.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(File file) {
+        return file.isDirectory();
+      }
+    });
+    for (File journalDir : journalDirs) {
+      String jid = journalDir.getName();
+      if (!status.containsKey(jid)) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", "true");
+        status.put(jid, jMap);
+      }
+    }
+    return JSON.toString(status);
+  }
+
+  /**
+   * Register JournalNodeMXBean
+   */
+  private void registerJNMXBean() {
+    MBeans.register("JournalNode", "JournalNodeInfo", this);
+  }
 
   private class ErrorReporter implements StorageErrorReporter {
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
new file mode 100644
index 00000000000..4e8d9da50f9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is the JMX management interface for JournalNode information
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface JournalNodeMXBean {
+
+  /**
+   * Get status information (e.g., whether formatted) of JournalNode's journals.
+   *
+   * @return A string presenting status for each journal
+   */
+  public String getJournalsStatus();
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 20993a7cbc1..6a996df6571 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6347,6 +6347,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return JSON.toString(jasList);
   }
 
+  @Override // NameNodeMxBean
+  public String getJournalTransactionInfo() {
+    Map<String, String> txnIdMap = new HashMap<String, String>();
+    txnIdMap.put("LastAppliedOrWrittenTxId",
+        Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId()));
+    txnIdMap.put("MostRecentCheckpointTxId",
+        Long.toString(this.getFSImage().getMostRecentCheckpointTxId()));
+    return JSON.toString(txnIdMap);
+  }
+
   @Override // NameNodeMXBean
   public String getNNStarted() {
     return getStartTime().toString();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 50315a4ae67..173d5aea4c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -188,6 +188,12 @@ public interface NameNodeMXBean {
    * @return the name journal status information, as a JSON string.
    */
   public String getNameJournalStatus();
+
+  /**
+   * Get information about the transaction ID, including the last applied
+   * transaction ID and the most recent checkpoint's transaction ID
+   */
+  public String getJournalTransactionInfo();
 
   /**
    * Gets the NN start time
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
new file mode 100644
index 00000000000..347184870f6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
+/**
+ * Test {@link JournalNodeMXBean}
+ */
+public class TestJournalNodeMXBean {
+
+  private static final String NAMESERVICE = "ns1";
+  private static final int NUM_JN = 1;
+
+  private MiniJournalCluster jCluster;
+  private JournalNode jn;
+
+  @Before
+  public void setup() throws IOException {
+    // start 1 journal node
+    jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+  }
+
+  @After
+  public void cleanup() throws IOException {
+    if (jCluster != null) {
+      jCluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testJournalNodeMXBean() throws Exception {
+    // we have not formatted the journals yet, and the journal status in jmx
+    // should be empty since journal objects are created lazily
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName mxbeanName = new ObjectName(
+        "Hadoop:service=JournalNode,name=JournalNodeInfo");
+
+    // getJournalsStatus
+    String journalStatus = (String) mbs.getAttribute(mxbeanName,
+        "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    assertFalse(journalStatus.contains(NAMESERVICE));
+
+    // format the journal ns1
+    final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster",
+        "my-bp", 0L);
+    jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
+
+    // check again after format
+    // getJournalsStatus
+    journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    Map<String, Map<String, String>> jMap = new HashMap<String, Map<String, String>>();
+    Map<String, String> infoMap = new HashMap<String, String>();
+    infoMap.put("Formatted", "true");
+    jMap.put(NAMESERVICE, infoMap);
+    assertEquals(JSON.toString(jMap), journalStatus);
+
+    // restart journal node without formatting
+    jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    // re-check
+    journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    jMap = new HashMap<String, Map<String, String>>();
+    infoMap = new HashMap<String, String>();
+    infoMap.put("Formatted", "true");
+    jMap.put(NAMESERVICE, infoMap);
+    assertEquals(JSON.toString(jMap), journalStatus);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 227d2cef402..8d188d7b651 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -120,6 +120,11 @@ public class TestNameNodeMXBean {
       String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
           "NameJournalStatus"));
       assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
+      // get attribute JournalTransactionInfo
+      String journalTxnInfo = (String) mbs.getAttribute(mxbeanName,
+          "JournalTransactionInfo");
+      assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(),
+          journalTxnInfo);
       // get attribute "NNStarted"
       String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted");
       assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted);
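
Usage note (not part of the patch): the sketch below shows one way to read the new attributes in-process through the platform MBeanServer, mirroring what TestJournalNodeMXBean and TestNameNodeMXBean do above. The JournalNode object name is taken from the test; the NameNode object name "Hadoop:service=NameNode,name=NameNodeInfo" and the class/method names here are illustrative assumptions, and the lookups only succeed inside a JVM where the corresponding bean is registered.

// Sketch: querying the attributes added by HDFS-5076 via JMX, assuming the
// beans are registered in the current JVM (e.g. in a test using
// MiniJournalCluster/MiniDFSCluster). Class and object names below are
// illustrative, not defined by this patch.
import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public class MXBeanQuerySketch {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

    // JournalNode: JSON map of journal id -> {"Formatted": "true"/"false"}
    ObjectName jnName =
        new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
    String journalsStatus = (String) mbs.getAttribute(jnName, "JournalsStatus");
    System.out.println("JournalsStatus = " + journalsStatus);

    // NameNode: JSON map with LastAppliedOrWrittenTxId and
    // MostRecentCheckpointTxId (object name assumed, not part of this patch)
    ObjectName nnName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String txnInfo = (String) mbs.getAttribute(nnName, "JournalTransactionInfo");
    System.out.println("JournalTransactionInfo = " + txnInfo);
  }
}

The same attributes should also show up in the daemons' JSON /jmx servlet output alongside the other JournalNodeInfo and NameNodeInfo fields, which makes them scriptable without a JMX client.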