HDFS-5076. Merge change r1514422 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1514429 13f79535-47bb-0310-9956-ffa450edef68
parent 5155be23e3
commit f36a6c55f2
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -62,6 +62,9 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-4763 Add script changes/utility for starting NFS gateway (brandonli)
 
+    HDFS-5076 Add MXBean methods to query NN's transaction information and
+    JournalNode's journal status. (jing9)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hdfs.qjournal.server;
 
 import java.io.File;
+import java.io.FileFilter;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.commons.logging.Log;
@@ -34,11 +36,13 @@ import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.mortbay.util.ajax.JSON;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
@@ -51,7 +55,7 @@ import com.google.common.collect.Maps;
  * in the quorum protocol.
  */
 @InterfaceAudience.Private
-public class JournalNode implements Tool, Configurable {
+public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public static final Log LOG = LogFactory.getLog(JournalNode.class);
   private Configuration conf;
   private JournalNodeRpcServer rpcServer;
@@ -128,6 +132,8 @@ public class JournalNode implements Tool, Configurable {
     SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName());
 
+    registerJNMXBean();
+
     httpServer = new JournalNodeHttpServer(conf, this);
     httpServer.start();
@@ -208,6 +214,50 @@ public class JournalNode implements Tool, Configurable {
     return new File(new File(dir), jid);
   }
 
+  @Override // JournalNodeMXBean
+  public String getJournalsStatus() {
+    // jid:{Formatted:True/False}
+    Map<String, Map<String, String>> status =
+        new HashMap<String, Map<String, String>>();
+    synchronized (this) {
+      for (Map.Entry<String, Journal> entry : journalsById.entrySet()) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted()));
+        status.put(entry.getKey(), jMap);
+      }
+    }
+
+    // It is possible that some journals have been formatted before, while the
+    // corresponding journals are not in journalsById yet (because of restarting
+    // JN, e.g.). For simplicity, let's just assume a journal is formatted if
+    // there is a directory for it. We can also call analyzeStorage method for
+    // these directories if necessary.
+    // Also note that we do not need to check localDir here since
+    // validateAndCreateJournalDir has been called before we register the
+    // MXBean.
+    File[] journalDirs = localDir.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(File file) {
+        return file.isDirectory();
+      }
+    });
+    for (File journalDir : journalDirs) {
+      String jid = journalDir.getName();
+      if (!status.containsKey(jid)) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", "true");
+        status.put(jid, jMap);
+      }
+    }
+    return JSON.toString(status);
+  }
+
+  /**
+   * Register JournalNodeMXBean
+   */
+  private void registerJNMXBean() {
+    MBeans.register("JournalNode", "JournalNodeInfo", this);
+  }
+
   private class ErrorReporter implements StorageErrorReporter {
     @Override
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is the JMX management interface for JournalNode information
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface JournalNodeMXBean {
+
+  /**
+   * Get status information (e.g., whether formatted) of JournalNode's journals.
+   *
+   * @return A string presenting status for each journal
+   */
+  public String getJournalsStatus();
+}
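[Editor's note] A minimal sketch of how a client could read this bean once a JournalNode has registered it. The object name mirrors the MBeans.register("JournalNode", "JournalNodeInfo", this) call above, and the getJournalsStatus() getter surfaces via JMX as the "JournalsStatus" attribute (the same lookup TestJournalNodeMXBean performs later in this diff). The probe class itself is hypothetical and assumes it runs in the same JVM as the JournalNode.

import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

// Hypothetical probe; assumes a started JournalNode in this JVM has already
// called registerJNMXBean().
public class JournalStatusProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Matches MBeans.register("JournalNode", "JournalNodeInfo", this).
    ObjectName name = new ObjectName(
        "Hadoop:service=JournalNode,name=JournalNodeInfo");
    // The getJournalsStatus() getter is exposed as the "JournalsStatus" attribute.
    String status = (String) mbs.getAttribute(name, "JournalsStatus");
    System.out.println(status); // e.g. {"ns1":{"Formatted":"true"}}
  }
}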
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -6347,6 +6347,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return JSON.toString(jasList);
   }
 
+  @Override // NameNodeMXBean
+  public String getJournalTransactionInfo() {
+    Map<String, String> txnIdMap = new HashMap<String, String>();
+    txnIdMap.put("LastAppliedOrWrittenTxId",
+        Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId()));
+    txnIdMap.put("MostRecentCheckpointTxId",
+        Long.toString(this.getFSImage().getMostRecentCheckpointTxId()));
+    return JSON.toString(txnIdMap);
+  }
+
   @Override // NameNodeMXBean
   public String getNNStarted() {
     return getStartTime().toString();
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -189,6 +189,12 @@ public interface NameNodeMXBean {
    */
   public String getNameJournalStatus();
 
+  /**
+   * Get information about the transaction ID, including the last applied
+   * transaction ID and the most recent checkpoint's transaction ID
+   */
+  public String getJournalTransactionInfo();
+
   /**
    * Gets the NN start time
    *
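[Editor's note] A usage sketch, not part of the patch: the new attribute can also be read from a running NameNode over remote JMX, assuming JMX remoting is enabled on the NameNode JVM (e.g. via com.sun.management.jmxremote.port). The host and port below are placeholders; the object name is the NameNodeInfo bean that TestNameNodeMXBean queries later in this change.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

// Hypothetical client; "nn-host:8004" stands in for a NameNode started with
// JMX remoting enabled.
public class NNTxnInfoProbe {
  public static void main(String[] args) throws Exception {
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://nn-host:8004/jmxrmi");
    JMXConnector jmxc = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbsc = jmxc.getMBeanServerConnection();
      ObjectName nn = new ObjectName(
          "Hadoop:service=NameNode,name=NameNodeInfo");
      // A JSON map with "LastAppliedOrWrittenTxId" and "MostRecentCheckpointTxId".
      String txnInfo = (String) mbsc.getAttribute(nn, "JournalTransactionInfo");
      System.out.println(txnInfo);
    } finally {
      jmxc.close();
    }
  }
}

The same values should also be visible as JSON through the NameNode's /jmx HTTP servlet, which is often simpler for scripts than a JMX connector.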
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
+/**
+ * Test {@link JournalNodeMXBean}
+ */
+public class TestJournalNodeMXBean {
+
+  private static final String NAMESERVICE = "ns1";
+  private static final int NUM_JN = 1;
+
+  private MiniJournalCluster jCluster;
+  private JournalNode jn;
+
+  @Before
+  public void setup() throws IOException {
+    // start 1 journal node
+    jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+  }
+
+  @After
+  public void cleanup() throws IOException {
+    if (jCluster != null) {
+      jCluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testJournalNodeMXBean() throws Exception {
+    // we have not formatted the journals yet, and the journal status in jmx
+    // should be empty since journal objects are created lazily
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    ObjectName mxbeanName = new ObjectName(
+        "Hadoop:service=JournalNode,name=JournalNodeInfo");
+
+    // getJournalsStatus
+    String journalStatus = (String) mbs.getAttribute(mxbeanName,
+        "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    assertFalse(journalStatus.contains(NAMESERVICE));
+
+    // format the journal ns1
+    final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster",
+        "my-bp", 0L);
+    jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
+
+    // check again after format
+    // getJournalsStatus
+    journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    Map<String, Map<String, String>> jMap = new HashMap<String, Map<String, String>>();
+    Map<String, String> infoMap = new HashMap<String, String>();
+    infoMap.put("Formatted", "true");
+    jMap.put(NAMESERVICE, infoMap);
+    assertEquals(JSON.toString(jMap), journalStatus);
+
+    // restart journal node without formatting
+    jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    // re-check
+    journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus");
+    assertEquals(jn.getJournalsStatus(), journalStatus);
+    jMap = new HashMap<String, Map<String, String>>();
+    infoMap = new HashMap<String, String>();
+    infoMap.put("Formatted", "true");
+    jMap.put(NAMESERVICE, infoMap);
+    assertEquals(JSON.toString(jMap), journalStatus);
+  }
+}
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -120,6 +120,11 @@ public class TestNameNodeMXBean {
     String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
         "NameJournalStatus"));
     assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
+    // get attribute JournalTransactionInfo
+    String journalTxnInfo = (String) mbs.getAttribute(mxbeanName,
+        "JournalTransactionInfo");
+    assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(),
+        journalTxnInfo);
     // get attribute "NNStarted"
     String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted");
     assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted);