HDFS-2978. The NameNode should expose name dir statuses via JMX. Contributed by Aaron T. Myers.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1293709 13f79535-47bb-0310-9956-ffa450edef68
commit eb84282c0b
parent 1c7fb4f019
CHANGES.txt
@@ -6,6 +6,8 @@ Release 0.23.3 - UNRELEASED

  NEW FEATURES

    HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)

  IMPROVEMENTS

  OPTIMIZATIONS
FSNamesystem.java
@@ -140,6 +140,8 @@ import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util;
@@ -4397,6 +4399,30 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    return blockPoolId;
  }

  @Override // NameNodeMXBean
  public String getNameDirStatuses() {
    Map<String, Map<File, StorageDirType>> statusMap =
        new HashMap<String, Map<File, StorageDirType>>();

    Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
    for (Iterator<StorageDirectory> it
        = getFSImage().getStorage().dirIterator(); it.hasNext();) {
      StorageDirectory st = it.next();
      activeDirs.put(st.getRoot(), st.getStorageDirType());
    }
    statusMap.put("active", activeDirs);

    List<Storage.StorageDirectory> removedStorageDirs
        = getFSImage().getStorage().getRemovedStorageDirs();
    Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
    for (StorageDirectory st : removedStorageDirs) {
      failedDirs.put(st.getRoot(), st.getStorageDirType());
    }
    statusMap.put("failed", failedDirs);

    return JSON.toString(statusMap);
  }

  /** @return the block manager. */
  public BlockManager getBlockManager() {
    return blockManager;
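For context, the string returned above is a two-level JSON object: the keys "active" and "failed" each map from a directory path to its storage directory type. A minimal sketch of decoding such a string with the same Jetty JSON utility the patch uses; the sample payload (the paths and the IMAGE_AND_EDITS type) is illustrative only, not taken from the patch:

import java.util.Map;

import org.mortbay.util.ajax.JSON;

public class NameDirStatusesExample {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // Illustrative payload only; real paths and types depend on the cluster.
    String sample = "{\"active\":{\"/data/1/dfs/name\":\"IMAGE_AND_EDITS\"," +
        "\"/data/2/dfs/name\":\"IMAGE_AND_EDITS\"},\"failed\":{}}";
    // JSON.parse returns a Map for a JSON object; cast accordingly.
    Map<String, Object> statusMap = (Map<String, Object>) JSON.parse(sample);
    Map<String, Object> active = (Map<String, Object>) statusMap.get("active");
    Map<String, Object> failed = (Map<String, Object>) statusMap.get("failed");
    System.out.println("Active name dirs: " + active.keySet());
    System.out.println("Failed name dirs: " + failed.keySet());
  }
}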
NameNodeMXBean.java
@@ -166,4 +166,12 @@ public interface NameNodeMXBean {
   * @return the block pool id
   */
  public String getBlockPoolId();

  /**
   * Get status information about the directories storing image and edits logs
   * of the NN.
   *
   * @return the name dir status information, as a JSON string.
   */
  public String getNameDirStatuses();
}
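The new attribute is readable through any standard JMX client. A minimal sketch, assuming remote JMX has been enabled on the NameNode JVM; the host and port are placeholders (not part of this patch), and the MBean name is the NameNodeInfo bean that the test below queries:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class NameDirStatusesClient {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; requires remote JMX enabled on the NameNode.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode.example.com:8004/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      ObjectName name =
          new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      // Returns the JSON string produced by FSNamesystem#getNameDirStatuses().
      String statuses = (String) mbs.getAttribute(name, "NameDirStatuses");
      System.out.println(statuses);
    } finally {
      connector.close();
    }
  }
}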
TestNameNodeMXBean.java
@@ -17,23 +17,33 @@
 */
package org.apache.hadoop.hdfs.server.namenode;

import static org.junit.Assert.*;

import java.io.File;
import java.lang.management.ManagementFactory;
import java.net.URI;
import java.util.Collection;
import java.util.Map;

import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.util.VersionInfo;

import org.junit.Test;
import org.mortbay.util.ajax.JSON;

import junit.framework.Assert;

/**
 * Class for testing {@link NameNodeMXBean} implementation
 */
public class TestNameNodeMXBean {
  @SuppressWarnings({ "unchecked", "deprecation" })
  @Test
  public void testNameNodeMXBeanInfo() throws Exception {
    Configuration conf = new Configuration();
@@ -88,8 +98,46 @@ public class TestNameNodeMXBean {
      String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
          "DeadNodes"));
      Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
      // get attribute NameDirStatuses
      String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
          "NameDirStatuses"));
      Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
      Map<String, Map<String, String>> statusMap =
          (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
      Collection<URI> nameDirUris = cluster.getNameDirs(0);
      for (URI nameDirUri : nameDirUris) {
        File nameDir = new File(nameDirUri);
        System.out.println("Checking for the presence of " + nameDir +
            " in active name dirs.");
        assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
      }
      assertEquals(2, statusMap.get("active").size());
      assertEquals(0, statusMap.get("failed").size());

      // This will cause the first dir to fail.
      File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
      assertEquals(0, FileUtil.chmod(failedNameDir.getAbsolutePath(), "000"));
      cluster.getNameNodeRpc().rollEditLog();

      nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
          "NameDirStatuses"));
      statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
      for (URI nameDirUri : nameDirUris) {
        File nameDir = new File(nameDirUri);
        String expectedStatus =
            nameDir.equals(failedNameDir) ? "failed" : "active";
        System.out.println("Checking for the presence of " + nameDir +
            " in " + expectedStatus + " name dirs.");
        assertTrue(statusMap.get(expectedStatus).containsKey(
            nameDir.getAbsolutePath()));
      }
      assertEquals(1, statusMap.get("active").size());
      assertEquals(1, statusMap.get("failed").size());
    } finally {
      if (cluster != null) {
        for (URI dir : cluster.getNameDirs(0)) {
          FileUtil.chmod(new File(dir).toString(), "700");
        }
        cluster.shutdown();
      }
    }
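Outside of unit tests, the same attribute can also be fetched over plain HTTP through the NameNode's JMX JSON servlet, with no JMX connector required. A hedged sketch; the host and port are assumptions (50070 is the default NameNode HTTP port in this era), and the servlet's availability depends on the branch build:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class JmxServletExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical host; query the NameNodeInfo bean via the /jmx servlet.
    URL url = new URL("http://namenode.example.com:50070/jmx" +
        "?qry=Hadoop:service=NameNode,name=NameNodeInfo");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream(), "UTF-8"));
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(line); // JSON including the NameDirStatuses attribute
    }
    in.close();
  }
}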