HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain text instead of HTML. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1545791 13f79535-47bb-0310-9956-ffa450edef68
commit 740cf232bd
parent 82ff2d3f2e
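
Note (added for context, not part of the commit): after this change, the journal "stream" status returned through FSNamesystem#getNameJournalStatus() is plain text, so JMX consumers no longer need to strip HTML. Below is a minimal, hypothetical sketch of reading that attribute over remote JMX; the host, port, and class name are made up, and it assumes the NameNode exposes the commonly used Hadoop:service=NameNode,name=NameNodeInfo bean.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class NameJournalStatusReader {
  public static void main(String[] args) throws Exception {
    // Assumed remote-JMX endpoint of the NameNode; adjust host and port for your cluster.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://namenode.example.com:8004/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      // Assumed bean name; NameJournalStatus is the attribute backed by
      // FSNamesystem#getNameJournalStatus().
      ObjectName nnInfo = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      String status = (String) mbs.getAttribute(nnInfo, "NameJournalStatus");
      // After this commit the embedded "stream" entries are plain text such as
      // "Writing segment beginning at txid N. ..." instead of an HTML <table>.
      System.out.println(status);
    } finally {
      connector.close();
    }
  }
}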

CHANGES.txt
@@ -542,6 +542,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
 
+    HDFS-5561. FSNameSystem#getNameJournalStatus() in JMX should return plain
+    text instead of HTML. (Haohui Mai via jing9)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
 

AsyncLogger.java
@@ -150,5 +150,5 @@ interface AsyncLogger {
    * Append an HTML-formatted report for this logger's status to the provided
    * StringBuilder. This is displayed on the NN web UI.
    */
-  public void appendHtmlReport(StringBuilder sb);
+  public void appendReport(StringBuilder sb);
 }

AsyncLoggerSet.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRe
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.jasper.compiler.JspUtil;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;

@@ -177,17 +176,16 @@ class AsyncLoggerSet {
    * state of the underlying loggers.
    * @param sb the StringBuilder to append to
    */
-  void appendHtmlReport(StringBuilder sb) {
-    sb.append("<table class=\"storage\">");
-    sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
-    for (AsyncLogger l : loggers) {
-      sb.append("<tr>");
-      sb.append("<td>" + JspUtil.escapeXml(l.toString()) + "</td>");
-      sb.append("<td>");
-      l.appendHtmlReport(sb);
-      sb.append("</td></tr>\n");
+  void appendReport(StringBuilder sb) {
+    for (int i = 0, len = loggers.size(); i < len; ++i) {
+      AsyncLogger l = loggers.get(i);
+      if (i != 0) {
+        sb.append(", ");
+      }
+      sb.append(l).append(" (");
+      l.appendReport(sb);
+      sb.append(")");
     }
-    sb.append("</table>");
   }
 
   /**
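
For illustration only (not part of the commit): a standalone sketch of the plain-text format that AsyncLoggerSet#appendReport now builds, using made-up JournalNode labels. Each logger is rendered as "logger (its own report)" and entries are joined with ", "; in the real code the inner text comes from AsyncLogger#appendReport, e.g. "Written txid N" for an IPCLoggerChannel.

import java.util.Arrays;
import java.util.List;

public class ReportFormatDemo {
  public static void main(String[] args) {
    // Hypothetical logger labels standing in for AsyncLogger#toString().
    List<String> loggers = Arrays.asList("jn1:8485", "jn2:8485", "jn3:8485");
    StringBuilder sb = new StringBuilder();
    for (int i = 0, len = loggers.size(); i < len; ++i) {
      if (i != 0) {
        sb.append(", ");
      }
      // Stand-in for l.appendReport(sb); a real channel appends "Written txid <n>".
      sb.append(loggers.get(i)).append(" (").append("Written txid 5").append(")");
    }
    // Prints: jn1:8485 (Written txid 5), jn2:8485 (Written txid 5), jn3:8485 (Written txid 5)
    System.out.println(sb);
  }
}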

IPCLoggerChannel.java
@@ -569,7 +569,7 @@ public class IPCLoggerChannel implements AsyncLogger {
   }
 
   @Override
-  public synchronized void appendHtmlReport(StringBuilder sb) {
+  public synchronized void appendReport(StringBuilder sb) {
     sb.append("Written txid ").append(highestAckedTxId);
     long behind = getLagTxns();
     if (behind > 0) {

QuorumOutputStream.java
@@ -114,10 +114,10 @@ class QuorumOutputStream extends EditLogOutputStream {
   }
 
   @Override
-  public String generateHtmlReport() {
+  public String generateReport() {
     StringBuilder sb = new StringBuilder();
-    sb.append("Writing segment beginning at txid " + segmentTxId + "<br/>\n");
-    loggers.appendHtmlReport(sb);
+    sb.append("Writing segment beginning at txid " + segmentTxId + ". \n");
+    loggers.appendReport(sb);
     return sb.toString();
   }
 

EditLogOutputStream.java
@@ -24,7 +24,6 @@ import static org.apache.hadoop.util.Time.now;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.jasper.compiler.JspUtil;
 
 /**
  * A generic abstract class to support journaling of edits logs into

@@ -141,10 +140,10 @@ public abstract class EditLogOutputStream implements Closeable {
   }
 
   /**
-   * @return a short HTML snippet suitable for describing the current
+   * @return a short text snippet suitable for describing the current
    * status of the stream
    */
-  public String generateHtmlReport() {
-    return JspUtil.escapeXml(this.toString());
+  public String generateReport() {
+    return toString();
   }
 }

FSNamesystem.java
@@ -6635,7 +6635,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       } else if (openForWrite) {
         EditLogOutputStream elos = jas.getCurrentStream();
         if (elos != null) {
-          jasMap.put("stream", elos.generateHtmlReport());
+          jasMap.put("stream", elos.generateReport());
         } else {
           jasMap.put("stream", "not currently writing");
         }

NamenodeJspHelper.java
@@ -340,7 +340,7 @@ class NamenodeJspHelper {
       } else if (openForWrite) {
         EditLogOutputStream elos = jas.getCurrentStream();
         if (elos != null) {
-          out.println(elos.generateHtmlReport());
+          out.println(elos.generateReport());
         } else {
           out.println("not currently writing");
         }

TestIPCLoggerChannel.java
@@ -91,7 +91,6 @@ public class TestIPCLoggerChannel {
    */
   @Test
   public void testQueueLimiting() throws Exception {
-
     // Block the underlying fake proxy from actually completing any calls.
     DelayAnswer delayer = new DelayAnswer(LOG);
     Mockito.doAnswer(delayer).when(mockProxy).journal(

TestQuorumJournalManagerUnit.java
@@ -25,6 +25,8 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.List;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;

@@ -124,7 +126,7 @@ public class TestQuorumJournalManagerUnit {
         .when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
     qjm.startLogSegment(1);
   }
-  
+
   @Test
   public void testQuorumOfLoggersFail() throws Exception {
     futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());

@@ -140,6 +142,16 @@ public class TestQuorumJournalManagerUnit {
     }
   }
 
+  @Test
+  public void testQuorumOutputStreamReport() throws Exception {
+    futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong());
+    futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong());
+    futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong());
+    QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1);
+    String report = os.generateReport();
+    Assert.assertFalse("Report should be plain text", report.contains("<"));
+  }
+
   @Test
   public void testWriteEdits() throws Exception {
     EditLogOutputStream stm = createLogSegment();

TestEditLogFileOutputStream.java
@@ -26,24 +26,27 @@ import java.io.IOException;
 
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
  * Test the EditLogFileOutputStream
  */
 public class TestEditLogFileOutputStream {
-  private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class);
-  private static final File TEST_EDITS =
-      new File(TEST_DIR, "testEditLogFileOutput.log");
-  final static int MIN_PREALLOCATION_LENGTH =
-      EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
+  private final static File TEST_DIR = PathUtils
+      .getTestDir(TestEditLogFileOutputStream.class);
+  private static final File TEST_EDITS = new File(TEST_DIR,
+      "testEditLogFileOutput.log");
+  final static int MIN_PREALLOCATION_LENGTH = EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
 
   private Configuration conf;
 
-  static {
+  @BeforeClass
+  public static void disableFsync() {
     // No need to fsync for the purposes of tests. This makes
     // the tests run much faster.
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

@@ -52,7 +55,8 @@ public class TestEditLogFileOutputStream {
   @Before
   @After
   public void deleteEditsFile() {
-    if (TEST_EDITS.exists()) TEST_EDITS.delete();
+    if (TEST_EDITS.exists())
+      TEST_EDITS.delete();
   }
 
   @Before

@@ -66,17 +70,17 @@ public class TestEditLogFileOutputStream {
     elos.flushAndSync(true);
     assertEquals(expectedLength, elos.getFile().length());
   }
-  
+
   /**
-   * Tests writing to the EditLogFileOutputStream.  Due to preallocation, the
+   * Tests writing to the EditLogFileOutputStream. Due to preallocation, the
    * length of the edit log will usually be longer than its valid contents.
    */
   @Test
   public void testRawWrites() throws IOException {
-    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, TEST_EDITS,
-        0);
+    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf,
+        TEST_EDITS, 0);
     try {
-      byte[] small = new byte[] {1,2,3,4,5,8,7};
+      byte[] small = new byte[] { 1, 2, 3, 4, 5, 8, 7 };
       elos.create();
       // The first (small) write we make extends the file by 1 MB due to
       // preallocation.

@@ -101,7 +105,8 @@ public class TestEditLogFileOutputStream {
       }
       flushAndCheckLength(elos, 4 * MIN_PREALLOCATION_LENGTH);
     } finally {
-      if (elos != null) elos.close();
+      if (elos != null)
+        elos.close();
     }
   }
 

@@ -112,8 +117,8 @@ public class TestEditLogFileOutputStream {
   @Test
   public void testEditLogFileOutputStreamCloseAbort() throws IOException {
     // abort after a close should just ignore
-    EditLogFileOutputStream editLogStream =
-        new EditLogFileOutputStream(conf, TEST_EDITS, 0);
+    EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf,
+        TEST_EDITS, 0);
     editLogStream.close();
     editLogStream.abort();
   }

@@ -125,8 +130,8 @@ public class TestEditLogFileOutputStream {
   @Test
   public void testEditLogFileOutputStreamCloseClose() throws IOException {
     // close after a close should result in an IOE
-    EditLogFileOutputStream editLogStream =
-        new EditLogFileOutputStream(conf, TEST_EDITS, 0);
+    EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf,
+        TEST_EDITS, 0);
     editLogStream.close();
     try {
       editLogStream.close();

@@ -135,7 +140,7 @@ public class TestEditLogFileOutputStream {
       assertTrue(msg, msg.contains("Trying to use aborted output stream"));
     }
   }
-  
+
   /**
    * Tests EditLogFileOutputStream doesn't throw NullPointerException on being
    * abort/abort sequence. See HDFS-2011.

@@ -143,9 +148,13 @@ public class TestEditLogFileOutputStream {
   @Test
   public void testEditLogFileOutputStreamAbortAbort() throws IOException {
     // abort after a close should just ignore
-    EditLogFileOutputStream editLogStream =
-        new EditLogFileOutputStream(conf, TEST_EDITS, 0);
-    editLogStream.abort();
-    editLogStream.abort();
+    EditLogFileOutputStream editLogStream = null;
+    try {
+      editLogStream = new EditLogFileOutputStream(conf, TEST_EDITS, 0);
+      editLogStream.abort();
+      editLogStream.abort();
+    } finally {
+      IOUtils.cleanup(null, editLogStream);
+    }
   }
 }

TestNameNodeMXBean.java
@@ -46,7 +46,7 @@ public class TestNameNodeMXBean {
    */
   private static final double DELTA = 0.000001;
 
-  @SuppressWarnings({ "unchecked", "deprecation" })
+  @SuppressWarnings({ "unchecked" })
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();

@@ -152,7 +152,7 @@ public class TestNameNodeMXBean {
     assertEquals(0, statusMap.get("failed").size());
 
     // This will cause the first dir to fail.
-    File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
+    File failedNameDir = new File(nameDirUris.iterator().next());
     assertEquals(0, FileUtil.chmod(
         new File(failedNameDir, "current").getAbsolutePath(), "000"));
     cluster.getNameNodeRpc().rollEditLog();