HDFS-10817. Add Logging for Long-held NN Read Locks. Contributed by Erik Krogen.

(cherry picked from commit 6f4b0d33ca)
(cherry picked from commit f87e2f5d05)
(cherry picked from commit 7adbd569a6)
Zhe Zhang 2016-08-31 15:40:01 -07:00
parent 73ba5a0170
commit 6793ef0de6
4 changed files with 152 additions and 4 deletions

DFSConfigKeys.java

@@ -377,10 +377,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
// Threshold for how long a write lock must be held for the event to be logged
// Threshold for how long namenode locks must be held for the
// event to be logged
public static final String DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
"dfs.namenode.write-lock-reporting-threshold-ms";
public static final long DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 1000L;
public static final String DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
"dfs.namenode.read-lock-reporting-threshold-ms";
public static final long DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
//Following keys have no defaults
public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
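
For illustration, the two thresholds introduced above can also be tightened programmatically, e.g. in a test, before the namesystem is constructed. A minimal sketch (the 500/1000 ms values are arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

Configuration conf = new Configuration();
// Report write locks held longer than 500 ms (ships with a 1000 ms default).
conf.setLong(
    DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY, 500L);
// Report read locks held longer than 1000 ms (ships with a 5000 ms default).
conf.setLong(
    DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY, 1000L);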

FSNamesystem.java

@@ -73,6 +73,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_I
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT;
@@ -825,6 +827,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
this.writeLockReportingThreshold = conf.getLong(
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
this.readLockReportingThreshold = conf.getLong(
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
// For testing purposes, allow the DT secret manager to be started regardless
// of whether security is enabled.
@@ -1482,14 +1487,41 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
private long writeLockReportingThreshold;
/** Last time stamp for write lock. Keep the longest one for multi-entrance.*/
private long writeLockHeldTimeStamp;
/** Threshold (ms) for long holding read lock report. */
private long readLockReportingThreshold;
/**
* Last time stamp for read lock. Keep the longest one for
* multi-entrance. This is ThreadLocal since there could be
* many read locks held simultaneously.
*/
private static ThreadLocal<Long> readLockHeldTimeStamp =
new ThreadLocal<Long>() {
@Override
public Long initialValue() {
return Long.MAX_VALUE;
}
};
@Override
public void readLock() {
this.fsLock.readLock().lock();
if (this.fsLock.getReadHoldCount() == 1) {
readLockHeldTimeStamp.set(monotonicNow());
}
}
@Override
public void readUnlock() {
final boolean needReport = this.fsLock.getReadHoldCount() == 1;
final long readLockInterval = monotonicNow() - readLockHeldTimeStamp.get();
this.fsLock.readLock().unlock();
if (needReport) {
readLockHeldTimeStamp.remove();
if (readLockInterval > this.readLockReportingThreshold) {
LOG.info("FSNamesystem read lock held for " + readLockInterval +
" ms via\n" + StringUtils.getStackTrace(Thread.currentThread()));
}
}
}
@Override
public void writeLock() {
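
The read-lock instrumentation above relies on getReadHoldCount() so that only a thread's outermost acquire/release pair starts and stops the timer, and it keeps the start stamp in a ThreadLocal because many reader threads can hold the lock at once. A stripped-down sketch of the same pattern outside FSNamesystem (class and method names here are illustrative, not part of the patch):

import java.util.concurrent.locks.ReentrantReadWriteLock;

class TimedReadLock {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);
  private final long thresholdMs;
  // Per-thread stamp; each reader tracks its own outermost acquisition.
  private final ThreadLocal<Long> acquiredAtMs =
      ThreadLocal.withInitial(() -> Long.MAX_VALUE);

  TimedReadLock(long thresholdMs) {
    this.thresholdMs = thresholdMs;
  }

  // Monotonic clock in ms, standing in for Time.monotonicNow().
  private static long nowMs() {
    return System.nanoTime() / 1_000_000L;
  }

  void readLock() {
    lock.readLock().lock();
    if (lock.getReadHoldCount() == 1) {    // outermost entry for this thread
      acquiredAtMs.set(nowMs());
    }
  }

  void readUnlock() {
    final boolean outermost = lock.getReadHoldCount() == 1;
    final long heldMs = nowMs() - acquiredAtMs.get();
    lock.readLock().unlock();
    if (outermost) {
      acquiredAtMs.remove();               // don't leak the ThreadLocal
      if (heldMs > thresholdMs) {
        System.err.println("read lock held for " + heldMs + " ms");
      }
    }
  }
}

As in the patch, the hold time is computed before unlock() but the message is emitted after it, so formatting the (potentially long) stack trace never extends the time the lock is actually held.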

hdfs-default.xml

@@ -2335,6 +2335,15 @@
</description>
</property>
<property>
<name>dfs.namenode.read-lock-reporting-threshold-ms</name>
<value>5000</value>
<description>When a read lock is held on the namenode for a long time,
this will be logged as the lock is released. This sets how long the
lock must be held for logging to occur.
</description>
</property>
<property>
<name>dfs.namenode.startup.delay.block.deletion.sec</name>
<value>0</value>
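
In a deployment the same knob is overridden in hdfs-site.xml. A quick way to confirm the effective value from Java (a sketch; HdfsConfiguration loads hdfs-default.xml plus any hdfs-site.xml on the classpath):

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

long readLockThresholdMs = new HdfsConfiguration().getLong(
    DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
    DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
// 5000 ms unless overridden in hdfs-site.xml
System.out.println("read lock reporting threshold: " + readLockThresholdMs + " ms");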

TestFSNamesystem.java

@@ -25,9 +25,11 @@ import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import com.google.common.base.Supplier;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -51,6 +53,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeoutException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class TestFSNamesystem {
@@ -257,10 +261,11 @@ public class TestFSNamesystem {
}
/**
* Test when FSNamesystem lock is held for a long time, logger will report it.
* Test when FSNamesystem write lock is held for a long time,
* logger will report it.
*/
@Test(timeout=45000)
public void testFSLockLongHoldingReport() throws Exception {
public void testFSWriteLockLongHoldingReport() throws Exception {
final long writeLockReportingThreshold = 100L;
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
@@ -268,7 +273,7 @@ public class TestFSNamesystem {
FSImage fsImage = Mockito.mock(FSImage.class);
FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
final FSNamesystem fsn = new FSNamesystem(conf, fsImage);
LogCapturer logs = LogCapturer.captureLogs(FSNamesystem.LOG);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.INFO);
@@ -311,4 +316,102 @@ public class TestFSNamesystem {
fsn.writeUnlock();
assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()));
}
/**
* Test when FSNamesystem read lock is held for a long time,
* logger will report it.
*/
@Test(timeout=45000)
public void testFSReadLockLongHoldingReport() throws Exception {
final long readLockReportingThreshold = 100L;
final String readLockLogStmt = "FSNamesystem read lock held for ";
Configuration conf = new Configuration();
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
readLockReportingThreshold);
FSImage fsImage = Mockito.mock(FSImage.class);
FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
final FSNamesystem fsn = new FSNamesystem(conf, fsImage);
LogCapturer logs = LogCapturer.captureLogs(FSNamesystem.LOG);
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.INFO);
// Don't report if the read lock is held for a short time
fsn.readLock();
Thread.sleep(readLockReportingThreshold / 2);
fsn.readUnlock();
assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
logs.getOutput().contains(readLockLogStmt));
// Report if the read lock is held for a long time
fsn.readLock();
Thread.sleep(readLockReportingThreshold + 10);
logs.clearOutput();
fsn.readUnlock();
assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName())
&& logs.getOutput().contains(readLockLogStmt));
// Report if it's held for a long time when re-entering read lock
fsn.readLock();
Thread.sleep(readLockReportingThreshold / 2 + 1);
fsn.readLock();
Thread.sleep(readLockReportingThreshold / 2 + 1);
logs.clearOutput();
fsn.readUnlock();
assertFalse(logs.getOutput().contains(GenericTestUtils.getMethodName()) ||
logs.getOutput().contains(readLockLogStmt));
logs.clearOutput();
fsn.readUnlock();
assertTrue(logs.getOutput().contains(GenericTestUtils.getMethodName()) &&
logs.getOutput().contains(readLockLogStmt));
// Report if it's held for a long time while another thread also has the
// read lock. Let one thread hold the lock long enough to activate an
// alert, then have another thread grab the read lock to ensure that this
// doesn't reset the timing.
logs.clearOutput();
final CountDownLatch barrier = new CountDownLatch(1);
final CountDownLatch barrier2 = new CountDownLatch(1);
Thread t1 = new Thread() {
@Override
public void run() {
try {
fsn.readLock();
Thread.sleep(readLockReportingThreshold + 1);
barrier.countDown(); // Allow for t2 to acquire the read lock
barrier2.await(); // Wait until t2 has the read lock
fsn.readUnlock();
} catch (InterruptedException e) {
fail("Interrupted during testing");
}
}
};
Thread t2 = new Thread() {
@Override
public void run() {
try {
barrier.await(); // Wait until t1 finishes sleeping
fsn.readLock();
barrier2.countDown(); // Allow for t1 to unlock
fsn.readUnlock();
} catch (InterruptedException e) {
fail("Interrupted during testing");
}
}
};
t1.start();
t2.start();
t1.join();
t2.join();
// Look for the differentiating class names in the stack trace
String stackTracePatternString =
String.format("INFO.+%s(.+\n){4}\\Q%%s\\E\\.run", readLockLogStmt);
Pattern t1Pattern = Pattern.compile(
String.format(stackTracePatternString, t1.getClass().getName()));
assertTrue(t1Pattern.matcher(logs.getOutput()).find());
Pattern t2Pattern = Pattern.compile(
String.format(stackTracePatternString, t2.getClass().getName()));
assertFalse(t2Pattern.matcher(logs.getOutput()).find());
}
}
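
A note on the stack-trace assertion at the end of testFSReadLockLongHoldingReport(): after the two rounds of String.format, the pattern demands an INFO line containing the read-lock message, four ".+\n" repetitions (the tail of that line plus three intervening stack frames), and then a frame whose class is the anonymous Thread subclass followed by ".run", which is how t1's report is told apart from t2's. A self-contained sketch of the same match against a hand-written log excerpt (the sample text and line numbers are made up; only its shape matters):

import java.util.regex.Pattern;

public class ReadLockLogPatternDemo {
  public static void main(String[] args) {
    String sample =
        "2016-08-31 15:40:01,000 INFO FSNamesystem read lock held for 150 ms via\n"
        + "java.lang.Thread.getStackTrace(Thread.java:1552)\n"
        + "org.apache.hadoop.util.StringUtils.getStackTrace(StringUtils.java:1029)\n"
        + "org.apache.hadoop.hdfs.server.namenode.FSNamesystem.readUnlock(FSNamesystem.java:1513)\n"
        + "org.apache.hadoop.hdfs.server.namenode.TestFSNamesystem$1.run(TestFSNamesystem.java:380)\n";
    String holder = "org.apache.hadoop.hdfs.server.namenode.TestFSNamesystem$1";
    // Same shape as the pattern built in the test: INFO line, four more lines,
    // then the lock holder's run() frame.
    Pattern p = Pattern.compile(
        "INFO.+FSNamesystem read lock held for (.+\n){4}\\Q" + holder + "\\E\\.run");
    System.out.println(p.matcher(sample).find());  // true: the fifth line is the holder's run()
  }
}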