HDFS-3981. Fix handling of FSN lock in getBlockLocations. Contributed by Xiaobo Peng and Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1465751 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Todd Lipcon 2013-04-08 19:57:16 +00:00
parent 16fedf5473
commit 087acf8fd6
4 changed files with 68 additions and 2 deletions

View File

@ -20,6 +20,9 @@ package org.apache.hadoop.test;
import java.io.Closeable; import java.io.Closeable;
import org.mockito.Mockito; import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.mockito.stubbing.Stubber;
public abstract class MockitoUtil { public abstract class MockitoUtil {
@ -33,4 +36,29 @@ public abstract class MockitoUtil {
return Mockito.mock(clazz, return Mockito.mock(clazz,
Mockito.withSettings().extraInterfaces(Closeable.class)); Mockito.withSettings().extraInterfaces(Closeable.class));
} }
/**
 * Throw the given exception from the mock/spy only in the case that the
 * call stack, at the time the stubbed method is invoked, contains a
 * frame which matches the given pattern.
 *
 * @param t the Throwable to throw
 * @param pattern the regex against which each stack frame's String form
 *     is matched (via String#matches, so it must match the whole frame)
 * @return the stub in progress
 */
public static Stubber doThrowWhenCallStackMatches(
final Throwable t, final String pattern) {
return Mockito.doAnswer(new Answer<Object>() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
// Capture the current call stack onto the throwable so that, if it is
// thrown, its trace points at the offending invocation site.
t.setStackTrace(Thread.currentThread().getStackTrace());
for (StackTraceElement elem : t.getStackTrace()) {
if (elem.toString().matches(pattern)) {
throw t;
}
}
// No frame matched: delegate to the real (spied) implementation.
return invocation.callRealMethod();
}
});
}
} }

View File

@ -483,6 +483,9 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4658. Standby NN will log that it has received a block report "after HDFS-4658. Standby NN will log that it has received a block report "after
becoming active" (atm) becoming active" (atm)
HDFS-3981. Fix handling of FSN lock in getBlockLocations. (Xiaobo Peng
and todd via todd)
Release 2.0.4-alpha - UNRELEASED Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -1378,14 +1378,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
long now = now(); long now = now();
final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src); final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src);
if (doAccessTime && isAccessTimeSupported()) { if (doAccessTime && isAccessTimeSupported()) {
if (now <= inode.getAccessTime() + getAccessTimePrecision()) { if (now > inode.getAccessTime() + getAccessTimePrecision()) {
// if we have to set access time but we only have the readlock, then // if we have to set access time but we only have the readlock, then
// restart this entire operation with the writeLock. // restart this entire operation with the writeLock.
if (isReadOp) { if (isReadOp) {
continue; continue;
} }
dir.setTimes(src, inode, -1, now, false);
} }
dir.setTimes(src, inode, -1, now, false);
} }
return blockManager.createLocatedBlocks(inode.getBlocks(), return blockManager.createLocatedBlocks(inode.getBlocks(),
inode.computeFileSize(false), inode.isUnderConstruction(), inode.computeFileSize(false), inode.isUnderConstruction(),

View File

@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
import java.text.SimpleDateFormat; import java.text.SimpleDateFormat;
import java.util.Date; import java.util.Date;
import java.util.Random; import java.util.Random;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -36,8 +37,11 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.test.MockitoUtil;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mockito;
/** /**
* This class tests the access time on files. * This class tests the access time on files.
@ -273,6 +277,37 @@ public class TestSetTimes {
cluster.shutdown(); cluster.shutdown();
} }
} }
/**
 * Test that when access time updates are not needed, the FSNamesystem
 * write lock is not taken by getBlockLocations.
 * Regression test for HDFS-3981.
 */
@Test(timeout=60000)
public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
Configuration conf = new HdfsConfiguration();
// Large (100s) access-time precision so the freshly-created file's
// access time is guaranteed to still be "recent enough" below.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100*1000);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.build();
// Spy on the namesystem's lock so acquisitions can be intercepted.
ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
try {
// Create empty file in the FSN.
Path p = new Path("/empty-file");
DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);
// getBlockLocations() should not need the write lock, since we just created
// the file (and thus its access time is already within the 100-second
// accesstime precision configured above).
MockitoUtil.doThrowWhenCallStackMatches(
new AssertionError("Should not need write lock"),
".*getBlockLocations.*")
.when(spyLock).writeLock();
// Would throw the AssertionError above if getBlockLocations took the
// write lock — the HDFS-3981 regression.
cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
} finally {
cluster.shutdown();
}
}
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
new TestSetTimes().testTimes(); new TestSetTimes().testTimes();