diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5b300cad230..ac701cf2633 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -443,6 +443,10 @@ Release 2.5.0 - UNRELEASED
     HDFS-6499. Use NativeIO#renameTo instead of File#renameTo in
     FileJournalManager. (Yongjun Zhang via atm)
 
+    HDFS-6518. TestCacheDirectives#testExceedsCapacity should
+    take FSN read lock when accessing pendingCached list.
+    (wang)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 8ef3887dd56..bb8ef969168 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -1408,12 +1408,17 @@ public class TestCacheDirectives {
    */
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
-    final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
-    for (DataNode dn : cluster.getDataNodes()) {
-      DatanodeDescriptor descriptor =
-          datanodeManager.getDatanode(dn.getDatanodeId());
-      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    cluster.getNamesystem().readLock();
+    try {
+      final DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      for (DataNode dn : cluster.getDataNodes()) {
+        DatanodeDescriptor descriptor =
+            datanodeManager.getDatanode(dn.getDatanodeId());
+        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+      }
+    } finally {
+      cluster.getNamesystem().readUnlock();
     }
   }
 
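For reference, the helper as it reads once the hunk is applied, reproduced as
plain Java. This is a minimal sketch, assuming the imports already present in
TestCacheDirectives (MiniDFSCluster, DataNode, DatanodeManager,
DatanodeDescriptor, JUnit's Assert) are in scope; the javadoc wording is mine,
not from the patch, and the stated rationale (concurrent updates to
pendingCached) is inferred from the HDFS-6518 summary:

  /**
   * Assert that every DataNode's pendingCached list is empty. The
   * FSNamesystem read lock is held across the iteration so the list
   * cannot change underneath the assertion while it runs.
   */
  private void checkPendingCachedEmpty(MiniDFSCluster cluster)
      throws Exception {
    cluster.getNamesystem().readLock();
    try {
      final DatanodeManager datanodeManager =
          cluster.getNamesystem().getBlockManager().getDatanodeManager();
      for (DataNode dn : cluster.getDataNodes()) {
        DatanodeDescriptor descriptor =
            datanodeManager.getDatanode(dn.getDatanodeId());
        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
      }
    } finally {
      // Unlock in finally so a failed assertion cannot leak the lock.
      cluster.getNamesystem().readUnlock();
    }
  }

Pairing readLock() with readUnlock() in try/finally guarantees the lock is
released even when the JUnit assertion throws, which matters here because a
leaked namesystem lock would hang later test cases in the same JVM.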