diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 25df81fa8d6..c2781541dba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -375,6 +375,9 @@ Trunk (Unreleased)
 
     HDFS-5320. Add datanode caching metrics. (wang)
 
+    HDFS-5520. loading cache path directives from edit log doesn't update
+    nextEntryId (cmccabe)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 27ff5180bfe..f12c36eed20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -249,7 +249,7 @@ public final class CacheManager {
 
   private long getNextEntryId() throws IOException {
     assert namesystem.hasWriteLock();
-    if (nextEntryId == Long.MAX_VALUE) {
+    if (nextEntryId >= Long.MAX_VALUE - 1) {
       throw new IOException("No more available IDs.");
     }
     return nextEntryId++;
@@ -357,6 +357,17 @@ public final class CacheManager {
       // We are loading an entry from the edit log.
       // Use the ID from the edit log.
       id = directive.getId();
+      if (id <= 0) {
+        throw new InvalidRequestException("can't add an ID " +
+            "of " + id + ": it is not positive.");
+      }
+      if (id >= Long.MAX_VALUE) {
+        throw new InvalidRequestException("can't add an ID " +
+            "of " + id + ": it is too big.");
+      }
+      if (nextEntryId <= id) {
+        nextEntryId = id + 1;
+      }
     } else {
       // Add a new entry with the next available ID.
       id = getNextEntryId();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index a6909cb4788..80a86e57bf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
@@ -91,6 +92,10 @@ public class TestFsDatasetCache {
   private static PageRounder rounder = new PageRounder();
   private static CacheManipulator prevCacheManipulator;
 
+  static {
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
+  }
+
   @Before
   public void setUp() throws Exception {
     assumeTrue(!Path.WINDOWS);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
index 7182aad7043..d21b60224dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -85,6 +86,10 @@ public class TestPathBasedCacheRequests {
   static private NamenodeProtocols proto;
   static private CacheManipulator prevCacheManipulator;
 
+  static {
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
+  }
+
   @Before
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
@@ -510,8 +515,9 @@ public class TestPathBasedCacheRequests {
     // Create some cache entries
     int numEntries = 10;
     String entryPrefix = "/party-";
+    long prevId = -1;
     for (int i=0; i
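
To see why the nextEntryId bump is the heart of this fix, here is a minimal, self-contained sketch of the allocator behavior the patch gives CacheManager: without the bump, the first directive added after an edit-log replay would reuse an ID that a replayed directive already owns. The names IdAllocator, addEntry, and replayedId are illustrative stand-ins rather than HDFS APIs, and plain IOException stands in for the InvalidRequestException the real CacheManager throws.

import java.io.IOException;

public class IdAllocator {
  private long nextEntryId = 1;

  // Mirrors CacheManager.getNextEntryId(): refuse IDs at the very top of
  // the long range, so the "id + 1" bump below can never overflow.
  private long getNextEntryId() throws IOException {
    if (nextEntryId >= Long.MAX_VALUE - 1) {
      throw new IOException("No more available IDs.");
    }
    return nextEntryId++;
  }

  // A non-null replayedId means we are reloading a directive from the
  // edit log and must reuse its persisted ID.
  public long addEntry(Long replayedId) throws IOException {
    long id;
    if (replayedId != null) {
      id = replayedId;
      if (id <= 0) {
        throw new IOException("can't add an ID of " + id
            + ": it is not positive.");
      }
      if (id >= Long.MAX_VALUE) {
        throw new IOException("can't add an ID of " + id
            + ": it is too big.");
      }
      // The actual fix: keep nextEntryId strictly ahead of every replayed
      // ID so later fresh allocations cannot collide with it.
      if (nextEntryId <= id) {
        nextEntryId = id + 1;
      }
    } else {
      id = getNextEntryId();
    }
    return id;
  }

  public static void main(String[] args) throws IOException {
    IdAllocator alloc = new IdAllocator();
    alloc.addEntry(1L);  // replay directive ID 1 from the edit log
    alloc.addEntry(2L);  // replay directive ID 2
    // Before HDFS-5520 this fresh allocation would have returned 1,
    // colliding with a replayed directive; with the bump it prints 3.
    System.out.println(alloc.addEntry(null));
  }
}

The test changes support this: each test class pins EditLogFileOutputStream's fsync behavior on (rather than the skip-fsync shortcut other tests in the same JVM may enable), presumably so that cache directives logged before a NameNode restart are durably on disk when the edit log is replayed, and the new prevId variable lets the directive-creation loop check that freshly assigned IDs keep increasing.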