diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 17985700616..2a46bd807fd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -1377,4 +1377,19 @@ public static Set<Shell> getAllShells() {
       return new HashSet<>(CHILD_SHELLS.keySet());
     }
   }
+
+  /**
+   * Static method to return the memory lock limit for datanode.
+   * @param ulimit max value at which memory locked should be capped.
+   * @return long value specifying the memory lock limit.
+   */
+  public static Long getMemlockLimit(Long ulimit) {
+    if (WINDOWS) {
+      // HDFS-13560: if ulimit is too large on Windows, Windows will complain
+      // "1450: Insufficient system resources exist to complete the requested
+      // service". Thus, cap Windows memory lock limit at Integer.MAX_VALUE.
+      return Math.min(Integer.MAX_VALUE, ulimit);
+    }
+    return ulimit;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 2ee624bfee7..fdcee6ccc5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
@@ -95,7 +96,7 @@ public class TestDirectoryScanner {
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        Long.MAX_VALUE);
+        getMemlockLimit(Long.MAX_VALUE));
   }
 
   @Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 4a01e5596a4..3c86e6d00e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -26,6 +26,7 @@
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
@@ -422,7 +423,7 @@ public void build() throws IOException {
     private StorageType[] storageTypes = null;
     private int ramDiskReplicaCapacity = -1;
     private long ramDiskStorageLimit = -1;
-    private long maxLockedMemory = Long.MAX_VALUE;
+    private long maxLockedMemory = getMemlockLimit(Long.MAX_VALUE);
     private boolean hasTransientStorage = true;
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 8250ea6d191..b93cc642b17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -63,6 +63,7 @@
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -91,8 +92,10 @@ public class TestNameNodeMXBean {
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
+    Long maxLockedMemory = getMemlockLimit(
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+        maxLockedMemory);
     MiniDFSCluster cluster = null;
 
     try {
@@ -244,7 +247,7 @@ public void testNameNodeMXBeanInfo() throws Exception {
     assertEquals(1, statusMap.get("active").size());
     assertEquals(1, statusMap.get("failed").size());
     assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-    assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+    assertEquals(maxLockedMemory *
         cluster.getDataNodes().size(),
         mbs.getAttribute(mxbeanName, "CacheCapacity"));
     assertNull("RollingUpgradeInfo should be null when there is no rolling"
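For reference only (not part of the patch): a minimal usage sketch of the new Shell.getMemlockLimit(Long) helper introduced above, assuming the patched hadoop-common is on the classpath. The class name MemlockLimitExample is hypothetical; it simply mirrors the capping behaviour the change adds for Windows and the pass-through behaviour elsewhere.

import org.apache.hadoop.util.Shell;

public class MemlockLimitExample {
  public static void main(String[] args) {
    // On Windows the requested limit is capped at Integer.MAX_VALUE to avoid
    // error 1450 ("Insufficient system resources exist to complete the
    // requested service"); on other platforms it is returned unchanged.
    long requested = Long.MAX_VALUE;
    long effective = Shell.getMemlockLimit(requested);
    long expected = Shell.WINDOWS ? Integer.MAX_VALUE : requested;
    System.out.println("requested=" + requested
        + ", effective=" + effective
        + ", matches expectation: " + (effective == expected));
  }
}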