From 53b807a6a8486cefe0b036f7893de9f619bd44a1 Mon Sep 17 00:00:00 2001
From: Inigo Goiri
Date: Thu, 17 May 2018 17:03:23 -0700
Subject: [PATCH] HDFS-13560. Insufficient system resources exist to complete
 the requested service for some tests on Windows. Contributed by Anbang Hu.

---
 .../main/java/org/apache/hadoop/util/Shell.java    | 15 +++++++++++++++
 .../server/datanode/TestDirectoryScanner.java      |  3 ++-
 .../fsdataset/impl/LazyPersistTestCase.java        |  3 ++-
 .../hdfs/server/namenode/TestNameNodeMXBean.java   |  7 +++++--
 4 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index c25cba24793..04b4b4fe394 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -1388,4 +1388,19 @@ public abstract class Shell {
       return new HashSet<>(CHILD_SHELLS.keySet());
     }
   }
+
+  /**
+   * Static method to return the memory lock limit for datanode.
+   * @param ulimit max value at which memory locked should be capped.
+   * @return long value specifying the memory lock limit.
+   */
+  public static Long getMemlockLimit(Long ulimit) {
+    if (WINDOWS) {
+      // HDFS-13560: if ulimit is too large on Windows, Windows will complain
+      // "1450: Insufficient system resources exist to complete the requested
+      // service". Thus, cap Windows memory lock limit at Integer.MAX_VALUE.
+      return Math.min(Integer.MAX_VALUE, ulimit);
+    }
+    return ulimit;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index c95c71bf591..f79252318ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
@@ -99,7 +100,7 @@ public class TestDirectoryScanner {
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        Long.MAX_VALUE);
+        getMemlockLimit(Long.MAX_VALUE));
   }
 
   @Before
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index c412dad1512..aae59ddc5e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -25,6 +25,7 @@ import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
@@ -421,7 +422,7 @@ public abstract class LazyPersistTestCase {
     private StorageType[] storageTypes = null;
     private int ramDiskReplicaCapacity = -1;
     private long ramDiskStorageLimit = -1;
-    private long maxLockedMemory = Long.MAX_VALUE;
+    private long maxLockedMemory = getMemlockLimit(Long.MAX_VALUE);
     private boolean hasTransientStorage = true;
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 9c165d8fdb1..37284202847 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -75,6 +75,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -103,8 +104,10 @@ public class TestNameNodeMXBean {
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
+    Long maxLockedMemory = getMemlockLimit(
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+        maxLockedMemory);
     MiniDFSCluster cluster = null;
 
     try {
@@ -256,7 +259,7 @@ public class TestNameNodeMXBean {
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+      assertEquals(maxLockedMemory *
          cluster.getDataNodes().size(),
          mbs.getAttribute(mxbeanName, "CacheCapacity"));
      assertNull("RollingUpgradeInfo should be null when there is no rolling"
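
Note (not part of the patch): a minimal sketch of how callers are expected to use
the new helper, assuming the patched Shell class and the HDFS config classes are
on the classpath. The class name MemlockLimitExample is invented for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.util.Shell;

    // Hypothetical example class, not part of the patch.
    public class MemlockLimitExample {
      public static void main(String[] args) {
        // Shell.getMemlockLimit caps the requested limit at
        // Integer.MAX_VALUE on Windows (avoiding Windows error 1450,
        // "Insufficient system resources exist to complete the requested
        // service") and returns it unchanged on other platforms.
        long safeLimit = Shell.getMemlockLimit(Long.MAX_VALUE);

        // The tests in this patch pass the capped value through
        // dfs.datanode.max.locked.memory so the DataNode never asks the OS
        // to lock more memory than the platform can grant.
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
            safeLimit);
        System.out.println("dfs.datanode.max.locked.memory = " + safeLimit);
      }
    }

The same pattern appears in each modified test: compute the platform-safe limit
once, then use it both to configure the cluster and in any assertion that
compares against the configured value (as in TestNameNodeMXBean above, where the
CacheCapacity assertion must use the capped value, not the raw memlock limit).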