HDFS-13560. Insufficient system resources exist to complete the requested service for some tests on Windows. Contributed by Anbang Hu.

(cherry picked from commit 53b807a6a8)
Inigo Goiri 2018-05-17 17:03:23 -07:00
parent ccd7d50faf
commit cdc16b3a2f
4 changed files with 24 additions and 4 deletions

Shell.java

@@ -1373,4 +1373,19 @@ public abstract class Shell {
       return new HashSet<>(CHILD_SHELLS.keySet());
     }
   }
+
+  /**
+   * Static method to return the memory lock limit for datanode.
+   * @param ulimit max value at which memory locked should be capped.
+   * @return long value specifying the memory lock limit.
+   */
+  public static Long getMemlockLimit(Long ulimit) {
+    if (WINDOWS) {
+      // HDFS-13560: if ulimit is too large on Windows, Windows will complain
+      // "1450: Insufficient system resources exist to complete the requested
+      // service". Thus, cap Windows memory lock limit at Integer.MAX_VALUE.
+      return Math.min(Integer.MAX_VALUE, ulimit);
+    }
+    return ulimit;
+  }
 }
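
For reference, the capping behavior of the new helper can be exercised in
isolation. The sketch below is a minimal standalone illustration, not part
of the commit; the "windows" flag stands in for Hadoop's Shell.WINDOWS
constant so the snippet compiles on its own.

public class MemlockLimitSketch {
  // Same logic as the Shell.getMemlockLimit() added above, with the
  // platform check passed in explicitly instead of read from Shell.WINDOWS.
  static Long getMemlockLimit(Long ulimit, boolean windows) {
    if (windows) {
      // Cap at Integer.MAX_VALUE to avoid Windows error 1450:
      // "Insufficient system resources exist to complete the requested
      // service."
      return Math.min(Integer.MAX_VALUE, ulimit);
    }
    return ulimit;
  }

  public static void main(String[] args) {
    // On Windows the requested limit is capped at 2147483647.
    System.out.println(getMemlockLimit(Long.MAX_VALUE, true));
    // Elsewhere it passes through unchanged (9223372036854775807).
    System.out.println(getMemlockLimit(Long.MAX_VALUE, false));
  }
}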

TestDirectoryScanner.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
@@ -99,7 +100,7 @@ public class TestDirectoryScanner {
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     CONF.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        Long.MAX_VALUE);
+        getMemlockLimit(Long.MAX_VALUE));
   }
 
   @Before

LazyPersistTestCase.java

@@ -26,6 +26,7 @@ import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
@@ -422,7 +423,7 @@ public abstract class LazyPersistTestCase {
     private StorageType[] storageTypes = null;
     private int ramDiskReplicaCapacity = -1;
     private long ramDiskStorageLimit = -1;
-    private long maxLockedMemory = Long.MAX_VALUE;
+    private long maxLockedMemory = getMemlockLimit(Long.MAX_VALUE);
     private boolean hasTransientStorage = true;
     private boolean useScr = false;
     private boolean useLegacyBlockReaderLocal = false;

TestNameNodeMXBean.java

@@ -75,6 +75,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.hadoop.util.Shell.getMemlockLimit;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -103,8 +104,10 @@ public class TestNameNodeMXBean {
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
+    Long maxLockedMemory = getMemlockLimit(
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+        maxLockedMemory);
     MiniDFSCluster cluster = null;
     try {
@@ -256,7 +259,7 @@ public class TestNameNodeMXBean {
       assertEquals(1, statusMap.get("active").size());
      assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+      assertEquals(maxLockedMemory *
           cluster.getDataNodes().size(),
           mbs.getAttribute(mxbeanName, "CacheCapacity"));
       assertNull("RollingUpgradeInfo should be null when there is no rolling"
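
The last hunk keeps the MXBean assertion consistent with the new setup: the
NameNode reports CacheCapacity as the per-datanode locked-memory limit times
the number of datanodes, so the expected value must be built from the same
capped limit that was written into the configuration, or the two sides of the
assertEquals diverge on Windows. A quick illustration of that arithmetic; the
cluster size here is assumed for the example and is not taken from the test.

// Illustrative only: perNodeLimit mirrors the Windows-capped value, and a
// two-datanode cluster is assumed for the example.
long perNodeLimit = Integer.MAX_VALUE;      // 2147483647 after capping
int dataNodeCount = 2;                      // hypothetical cluster size
long expectedCacheCapacity = perNodeLimit * dataNodeCount;
System.out.println(expectedCacheCapacity);  // prints 4294967294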