diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3c2c9248033..6af6c26c60d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -27,6 +27,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly
     in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)
 
+    HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index bbe4c45c63f..f5f77f043d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -146,6 +146,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
   goto :eof
 
+:cacheadmin
+  set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -193,6 +197,7 @@ goto :eof
   @echo                        current directory contents with a snapshot
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
   @echo                        Use -help to see options
+  @echo   cacheadmin           configure the HDFS cache
   @echo.
   @echo Most commands print help when invoked w/o parameters.
 
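With the dispatch above in place, cacheadmin runs from the Windows shell exactly as it does through the Unix hdfs wrapper, handing its arguments to org.apache.hadoop.hdfs.tools.CacheAdmin. A minimal session might look like the following sketch; the pool name and path are illustrative, not part of this patch:

  hdfs cacheadmin -addPool pool1
  hdfs cacheadmin -addDirective -path /hot/data -pool pool1
  hdfs cacheadmin -listDirectives -pool pool1
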
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 4dc2ec74493..db3ff9c0d7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -752,15 +752,19 @@ public class DataNode extends Configured
             " size (%s) is greater than zero and native code is not available.",
             DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
-      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
-      if (dnConf.maxLockedMemory > ulimit) {
-        throw new RuntimeException(String.format(
-          "Cannot start datanode because the configured max locked memory" +
-          " size (%s) of %d bytes is more than the datanode's available" +
-          " RLIMIT_MEMLOCK ulimit of %d bytes.",
-          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-          dnConf.maxLockedMemory,
-          ulimit));
+      if (Path.WINDOWS) {
+        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
+      } else {
+        long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
+        if (dnConf.maxLockedMemory > ulimit) {
+          throw new RuntimeException(String.format(
+            "Cannot start datanode because the configured max locked memory" +
+            " size (%s) of %d bytes is more than the datanode's available" +
+            " RLIMIT_MEMLOCK ulimit of %d bytes.",
+            DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+            dnConf.maxLockedMemory,
+            ulimit));
+        }
       }
     }
     LOG.info("Starting DataNode with maxLockedMemory = " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
index d3a712294ab..8f5647b6ac5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
 ** {Native Libraries}
 
   In order to lock block files into memory, the DataNode relies on native JNI
-  code found in <<<libhadoop.so>>>. Be sure to
+  code found in <<<libhadoop.so>>> or <<<hadoop.dll>>> on Windows. Be sure to
   {{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
   centralized cache management.
 
@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
 
   * dfs.datanode.max.locked.memory
 
     This determines the maximum amount of memory a DataNode will use for caching.
-    The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
-    also needs to be increased to match this parameter (see below section on
-    {{OS Limits}}). When setting this value, please remember that you will need
-    space in memory for other things as well, such as the DataNode and
-    application JVM heaps and the operating system page cache.
+    On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
+    the DataNode user also needs to be increased to match this parameter (see
+    below section on {{OS Limits}}). When setting this value, please remember
+    that you will need space in memory for other things as well, such as the
+    DataNode and application JVM heaps and the operating system page cache.
 
 *** Optional
@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
   "unlimited," indicating that there is no limit. Note that it's typical for
   <<<ulimit -l>>> to output the memory lock limit in KB, but
   dfs.datanode.max.locked.memory must be specified in bytes.
+
+  This information does not apply to deployments on Windows. Windows has no
+  direct equivalent of <<<ulimit -l>>>.
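Because dfs.datanode.max.locked.memory is always specified in bytes, a corresponding hdfs-site.xml entry would look like the sketch below. The 256 MB value is illustrative only; leave room for the DataNode and application JVM heaps and the operating system page cache as the documentation above advises, and on Unix-like systems keep it within ulimit -l:

  <property>
    <name>dfs.datanode.max.locked.memory</name>
    <!-- 256 MB expressed in bytes; illustrative value only -->
    <value>268435456</value>
  </property>
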
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 2ccd7568c01..5ac13eec270 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached. This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+            (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }
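
As an end-to-end check after deploying this change on a Windows cluster, cache statistics should rise when a directive is added and return to zero once it is removed, mirroring what tearDown now asserts. One possible verification session, assuming a directive with id 1 exists:

  hdfs cacheadmin -listPools -stats
  hdfs cacheadmin -listDirectives -stats
  hdfs cacheadmin -removeDirective 1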