HDFS-5202. Support Centralized Cache Management on Windows. Contributed by Chris Nauroth.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1609618 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth 2014-07-11 03:06:11 +00:00
parent b90dfe0b49
commit 81930b75bd
5 changed files with 46 additions and 21 deletions

View File

@@ -284,6 +284,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly
     in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)
 
+    HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

View File

@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -146,6 +146,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
   goto :eof
 
+:cacheadmin
+  set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
 if "%1" == "--config" (
@@ -193,6 +197,7 @@ goto :eof
   @echo                        current directory contents with a snapshot
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
   @echo                        Use -help to see options
+  @echo   cacheadmin           configure the HDFS cache
   @echo.
   @echo Most commands print help when invoked w/o parameters.
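
Editor's note: the new cacheadmin subcommand dispatches to org.apache.hadoop.hdfs.tools.CacheAdmin. The same cache pools and directives can also be managed programmatically through the DistributedFileSystem API; below is a minimal sketch, assuming a running HDFS cluster with caching enabled (the pool name "pool1", path "/data", and class name are invented for illustration).

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    public class CacheAdminSketch {
      public static void main(String[] args) throws IOException {
        // Assumes fs.defaultFS points at an HDFS cluster.
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        // Equivalent to: hdfs cacheadmin -addPool pool1
        dfs.addCachePool(new CachePoolInfo("pool1"));
        // Equivalent to: hdfs cacheadmin -addDirective -path /data -pool pool1
        long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPath(new Path("/data"))
            .setPool("pool1")
            .build());
        // Equivalent to: hdfs cacheadmin -removeDirective <id>
        dfs.removeCacheDirective(id);
      }
    }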

View File

@@ -745,6 +745,9 @@ void startDataNode(Configuration conf,
             " size (%s) is greater than zero and native code is not available.",
             DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
+      if (Path.WINDOWS) {
+        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
+      } else {
       long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
       if (dnConf.maxLockedMemory > ulimit) {
         throw new RuntimeException(String.format(
@@ -756,6 +759,7 @@ void startDataNode(Configuration conf,
             ulimit));
         }
+      }
     }
     LOG.info("Starting DataNode with maxLockedMemory = " +
         dnConf.maxLockedMemory);
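
Editor's note: taken out of context, the platform branch above is easier to read as a standalone helper. The following is a sketch only, with the error text abbreviated and the wrapper class invented; Path.WINDOWS, NativeIO.Windows.extendWorkingSetSize, and the POSIX memlock query are the real names used in the patch.

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.nativeio.NativeIO;

    public class MemlockCheckSketch {
      static void checkMaxLockedMemory(long maxLockedMemory) throws IOException {
        if (Path.WINDOWS) {
          // Windows has no RLIMIT_MEMLOCK; instead, extend the process
          // working set so pinned cache pages can stay resident.
          NativeIO.Windows.extendWorkingSetSize(maxLockedMemory);
        } else {
          // POSIX: the memlock ulimit caps what the DataNode may pin.
          long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
          if (maxLockedMemory > ulimit) {
            throw new RuntimeException("configured max locked memory "
                + maxLockedMemory + " exceeds the memlock ulimit " + ulimit);
          }
        }
      }
    }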

View File

@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
 ** {Native Libraries}
 
   In order to lock block files into memory, the DataNode relies on native JNI
-  code found in <<<libhadoop.so>>>. Be sure to
+  code found in <<<libhadoop.so>>> or <<<hadoop.dll>>> on Windows. Be sure to
   {{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
   centralized cache management.
@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
 
 * dfs.datanode.max.locked.memory
 
   This determines the maximum amount of memory a DataNode will use for caching.
-  The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
-  also needs to be increased to match this parameter (see below section on
-  {{OS Limits}}). When setting this value, please remember that you will need
-  space in memory for other things as well, such as the DataNode and
-  application JVM heaps and the operating system page cache.
+  On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
+  the DataNode user also needs to be increased to match this parameter (see
+  below section on {{OS Limits}}). When setting this value, please remember
+  that you will need space in memory for other things as well, such as the
+  DataNode and application JVM heaps and the operating system page cache.
 
 *** Optional
@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
   "unlimited," indicating that there is no limit. Note that it's typical for
   <<<ulimit -l>>> to output the memory lock limit in KB, but
   dfs.datanode.max.locked.memory must be specified in bytes.
+
+  This information does not apply to deployments on Windows. Windows has no
+  direct equivalent of <<<ulimit -l>>>.
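
Editor's note: as the documentation says, <<<ulimit -l>>> reports KB while dfs.datanode.max.locked.memory takes bytes, so the two differ by a factor of 1024. A minimal sketch of the conversion, assuming an illustrative 65536 KB memlock limit (the class name is invented):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class MaxLockedMemorySketch {
      public static void main(String[] args) {
        long ulimitKb = 65536;                  // e.g. `ulimit -l` output, in KB
        long maxLockedBytes = ulimitKb * 1024;  // 67108864 bytes
        Configuration conf = new Configuration();
        conf.setLong(
            DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, maxLockedBytes);
        System.out.println("dfs.datanode.max.locked.memory = "
            + conf.getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, 0));
      }
    }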

View File

@@ -20,7 +20,6 @@
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached. This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ private static long[] getBlockSizes(HdfsBlockLocation[] locs)
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+            (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
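
Editor's note: the try/finally rewrite above is what makes this test Windows-friendly; an open FileInputStream holds a handle that, on Windows, blocks deletion of the underlying block file, so streams must be closed deterministically rather than left to the garbage collector. A self-contained sketch of the same IOUtils.cleanup pattern (the file path and wrapper class are invented):

    import java.io.FileInputStream;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.io.IOUtils;

    public class CleanupSketch {
      private static final Log LOG = LogFactory.getLog(CleanupSketch.class);

      static long fileSize(String path) throws Exception {
        FileInputStream in = null;
        try {
          in = new FileInputStream(path);
          return in.getChannel().size();
        } finally {
          // Closes each non-null argument, logging rather than throwing
          // any secondary error from close().
          IOUtils.cleanup(LOG, in);
        }
      }
    }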
@@ -571,5 +580,7 @@ public Boolean get() {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }
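
Editor's note: the "}, 1000, 30000);" closing this hunk is the tail of a GenericTestUtils.waitFor polling call: the Supplier is re-evaluated every 1000 ms until it returns true or 30000 ms elapse. A hedged sketch of the same pattern (the flag and class name are invented):

    import java.util.concurrent.atomic.AtomicBoolean;

    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    public class WaitForSketch {
      public static void main(String[] args) throws Exception {
        final AtomicBoolean condition = new AtomicBoolean(true);
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
          @Override
          public Boolean get() {
            return condition.get();  // polled every 1000 ms
          }
        }, 1000, 30000);             // check interval ms, timeout ms
      }
    }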