HDFS-5202. Merging change r1609618 from trunk to branch-2.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1609619 13f79535-47bb-0310-9956-ffa450edef68
parent ebbf204aa2
commit 9e5c339023
@@ -27,6 +27,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly
     in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)
 
+    HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -146,6 +146,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
   goto :eof
 
+:cacheadmin
+  set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -193,6 +197,7 @@ goto :eof
   @echo                        current directory contents with a snapshot
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
   @echo                        Use -help to see options
+  @echo   cacheadmin           configure the HDFS cache
   @echo.
   @echo Most commands print help when invoked w/o parameters.
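
The cacheadmin subcommand wired up above is a CLI front end for the centralized cache management API on DistributedFileSystem. As a rough sketch only (the pool name, path, and class name below are invented for illustration and are not part of this change), the equivalent operations look like this in Java:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    public class CacheAdminSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        // Roughly what `hdfs cacheadmin -addPool examplePool` does.
        dfs.addCachePool(new CachePoolInfo("examplePool"));
        // Roughly what `hdfs cacheadmin -addDirective` does; returns an id.
        long id = dfs.addCacheDirective(
            new CacheDirectiveInfo.Builder()
                .setPath(new Path("/example/file"))
                .setPool("examplePool")
                .build());
        // Roughly what `hdfs cacheadmin -removeDirective <id>` does.
        dfs.removeCacheDirective(id);
      }
    }
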
@@ -752,15 +752,19 @@ public class DataNode extends Configured
             " size (%s) is greater than zero and native code is not available.",
             DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
-      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
-      if (dnConf.maxLockedMemory > ulimit) {
-        throw new RuntimeException(String.format(
-          "Cannot start datanode because the configured max locked memory" +
-          " size (%s) of %d bytes is more than the datanode's available" +
-          " RLIMIT_MEMLOCK ulimit of %d bytes.",
-          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-          dnConf.maxLockedMemory,
-          ulimit));
+      if (Path.WINDOWS) {
+        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
+      } else {
+        long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
+        if (dnConf.maxLockedMemory > ulimit) {
+          throw new RuntimeException(String.format(
+            "Cannot start datanode because the configured max locked memory" +
+            " size (%s) of %d bytes is more than the datanode's available" +
+            " RLIMIT_MEMLOCK ulimit of %d bytes.",
+            DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+            dnConf.maxLockedMemory,
+            ulimit));
+        }
       }
     }
     LOG.info("Starting DataNode with maxLockedMemory = " +
@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
 ** {Native Libraries}
 
   In order to lock block files into memory, the DataNode relies on native JNI
-  code found in <<<libhadoop.so>>>. Be sure to
+  code found in <<<libhadoop.so>>> or <<<hadoop.dll>>> on Windows. Be sure to
   {{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
   centralized cache management.
 
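
Since block locking depends on that native library, a deployment can verify at runtime that it actually loaded before relying on caching. A minimal sketch, assuming only the stock NativeCodeLoader and NativeIO checks (the class name is illustrative):

    import org.apache.hadoop.io.nativeio.NativeIO;
    import org.apache.hadoop.util.NativeCodeLoader;

    public class NativeCheckSketch {
      public static void main(String[] args) {
        // True only when libhadoop.so (or hadoop.dll on Windows) was found
        // and loaded by the JVM.
        System.out.println("native hadoop loaded: "
            + NativeCodeLoader.isNativeCodeLoaded());
        // NativeIO.isAvailable() additionally requires the NativeIO bindings
        // used by the caching code paths.
        System.out.println("NativeIO available: " + NativeIO.isAvailable());
      }
    }
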
@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
 * dfs.datanode.max.locked.memory
 
   This determines the maximum amount of memory a DataNode will use for caching.
-  The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
-  also needs to be increased to match this parameter (see below section on
-  {{OS Limits}}). When setting this value, please remember that you will need
-  space in memory for other things as well, such as the DataNode and
-  application JVM heaps and the operating system page cache.
+  On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
+  the DataNode user also needs to be increased to match this parameter (see
+  below section on {{OS Limits}}). When setting this value, please remember
+  that you will need space in memory for other things as well, such as the
+  DataNode and application JVM heaps and the operating system page cache.
 
 *** Optional
 
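
As an illustration of the paragraph above (the 64 MB value and class name are arbitrary examples, not recommendations from this change), the property can be set on a Configuration object, keeping in mind that it takes bytes while `ulimit -l` reports KB:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class MaxLockedMemorySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // dfs.datanode.max.locked.memory is specified in bytes.
        long maxLockedBytes = 64L * 1024 * 1024;  // 64 MB, example value only
        conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
            maxLockedBytes);
        // On Unix-like systems, `ulimit -l` prints the limit in KB, so the
        // matching ulimit for this value would need to be at least this many KB.
        System.out.println("required ulimit -l (KB): " + maxLockedBytes / 1024);
      }
    }
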
@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
   "unlimited," indicating that there is no limit. Note that it's typical for
   <<<ulimit -l>>> to output the memory lock limit in KB, but
   dfs.datanode.max.locked.memory must be specified in bytes.
+
+  This information does not apply to deployments on Windows. Windows has no
+  direct equivalent of <<<ulimit -l>>>.
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached. This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+            (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
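
The rewritten helper releases the block's stream and channel in a finally block, which matters on Windows where an open handle keeps the underlying block file from being deleted between tests. A standalone sketch of the same IOUtils.cleanup pattern (the local file path and class name are made up for illustration):

    import java.io.FileInputStream;
    import java.nio.channels.FileChannel;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.io.IOUtils;

    public class CleanupSketch {
      private static final Log LOG = LogFactory.getLog(CleanupSketch.class);

      static long sizeOf(String path) throws Exception {
        FileInputStream in = null;
        FileChannel channel = null;
        try {
          in = new FileInputStream(path);  // illustrative local file
          channel = in.getChannel();
          return channel.size();
        } finally {
          // Closes both resources and logs (rather than rethrows) any close
          // failure, so the handle is always released.
          IOUtils.cleanup(LOG, channel, in);
        }
      }
    }
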
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }