HDFS-5202. Merging change r1609618 from trunk to branch-2.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1609619 13f79535-47bb-0310-9956-ffa450edef68
parent ebbf204aa2
commit 9e5c339023
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -27,6 +27,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly
     in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)
 
+    HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd

@@ -47,7 +47,7 @@ if "%1" == "--config" (
     goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -146,6 +146,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
   goto :eof
 
+:cacheadmin
+  set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -193,6 +197,7 @@ goto :eof
   @echo   current directory contents with a snapshot
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
   @echo                        Use -help to see options
+  @echo   cacheadmin           configure the HDFS cache
   @echo.
   @echo Most commands print help when invoked w/o parameters.
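With these three hunks, hdfs.cmd accepts cacheadmin the same way the Unix hdfs script does: the command is added to the %hdfscommands% dispatch list, a :cacheadmin label sets %CLASS% to org.apache.hadoop.hdfs.tools.CacheAdmin, and a usage line documents it. Assuming a running, cache-enabled cluster, a quick smoke test of the new subcommand would be: hdfs cacheadmin -listPools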
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -752,6 +752,9 @@ public class DataNode extends Configured
             " size (%s) is greater than zero and native code is not available.",
             DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
+      if (Path.WINDOWS) {
+        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
+      } else {
       long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
       if (dnConf.maxLockedMemory > ulimit) {
         throw new RuntimeException(String.format(
@@ -763,6 +766,7 @@ public class DataNode extends Configured
           ulimit));
       }
     }
+    }
     LOG.info("Starting DataNode with maxLockedMemory = " +
         dnConf.maxLockedMemory);
 
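Read together, the two DataNode.java hunks wrap the existing POSIX memlock check in a platform branch. The following is a minimal sketch of the resulting startup logic, pulled out into a hypothetical standalone helper for illustration; the real code lives inline in DataNode's startup path, and the error messages are abbreviated here.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.nativeio.NativeIO;

// Hypothetical helper mirroring the platform check added above.
public class MemlockStartupCheck {
  public static void check(long maxLockedMemory) throws IOException {
    if (maxLockedMemory <= 0) {
      return; // caching disabled; nothing to verify
    }
    if (Path.WINDOWS) {
      // Windows has no mlock ulimit. Instead, raise the process working set
      // quota so that later attempts to pin cache blocks in memory succeed.
      NativeIO.Windows.extendWorkingSetSize(maxLockedMemory);
    } else {
      // On POSIX systems, RLIMIT_MEMLOCK ("ulimit -l") caps how much memory
      // the process may lock, so fail fast if the configuration exceeds it.
      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
      if (maxLockedMemory > ulimit) {
        throw new RuntimeException(String.format(
            "Configured max locked memory %d exceeds memlock ulimit %d",
            maxLockedMemory, ulimit));
      }
    }
  }
}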
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm

@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
 ** {Native Libraries}
 
   In order to lock block files into memory, the DataNode relies on native JNI
-  code found in <<<libhadoop.so>>>. Be sure to
+  code found in <<<libhadoop.so>>> or <<<hadoop.dll>>> on Windows. Be sure to
   {{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
   centralized cache management.
 
@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
 * dfs.datanode.max.locked.memory
 
   This determines the maximum amount of memory a DataNode will use for caching.
-  The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
-  also needs to be increased to match this parameter (see below section on
-  {{OS Limits}}). When setting this value, please remember that you will need
-  space in memory for other things as well, such as the DataNode and
-  application JVM heaps and the operating system page cache.
+  On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
+  the DataNode user also needs to be increased to match this parameter (see
+  below section on {{OS Limits}}). When setting this value, please remember
+  that you will need space in memory for other things as well, such as the
+  DataNode and application JVM heaps and the operating system page cache.
 
 *** Optional
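Since dfs.datanode.max.locked.memory is a per-DataNode byte count configured in hdfs-site.xml, a 64 MB cache budget, for example, would be written as the value 67108864.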
@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
   "unlimited," indicating that there is no limit. Note that it's typical for
   <<<ulimit -l>>> to output the memory lock limit in KB, but
   dfs.datanode.max.locked.memory must be specified in bytes.
+
+  This information does not apply to deployments on Windows.  Windows has no
+  direct equivalent of <<<ulimit -l>>>.
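A worked example of the unit mismatch the doc warns about: if ulimit -l prints 65536, the limit is 65536 KB = 67108864 bytes, which is exactly enough for a dfs.datanode.max.locked.memory of 64 MB.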
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
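Dropping the assumeTrue(!Path.WINDOWS) guard here (together with its now-unused static import, removed in the first test hunk) means TestFsDatasetCache actually runs on Windows instead of being skipped there.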
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached. This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
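The new tearDown check enforces the invariant described in its comment: DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd) verifies that cached bytes and cached blocks are back to zero, so a test that forgets to uncache fails immediately rather than leaking file descriptors into later tests.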
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+            (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
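The rewrite replaces the chained one-liner with an explicit try/finally. IOUtils.cleanup(Log, Closeable...) closes whichever of the channel and stream were actually opened, tolerating nulls and logging rather than rethrowing close failures, so an exception from getBlockInputStream or getChannel can no longer leak the underlying descriptor. This matters especially on Windows, where an open handle also prevents the file from being deleted.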
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }
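Removing the short-lived cache directive at the end of the test brings the cluster back to zero cached blocks, which the strengthened tearDown check above now requires.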