HDFS-5404. Resolve regressions in Windows compatibility on HDFS-4949 branch. Contributed by Chris Nauroth.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1535217 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang 2013-10-24 00:08:15 +00:00
parent 69e5f90e9f
commit 4004a42d53
4 changed files with 84 additions and 66 deletions

hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c

@@ -383,6 +383,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native(
  JNIEnv *env, jclass clazz,
  jobject buffer, jlong len)
{
#ifdef UNIX
  void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer);
  PASS_EXCEPTIONS(env);
@@ -390,6 +391,12 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mlock_1native(
    CHECK_DIRECT_BUFFER_ADDRESS(buf);
    throw_ioe(env, errno);
  }
#endif
#ifdef WINDOWS
  THROW(env, "java/io/IOException",
    "The function POSIX.mlock_native() is not supported on Windows");
#endif
}
/**
@@ -404,6 +411,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native(
  JNIEnv *env, jclass clazz,
  jobject buffer, jlong len)
{
#ifdef UNIX
  void* buf = (void*)(*env)->GetDirectBufferAddress(env, buffer);
  PASS_EXCEPTIONS(env);
@@ -411,6 +419,12 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munlock_1native(
    CHECK_DIRECT_BUFFER_ADDRESS(buf);
    throw_ioe(env, errno);
  }
#endif
#ifdef WINDOWS
  THROW(env, "java/io/IOException",
    "The function POSIX.munlock_native() is not supported on Windows");
#endif
}
#ifdef __FreeBSD__

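For context, the guard pattern above degrades gracefully because Java callers can probe the platform before invoking the natives. Below is a minimal, self-contained sketch of such a caller-side guard; it is not part of this patch, and MemoryPinningExample, tryPin(), and pinNative() are hypothetical names standing in for the real NativeIO entry points.

// Caller-side guard (hypothetical sketch, not from this patch): since the
// native stubs above now throw IOException on Windows, a caller can probe
// the platform and skip memory pinning instead of failing outright.
import java.io.IOException;
import java.nio.ByteBuffer;

public class MemoryPinningExample {
  private static final boolean WINDOWS =
      System.getProperty("os.name").startsWith("Windows");

  /** Try to pin a direct buffer; returns false where pinning is unsupported. */
  public static boolean tryPin(ByteBuffer buffer) {
    if (WINDOWS || !buffer.isDirect()) {
      return false; // unsupported; the caller proceeds without pinning
    }
    try {
      pinNative(buffer, buffer.capacity()); // hypothetical JNI entry point
      return true;
    } catch (IOException e) {
      return false;
    }
  }

  // Hypothetical native method, analogous to POSIX.mlock_native() above.
  private static native void pinNative(ByteBuffer buffer, long len)
      throws IOException;
}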
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt

@@ -115,3 +115,6 @@ HDFS-4949 (Unreleased)
    HDFS-5385. Caching RPCs are AtMostOnce, but do not persist client ID and
    call ID to edit log. (Chris Nauroth via Colin Patrick McCabe)

    HDFS-5404. Resolve regressions in Windows compatibility on HDFS-4949
    branch. (Chris Nauroth via Andrew Wang)

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java

@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assume.assumeTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyInt;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
@@ -72,6 +74,8 @@ public class TestFsDatasetCache {
  @Before
  public void setUp() throws Exception {
    assumeTrue(!Path.WINDOWS);
    assumeTrue(NativeIO.isAvailable());
    conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,

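The assumeTrue() calls above skip every test in the fixture, rather than failing it, when the platform or the native library cannot support caching. A minimal, self-contained sketch of that JUnit 4 idiom follows; the class and test names are illustrative, not from this patch.

import static org.junit.Assume.assumeTrue;

import org.junit.Before;
import org.junit.Test;

public class PlatformGatedTest {
  @Before
  public void setUp() {
    // A false assumption skips (rather than fails) every test in the class.
    assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  }

  @Test
  public void runsOnlyOffWindows() {
    // Executes only when the assumption in setUp() held.
  }
}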
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java

@@ -447,11 +447,11 @@ public class TestPathBasedCacheRequests {
  @Test(timeout=60000)
  public void testCacheManagerRestart() throws Exception {
    cluster.shutdown();
    cluster = null;
    HdfsConfiguration conf = createCachingConf();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
@@ -515,9 +515,6 @@
        assertEquals(pool, cd.getPool());
      }
      assertFalse("Unexpected # of cache descriptors found", dit.hasNext());
    } finally {
      cluster.shutdown();
    }
  }
  private static void waitForCachedBlocks(NameNode nn,
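The fix above reassigns the class-level cluster field instead of shadowing it with a local variable, so a shared @After hook can always shut the restarted cluster down, even when the test body throws. A minimal sketch of that pattern, with MiniCluster as a hypothetical stand-in for MiniDFSCluster:

import org.junit.After;
import org.junit.Test;

public class ClusterRestartPatternExample {
  // Hypothetical stand-in for MiniDFSCluster.
  static class MiniCluster {
    void shutdown() { /* release resources */ }
  }

  private MiniCluster cluster = new MiniCluster();

  @Test
  public void testRestart() throws Exception {
    cluster.shutdown();
    cluster = null;
    // Reassign the field rather than declaring a new local, so tearDown()
    // sees the restarted instance even if the rest of the test throws.
    cluster = new MiniCluster();
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}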