svn merge -c 1527807. Merging from trunk to branch-2 to fix HDFS-4517.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1527808 13f79535-47bb-0310-9956-ffa450edef68
parent 099c4eef3f
commit 4e4dd0f15c
@@ -59,6 +59,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5260. Merge zero-copy memory-mapped HDFS client reads to trunk and
     branch-2. (cnauroth)
 
+    HDFS-4517. Cover class RemoteBlockReader with unit tests. (Vadim Bondarev
+    and Dennis Y via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -578,4 +578,64 @@ public class TestShortCircuitLocalRead {
     System.out.println("Iteration " + iteration + " took " + (end - start));
     fs.delete(file1, false);
   }
+
+  public void testReadWithRemoteBlockReader() throws IOException, InterruptedException {
+    doTestShortCircuitReadWithRemoteBlockReader(true, 3*blockSize+100, getCurrentUser(), 0, false);
+  }
+
+  /**
+   * Test that file data can be read by reading the block
+   * through RemoteBlockReader
+   * @throws IOException
+   */
+  public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
+      int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FileSystem fs = cluster.getFileSystem();
+    // check that / exists
+    Path path = new Path("/");
+    URI uri = cluster.getURI();
+    assertTrue("/ should be a directory", fs.getFileStatus(path)
+        .isDirectory() == true);
+
+    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
+    Path file1 = new Path("filelocal.dat");
+    FSDataOutputStream stm = createFile(fs, file1, 1);
+
+    stm.write(fileData);
+    stm.close();
+    try {
+      checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, conf, shortCircuitFails);
+      //RemoteBlockReader have unsupported method read(ByteBuffer bf)
+      assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
+          checkUnsupportedMethod(fs, file1, fileData, readOffset));
+    } catch(IOException e) {
+      throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
+    } catch(InterruptedException inEx) {
+      throw inEx;
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  private boolean checkUnsupportedMethod(FileSystem fs, Path file,
+      byte[] expected, int readOffset) throws IOException {
+    HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(file);
+    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
+    IOUtils.skipFully(stm, readOffset);
+    try {
+      stm.read(actual);
+    } catch(UnsupportedOperationException unex) {
+      return true;
+    }
+    return false;
+  }
+
+
 }
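
For context on what the new checkUnsupportedMethod assertion exercises: with DFS_CLIENT_USE_LEGACY_BLOCKREADER set, the client reads through the legacy RemoteBlockReader, whose ByteBuffer read path is not implemented, so the test expects stm.read(actual) to throw UnsupportedOperationException and treats that as success. A minimal standalone sketch of that pattern follows (LegacyStyleStream and Demo are hypothetical names for illustration, not Hadoop classes):

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

// Hypothetical stand-in for a reader that implements only the byte[] path.
class LegacyStyleStream extends InputStream {
  @Override
  public int read() throws IOException {
    return -1;  // byte-oriented reads succeed (trivially empty here)
  }

  // The ByteBuffer overload is deliberately unimplemented, mirroring the
  // behavior the test above asserts for the legacy RemoteBlockReader.
  public int read(ByteBuffer buf) throws IOException {
    throw new UnsupportedOperationException("read(ByteBuffer) not supported");
  }
}

class Demo {
  public static void main(String[] args) throws IOException {
    LegacyStyleStream in = new LegacyStyleStream();
    try {
      in.read(ByteBuffer.allocateDirect(16));
    } catch (UnsupportedOperationException expected) {
      // checkUnsupportedMethod() in the patch returns true on exactly this path.
      System.out.println("caught expected: " + expected.getMessage());
    }
  }
}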