From 4e4dd0f15ceafb4734cbd9ead3d244668eadce20 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Mon, 30 Sep 2013 22:33:01 +0000
Subject: [PATCH] svn merge -c 1527807. merging from trunk to branch-2 to fix
 HDFS-4517.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1527808 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 +
 .../hdfs/TestShortCircuitLocalRead.java            | 60 +++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1868be90911..406865a29cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -59,6 +59,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5260. Merge zero-copy memory-mapped HDFS client reads to trunk and
     branch-2. (cnauroth)

+    HDFS-4517. Cover class RemoteBlockReader with unit tests. (Vadim Bondarev
+    and Dennis Y via kihwal)
+
   OPTIMIZATIONS

   HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
index 17a3077da0f..58a069d129a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
@@ -578,4 +578,64 @@ public void run() {
     System.out.println("Iteration " + iteration + " took " + (end - start));
     fs.delete(file1, false);
   }

  /**
   * Verifies that file data can be read through the legacy
   * {@code RemoteBlockReader} code path.
   *
   * @throws IOException if the read or cluster setup fails
   * @throws InterruptedException if the test is interrupted
   */
  // NOTE(review): @Test added — without it, JUnit 4 never executes this
  // method (the rest of this class annotates its tests).
  @Test(timeout = 10000)
  public void testReadWithRemoteBlockReader()
      throws IOException, InterruptedException {
    doTestShortCircuitReadWithRemoteBlockReader(true, 3 * blockSize + 100,
        getCurrentUser(), 0, false);
  }

  /**
   * Writes a file of the given size, reads it back through
   * {@code RemoteBlockReader}, and verifies both the data and that
   * {@code read(ByteBuffer)} is unsupported on that reader.
   *
   * @param ignoreChecksum currently unused by this body — the legacy
   *        block reader is always enabled; kept for signature parity
   * @param size number of random bytes to write
   * @param shortCircuitUser user name passed to checkFileContent
   * @param readOffset offset at which to start verification reads
   * @param shortCircuitFails whether checkFileContent should expect
   *        the short-circuit path to fail
   * @throws IOException wrapped with test context on read failure
   * @throws InterruptedException if interrupted while reading
   */
  public void doTestShortCircuitReadWithRemoteBlockReader(
      boolean ignoreChecksum, int size, String shortCircuitUser,
      int readOffset, boolean shortCircuitFails)
      throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    // Force the legacy RemoteBlockReader implementation.
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
        .format(true).build();
    FileSystem fs = cluster.getFileSystem();
    // Sanity check that the cluster came up with a root directory.
    Path path = new Path("/");
    URI uri = cluster.getURI();
    assertTrue("/ should be a directory",
        fs.getFileStatus(path).isDirectory());

    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();
    try {
      checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser,
          conf, shortCircuitFails);
      // RemoteBlockReader does not implement read(ByteBuffer bf).
      assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
          checkUnsupportedMethod(fs, file1, fileData, readOffset));
    } catch (IOException e) {
      throw new IOException(
          "doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
    } finally {
      // Always release the client and the mini-cluster.
      fs.close();
      cluster.shutdown();
    }
  }

  /**
   * Returns true iff reading the file into a direct ByteBuffer throws
   * {@link UnsupportedOperationException}, which is the expected behavior
   * for RemoteBlockReader-backed streams.
   */
  private boolean checkUnsupportedMethod(FileSystem fs, Path file,
      byte[] expected, int readOffset) throws IOException {
    HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(file);
    ByteBuffer actual =
        ByteBuffer.allocateDirect(expected.length - readOffset);
    try {
      IOUtils.skipFully(stm, readOffset);
      stm.read(actual);
    } catch (UnsupportedOperationException unex) {
      return true;
    } finally {
      // Fixed: the original patch leaked this stream on every path.
      stm.close();
    }
    return false;
  }

}