diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java index cff259f1651..db42a8f74a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java @@ -844,26 +844,18 @@ public class DFSInputStream extends FSInputStream @Override public int doRead(BlockReader blockReader, int off, int len) throws IOException { - int oldpos = buf.position(); - int oldlimit = buf.limit(); - boolean success = false; - try { - int ret = blockReader.read(buf); - success = true; - updateReadStatistics(readStatistics, ret, blockReader); - dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(), - ret); - if (ret == 0) { - DFSClient.LOG.warn("zero"); - } - return ret; - } finally { - if (!success) { - // Reset to original state so that retries work correctly. 
- buf.position(oldpos); - buf.limit(oldlimit); - } + ByteBuffer tmpBuf = buf.duplicate(); + tmpBuf.limit(tmpBuf.position() + len); + int nRead = blockReader.read(tmpBuf); + updateReadStatistics(readStatistics, nRead, blockReader); + dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(), + nRead); + if (nRead == 0) { + DFSClient.LOG.warn("zero-byte read from block reader"); + } else if (nRead > 0) { + buf.position(buf.position() + nRead); } + return nRead; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 0221b301bb1..f2a6642825a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -189,6 +189,8 @@ public class TestEncryptionZones { testRootDir = new File(testRoot).getAbsoluteFile(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI()); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES, + true); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); // Lower the batch size for testing conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, @@ -1418,6 +1420,14 @@ public class TestEncryptionZones { assertEquals("Got unexpected ez path", zone.toString(), dfsAdmin.getEncryptionZoneForPath(snap1Zone).getPath().toString()); + // Append the file + DFSTestUtil.appendFile(fs, zoneFile, len); + // Verify file content in the snapshot + final Path snapshottedZoneFile = new Path( + snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName()); + assertEquals("Contents of snapshotted file have changed unexpectedly", + contents, DFSTestUtil.readFile(fs, snapshottedZoneFile)); + // Now delete the encryption zone, recreate the dir, and take another // 
snapshot fsWrapper.delete(zone, true); @@ -1470,8 +1480,6 @@ public class TestEncryptionZones { assertEquals("Unexpected ez key", TEST_KEY2, listZone.getKeyName()); // Verify contents of the snapshotted file - final Path snapshottedZoneFile = new Path( - snap1.toString() + "/" + zone.getName() + "/" + zoneFile.getName()); assertEquals("Contents of snapshotted file have changed unexpectedly", contents, DFSTestUtil.readFile(fs, snapshottedZoneFile));