diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 187be3b5615..77b889757ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -798,6 +798,9 @@ Release 2.7.0 - UNRELEASED HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal) + HDFS-7885. Datanode should not trust the generation stamp provided by + client. (Tsz Wo Nicholas Sze via jing9) + BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS HDFS-7720. Quota by Storage Type API, tools and ClientNameNode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 01d208f21c3..9376accb986 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -2568,6 +2568,21 @@ public synchronized void deleteBlockPool(String bpid, boolean force) @Override // FsDatasetSpi public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException { + synchronized(this) { + final Replica replica = volumeMap.get(block.getBlockPoolId(), + block.getBlockId()); + if (replica == null) { + throw new ReplicaNotFoundException(block); + } + if (replica.getGenerationStamp() < block.getGenerationStamp()) { + throw new IOException( + "Replica generation stamp < block generation stamp, block=" + + block + ", replica=" + replica); + } else if (replica.getGenerationStamp() > block.getGenerationStamp()) { + block.setGenerationStamp(replica.getGenerationStamp()); + } + } + File datafile = getBlockFile(block); File metafile = FsDatasetUtil.getMetaFile(datafile, block.getGenerationStamp()); BlockLocalPathInfo info = 
new BlockLocalPathInfo(block, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java index cb505394620..1c4134f1f0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java @@ -30,11 +30,16 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo; +import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.TemporarySocketDirectory; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; import org.junit.Assert; import org.junit.Assume; import org.junit.BeforeClass; @@ -153,4 +158,62 @@ public void testBothOldAndNewShortCircuitConfigured() throws Exception { Arrays.equals(orig, buf); cluster.shutdown(); } + + @Test(timeout=20000) + public void testBlockReaderLocalLegacyWithAppend() throws Exception { + final short REPL_FACTOR = 1; + final HdfsConfiguration conf = getConfiguration(null); + conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true); + + final MiniDFSCluster cluster = + new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + + final DistributedFileSystem dfs = cluster.getFileSystem(); + final Path path = new Path("/testBlockReaderLocalLegacy"); + DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0); + 
DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR); + + final ClientDatanodeProtocol proxy; + final Token<BlockTokenIdentifier> token; + final ExtendedBlock originalBlock; + final long originalGS; + { + final LocatedBlock lb = cluster.getNameNode().getRpcServer() + .getBlockLocations(path.toString(), 0, 1).get(0); + proxy = DFSUtil.createClientDatanodeProtocolProxy( + lb.getLocations()[0], conf, 60000, false); + token = lb.getBlockToken(); + + // get block and generation stamp + final ExtendedBlock blk = new ExtendedBlock(lb.getBlock()); + originalBlock = new ExtendedBlock(blk); + originalGS = originalBlock.getGenerationStamp(); + + // test getBlockLocalPathInfo + final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token); + Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp()); + } + + { // append one byte + FSDataOutputStream out = dfs.append(path); + out.write(1); + out.close(); + } + + { + // get new generation stamp + final LocatedBlock lb = cluster.getNameNode().getRpcServer() + .getBlockLocations(path.toString(), 0, 1).get(0); + final long newGS = lb.getBlock().getGenerationStamp(); + Assert.assertTrue(newGS > originalGS); + + // getBlockLocalPathInfo using the original block. + Assert.assertEquals(originalGS, originalBlock.getGenerationStamp()); + final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo( + originalBlock, token); + Assert.assertEquals(newGS, info.getBlock().getGenerationStamp()); + } + cluster.shutdown(); + } }