diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
index 68efac2b9cc..acd47fb96c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -131,7 +132,7 @@ public static void setup() throws Exception {
     String testRoot = fsHelper.getTestRootDir();
     testRootDir = new File(testRoot).getAbsoluteFile();
     final Path jksPath = new Path(testRootDir.toString(), "test.jks");
-    config.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+    config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
         JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
 
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 436d2f0b318..9d4d8edf129 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,6 +564,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+    HDFS-6948. DN rejects blocks if it has older UC block
+    (Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -774,6 +777,11 @@ Release 2.6.0 - UNRELEASED
     HDFS-6840. Clients are always sent to the same datanode when read is
     off rack. (wang)
 
+    HDFS-7065. Pipeline close recovery race can cause block corruption (kihwal)
+
+    HDFS-7096. Fix TestRpcProgramNfs3 to use DFS_ENCRYPTION_KEY_PROVIDER_URI
+    (clamb via cmccabe)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HDFS-6387. HDFS CLI admin tool for creating & deleting an
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index eff904f9a62..a29f5e6ee6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -948,7 +948,7 @@ public synchronized ReplicaInPipeline recoverAppend(ExtendedBlock b,
   }
 
   @Override // FsDatasetSpi
-  public String recoverClose(ExtendedBlock b, long newGS,
+  public synchronized String recoverClose(ExtendedBlock b, long newGS,
       long expectedBlockLen) throws IOException {
     LOG.info("Recover failed close " + b);
     // check replica's state
@@ -1152,9 +1152,17 @@ public synchronized ReplicaInPipeline createTemporary(StorageType storageType,
       ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+            .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
     }
 
     FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index 73b0a5f1cdb..60c6d0304f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -111,7 +111,7 @@ public void testWriteToRbw() throws Exception {
 
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
@@ -475,5 +475,28 @@ private void testWriteToTemporary(FsDatasetImpl dataSet, ExtendedBlock[] blocks)
     }
 
     dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+    try {
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.fail("Should not have created a replica that had already been "
+          + "created " + blocks[NON_EXISTENT]);
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
+      Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
+    }
+
+    long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
+    blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
+    try {
+      ReplicaInPipeline replicaInfo =
+          dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
+      Assert.assertTrue(
+          replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
+    } catch (ReplicaAlreadyExistsException e) {
+      Assert.fail("createRbw() Should have removed the block with the older "
+          + "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
+    }
   }
 }