HDFS-6948. DN rejects blocks if it has older UC block. Contributed by Eric Payne.
(cherry picked from commit f02d934fed)
Kihwal Lee 2014-09-19 08:52:16 -05:00
parent 9333ee3068
commit 2a6c9f0725
3 changed files with 38 additions and 4 deletions
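
For readers skimming the diff below: the patch changes FsDatasetImpl#createTemporary so that, when the DataNode already holds an under-construction (UC) replica of the block with an older generation stamp, it stops the stale writer and invalidates that replica instead of rejecting the new write with ReplicaAlreadyExistsException. The following is a minimal, self-contained sketch of that decision logic only, not the HDFS source; Replica, stopWriter and volumeMap below are simplified stand-ins for ReplicaInfo/ReplicaInPipeline and the DataNode's replica map, and IllegalStateException stands in for ReplicaAlreadyExistsException.

import java.util.HashMap;
import java.util.Map;

public class CreateTemporarySketch {
  // Simplified stand-in for ReplicaInfo / ReplicaInPipeline.
  static class Replica {
    final long blockId;
    final long genStamp;
    private boolean writerActive;      // true while a previous client is still writing

    Replica(long blockId, long genStamp) {
      this.blockId = blockId;
      this.genStamp = genStamp;
      this.writerActive = true;
    }

    boolean isInPipeline() { return writerActive; }

    // Stand-in for ReplicaInPipeline#stopWriter(timeout).
    void stopWriter() { writerActive = false; }
  }

  // Stand-in for the DataNode's per-block-pool replica map.
  private final Map<Long, Replica> volumeMap = new HashMap<>();

  // Mirrors the new createTemporary() decision: replace a stale, older-genstamp
  // UC replica; otherwise reject the duplicate.
  Replica createTemporary(long blockId, long genStamp) {
    Replica existing = volumeMap.get(blockId);
    if (existing != null) {
      if (existing.genStamp < genStamp && existing.isInPipeline()) {
        existing.stopWriter();         // stop the previous writer
        volumeMap.remove(blockId);     // stand-in for invalidate()
      } else {
        throw new IllegalStateException("Block " + blockId
            + " already exists and thus cannot be created.");
      }
    }
    Replica created = new Replica(blockId, genStamp);
    volumeMap.put(blockId, created);
    return created;
  }

  public static void main(String[] args) {
    CreateTemporarySketch dataSet = new CreateTemporarySketch();
    dataSet.createTemporary(1L, 1000L);               // first writer succeeds
    try {
      dataSet.createTemporary(1L, 1000L);             // same genstamp: still rejected
    } catch (IllegalStateException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
    Replica r = dataSet.createTemporary(1L, 10000L);  // newer genstamp replaces the stale UC replica
    System.out.println("replaced, new genstamp = " + r.genStamp);
  }
}

Running the sketch's main() mirrors the new TestWriteToReplica case below: a duplicate with the same generation stamp is still rejected, while a request carrying a newer generation stamp replaces the stale replica.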


@@ -227,6 +227,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+    HDFS-6948. DN rejects blocks if it has older UC block
+    (Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)


@@ -1090,10 +1090,18 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+            .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
     }
 
     FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());
     // create a temporary file to hold block in the designated volume


@@ -111,7 +111,7 @@ public class TestWriteToReplica
 
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica
     }
 
     dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+    try {
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.fail("Should not have created a replica that had already been "
+          + "created " + blocks[NON_EXISTENT]);
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
+      Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
+    }
+
+    long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
+    blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
+    try {
+      ReplicaInPipeline replicaInfo =
+          dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
+      Assert.assertTrue(
+          replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
+    } catch (ReplicaAlreadyExistsException e) {
+      Assert.fail("createRbw() Should have removed the block with the older "
+          + "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
+    }
   }
 }