HDFS-5723. Append failed FINALIZED replica should not be accepted as valid when that block is under construction. Contributed by Vinayakumar B.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615491 13f79535-47bb-0310-9956-ffa450edef68
Vinayakumar B 2014-08-04 06:44:06 +00:00
parent 04ae560370
commit 30a9ec26f7
4 changed files with 78 additions and 1 deletion

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -429,6 +429,10 @@ Release 2.6.0 - UNRELEASED

    HDFS-6810. StorageReport array is initialized with wrong size in
    DatanodeDescriptor#getStorageReports. (szetszwo via Arpit Agarwal)

    HDFS-5723. Append failed FINALIZED replica should not be accepted as valid
    when that block is under construction. (vinayakumarb)

Release 2.5.0 - UNRELEASED

  INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -2167,6 +2167,16 @@ private BlockToMarkCorrupt checkReplicaCorrupt(
      } else {
        return null; // not corrupt
      }
    case UNDER_CONSTRUCTION:
      if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
        final long reportedGS = reported.getGenerationStamp();
        return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is "
            + ucState + " and reported state " + reportedState
            + ", But reported genstamp " + reportedGS
            + " does not match genstamp in block map "
            + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
      }
      return null;
    default:
      return null;
    }
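The new UNDER_CONSTRUCTION case above is the core of the fix: a replica reporting FINALIZED while the stored block is still under construction is only trusted when its generation stamp matches the block map. A minimal standalone sketch of that decision, using hypothetical names (the real logic lives in BlockManager#checkReplicaCorrupt), might look like:

// Hedged sketch of the stale-replica check introduced by HDFS-5723;
// illustrative names only, not the actual HDFS classes.
final class StaleReplicaCheck {

  /**
   * A FINALIZED report against an under-construction block is stale when
   * the block map holds a higher genstamp: a later append bumped the
   * genstamp, so the reporting replica (e.g. from a datanode that was
   * down during the append) missed it and must be marked corrupt.
   */
  static boolean isStaleFinalizedReplica(long storedGenStamp,
      long reportedGenStamp) {
    return storedGenStamp > reportedGenStamp;
  }

  public static void main(String[] args) {
    long storedGS = 1003;   // block map after two successful appends
    long reportedGS = 1001; // replica from the datanode that was down
    // true -> the replica is marked corrupt instead of being accepted
    System.out.println(isStaleFinalizedReplica(storedGS, reportedGS));
  }
}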

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -19,6 +19,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileNotFoundException;
@@ -32,6 +33,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -39,6 +41,7 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.Assert;
import org.junit.Test;
@@ -169,6 +172,7 @@ public void testCopyOnWrite() throws IOException {
      }
    } finally {
      client.close();
      fs.close();
      cluster.shutdown();
    }
@@ -380,4 +384,57 @@ public void testAppendAfterSoftLimit()
    }
  }

  /**
   * Old replica of the block should not be accepted as valid for append/read
   */
  @Test
  public void testFailedAppendBlockRejection() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.client.block.write.replace-datanode-on-failure.enable",
        "false");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
        .build();
    DistributedFileSystem fs = null;
    try {
      fs = cluster.getFileSystem();
      Path path = new Path("/test");
      FSDataOutputStream out = fs.create(path);
      out.writeBytes("hello\n");
      out.close();

      // stop one datanode
      DataNodeProperties dnProp = cluster.stopDataNode(0);
      String dnAddress = dnProp.datanode.getXferAddress().toString();
      if (dnAddress.startsWith("/")) {
        dnAddress = dnAddress.substring(1);
      }

      // append twice so the block's generation stamp moves past the
      // replica left on the stopped datanode
      for (int i = 0; i < 2; i++) {
        out = fs.append(path);
        out.writeBytes("helloagain\n");
        out.close();
      }

      // re-open for append so the block goes back to UNDER_CONSTRUCTION
      out = fs.append(path);
      cluster.restartDataNode(dnProp, true);
      // wait until the restarted datanode's block report arrives
      Thread.sleep(2000);

      // the block locations must not include the restarted datanode,
      // whose replica carries a stale generation stamp
      BlockLocation[] locations = fs.getFileBlockLocations(path, 0,
          Long.MAX_VALUE);
      String[] names = locations[0].getNames();
      for (String node : names) {
        if (node.equals(dnAddress)) {
          fail("Failed append should not be present in latest block locations.");
        }
      }
      out.close();
    } finally {
      IOUtils.closeStream(fs);
      cluster.shutdown();
    }
  }
}
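One caveat in the test above: the fixed Thread.sleep(2000) is a pragmatic wait for the restarted datanode's block report. Since the assertion checks for the absence of a location, some fixed wait is hard to avoid here, but where presence of a condition is expected, a polling loop is less flaky than a fixed sleep. A generic sketch, assuming no dedicated Hadoop test utility (all names illustrative):

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

// Illustrative polling helper; not part of the Hadoop test APIs.
final class WaitUtil {

  /** Polls the condition every intervalMs until it holds or timeoutMs elapses. */
  static void waitFor(BooleanSupplier condition, long intervalMs, long timeoutMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }
}

A caller would poll, for example, fs.getFileBlockLocations(...) until the expected replica count appears, failing fast with a clear timeout instead of silently racing a fixed interval.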

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

@@ -50,6 +50,7 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -725,7 +726,12 @@ void invoke() throws Exception {
      client.getNamenode().updatePipeline(client.getClientName(), oldBlock,
          newBlock, newNodes, storageIDs);
      out.close();
      // close() can fail if it commits the block after the namenode has
      // already processed the block-received notifications from the
      // datanodes. Since the datanodes and the output stream still carry
      // the old genstamp, those replicas are marked corrupt after HDFS-5723
      // when the RECEIVED notifications reach the namenode first, and
      // close() then fails; hence the stream is aborted instead of closed.
      DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
    }

    @Override
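To make the race in that comment concrete, here is a toy two-actor model, with purely hypothetical names and no real HDFS types: a retried updatePipeline bumps the block's genstamp on the namenode, after which a RECEIVED notification still carrying the old genstamp identifies a corrupt replica, so a subsequent close() cannot commit the block.

// Toy model of the updatePipeline vs. blockReceived race described above;
// all names are illustrative, none of this is actual HDFS code.
final class GenstampRaceModel {

  private long blockMapGenstamp = 1001; // the namenode's recorded genstamp

  /** updatePipeline: the namenode records the new generation stamp. */
  void updatePipeline(long newGenstamp) {
    blockMapGenstamp = newGenstamp;
  }

  /** blockReceived: after HDFS-5723, a stale genstamp means corrupt. */
  boolean acceptReceived(long replicaGenstamp) {
    return replicaGenstamp >= blockMapGenstamp;
  }

  public static void main(String[] args) {
    GenstampRaceModel nn = new GenstampRaceModel();
    nn.updatePipeline(1002); // the retried updatePipeline wins the race
    // The datanodes still report the old genstamp 1001: the replicas are
    // rejected, so close() could never commit the block; this is why the
    // test aborts the stream instead of closing it.
    System.out.println(nn.acceptReceived(1001)); // false
  }
}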