HDFS-9530. ReservedSpace is not cleared for abandoned Blocks (Contributed by Brahma Reddy Battula)

Brahma Reddy Battula 2016-06-21 15:42:28 +05:30
parent 46f1602e89
commit f2ac132d6a
4 changed files with 57 additions and 0 deletions
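
In brief: a DataNode reserves disk space on a volume for every replica being written (RBW). When pipeline setup to a mirror fails, the client abandons the block and allocates a new one, but FsDatasetImpl#invalidate removed the abandoned replica without returning its reservation, so the volume's reserved-space counter leaked upward. The patch releases the reservation for any invalidated replica that is still in the write pipeline, and adds a fault-injection hook plus a regression test that reproduces the leak.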

View File: DataNodeFaultInjector.java

@@ -53,4 +53,6 @@ public boolean dropHeartbeatPacket() {
   public void stopSendingPacketDownstream() throws IOException {}
 
   public void noRegistration() throws IOException { }
+
+  public void failMirrorConnection() throws IOException { }
 }

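The new hook is a no-op in the default injector; tests replace the process-wide singleton with a subclass that throws. A minimal sketch of the intended usage, assuming only the get()/set() accessors and the hook shown in this patch (the try/finally restore mirrors what the new test does in shutdownCluster):

DataNodeFaultInjector original = DataNodeFaultInjector.get();
DataNodeFaultInjector.set(new DataNodeFaultInjector() {
  @Override
  public void failMirrorConnection() throws IOException {
    // Simulate a DataNode that cannot reach its downstream mirror.
    throw new IOException("Injected mirror connection failure");
  }
});
try {
  // ... write a file; DataXceiver#writeBlock calls the hook while
  // setting up the pipeline, so the write to this DataNode fails ...
} finally {
  DataNodeFaultInjector.set(original); // always restore the default
}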
View File: DataXceiver.java

@@ -739,6 +739,9 @@ public void writeBlock(final ExtendedBlock block,
           mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
           mirrorSock = datanode.newSocket();
           try {
+
+            DataNodeFaultInjector.get().failMirrorConnection();
+
             int timeoutValue = dnConf.socketTimeout +
                 (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
             int writeTimeout = dnConf.socketWriteTimeout +

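Placement matters here: the hook fires after the replica has been created (and its space reserved) but just before the DataNode connects to its downstream mirror, so an injected IOException fails pipeline setup at exactly the point where the reservation used to leak once the client abandoned the block and retried with a new pipeline.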
View File: FsDatasetImpl.java

@@ -79,6 +79,7 @@
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
@@ -1955,6 +1956,9 @@ public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
          LOG.debug("Block file " + removing.getBlockFile().getName()
              + " is to be deleted");
        }
+       if (removing instanceof ReplicaInPipelineInterface) {
+         ((ReplicaInPipelineInterface) removing).releaseAllBytesReserved();
+       }
      }
 
      if (v.isTransientStorage()) {

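Why the instanceof check: only replicas still in the write pipeline (RBW or temporary) hold an outstanding reservation; finalized replicas have already converted it into actual usage. A self-contained illustration of the accounting this hunk fixes, using hypothetical Volume and Rbw classes rather than Hadoop's real ones:

class Volume {
  private long reservedForReplicas;
  void reserve(long bytes) { reservedForReplicas += bytes; }
  void release(long bytes) { reservedForReplicas -= bytes; }
  long getReservedForReplicas() { return reservedForReplicas; }
}

class Rbw {
  private final Volume volume;
  private long bytesReserved;
  Rbw(Volume v, long bytes) {
    volume = v;
    bytesReserved = bytes;
    volume.reserve(bytes); // taken when the replica enters the pipeline
  }
  // Analogue of releaseAllBytesReserved(): without this call on the
  // invalidate path, the volume's reserved counter never drops back to 0.
  void releaseAllBytesReserved() {
    volume.release(bytesReserved);
    bytesReserved = 0;
  }
}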
View File: TestSpaceReservation.java

@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.ipc.RemoteException;
@@ -77,6 +78,7 @@ public class TestSpaceReservation {
   private DFSClient client = null;
   FsVolumeReference singletonVolumeRef = null;
   FsVolumeImpl singletonVolume = null;
+  private DataNodeFaultInjector old = null;
 
   private static Random rand = new Random();
 
@@ -146,6 +148,9 @@ public void shutdownCluster() throws IOException {
       cluster.shutdown();
       cluster = null;
     }
+    if (old != null) {
+      DataNodeFaultInjector.set(old);
+    }
   }
 
   private void createFileAndTestSpaceReservation(
@@ -613,6 +618,49 @@ public void testReservedSpaceForAppend() throws Exception {
     checkReservedSpace(expectedFile2Reserved);
   }
 
+  @Test(timeout = 30000)
+  public void testReservedSpaceForPipelineRecovery() throws Exception {
+    final short replication = 3;
+    startCluster(BLOCK_SIZE, replication, -1);
+    final String methodName = GenericTestUtils.getMethodName();
+    final Path file = new Path("/" + methodName + ".01.dat");
+
+    old = DataNodeFaultInjector.get();
+    // Fault injector to fail connection to mirror first time.
+    DataNodeFaultInjector.set(new DataNodeFaultInjector() {
+      private int tries = 0;
+      @Override
+      public void failMirrorConnection() throws IOException {
+        if (tries++ == 0) {
+          throw new IOException("Failing Mirror for space reservation");
+        }
+      }
+    });
+
+    // Write 1 byte to the file and kill the writer.
+    FSDataOutputStream os = fs.create(file, replication);
+    os.write(new byte[1]);
+    os.close();
+
+    // Ensure all space reserved for the replica was released on each
+    // DataNode.
+    cluster.triggerBlockReports();
+    for (final DataNode dn : cluster.getDataNodes()) {
+      try (FsDatasetSpi.FsVolumeReferences volumes =
+          dn.getFSDataset().getFsVolumeReferences()) {
+        final FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
+        GenericTestUtils.waitFor(new Supplier<Boolean>() {
+          @Override
+          public Boolean get() {
+            LOG.info("dn " + dn.getDisplayName() + " space : "
+                + volume.getReservedForReplicas());
+            return (volume.getReservedForReplicas() == 0);
+          }
+        }, 100, Integer.MAX_VALUE); // Wait until the test times out.
+      }
+    }
+  }
+
   private void checkReservedSpace(final long expectedReserved) throws TimeoutException,
       InterruptedException, IOException {
     for (final DataNode dn : cluster.getDataNodes()) {
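
Two details of the new test are worth noting: cluster.triggerBlockReports() makes the DataNodes report immediately, so the NameNode can schedule invalidation of the abandoned replica without waiting for the next periodic block report, and waitFor(..., 100, Integer.MAX_VALUE) polls indefinitely, deferring failure to the @Test(timeout = 30000) guard. To run just this test from the hadoop-hdfs module, the standard surefire selection should work (assuming a stock Hadoop checkout):

mvn test -Dtest=TestSpaceReservation#testReservedSpaceForPipelineRecovery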