HDFS-8486. DN startup may cause severe data loss. (Contributed by Daryn Sharp)

Arpit Agarwal 2015-08-06 15:17:45 -07:00
parent 0daf9dd552
commit aa2303088f
4 changed files with 60 additions and 8 deletions
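The substance of the fix, for review context: the DataNode change below defers directory scanner startup until after the block pool has been added, and the BlockPoolSlice change adds a guard so that two replica records pointing at the same block file are never "resolved" by deleting one of them, since deleting either record's files removes the only physical copy of the block. A minimal, standalone sketch of that failure mode (plain JDK; all names illustrative, not Hadoop's):

import java.io.File;
import java.io.IOException;

// Minimal, standalone sketch of the data-loss path this commit closes.
// All names are illustrative, not Hadoop's: two replica records that
// point at the SAME on-disk file get "resolved" by deleting one record's
// files -- which deletes the only physical copy of the block.
public class SameFileDeletion {
  static class Replica {
    final long genStamp;
    final File blockFile;
    Replica(long genStamp, File blockFile) {
      this.genStamp = genStamp;
      this.blockFile = blockFile;
    }
  }

  // Pre-patch shape: always pick one of the two records to delete.
  static Replica unsafeSelectToDelete(Replica r1, Replica r2) {
    return r1.genStamp >= r2.genStamp ? r2 : r1;
  }

  // Post-patch shape: mirrors the new getBlockFile() guard -- never
  // delete when both records name the same physical file.
  static Replica safeSelectToDelete(Replica r1, Replica r2) {
    if (r1.blockFile.equals(r2.blockFile)) {
      return null; // one physical copy on disk: nothing may be deleted
    }
    return unsafeSelectToDelete(r1, r2);
  }

  public static void main(String[] args) throws IOException {
    File blk = File.createTempFile("blk_1001", null);
    Replica first = new Replica(2, blk);  // e.g. recorded by addBlockPool
    Replica second = new Replica(2, blk); // e.g. recorded again by a scanner
    unsafeSelectToDelete(first, second).blockFile.delete();
    System.out.println("block survives old logic: " + blk.exists()); // false
    System.out.println("new logic selects for deletion: "
        + safeSelectToDelete(first, second)); // null -> keep the block
  }
}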

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -33,6 +33,8 @@ Release 2.6.1 - UNRELEASED
     HDFS-7733. NFS: readdir/readdirplus return null directory
     attribute on failure. (Arpit Agarwal)
 
+    HDFS-8486. DN startup may cause severe data loss. (daryn via cmccabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -1279,9 +1279,8 @@ public class DataNode extends ReconfigurableBase
     // failures.
     checkDiskError();
 
-    initPeriodicScanners(conf);
-
     data.addBlockPool(nsInfo.getBlockPoolID(), conf);
+    initDirectoryScanner(conf);
   }
 
   BPOfferService[] getAllBpOs() {
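A note on why the reorder in this hunk matters: the directory scanner reconciles on-disk blocks against the DataNode's in-memory replica records, so starting it before addBlockPool() has loaded the pool can let the scanner and the loader each discover the same block file, producing exactly the same-file duplicate pair that the BlockPoolSlice change below has to neutralize. A toy model of that outcome (illustrative names, not Hadoop code):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model (illustrative names, not Hadoop code) of why the scanner
// should start only after the block pool is added: with the old order,
// the scanner and the block-pool loader can each record the same
// on-disk file, creating a duplicate pair over one physical copy.
public class StartupOrder {
  static final String[] DISK = {"blk_1001"};
  static final Map<String, List<String>> recordsByBlock = new HashMap<>();

  static void discover(String who) {
    for (String b : DISK) {
      recordsByBlock.computeIfAbsent(b, k -> new ArrayList<>()).add(who);
    }
  }

  public static void main(String[] args) {
    // Old order: the scanner starts first, then the pool is loaded.
    discover("scanner");
    discover("addBlockPool");
    // One file, two records -- the input that used to trigger a delete:
    System.out.println(recordsByBlock); // {blk_1001=[scanner, addBlockPool]}
  }
}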

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java

@@ -531,10 +531,28 @@ class BlockPoolSlice {
       // Leave both block replicas in place.
       return replica1;
     }
+    final ReplicaInfo replicaToDelete =
+        selectReplicaToDelete(replica1, replica2);
+    final ReplicaInfo replicaToKeep =
+        (replicaToDelete != replica1) ? replica1 : replica2;
+    // Update volumeMap and delete the replica
+    volumeMap.add(bpid, replicaToKeep);
+    if (replicaToDelete != null) {
+      deleteReplica(replicaToDelete);
+    }
+    return replicaToKeep;
+  }
+
+  static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
+      final ReplicaInfo replica2) {
     ReplicaInfo replicaToKeep;
     ReplicaInfo replicaToDelete;
+    // it's the same block so don't ever delete it, even if GS or size
+    // differs. caller should keep the one it just discovered on disk
+    if (replica1.getBlockFile().equals(replica2.getBlockFile())) {
+      return null;
+    }
     if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
       replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
           ? replica1 : replica2;
@@ -554,10 +572,10 @@ class BlockPoolSlice {
       LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
           + ". Will try to delete " + replicaToDelete);
     }
+    return replicaToDelete;
+  }
 
-    // Update volumeMap.
-    volumeMap.add(bpid, replicaToKeep);
+  private void deleteReplica(final ReplicaInfo replicaToDelete) {
     // Delete the files on disk. Failure here is okay.
     final File blockFile = replicaToDelete.getBlockFile();
     if (!blockFile.delete()) {
@@ -567,8 +585,6 @@ class BlockPoolSlice {
     if (!metaFile.delete()) {
       LOG.warn("Failed to delete meta file " + metaFile);
     }
-
-    return replicaToKeep;
   }
 
   /**
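One semantic worth calling out in the new guard (and in the "equivalent path" comment of the test below): java.io.File equality is pathname equality, not an identity check on the underlying file. A JDK-only illustration:

import java.io.File;

// File.equals() compares abstract pathnames, so the guard fires exactly
// when two replica records were constructed from the same discovered
// path -- the duplicate-discovery case this commit targets.
public class FileEquality {
  public static void main(String[] args) {
    System.out.println(new File("d1/block").equals(new File("d1/block"))); // true
    System.out.println(new File("d1/block").equals(new File("d2/block"))); // false
    // Two distinct paths to one physical file (hard link, symlink) are
    // NOT equal here; the guard is purely path-based.
  }
}

This is also why the test can model a re-discovered replica simply by reusing f1 across several FinalizedReplica instances.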

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

@@ -30,6 +30,8 @@ import org.apache.hadoop.hdfs.server.datanode.DNConf;
 import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -46,6 +48,8 @@ import java.util.List;
 import java.util.Set;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
@@ -186,4 +190,35 @@ public class TestFsDatasetImpl {
     verify(scanner, times(BLOCK_POOL_IDS.length))
         .deleteBlocks(anyString(), any(Block[].class));
   }
+
+  @Test
+  public void testDuplicateReplicaResolution() throws IOException {
+    FsVolumeImpl fsv1 = Mockito.mock(FsVolumeImpl.class);
+    FsVolumeImpl fsv2 = Mockito.mock(FsVolumeImpl.class);
+
+    File f1 = new File("d1/block");
+    File f2 = new File("d2/block");
+
+    ReplicaInfo replicaOlder = new FinalizedReplica(1,1,1,fsv1,f1);
+    ReplicaInfo replica = new FinalizedReplica(1,2,2,fsv1,f1);
+    ReplicaInfo replicaSame = new FinalizedReplica(1,2,2,fsv1,f1);
+    ReplicaInfo replicaNewer = new FinalizedReplica(1,3,3,fsv1,f1);
+
+    ReplicaInfo replicaOtherOlder = new FinalizedReplica(1,1,1,fsv2,f2);
+    ReplicaInfo replicaOtherSame = new FinalizedReplica(1,2,2,fsv2,f2);
+    ReplicaInfo replicaOtherNewer = new FinalizedReplica(1,3,3,fsv2,f2);
+
+    // equivalent path so don't remove either
+    assertNull(BlockPoolSlice.selectReplicaToDelete(replicaSame, replica));
+    assertNull(BlockPoolSlice.selectReplicaToDelete(replicaOlder, replica));
+    assertNull(BlockPoolSlice.selectReplicaToDelete(replicaNewer, replica));
+
+    // keep latest found replica
+    assertSame(replica,
+        BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica));
+    assertSame(replicaOtherOlder,
+        BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica));
+    assertSame(replica,
+        BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica));
+  }
 }