HDFS-12985. NameNode crashes during restart after an OpenForWrite file present in the Snapshot got deleted.

(cherry picked from commit 73ff09b79a)
This commit is contained in:
Manoj Govindassamy 2018-01-08 15:34:00 -08:00 committed by Yongjun Zhang
parent 2a3cefa022
commit 821729905e
2 changed files with 55 additions and 6 deletions

View File

@ -727,6 +727,13 @@ public class INodeFile extends INodeWithAdditionalFields
this.blocks = BlockInfo.EMPTY_ARRAY;
}
/**
 * If this file is currently open for write (under construction), record
 * its inode id in the reclaim context so the caller can later clean up
 * the associated lease/under-construction state. No-op when the file is
 * not under construction or when the context is not collecting UC files.
 */
private void updateRemovedUnderConstructionFiles(
    ReclaimContext reclaimContext) {
  // Guard clauses preserve the original short-circuit order:
  // the under-construction check runs first.
  if (!isUnderConstruction()) {
    return;
  }
  if (reclaimContext.removedUCFiles == null) {
    return;
  }
  reclaimContext.removedUCFiles.add(getId());
}
@Override
public void cleanSubtree(ReclaimContext reclaimContext,
final int snapshot, int priorSnapshotId) {
@ -735,6 +742,7 @@ public class INodeFile extends INodeWithAdditionalFields
// TODO: avoid calling getStoragePolicyID
sf.cleanFile(reclaimContext, this, snapshot, priorSnapshotId,
getStoragePolicyID());
updateRemovedUnderConstructionFiles(reclaimContext);
} else {
if (snapshot == CURRENT_STATE_ID) {
if (priorSnapshotId == NO_SNAPSHOT_ID) {
@ -747,9 +755,7 @@ public class INodeFile extends INodeWithAdditionalFields
// clean the 0-sized block if the file is UC
if (uc != null) {
uc.cleanZeroSizeBlock(this, reclaimContext.collectedBlocks);
if (reclaimContext.removedUCFiles != null) {
reclaimContext.removedUCFiles.add(getId());
}
updateRemovedUnderConstructionFiles(reclaimContext);
}
}
}
@ -768,9 +774,7 @@ public class INodeFile extends INodeWithAdditionalFields
reclaimContext.collectedBlocks);
sf.clearDiffs();
}
if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
reclaimContext.removedUCFiles.add(getId());
}
updateRemovedUnderConstructionFiles(reclaimContext);
}
public void clearFile(ReclaimContext reclaimContext) {

View File

@ -630,6 +630,51 @@ public class TestOpenFilesWithSnapshot {
hbaseOutputStream.close();
}
/**
 * Regression test: the NameNode must restart cleanly after a file that
 * was open for write is deleted together with the only snapshot that
 * referenced it.
 *
 * @throws Exception on any unexpected cluster or filesystem failure
 */
@Test (timeout = 600000)
public void testOpenFileDeletionAndNNRestart() throws Exception {
  // Directory tree hosting the snapshottable root.
  final Path snapRootDir = new Path("/level_0_A/test");
  final String hbaseFileName = "hbase.log";
  final String snap1Name = "snap_1";

  // Create a multi-block file, then reopen it for append so it stays
  // in the open-for-write (under construction) state.
  final Path hbaseFile = new Path(snapRootDir, hbaseFileName);
  createFile(hbaseFile);
  FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile);

  // Append random payload while the stream is open.
  final int appendLength = (int) (BLOCKSIZE * 1.5);
  final byte[] payload = new byte[appendLength];
  new Random().nextBytes(payload);
  writeToStream(hbaseOutputStream, payload);

  // Snapshot the root while the file is still open for write.
  final Path snap1Dir = SnapshotTestHelper.createSnapshot(
      fs, snapRootDir, snap1Name);
  LOG.info("Open file status in snap: " +
      fs.getFileStatus(new Path(snap1Dir, hbaseFileName)));

  // Delete both the live file and the snapshot while the output stream
  // remains open -- the scenario that previously crashed the NameNode
  // on restart (HDFS-12985).
  fs.delete(hbaseFile, true);
  fs.deleteSnapshot(snapRootDir, snap1Name);
  Assert.assertFalse(fs.exists(hbaseFile));

  // Restart must succeed and the file must remain absent afterwards.
  cluster.restartNameNode();
  cluster.waitActive();
  Assert.assertFalse(fs.exists(hbaseFile));
}
/**
* Test client writing to open files are not interrupted when snapshots
* that captured open files get deleted.