HDFS-5443. Delete 0-sized block when deleting an under-construction file that is included in snapshot. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1539754 13f79535-47bb-0310-9956-ffa450edef68
Author: Jing Zhao
Date:   2013-11-07 18:37:38 +00:00
parent 16c6755554
commit 9964a94386
4 changed files with 112 additions and 2 deletions


@@ -498,6 +498,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5427. Not able to read deleted files from snapshot directly under
     snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
 
+    HDFS-5443. Delete 0-sized block when deleting an under-construction file that
+    is included in snapshot. (jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
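For context on the failure mode: a file's last block can sit at length 0 while a writer has asked the NameNode to allocate the next block but has not yet written any data to it (for instance, the client died right after the allocation). Before this fix, deleting such an under-construction file that was also captured by a snapshot left the empty block behind. A minimal sketch of forcing that state against a MiniDFSCluster, condensed from the new test further down (bar is a Path to the file; hdfs, fsn, fsdir, cluster, BLOCKSIZE, and REPLICATION are the test fixtures used below):

    // Create a one-block file, reopen it for append, and ask the NameNode to
    // allocate the next block; no data is ever written to it, so the file's
    // last block stays under construction with length 0.
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
    hdfs.append(bar);
    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(),
        barNode.getBlocks()[0]);
    cluster.getNameNodeRpc().addBlock(bar.toString(),
        hdfs.getClient().getClientName(), previous, null, barNode.getId(), null);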


@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;


@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -30,6 +31,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -130,6 +132,39 @@ public class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection
     return f;
   }
 
+  @Override
+  public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior,
+      final BlocksMapUpdateInfo collectedBlocks,
+      final List<INode> removedINodes, final boolean countDiffChange)
+      throws QuotaExceededException {
+    if (snapshot == null && prior != null) {
+      cleanZeroSizeBlock(collectedBlocks);
+      return Counts.newInstance();
+    } else {
+      return super.cleanSubtree(snapshot, prior, collectedBlocks,
+          removedINodes, countDiffChange);
+    }
+  }
+
+  /**
+   * When deleting a file in the current fs directory, and the file is contained
+   * in a snapshot, we should delete the last block if it's under construction
+   * and its size is 0.
+   */
+  private void cleanZeroSizeBlock(final BlocksMapUpdateInfo collectedBlocks) {
+    final BlockInfo[] blocks = getBlocks();
+    if (blocks != null && blocks.length > 0
+        && blocks[blocks.length - 1] instanceof BlockInfoUnderConstruction) {
+      BlockInfoUnderConstruction lastUC =
+          (BlockInfoUnderConstruction) blocks[blocks.length - 1];
+      if (lastUC.getNumBytes() == 0) {
+        // this is a 0-sized block; no need to check its UC state here
+        collectedBlocks.addDeleteBlock(lastUC);
+        removeLastBlock(lastUC);
+      }
+    }
+  }
+
   @Override
   public INodeFileUnderConstruction recordModification(final Snapshot latest,
       final INodeMap inodeMap) throws QuotaExceededException {
@@ -157,7 +192,7 @@ public class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection
    * Remove a block from the block list. This block should be
    * the last one on the list.
    */
-  boolean removeLastBlock(Block oldblock) throws IOException {
+  boolean removeLastBlock(Block oldblock) {
     final BlockInfo[] blocks = getBlocks();
     if (blocks == null || blocks.length == 0) {
       return false;
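For readers new to this code, the (snapshot, prior) pair passed to cleanSubtree encodes what is being removed; the override above intercepts only one case. A rough summary of the contract it relies on (hedged; the authoritative description is the INode.cleanSubtree javadoc):

    // snapshot == null, prior == null : file deleted from the current tree and
    //     referenced by no snapshot; falls through to super.cleanSubtree(...).
    // snapshot == null, prior != null : file deleted from the current tree but
    //     still referenced by an earlier snapshot -- the new branch, which can
    //     only drop the empty trailing under-construction block.
    // snapshot != null                : a snapshot itself is being deleted;
    //     also delegated to super.cleanSubtree(...).

The companion signature change to removeLastBlock drops a throws IOException clause that no code path actually exercised, which lets the new cleanZeroSizeBlock call it without any exception handling.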


@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -274,4 +275,76 @@ public class TestSnapshotBlocksMap {
         "s2/bar");
     DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
   }
+
+  /**
+   * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot
+   */
+  @Test
+  public void testDeletionWithZeroSizeBlock() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(bar, true);
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
+        bar.getName());
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
+
+  /** Make sure we delete 0-sized block when deleting an INodeFileUC */
+  @Test
+  public void testDeletionWithZeroSizeBlock2() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path subDir = new Path(foo, "sub");
+    final Path bar = new Path(subDir, "bar");
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
+
+    hdfs.append(bar);
+
+    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    BlockInfo[] blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
+    cluster.getNameNodeRpc()
+        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
+            null, barNode.getId(), null);
+
+    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
+
+    barNode = fsdir.getINode4Write(bar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(2, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+    assertEquals(0, blks[1].getNumBytes());
+
+    hdfs.delete(subDir, true);
+
+    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
+    barNode = fsdir.getINode(sbar.toString()).asFile();
+    blks = barNode.getBlocks();
+    assertEquals(1, blks.length);
+    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
+  }
 }
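A natural follow-on assertion, not part of this patch, would be that the dropped block is gone from the block manager itself, not only from the snapshot copy of the file. A sketch reusing the fixtures above, where zeroBlk is a hypothetical reference to the 0-sized block saved before the delete:

    // After the delete, the 0-byte block should no longer be in the blocksMap.
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    assertNull(bm.getStoredBlock(zeroBlk));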