HDFS-9406. FSImage may get corrupted after deleting snapshot. (Contributed by Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)

(cherry picked from commit 34ab50ea92)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

(cherry picked from commit fc8d9cc758)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java

parent 6b6167d401
commit 3d88293dda
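Taken together, the patch has three parts: INodeFile stops nulling out its block array when it is emptied, INodeReference exposes its last WithName reference, and DirectoryWithSnapshotFeature uses that accessor to destroy a renamed-then-deleted inode outright instead of only cleaning its snapshot diffs. The new TestSnapshotDeletion cases reproduce the corruption by renaming paths out of a snapshottable directory, deleting them, and then deleting the covering snapshots before saving and reloading the namespace.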
@@ -73,6 +73,9 @@ Release 2.7.3 - UNRELEASED
     HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
     (szetszwo)
 
+    HDFS-9406. FSImage may get corrupted after deleting snapshot.
+    (Contributed by Jing Zhao, Stanislav Antic, Vinayakumar B, Yongjun Zhang)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
@@ -531,7 +531,7 @@ public class INodeFile extends INodeWithAdditionalFields
         blk.setBlockCollection(null);
       }
     }
-    setBlocks(null);
+    setBlocks(BlockInfoContiguous.EMPTY_ARRAY);
     if (getAclFeature() != null) {
       AclStorage.removeAclFeature(getAclFeature());
     }
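The one-line change above swaps a null assignment for an empty-array sentinel, so any later pass over the block list (for example while writing out the namespace) no longer needs a null guard. A minimal, self-contained Java sketch of the idiom, with hypothetical names rather than the real BlockInfoContiguous types:

// EmptySentinelDemo.java -- illustrative only, not HDFS code.
public class EmptySentinelDemo {
  private static final long[] EMPTY_BLOCKS = {};
  private long[] blocks = {1L, 2L, 3L};

  // Before the fix, the analogue was `blocks = null`; any later iteration
  // (e.g. during image serialization) would then throw NullPointerException.
  void clearBlocks() {
    blocks = EMPTY_BLOCKS;
  }

  long totalSize() {
    long sum = 0;
    for (long b : blocks) { // safe: an empty array simply yields zero iterations
      sum += b;
    }
    return sum;
  }

  public static void main(String[] args) {
    EmptySentinelDemo f = new EmptySentinelDemo();
    f.clearBlocks();
    System.out.println(f.totalSize()); // prints 0, no NPE
  }
}

One shared immutable constant makes "no blocks" and "blocks never set" look the same to every consumer, which is exactly what a clean fsimage write needs.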
@@ -429,7 +429,8 @@ public abstract class INodeReference extends INode {
       }
     }
 
-    WithName getLastWithName() {
+    /** Return the last WithName reference if there is any, null otherwise. */
+    public WithName getLastWithName() {
       return withNameList.size() > 0 ?
           withNameList.get(withNameList.size() - 1) : null;
     }
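Widening getLastWithName() to public (and documenting its null contract) is what lets DirectoryWithSnapshotFeature, in the next hunk, check whether the WithName node it is cleaning is the final remaining snapshot reference.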
@@ -481,8 +481,17 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
       if (topNode instanceof INodeReference.WithName) {
         INodeReference.WithName wn = (INodeReference.WithName) topNode;
         if (wn.getLastSnapshotId() >= post) {
+          INodeReference.WithCount wc =
+              (INodeReference.WithCount) wn.getReferredINode();
+          if (wc.getLastWithName() == wn && wc.getParentReference() == null) {
+            // this wn is the last wn inside of the wc, also the dstRef node has
+            // been deleted. In this case, we should treat the referred file/dir
+            // as normal case
+            queue.add(wc.getReferredINode());
+          } else {
           wn.cleanSubtree(bsps, post, prior, collectedBlocks, removedINodes);
+          }
         }
         // For DstReference node, since the node is not in the created list of
         // prior, we should treat it as regular file/dir
       } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
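The new branch relies on the rename bookkeeping HDFS keeps for snapshots: each renamed inode is wrapped in a WithCount, which tracks the WithName references recorded in source snapshots plus, while the rename target still exists, a DstReference parent. The toy model below (plain Java with hypothetical names, not the real INodeReference classes) sketches the condition the hunk adds: when the WithName being cleaned is the last one and the destination reference is already gone, the referred inode itself is queued for destruction rather than only having its subtree diffs cleaned.

// RefCleanupModel.java -- simplified stand-in for the snapshot reference graph.
import java.util.ArrayList;
import java.util.List;

public class RefCleanupModel {
  /** Stand-in for a WithName reference recorded by a source snapshot. */
  static class WithName {
    final String snapshot;
    WithName(String snapshot) { this.snapshot = snapshot; }
  }

  /** Stand-in for WithCount, which owns all references to one renamed inode. */
  static class WithCount {
    final List<WithName> withNames = new ArrayList<>();
    boolean dstRefAlive; // models wc.getParentReference() != null

    WithName lastWithName() {
      return withNames.isEmpty() ? null : withNames.get(withNames.size() - 1);
    }
  }

  // Mirrors the added condition: wn is the last WithName inside wc, and the
  // rename destination has been deleted, so nothing references the inode
  // anymore and it must be destroyed as a regular file/dir.
  static boolean destroyAsRegularInode(WithCount wc, WithName wn) {
    return wc.lastWithName() == wn && !wc.dstRefAlive;
  }

  public static void main(String[] args) {
    WithCount wc = new WithCount();
    WithName s1 = new WithName("s1");
    wc.withNames.add(s1);
    wc.dstRefAlive = false; // rename target already deleted (e.g. /trash/y removed)
    System.out.println(destroyAsRegularInode(wc, s1)); // true -> queue the inode itself
  }
}

Before the fix, this case fell through to cleanSubtree(), leaving a dangling reference that corrupted the saved fsimage.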
@@ -17,12 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.ByteArrayOutputStream;
 import java.io.FileNotFoundException;
@@ -60,6 +55,7 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -1134,4 +1130,91 @@ public class TestSnapshotDeletion {
     cluster.restartNameNode(0);
     assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
   }
+
+  /*
+   * Test fsimage corruption reported in HDFS-9697.
+   */
+  @Test
+  public void testFsImageCorruption() throws Exception {
+    final Path st = new Path("/st");
+    final Path nonst = new Path("/nonst");
+    final Path stY = new Path(st, "y");
+    final Path nonstTrash = new Path(nonst, "trash");
+
+    hdfs.mkdirs(stY);
+
+    hdfs.allowSnapshot(st);
+    hdfs.createSnapshot(st, "s0");
+
+    Path f = new Path(stY, "nn.log");
+    hdfs.createNewFile(f);
+    hdfs.createSnapshot(st, "s1");
+
+    Path f2 = new Path(stY, "nn2.log");
+    hdfs.rename(f, f2);
+    hdfs.createSnapshot(st, "s2");
+
+    Path trashSt = new Path(nonstTrash, "st");
+    hdfs.mkdirs(trashSt);
+    hdfs.rename(stY, trashSt);
+    hdfs.delete(nonstTrash, true);
+
+    hdfs.deleteSnapshot(st, "s1");
+    hdfs.deleteSnapshot(st, "s2");
+
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    cluster.restartNameNodes();
+  }
+
+  /*
+   * Test renaming a file to outside of a snapshottable dir, then deleting it.
+   * Ensure it is deleted from both its parent INodeDirectory and the inodeMap
+   * after the last snapshot containing it is deleted.
+   */
+  @Test
+  public void testRenameAndDelete() throws IOException {
+    final Path foo = new Path("/foo");
+    final Path x = new Path(foo, "x");
+    final Path y = new Path(foo, "y");
+    final Path trash = new Path("/trash");
+    hdfs.mkdirs(x);
+    hdfs.mkdirs(y);
+    final long parentId = fsdir.getINode4Write(y.toString()).getId();
+
+    hdfs.mkdirs(trash);
+    hdfs.allowSnapshot(foo);
+    // 1. create snapshot s0
+    hdfs.createSnapshot(foo, "s0");
+    // 2. create file /foo/x/bar
+    final Path file = new Path(x, "bar");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, (short) 1, 0L);
+    final long fileId = fsdir.getINode4Write(file.toString()).getId();
+    // 3. move file into /foo/y
+    final Path newFile = new Path(y, "bar");
+    hdfs.rename(file, newFile);
+    // 4. create snapshot s1
+    hdfs.createSnapshot(foo, "s1");
+    // 5. move /foo/y to /trash
+    final Path deletedY = new Path(trash, "y");
+    hdfs.rename(y, deletedY);
+    // 6. create snapshot s2
+    hdfs.createSnapshot(foo, "s2");
+    // 7. delete /trash/y
+    hdfs.delete(deletedY, true);
+    // 8. delete snapshot s1
+    hdfs.deleteSnapshot(foo, "s1");
+
+    // make sure bar has been removed from its parent
+    INode p = fsdir.getInode(parentId);
+    Assert.assertNotNull(p);
+    INodeDirectory pd = p.asDirectory();
+    Assert.assertNotNull(pd);
+    Assert.assertNull(pd.getChild("bar".getBytes(), Snapshot.CURRENT_STATE_ID));
+
+    // make sure bar has been cleaned from inodeMap
+    Assert.assertNull(fsdir.getInode(fileId));
+  }
 }
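Both regression tests exercise the same failure shape: a path captured by snapshots is renamed out of the snapshottable tree and deleted, and then the covering snapshots are removed. testFsImageCorruption saves the namespace and restarts the NameNodes to verify the image loads cleanly; testRenameAndDelete checks that once the last covering snapshot is gone, the inode has been removed from both its parent INodeDirectory and the inodeMap.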