HDFS-4429. When the latest snapshot exists, INodeFileUnderConstruction should be replaced with INodeFileWithSnapshot rather than INodeFile.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1438304 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-01-25 03:09:26 +00:00
parent bb80f2fb29
commit 12e8ba804f
8 changed files with 352 additions and 28 deletions

@@ -120,3 +120,7 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4436. Change INode.recordModification(..) to return only the current
inode and remove the updateCircularList parameter from some methods in
INodeDirectoryWithSnapshot.Diff. (szetszwo)
HDFS-4429. When the latest snapshot exists, INodeFileUnderConstruction should
be replaced with INodeFileWithSnapshot rather than INodeFile. (Jing Zhao
via szetszwo)

@@ -178,6 +178,7 @@
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -3293,6 +3294,21 @@ private void finalizeINodeFileUnderConstruction(String src,
throws IOException, UnresolvedLinkException {
assert hasWriteLock();
leaseManager.removeLease(pendingFile.getClientName(), src);
if (latestSnapshot != null) {
if (!(pendingFile instanceof INodeFileUnderConstructionWithSnapshot)) {
// replace INodeFileUnderConstruction with
// INodeFileUnderConstructionWithSnapshot. This replacement does not
// need to be recorded in snapshot.
INodeFileUnderConstructionWithSnapshot pendingFileWithSnapshot =
new INodeFileUnderConstructionWithSnapshot(pendingFile);
dir.replaceINodeFile(src, pendingFile,
pendingFileWithSnapshot, null);
pendingFile = pendingFileWithSnapshot;
}
pendingFile = (INodeFileUnderConstruction) pendingFile
.recordModification(latestSnapshot);
}
// The file is no longer pending.
// Create permanent INode, update blocks

@@ -39,6 +39,7 @@
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -431,8 +432,8 @@ public String toString() {
@VisibleForTesting
public String getObjectString() {
final String s = super.toString();
return s.substring(s.lastIndexOf(getClass().getSimpleName()));
return getClass().getSimpleName() + "@"
+ Integer.toHexString(super.hashCode());
}
@VisibleForTesting
@@ -693,15 +694,21 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
String blocksInfo = ((INodeFile) this).printBlocksInfo();
out.print(", blocks=[" + blocksInfo + "]");
}
if (this instanceof INodeFileWithSnapshot) {
INodeFileWithSnapshot nodeWithLink = (INodeFileWithSnapshot) this;
FileWithSnapshot next = nodeWithLink.getNext();
out.print(", next="
+ (next != null ? next.asINodeFile().getObjectString() : "null"));
if (this instanceof INodeFileSnapshot) {
if (this instanceof FileWithSnapshot) {
if (this instanceof INodeFileSnapshot
|| this instanceof INodeFileUnderConstructionSnapshot) {
out.print(", computedSize="
+ ((INodeFileSnapshot) this).computeFileSize(true));
}
FileWithSnapshot nodeWithLink = (FileWithSnapshot) this;
FileWithSnapshot next = nodeWithLink.getNext();
// An INodeFileWithSnapshot whose next link points to itself is
// equivalent to a normal INodeFile
if (!(this instanceof INodeFileWithSnapshot &&
((INodeFileWithSnapshot) this).getNext() == this)) {
out.print(", next="
+ (next != null ? next.asINodeFile().getObjectString() : "null"));
}
}
out.println();
} else {

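The guard added above encodes an invariant of the circular list: a node whose next pointer is itself carries no snapshot copies, so it should print like a plain INodeFile. A minimal standalone sketch of that convention, using a hypothetical FileNode type in place of the real FileWithSnapshot API:

// Minimal sketch of the self-link convention; FileNode is a hypothetical
// stand-in for FileWithSnapshot, not the real interface.
class FileNode {
  private FileNode next = this; // a fresh node forms a singleton circle

  FileNode getNext() { return next; }
  void setNext(FileNode n) { next = n; }

  // next == this means no snapshot copies are linked in, so the node is
  // equivalent to an ordinary INodeFile and a dump may omit "next=".
  boolean isAlone() { return getNext() == this; }
}

class SelfLinkDemo {
  public static void main(String[] args) {
    FileNode current = new FileNode();
    System.out.println(current.isAlone()); // true: no "next=" printed

    FileNode snapshotCopy = new FileNode();
    snapshotCopy.setNext(current);         // splice a copy into the circle
    current.setNext(snapshotCopy);         // before `current`
    System.out.println(current.isAlone()); // false: "next=" is printed
  }
}
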
@@ -528,7 +528,7 @@ private SnapshotDiff(Snapshot snapshot, INodeDirectory dir) {
diff.deleted = deletedList;
}
public Diff getDiff() {
Diff getDiff() {
return diff;
}

@@ -39,6 +39,20 @@ public class INodeFileUnderConstructionWithSnapshot
super(f.asINodeFile(), clientName, clientMachine, clientNode);
}
/**
* The constructor that creates an
* {@link INodeFileUnderConstructionWithSnapshot} based on an
* {@link INodeFileUnderConstruction}.
*
* @param child The given {@link INodeFileUnderConstruction} instance
*/
public INodeFileUnderConstructionWithSnapshot(
INodeFileUnderConstruction child) {
super(child, child.getClientName(), child.getClientMachine(), child
.getClientNode());
next = this;
}
@Override
protected INodeFileWithSnapshot toINodeFile(final long mtime) {
assertAllBlocksComplete();
@@ -46,7 +60,8 @@ protected INodeFileWithSnapshot toINodeFile(final long mtime) {
final INodeFileWithSnapshot f = new INodeFileWithSnapshot(this);
f.setModificationTime(mtime, null);
f.setAccessTime(atime, null);
Util.replace(this, f);
// link f with this
this.insertBefore(f);
return f;
}

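The change from Util.replace(this, f) to this.insertBefore(f) splices the new node into the circle instead of swapping this node out, so both the finalized file and the under-construction node stay reachable. Extending the FileNode sketch above, insertion before a node in a singly linked circle can be done by walking to its predecessor (an assumption about the mechanics; the real FileWithSnapshot.Util code may differ):

// Sketch: splice `node` into the circle immediately before `this`.
// Assumes every node always belongs to some circle (possibly just itself).
void insertBefore(FileNode node) {
  node.setNext(this);
  FileNode prev = this;
  while (prev.getNext() != this) { // walk around to find the predecessor
    prev = prev.getNext();
  }
  prev.setNext(node);              // predecessor now points at the new node
}
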
@@ -43,7 +43,7 @@ public INodeFileUnderConstructionWithSnapshot toUnderConstruction(
final INodeFileUnderConstructionWithSnapshot f
= new INodeFileUnderConstructionWithSnapshot(this,
clientName, clientMachine, clientNode);
Util.replace(this, f);
this.insertBefore(f);
return f;
}

@@ -0,0 +1,240 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Test snapshot functionality during file appends.
*/
public class TestINodeFileUnderConstructionWithSnapshot {
static final long seed = 0;
static final short REPLICATION = 3;
static final int BLOCKSIZE = 1024;
private final Path dir = new Path("/TestSnapshot");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
FSDirectory fsdir;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Check that the given nodes form a circular list in the given order
*/
private void checkCircularList(FileWithSnapshot... nodes) {
for (int i = 0; i < nodes.length; i++) {
FileWithSnapshot next = nodes[i].getNext();
FileWithSnapshot expectedNext = nodes[(i + 1) % nodes.length];
Assert.assertTrue(next == expectedNext);
}
}
/**
* Test taking snapshots after appending to a file
*/
@Test
public void testSnapshotAfterAppending() throws Exception {
Path file = new Path(dir, "file");
// 1. create snapshot --> create file --> append
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
// check the circular list and the corresponding inodes: the created list
// should contain only a reference to the current node
INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
.getINode(dir.toString());
Diff diff = dirNode.getLastSnapshotDiff().getDiff();
INode nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
assertTrue(fileNode == nodeInCreated);
INode nodeInDeleted = diff.searchDeleted(fileNode.getLocalNameBytes());
assertNull(nodeInDeleted);
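// A file created after a snapshot is recorded only in that snapshot diff's
// created list; the deleted list holds pre-modification copies, and no copy
// of this file existed when s0 was taken.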
// 2. create snapshot --> modify the file --> append
hdfs.createSnapshot(dir, "s1");
hdfs.setReplication(file, (short) (REPLICATION - 1));
DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
// check the circular list and corresponding inodes
diff = dirNode.getLastSnapshotDiff().getDiff();
fileNode = (INodeFile) fsdir.getINode(file.toString());
nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
assertTrue(fileNode == nodeInCreated);
assertEquals(REPLICATION - 1,
((INodeFile) nodeInCreated).getFileReplication());
assertEquals(BLOCKSIZE * 3, ((INodeFile) fileNode).computeFileSize(true));
nodeInDeleted = diff.searchDeleted(fileNode.getLocalNameBytes());
assertEquals(REPLICATION,
((INodeFile) nodeInDeleted).getFileReplication());
assertEquals(BLOCKSIZE * 2,
((INodeFile) nodeInDeleted).computeFileSize(true));
checkCircularList((INodeFileWithSnapshot) fileNode,
(INodeFileSnapshot) nodeInDeleted);
// 3. create snapshot --> append
hdfs.createSnapshot(dir, "s2");
DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
// check the circular list and corresponding inodes
diff = dirNode.getLastSnapshotDiff().getDiff();
fileNode = (INodeFile) fsdir.getINode(file.toString());
nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
assertTrue(fileNode == nodeInCreated);
assertEquals(REPLICATION - 1,
((INodeFile) nodeInCreated).getFileReplication());
assertEquals(BLOCKSIZE * 4, ((INodeFile) fileNode).computeFileSize(true));
INode nodeInDeleted2 = diff.searchDeleted(fileNode.getLocalNameBytes());
assertEquals(REPLICATION - 1,
((INodeFile) nodeInDeleted2).getFileReplication());
assertEquals(BLOCKSIZE * 3,
((INodeFile) nodeInDeleted2).computeFileSize(true));
checkCircularList((INodeFileWithSnapshot) fileNode,
(INodeFileSnapshot) nodeInDeleted2, (INodeFileSnapshot) nodeInDeleted);
}
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
throws IOException {
byte[] toAppend = new byte[length];
Random random = new Random();
random.nextBytes(toAppend);
HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
out.write(toAppend);
return out;
}
/**
* Test snapshot during file appending, before the corresponding
* {@link FSDataOutputStream} instance is closed.
*/
@Test
public void testSnapshotWhileAppending() throws Exception {
Path file = new Path(dir, "file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
// 1. append without closing stream --> create snapshot
HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
out.close();
// check: an INodeFileUnderConstructionSnapshot should be stored in s0's
// deleted list, with size BLOCKSIZE*2
INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
assertEquals(BLOCKSIZE * 2, ((INodeFile) fileNode).computeFileSize(true));
INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
.getINode(dir.toString());
Diff diff = dirNode.getLastSnapshotDiff().getDiff();
INode nodeInDeleted_S0 = diff.searchDeleted(fileNode.getLocalNameBytes());
assertTrue(nodeInDeleted_S0 instanceof INodeFileUnderConstructionSnapshot);
assertEquals(BLOCKSIZE * 2,
((INodeFile) nodeInDeleted_S0).computeFileSize(true));
// 2. append without closing stream
out = appendFileWithoutClosing(file, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// re-check nodeInDeleted_S0
dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
diff = dirNode.getLastSnapshotDiff().getDiff();
nodeInDeleted_S0 = diff.searchDeleted(fileNode.getLocalNameBytes());
assertTrue(nodeInDeleted_S0 instanceof INodeFileUnderConstructionSnapshot);
assertEquals(BLOCKSIZE * 2,
((INodeFile) nodeInDeleted_S0).computeFileSize(true));
// 3. take snapshot --> close stream
hdfs.createSnapshot(dir, "s1");
out.close();
// check: an INodeFileUnderConstructionSnapshot with size BLOCKSIZE*3 should
// have been stored in s1's deleted list
fileNode = (INodeFile) fsdir.getINode(file.toString());
dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
diff = dirNode.getLastSnapshotDiff().getDiff();
INode nodeInCreated_S1 = diff.searchCreated(fileNode.getLocalNameBytes());
assertTrue(fileNode == nodeInCreated_S1);
assertTrue(fileNode instanceof INodeFileWithSnapshot);
INode nodeInDeleted_S1 = diff.searchDeleted(fileNode.getLocalNameBytes());
assertTrue(nodeInDeleted_S1 instanceof INodeFileUnderConstructionSnapshot);
assertEquals(BLOCKSIZE * 3,
((INodeFile) nodeInDeleted_S1).computeFileSize(true));
// also check the circular linked list
checkCircularList((INodeFileWithSnapshot) fileNode,
(INodeFileUnderConstructionSnapshot) nodeInDeleted_S1,
(INodeFileUnderConstructionSnapshot) nodeInDeleted_S0);
// 4. modify file --> append without closing stream --> take snapshot -->
// close stream
hdfs.setReplication(file, (short) (REPLICATION - 1));
out = appendFileWithoutClosing(file, BLOCKSIZE);
hdfs.createSnapshot(dir, "s2");
out.close();
// re-check the size of nodeInDeleted_S1
assertEquals(BLOCKSIZE * 3,
((INodeFile) nodeInDeleted_S1).computeFileSize(true));
}
}

@@ -20,6 +20,12 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -42,6 +48,7 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.Time;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -68,6 +75,9 @@ public class TestSnapshot {
private static Random random = new Random(seed);
private static String testDir =
System.getProperty("test.build.data", "build/test/data");
@Rule
public ExpectedException exception = ExpectedException.none();
@@ -144,13 +154,23 @@ protected TestDirectoryTree.Node[] createSnapshots() throws Exception {
return nodes;
}
private File getDumpTreeFile(String dir, String suffix) {
return new File(dir, String.format("dumptree_%s", suffix));
}
/**
* Restart the cluster to check edit log replay and fsimage saving/loading
*/
private void checkFSImage() throws Exception {
File fsnBefore = getDumpTreeFile(testDir, "before");
File fsnMiddle = getDumpTreeFile(testDir, "middle");
File fsnAfter = getDumpTreeFile(testDir, "after");
String rootDir = "/";
StringBuffer fsnStrBefore = fsn.getFSDirectory().getINode(rootDir)
.dumpTreeRecursively();
PrintWriter out = new PrintWriter(new FileWriter(fsnBefore, false), true);
fsn.getFSDirectory().getINode(rootDir)
.dumpTreeRecursively(out, new StringBuilder(), null);
out.close();
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
@@ -158,11 +178,13 @@ private void checkFSImage() throws Exception {
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// later check fsnStrMiddle to see if the edit log is recorded and applied
// later check fsnMiddle to see if the edit log is recorded and applied
// correctly
StringBuffer fsnStrMiddle = fsn.getFSDirectory().getINode(rootDir)
.dumpTreeRecursively();
out = new PrintWriter(new FileWriter(fsnMiddle, false), true);
fsn.getFSDirectory().getINode(rootDir)
.dumpTreeRecursively(out, new StringBuilder(), null);
out.close();
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
@@ -174,18 +196,38 @@ private void checkFSImage() throws Exception {
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// dump the namespace loaded from fsimage
StringBuffer fsnStrAfter = fsn.getFSDirectory().getINode(rootDir)
.dumpTreeRecursively();
out = new PrintWriter(new FileWriter(fsnAfter, false), true);
fsn.getFSDirectory().getINode(rootDir)
.dumpTreeRecursively(out, new StringBuilder(), null);
out.close();
System.out.println("================== Original FSDir ==================");
System.out.println(fsnStrBefore.toString());
System.out.println("================== FSDir After Applying Edit Logs ==================");
System.out.println(fsnStrMiddle.toString());
System.out.println("================ FSDir After FSImage Saving/Loading ================");
System.out.println(fsnStrAfter.toString());
System.out.println("====================================================");
assertEquals(fsnStrBefore.toString(), fsnStrMiddle.toString());
assertEquals(fsnStrBefore.toString(), fsnStrAfter.toString());
compareFile(fsnBefore, fsnMiddle);
compareFile(fsnBefore, fsnAfter);
}
/** Compare the contents of two files. */
private void compareFile(File file1, File file2) throws IOException {
BufferedReader reader1 = new BufferedReader(new FileReader(file1));
BufferedReader reader2 = new BufferedReader(new FileReader(file2));
try {
String line1 = "";
String line2 = "";
while ((line1 = reader1.readLine()) != null
&& (line2 = reader2.readLine()) != null) {
// skip the hashCode part of the object string during the comparison,
// also ignore the difference between INodeFile/INodeFileWithSnapshot
line1 = line1.replaceAll("INodeFileWithSnapshot", "INodeFile");
line2 = line2.replaceAll("INodeFileWithSnapshot", "INodeFile");
line1 = line1.replaceAll("@[\\dabcdef]+", "");
line2 = line2.replaceAll("@[\\dabcdef]+", "");
assertEquals(line1, line2);
}
Assert.assertNull(reader1.readLine());
Assert.assertNull(reader2.readLine());
} finally {
reader1.close();
reader2.close();
}
}
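
The normalization performed by the two replaceAll calls can be illustrated on a pair of made-up dump lines (the sample strings below are invented; only the regular expressions come from the test):

// Illustration of the line normalization used in compareFile above.
class NormalizeDemo {
  static String normalize(String line) {
    line = line.replaceAll("INodeFileWithSnapshot", "INodeFile");
    return line.replaceAll("@[\\dabcdef]+", "");
  }

  public static void main(String[] args) {
    // hypothetical dump lines: object hash codes differ across restarts
    String before = "file (INodeFileWithSnapshot@1a2b3c), blocks=[blk_1]";
    String after = "file (INodeFile@4d5e6f), blocks=[blk_1]";
    // both normalize to "file (INodeFile), blocks=[blk_1]"
    System.out.println(normalize(before).equals(normalize(after))); // true
  }
}
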
/**