HDFS-4429. When the latest snapshot exists, INodeFileUnderConstruction should be replaced with INodeFileWithSnapshot rather than INodeFile.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1438304 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-01-25 03:09:26 +00:00
parent bb80f2fb29
commit 12e8ba804f
8 changed files with 352 additions and 28 deletions

CHANGES.HDFS-2802.txt

@@ -120,3 +120,7 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4436. Change INode.recordModification(..) to return only the current
   inode and remove the updateCircularList parameter from some methods in
   INodeDirectoryWithSnapshot.Diff.  (szetszwo)
+
+  HDFS-4429. When the latest snapshot exists, INodeFileUnderConstruction
+  should be replaced with INodeFileWithSnapshot rather than INodeFile.
+  (Jing Zhao via szetszwo)
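
The gist of the change: once a directory has been snapshotted, finalizing a file under construction must yield a snapshot-aware inode, or the file drops out of the circular list that links it to its snapshot copies. The following minimal, self-contained sketch uses stand-in types, not the real org.apache.hadoop.hdfs.server.namenode classes, purely to illustrate the type relationships the fix relies on:

// Stand-ins for the real inode types (assumption: heavily simplified; the
// actual classes also carry blocks, permissions, lease information, etc.).
interface FileWithSnapshot {
  FileWithSnapshot getNext(); // circular link through the snapshot copies
}

class INodeFile { }

class INodeFileUnderConstruction extends INodeFile { }

class INodeFileWithSnapshot extends INodeFile implements FileWithSnapshot {
  FileWithSnapshot next = this; // alone in its circle until a copy is linked
  public FileWithSnapshot getNext() { return next; }
}

class INodeFileUnderConstructionWithSnapshot extends INodeFileUnderConstruction
    implements FileWithSnapshot {
  FileWithSnapshot next = this;
  public FileWithSnapshot getNext() { return next; }
}

public class TypeSketch {
  public static void main(String[] args) {
    // The bug: finalizing an under-construction file while a snapshot exists
    // used to produce a plain INodeFile, dropping it out of the circle.
    INodeFile finalized = new INodeFileWithSnapshot();
    System.out.println(finalized instanceof FileWithSnapshot); // true
  }
}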

FSNamesystem.java

@@ -178,6 +178,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -3294,6 +3295,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     assert hasWriteLock();
     leaseManager.removeLease(pendingFile.getClientName(), src);
 
+    if (latestSnapshot != null) {
+      if (!(pendingFile instanceof INodeFileUnderConstructionWithSnapshot)) {
+        // replace INodeFileUnderConstruction with
+        // INodeFileUnderConstructionWithSnapshot. This replacement does not
+        // need to be recorded in the snapshot.
+        INodeFileUnderConstructionWithSnapshot pendingFileWithSnapshot =
+            new INodeFileUnderConstructionWithSnapshot(pendingFile);
+        dir.replaceINodeFile(src, pendingFile,
+            pendingFileWithSnapshot, null);
+        pendingFile = pendingFileWithSnapshot;
+      }
+      pendingFile = (INodeFileUnderConstruction) pendingFile
+          .recordModification(latestSnapshot);
+    }
+
     // The file is no longer pending.
     // Create permanent INode, update blocks
     final INodeFile newFile = pendingFile.toINodeFile(now());
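
The new block does two things in a deliberate order: it first upgrades the inode type in place (a swap that is explicitly not recorded as a snapshot modification), and only then records the finalization against the latest snapshot. A hedged sketch of that ordering, with hypothetical stand-in types in place of INodeFileUnderConstruction and FSDirectory.replaceINodeFile():

class Pending {
  final String name;
  Pending(String name) { this.name = name; }
  Pending recordModification(String snapshot) {
    throw new IllegalStateException(name + " is not snapshot-aware");
  }
}

class PendingWithSnapshot extends Pending {
  PendingWithSnapshot(Pending p) { super(p.name); }
  @Override
  Pending recordModification(String snapshot) {
    System.out.println(name + ": modification recorded against " + snapshot);
    return this;
  }
}

public class FinalizeOrderSketch {
  public static void main(String[] args) {
    String latestSnapshot = "s0"; // pretend a snapshot was taken earlier
    Pending pendingFile = new Pending("file1");
    if (latestSnapshot != null) {
      if (!(pendingFile instanceof PendingWithSnapshot)) {
        // 1. upgrade first; the swap itself is invisible to snapshots
        pendingFile = new PendingWithSnapshot(pendingFile);
      }
      // 2. only a snapshot-aware inode can record the change
      pendingFile = pendingFile.recordModification(latestSnapshot);
    }
  }
}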

INode.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -431,8 +432,8 @@ public abstract class INode implements Comparable<byte[]> {
   @VisibleForTesting
   public String getObjectString() {
-    final String s = super.toString();
-    return s.substring(s.lastIndexOf(getClass().getSimpleName()));
+    return getClass().getSimpleName() + "@"
+        + Integer.toHexString(super.hashCode());
   }
 
   @VisibleForTesting
@@ -693,15 +694,21 @@ public abstract class INode implements Comparable<byte[]> {
         String blocksInfo = ((INodeFile) this).printBlocksInfo();
         out.print(", blocks=[" + blocksInfo + "]");
       }
-      if (this instanceof INodeFileWithSnapshot) {
-        INodeFileWithSnapshot nodeWithLink = (INodeFileWithSnapshot) this;
-        FileWithSnapshot next = nodeWithLink.getNext();
-        out.print(", next="
-            + (next != null ? next.asINodeFile().getObjectString() : "null"));
-        if (this instanceof INodeFileSnapshot) {
+      if (this instanceof FileWithSnapshot) {
+        if (this instanceof INodeFileSnapshot
+            || this instanceof INodeFileUnderConstructionSnapshot) {
           out.print(", computedSize="
               + ((INodeFileSnapshot) this).computeFileSize(true));
         }
+        FileWithSnapshot nodeWithLink = (FileWithSnapshot) this;
+        FileWithSnapshot next = nodeWithLink.getNext();
+        // An INodeFileWithSnapshot whose next link points to itself is
+        // equivalent to a normal INodeFile
+        if (!(this instanceof INodeFileWithSnapshot &&
+            ((INodeFileWithSnapshot) this).getNext() == this)) {
+          out.print(", next="
+              + (next != null ? next.asINodeFile().getObjectString() : "null"));
+        }
       }
       out.println();
     } else {

INodeDirectoryWithSnapshot.java

@@ -528,7 +528,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       diff.deleted = deletedList;
     }
 
-    public Diff getDiff() {
+    Diff getDiff() {
       return diff;
     }
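
getDiff() hands back the created/deleted lists that the new test below inspects via searchCreated/searchDeleted. A rough sketch of the idea, as a stand-in implementation only (the real Diff stores sorted INode lists and is considerably more involved):

import java.util.ArrayList;
import java.util.List;

// Stand-in for INodeDirectoryWithSnapshot.Diff: a snapshot diff of a
// directory is essentially "children created since the snapshot" plus
// "children deleted since the snapshot".
public class DiffSketch {
  final List<String> created = new ArrayList<String>();
  final List<String> deleted = new ArrayList<String>();

  String searchCreated(String name) {
    return created.contains(name) ? name : null;
  }

  String searchDeleted(String name) {
    return deleted.contains(name) ? name : null;
  }

  public static void main(String[] args) {
    DiffSketch diff = new DiffSketch();
    // Modifying a file after a snapshot is recorded as delete(old copy)
    // + create(current inode), which is exactly what the tests assert.
    diff.deleted.add("file"); // frozen pre-modification copy
    diff.created.add("file"); // live, current inode
    System.out.println(diff.searchCreated("file")); // file
    System.out.println(diff.searchDeleted("file")); // file
  }
}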

INodeFileUnderConstructionWithSnapshot.java

@@ -39,6 +39,20 @@ public class INodeFileUnderConstructionWithSnapshot
     super(f.asINodeFile(), clientName, clientMachine, clientNode);
   }
 
+  /**
+   * The constructor that creates an
+   * {@link INodeFileUnderConstructionWithSnapshot} based on an
+   * {@link INodeFileUnderConstruction}.
+   *
+   * @param child The given {@link INodeFileUnderConstruction} instance
+   */
+  public INodeFileUnderConstructionWithSnapshot(
+      INodeFileUnderConstruction child) {
+    super(child, child.getClientName(), child.getClientMachine(),
+        child.getClientNode());
+    next = this;
+  }
+
   @Override
   protected INodeFileWithSnapshot toINodeFile(final long mtime) {
     assertAllBlocksComplete();
@@ -46,7 +60,8 @@ public class INodeFileUnderConstructionWithSnapshot
     final INodeFileWithSnapshot f = new INodeFileWithSnapshot(this);
     f.setModificationTime(mtime, null);
     f.setAccessTime(atime, null);
-    Util.replace(this, f);
+    // link f with this
+    this.insertBefore(f);
     return f;
   }

INodeFileWithSnapshot.java

@@ -43,7 +43,7 @@ public class INodeFileWithSnapshot extends INodeFile
     final INodeFileUnderConstructionWithSnapshot f
         = new INodeFileUnderConstructionWithSnapshot(this,
             clientName, clientMachine, clientNode);
-    Util.replace(this, f);
+    this.insertBefore(f);
     return f;
   }
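
Both call sites switch from Util.replace(this, f) to this.insertBefore(f): instead of swapping one node for another, the new inode joins the existing circle just before the current one. A self-contained sketch of that list discipline, simplified from the FileWithSnapshot linkage (the traversal order shown is that of this sketch, not a claim about the real list):

public class CircleSketch {
  static class Node {
    final String label;
    Node next = this; // a lone node forms a one-element circle

    Node(String label) { this.label = label; }

    /** Link n into the circle immediately before this node. */
    void insertBefore(Node n) {
      Node prev = this;
      while (prev.next != this) { // walk the cycle to find the predecessor
        prev = prev.next;
      }
      prev.next = n;
      n.next = this;
    }
  }

  public static void main(String[] args) {
    Node current = new Node("current");
    Node s0 = new Node("s0-copy");
    Node s1 = new Node("s1-copy");
    current.insertBefore(s0); // circle: current -> s0 -> current
    current.insertBefore(s1); // circle: current -> s0 -> s1 -> current
    for (Node n = current.next; ; n = n.next) {
      System.out.println(n.label);
      if (n == current) break;
    }
  }
}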

TestINodeFileUnderConstructionWithSnapshot.java (new file)

@@ -0,0 +1,240 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.EnumSet;
import java.util.Random;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

/**
 * Test snapshot functionalities during file appending.
 */
public class TestINodeFileUnderConstructionWithSnapshot {
  static final long seed = 0;
  static final short REPLICATION = 3;
  static final int BLOCKSIZE = 1024;

  private final Path dir = new Path("/TestSnapshot");

  Configuration conf;
  MiniDFSCluster cluster;
  FSNamesystem fsn;
  DistributedFileSystem hdfs;
  FSDirectory fsdir;

  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
        .build();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    fsdir = fsn.getFSDirectory();
    hdfs = cluster.getFileSystem();
    hdfs.mkdirs(dir);
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  /**
   * Check if the given nodes can form a circular list.
   */
  private void checkCircularList(FileWithSnapshot... nodes) {
    for (int i = 0; i < nodes.length; i++) {
      FileWithSnapshot next = nodes[i].getNext();
      FileWithSnapshot expectedNext = nodes[(i + 1) % nodes.length];
      Assert.assertTrue(next == expectedNext);
    }
  }

  /**
   * Test snapshot after file appending.
   */
  @Test
  public void testSnapshotAfterAppending() throws Exception {
    Path file = new Path(dir, "file");
    // 1. create snapshot --> create file --> append
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);

    // check the circular list and corresponding inodes: there should only be a
    // reference of the current node in the created list
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
        .getINode(dir.toString());
    Diff diff = dirNode.getLastSnapshotDiff().getDiff();
    INode nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
    assertTrue(fileNode == nodeInCreated);
    INode nodeInDeleted = diff.searchDeleted(fileNode.getLocalNameBytes());
    assertNull(nodeInDeleted);

    // 2. create snapshot --> modify the file --> append
    hdfs.createSnapshot(dir, "s1");
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);

    // check the circular list and corresponding inodes
    diff = dirNode.getLastSnapshotDiff().getDiff();
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
    assertTrue(fileNode == nodeInCreated);
    assertEquals(REPLICATION - 1,
        ((INodeFile) nodeInCreated).getFileReplication());
    assertEquals(BLOCKSIZE * 3, ((INodeFile) fileNode).computeFileSize(true));
    nodeInDeleted = diff.searchDeleted(fileNode.getLocalNameBytes());
    assertEquals(REPLICATION,
        ((INodeFile) nodeInDeleted).getFileReplication());
    assertEquals(BLOCKSIZE * 2,
        ((INodeFile) nodeInDeleted).computeFileSize(true));
    checkCircularList((INodeFileWithSnapshot) fileNode,
        (INodeFileSnapshot) nodeInDeleted);

    // 3. create snapshot --> append
    hdfs.createSnapshot(dir, "s2");
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);

    // check the circular list and corresponding inodes
    diff = dirNode.getLastSnapshotDiff().getDiff();
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
    assertTrue(fileNode == nodeInCreated);
    assertEquals(REPLICATION - 1,
        ((INodeFile) nodeInCreated).getFileReplication());
    assertEquals(BLOCKSIZE * 4, ((INodeFile) fileNode).computeFileSize(true));
    INode nodeInDeleted2 = diff.searchDeleted(fileNode.getLocalNameBytes());
    assertEquals(REPLICATION - 1,
        ((INodeFile) nodeInDeleted2).getFileReplication());
    assertEquals(BLOCKSIZE * 3,
        ((INodeFile) nodeInDeleted2).computeFileSize(true));
    checkCircularList((INodeFileWithSnapshot) fileNode,
        (INodeFileSnapshot) nodeInDeleted2, (INodeFileSnapshot) nodeInDeleted);
  }

  private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
      throws IOException {
    byte[] toAppend = new byte[length];
    Random random = new Random();
    random.nextBytes(toAppend);
    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
    out.write(toAppend);
    return out;
  }

  /**
   * Test snapshot during file appending, before the corresponding
   * {@link FSDataOutputStream} instance closes.
   */
  @Test
  public void testSnapshotWhileAppending() throws Exception {
    Path file = new Path(dir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);

    // 1. append without closing stream --> create snapshot
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    out.close();

    // check: an INodeFileUnderConstructionSnapshot should be stored into s0's
    // deleted list, with size BLOCKSIZE*2
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(BLOCKSIZE * 2, ((INodeFile) fileNode).computeFileSize(true));
    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
        .getINode(dir.toString());
    Diff diff = dirNode.getLastSnapshotDiff().getDiff();
    INode nodeInDeleted_S0 = diff.searchDeleted(fileNode.getLocalNameBytes());
    assertTrue(nodeInDeleted_S0 instanceof INodeFileUnderConstructionSnapshot);
    assertEquals(BLOCKSIZE * 2,
        ((INodeFile) nodeInDeleted_S0).computeFileSize(true));

    // 2. append without closing stream
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));

    // re-check nodeInDeleted_S0
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    diff = dirNode.getLastSnapshotDiff().getDiff();
    nodeInDeleted_S0 = diff.searchDeleted(fileNode.getLocalNameBytes());
    assertTrue(nodeInDeleted_S0 instanceof INodeFileUnderConstructionSnapshot);
    assertEquals(BLOCKSIZE * 2,
        ((INodeFile) nodeInDeleted_S0).computeFileSize(true));

    // 3. take snapshot --> close stream
    hdfs.createSnapshot(dir, "s1");
    out.close();

    // check: an INodeFileUnderConstructionSnapshot with size BLOCKSIZE*3 should
    // have been stored in s1's deleted list
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    diff = dirNode.getLastSnapshotDiff().getDiff();
    INode nodeInCreated_S1 = diff.searchCreated(fileNode.getLocalNameBytes());
    assertTrue(fileNode == nodeInCreated_S1);
    assertTrue(fileNode instanceof INodeFileWithSnapshot);
    INode nodeInDeleted_S1 = diff.searchDeleted(fileNode.getLocalNameBytes());
    assertTrue(nodeInDeleted_S1 instanceof INodeFileUnderConstructionSnapshot);
    assertEquals(BLOCKSIZE * 3,
        ((INodeFile) nodeInDeleted_S1).computeFileSize(true));
    // also check the circular linked list
    checkCircularList((INodeFileWithSnapshot) fileNode,
        (INodeFileUnderConstructionSnapshot) nodeInDeleted_S1,
        (INodeFileUnderConstructionSnapshot) nodeInDeleted_S0);

    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s2");
    out.close();

    // re-check the size of nodeInDeleted_S1
    assertEquals(BLOCKSIZE * 3,
        ((INodeFile) nodeInDeleted_S1).computeFileSize(true));
  }
}

TestSnapshot.java

@@ -20,6 +20,12 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -42,6 +48,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDi
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -68,6 +75,9 @@ public class TestSnapshot {
   private static Random random = new Random(seed);
 
+  private static String testDir =
+      System.getProperty("test.build.data", "build/test/data");
+
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -144,13 +154,23 @@ public class TestSnapshot {
     return nodes;
   }
 
+  private File getDumpTreeFile(String dir, String suffix) {
+    return new File(dir, String.format("dumptree_%s", suffix));
+  }
+
   /**
    * Restart the cluster to check edit log applying and fsimage saving/loading
    */
   private void checkFSImage() throws Exception {
+    File fsnBefore = getDumpTreeFile(testDir, "before");
+    File fsnMiddle = getDumpTreeFile(testDir, "middle");
+    File fsnAfter = getDumpTreeFile(testDir, "after");
+
     String rootDir = "/";
-    StringBuffer fsnStrBefore = fsn.getFSDirectory().getINode(rootDir)
-        .dumpTreeRecursively();
+    PrintWriter out = new PrintWriter(new FileWriter(fsnBefore, false), true);
+    fsn.getFSDirectory().getINode(rootDir)
+        .dumpTreeRecursively(out, new StringBuilder(), null);
+    out.close();
 
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
@@ -158,10 +178,12 @@ public class TestSnapshot {
     cluster.waitActive();
     fsn = cluster.getNamesystem();
     hdfs = cluster.getFileSystem();
-    // later check fsnStrMiddle to see if the edit log is recorded and applied
+    // later check fsnMiddle to see if the edit log is recorded and applied
     // correctly
-    StringBuffer fsnStrMiddle = fsn.getFSDirectory().getINode(rootDir)
-        .dumpTreeRecursively();
+    out = new PrintWriter(new FileWriter(fsnMiddle, false), true);
+    fsn.getFSDirectory().getINode(rootDir)
+        .dumpTreeRecursively(out, new StringBuilder(), null);
+    out.close();
 
     // save namespace and restart cluster
     hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -174,18 +196,38 @@ public class TestSnapshot {
     fsn = cluster.getNamesystem();
     hdfs = cluster.getFileSystem();
     // dump the namespace loaded from fsimage
-    StringBuffer fsnStrAfter = fsn.getFSDirectory().getINode(rootDir)
-        .dumpTreeRecursively();
+    out = new PrintWriter(new FileWriter(fsnAfter, false), true);
+    fsn.getFSDirectory().getINode(rootDir)
+        .dumpTreeRecursively(out, new StringBuilder(), null);
+    out.close();
 
-    System.out.println("================== Original FSDir ==================");
-    System.out.println(fsnStrBefore.toString());
-    System.out.println("================== FSDir After Applying Edit Logs ==================");
-    System.out.println(fsnStrMiddle.toString());
-    System.out.println("================ FSDir After FSImage Saving/Loading ================");
-    System.out.println(fsnStrAfter.toString());
-    System.out.println("====================================================");
-    assertEquals(fsnStrBefore.toString(), fsnStrMiddle.toString());
-    assertEquals(fsnStrBefore.toString(), fsnStrAfter.toString());
+    compareFile(fsnBefore, fsnMiddle);
+    compareFile(fsnBefore, fsnAfter);
+  }
+
+  /** Compare the content of two files. */
+  private void compareFile(File file1, File file2) throws IOException {
+    BufferedReader reader1 = new BufferedReader(new FileReader(file1));
+    BufferedReader reader2 = new BufferedReader(new FileReader(file2));
+    try {
+      String line1 = "";
+      String line2 = "";
+      while ((line1 = reader1.readLine()) != null
+          && (line2 = reader2.readLine()) != null) {
+        // skip the hashCode part of the object string during the comparison,
+        // also ignore the difference between INodeFile/INodeFileWithSnapshot
+        line1 = line1.replaceAll("INodeFileWithSnapshot", "INodeFile");
+        line2 = line2.replaceAll("INodeFileWithSnapshot", "INodeFile");
+        line1 = line1.replaceAll("@[\\dabcdef]+", "");
+        line2 = line2.replaceAll("@[\\dabcdef]+", "");
+        assertEquals(line1, line2);
+      }
+      Assert.assertNull(reader1.readLine());
+      Assert.assertNull(reader2.readLine());
+    } finally {
+      reader1.close();
+      reader2.close();
+    }
   }
 
   /**
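
The comparison above deliberately normalizes each line before asserting equality: getObjectString() now embeds an identity hash code that changes across JVM runs, and a file may legitimately be dumped as INodeFile before a restart and as INodeFileWithSnapshot after. A quick demonstration of the same normalization, reusing the regexes from the test (the dump-line format shown is a hypothetical example, not the exact dumpTreeRecursively output):

public class NormalizeDemo {
  public static void main(String[] args) {
    String line1 = "  file1 (INodeFileWithSnapshot@1a2b3c), blocks=[...]";
    String line2 = "  file1 (INodeFile@4d5e6f), blocks=[...]";
    // same two-step normalization as TestSnapshot.compareFile()
    line1 = line1.replaceAll("INodeFileWithSnapshot", "INodeFile")
        .replaceAll("@[\\dabcdef]+", "");
    line2 = line2.replaceAll("INodeFileWithSnapshot", "INodeFile")
        .replaceAll("@[\\dabcdef]+", "");
    System.out.println(line1.equals(line2)); // true
  }
}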