HDFS-4993. Fsck can fail if a file is renamed or deleted. Contributed by Robert Parker.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1512451 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Kihwal Lee 2013-08-09 19:13:22 +00:00
parent d61827c9a8
commit 2f988135e3
3 changed files with 68 additions and 3 deletions

View File

@ -304,6 +304,9 @@ Release 2.1.1-beta - UNRELEASED
HDFS-5043. For HdfsFileStatus, set default value of childrenNum to -1
instead of 0 to avoid confusing applications. (brandonli)
HDFS-4993. Fsck can fail if a file is renamed or deleted. (Robert Parker
via kihwal)
Release 2.1.0-beta - 2013-08-06

INCOMPATIBLE CHANGES

View File

@ -142,7 +142,7 @@ public class NamenodeFsck {
/**
 * Filesystem checker.
 * @param conf configuration (namenode config)
 * @param namenode namenode that this fsck is going to use
 * @param pmap key=value[] map passed to the http servlet as url parameters
 * @param out output stream to write the fsck output
 * @param totalDatanodes number of live datanodes
@ -302,8 +302,13 @@ public class NamenodeFsck {
long fileLen = file.getLen(); long fileLen = file.getLen();
// Get block locations without updating the file access time // Get block locations without updating the file access time
// and without block access tokens // and without block access tokens
LocatedBlocks blocks = namenode.getNamesystem().getBlockLocations(path, 0, LocatedBlocks blocks;
fileLen, false, false, false); try {
blocks = namenode.getNamesystem().getBlockLocations(path, 0,
fileLen, false, false, false);
} catch (FileNotFoundException fnfe) {
blocks = null;
}
if (blocks == null) { // the file is deleted if (blocks == null) { // the file is deleted
return; return;
} }

View File

@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull; import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedReader; import java.io.BufferedReader;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -81,6 +83,8 @@ import org.apache.log4j.RollingFileAppender;
import org.junit.Test; import org.junit.Test;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import org.mockito.Mockito;
import static org.mockito.Mockito.*;
/** /**
* A JUnit test for doing fsck * A JUnit test for doing fsck
@ -876,6 +880,59 @@ public class TestFsck {
} }
} }
/**
 * Test that fsck tolerates a file disappearing (renamed or deleted) between
 * the directory listing and the block-location lookup: check() must return
 * quietly instead of propagating FileNotFoundException, and the filesystem
 * must still be reported HEALTHY. Regression test for HDFS-4993.
 */
@Test
public void testFsckFileNotFound() throws Exception {
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;

  Configuration conf = new Configuration();
  NameNode namenode = mock(NameNode.class);
  NetworkTopology nettop = mock(NetworkTopology.class);
  Map<String, String[]> pmap = new HashMap<String, String[]>();
  Writer result = new StringWriter();
  PrintWriter out = new PrintWriter(result, true);
  InetAddress remoteAddress = InetAddress.getLocalHost();

  // Stub the namesystem so every block-location lookup reports the file
  // as missing — simulating a concurrent rename/delete during the fsck scan.
  FSNamesystem fsName = mock(FSNamesystem.class);
  when(namenode.getNamesystem()).thenReturn(fsName);
  when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
      anyBoolean(), anyBoolean(), anyBoolean()))
      .thenThrow(new FileNotFoundException());

  NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
      NUM_REPLICAS, (short) 1, remoteAddress);

  // Build an HdfsFileStatus for a path that no longer resolves.
  String pathString = "/tmp/testFile";
  long length = 123L;
  boolean isDir = false;
  int blockReplication = 1;
  long blockSize = 128 * 1024L;
  long modTime = 123123123L;
  long accessTime = 123123120L;
  FsPermission perms = FsPermission.getDefault();
  String owner = "foo";
  String group = "bar";
  byte[] symlink = null;
  // Fix: the original allocated new byte[128] and immediately discarded it.
  byte[] path = DFSUtil.string2Bytes(pathString);
  long fileId = 312321L;
  int numChildren = 1;
  HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
      blockSize, modTime, accessTime, perms, owner, group, symlink, path,
      fileId, numChildren);
  Result res = new Result(conf);

  // Must not throw: a vanished file is skipped, not treated as an error.
  // (Letting any exception propagate fails the test with the full stack
  // trace, unlike the old catch-and-fail(e.getMessage()) pattern.)
  fsck.check(pathString, file, res);

  // A missing file must not mark the filesystem unhealthy.
  assertTrue(res.toString().contains("HEALTHY"));
}
/** Test fsck with symlinks in the filesystem */ /** Test fsck with symlinks in the filesystem */
@Test @Test
public void testFsckSymlink() throws Exception { public void testFsckSymlink() throws Exception {