svn merge -c 1512451, merging from trunk to branch-2 to fix HDFS-4993.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1512452 13f79535-47bb-0310-9956-ffa450edef68
parent be700565a4
commit da44e0e909
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -81,6 +81,9 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5043. For HdfsFileStatus, set default value of childrenNum to -1
     instead of 0 to avoid confusing applications. (brandonli)
 
+    HDFS-4993. Fsck can fail if a file is renamed or deleted. (Robert Parker
+    via kihwal)
+
 Release 2.1.0-beta - 2013-08-06
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -142,7 +142,7 @@ public class NamenodeFsck {
   /**
    * Filesystem checker.
    * @param conf configuration (namenode config)
-   * @param nn namenode that this fsck is going to use
+   * @param namenode namenode that this fsck is going to use
    * @param pmap key=value[] map passed to the http servlet as url parameters
    * @param out output stream to write the fsck output
    * @param totalDatanodes number of live datanodes
@@ -302,8 +302,13 @@ public class NamenodeFsck {
     long fileLen = file.getLen();
     // Get block locations without updating the file access time
     // and without block access tokens
-    LocatedBlocks blocks = namenode.getNamesystem().getBlockLocations(path, 0,
-        fileLen, false, false, false);
+    LocatedBlocks blocks;
+    try {
+      blocks = namenode.getNamesystem().getBlockLocations(path, 0,
+          fileLen, false, false, false);
+    } catch (FileNotFoundException fnfe) {
+      blocks = null;
+    }
     if (blocks == null) { // the file is deleted
       return;
     }
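
The change above closes a check-then-act race: fsck resolves block locations for each file only after it has walked the directory listing, so a file renamed or deleted in between makes getBlockLocations() throw FileNotFoundException, which previously aborted the entire fsck run. Catching the exception and falling through to the existing blocks == null branch lets the scan skip the vanished file and keep going. A minimal self-contained sketch of the same guard, with illustrative names (BlockLookup and locateOrNull are not Hadoop APIs):

import java.io.FileNotFoundException;
import java.util.List;

// Sketch of the guard NamenodeFsck applies above: a lookup that can race with
// a concurrent rename/delete is wrapped so "file vanished" is treated like
// "nothing to check" instead of failing the whole scan.
public class VanishedFileGuard {

  /** Stand-in for namenode.getNamesystem().getBlockLocations(...). */
  interface BlockLookup {
    List<String> locate(String path) throws FileNotFoundException;
  }

  /** Returns the locations, or null if the file disappeared mid-scan. */
  static List<String> locateOrNull(BlockLookup lookup, String path) {
    try {
      return lookup.locate(path);
    } catch (FileNotFoundException fnfe) {
      return null; // renamed or deleted since the directory was listed
    }
  }

  public static void main(String[] args) {
    BlockLookup alwaysGone = p -> {
      throw new FileNotFoundException(p);
    };
    // The scan survives the vanished file instead of propagating the error.
    System.out.println(locateOrNull(alwaysGone, "/tmp/justDeleted")); // prints: null
  }
}

Catching only FileNotFoundException matters here: any other failure from the lookup still surfaces, so the guard hides exactly the race it is meant to tolerate.
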
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -81,6 +83,8 @@ import org.apache.log4j.RollingFileAppender;
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.*;
 
 /**
  * A JUnit test for doing fsck
@@ -876,6 +880,59 @@ public class TestFsck {
     }
   }
 
+  /** Test fsck with FileNotFound */
+  @Test
+  public void testFsckFileNotFound() throws Exception {
+
+    // Number of replicas to actually start
+    final short NUM_REPLICAS = 1;
+
+    Configuration conf = new Configuration();
+    NameNode namenode = mock(NameNode.class);
+    NetworkTopology nettop = mock(NetworkTopology.class);
+    Map<String,String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    FSNamesystem fsName = mock(FSNamesystem.class);
+    when(namenode.getNamesystem()).thenReturn(fsName);
+    when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
+        anyBoolean(), anyBoolean(), anyBoolean())).
+        thenThrow(new FileNotFoundException());
+
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        NUM_REPLICAS, (short)1, remoteAddress);
+
+    String pathString = "/tmp/testFile";
+
+    long length = 123L;
+    boolean isDir = false;
+    int blockReplication = 1;
+    long blockSize = 128 * 1024L;
+    long modTime = 123123123L;
+    long accessTime = 123123120L;
+    FsPermission perms = FsPermission.getDefault();
+    String owner = "foo";
+    String group = "bar";
+    byte[] symlink = null;
+    byte[] path = new byte[128];
+    path = DFSUtil.string2Bytes(pathString);
+    long fileId = 312321L;
+    int numChildren = 1;
+
+    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
+        blockSize, modTime, accessTime, perms, owner, group, symlink, path,
+        fileId, numChildren);
+    Result res = new Result(conf);
+
+    try {
+      fsck.check(pathString, file, res);
+    } catch (Exception e) {
+      fail("Unexpected exception " + e.getMessage());
+    }
+    assertTrue(res.toString().contains("HEALTHY"));
+  }
+
   /** Test fsck with symlinks in the filesystem */
   @Test
   public void testFsckSymlink() throws Exception {
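
The new test drives that branch with plain Mockito stubbing: mock the namesystem, make getBlockLocations() throw FileNotFoundException, and assert fsck still reports HEALTHY. The same technique applied to the sketch above, trimmed to its core (assumes JUnit 4 and Mockito on the classpath; names are illustrative):

import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.FileNotFoundException;

import org.junit.Test;

// Same stubbing pattern as testFsckFileNotFound, reduced to its essentials:
// force the collaborator to throw and assert the caller degrades gracefully.
public class VanishedFileGuardTest {

  @Test
  public void lookupThatThrowsIsTreatedAsDeleted() throws Exception {
    VanishedFileGuard.BlockLookup lookup = mock(VanishedFileGuard.BlockLookup.class);
    when(lookup.locate(anyString())).thenThrow(new FileNotFoundException());

    // No exception escapes; the guard maps the race to "file is gone".
    assertNull(VanishedFileGuard.locateOrNull(lookup, "/tmp/testFile"));
  }
}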