From 5827d1667cf9f9ef6602db534481a931739480ad Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 24 Sep 2013 10:16:14 +0000
Subject: [PATCH] HDFS-5228. The RemoteIterator returned by
 DistributedFileSystem.listFiles may throw NullPointerException. Contributed
 by szetszwo and cnauroth

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1525828 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  5 ++--
 .../hdfs/TestDistributedFileSystem.java     | 29 +++++++++++++++++--
 3 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 25db6583df4..096ead7f992 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -952,6 +952,9 @@ Release 2.1.0-beta - 2013-08-22
     HDFS-5016. Deadlock in pipeline recovery causes Datanode to be marked dead.
     (suresh)
 
+    HDFS-5228. The RemoteIterator returned by DistributedFileSystem.listFiles
+    may throw NullPointerException. (szetszwo and cnauroth via szetszwo)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 6cb84741ab1..6fb572aa472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -713,6 +713,7 @@ public class DistributedFileSystem extends FileSystem {
   protected RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path p,
       final PathFilter filter)
   throws IOException {
+    final Path absF = fixRelativePart(p);
     return new RemoteIterator<LocatedFileStatus>() {
       private DirectoryListing thisListing;
       private int i;
@@ -722,7 +723,7 @@
       { // initializer
         // Fully resolve symlinks in path first to avoid additional resolution
         // round-trips as we fetch more batches of listings
-        src = getPathName(resolvePath(p));
+        src = getPathName(resolvePath(absF));
         // fetch the first batch of entries in the directory
         thisListing = dfs.listPaths(src, HdfsFileStatus.EMPTY_NAME, true);
         statistics.incrementReadOps(1);
@@ -736,7 +737,7 @@
         while (curStat == null && hasNextNoFilter()) {
           LocatedFileStatus next =
               ((HdfsLocatedFileStatus)thisListing.getPartialListing()[i++])
-              .makeQualifiedLocated(getUri(), p);
+              .makeQualifiedLocated(getUri(), absF);
           if (filter.accept(next.getPath())) {
             curStat = next;
           }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index c0093d21aa7..a57ad745612 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -31,8 +31,10 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.lang.ArrayUtils;
@@ -47,9 +49,11 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -226,7 +230,7 @@ public class TestDistributedFileSystem {
     final long millis = Time.now();
 
     {
-      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
       dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
       assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
 
@@ -326,7 +330,7 @@ public class TestDistributedFileSystem {
     }
 
     {
-      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
       assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
 
       //open and check the file
@@ -835,4 +839,25 @@
     }
   }
 
+  @Test(timeout=60000)
+  public void testListFiles() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+
+      final Path relative = new Path("relative");
+      fs.create(new Path(relative, "foo")).close();
+
+      final List<LocatedFileStatus> retVal = new ArrayList<LocatedFileStatus>();
+      final RemoteIterator<LocatedFileStatus> iter = fs.listFiles(relative, true);
+      while (iter.hasNext()) {
+        retVal.add(iter.next());
+      }
+      System.out.println("retVal = " + retVal);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
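
Reviewer note (not part of the patch): the change works because fixRelativePart(p)
is evaluated eagerly, before the anonymous RemoteIterator captures the path. The
iterator previously captured the raw, possibly relative Path p and handed it
lazily to makeQualifiedLocated, where a relative path has no working directory
left to resolve against, hence the NullPointerException. The sketch below is a
minimal, self-contained Java illustration of the same resolve-before-capture
pattern; fixRelativePart, makeQualified, and the path strings here are
simplified hypothetical stand-ins for the HDFS APIs, not the real
implementations.

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class ListFilesSketch {

      // Hypothetical stand-in for FileSystem.fixRelativePart(Path): prepend
      // the working directory when the path is not already absolute.
      static String fixRelativePart(String path, String workingDir) {
        return path.startsWith("/") ? path : workingDir + "/" + path;
      }

      // Hypothetical stand-in for the qualification step: qualifying a
      // relative path with no working directory is the failure the patch avoids.
      static String makeQualified(String path) {
        if (!path.startsWith("/")) {
          throw new NullPointerException("no working directory to resolve " + path);
        }
        return "hdfs://nn" + path;
      }

      // Mirrors the shape of listLocatedStatus: resolve once, up front, so the
      // anonymous iterator only ever sees the absolute path (the HDFS-5228 fix).
      static Iterator<String> listFiles(final String p, String workingDir,
                                        List<String> entries) {
        final String absF = fixRelativePart(p, workingDir); // the added line
        final Iterator<String> names = entries.iterator();
        return new Iterator<String>() {
          @Override public boolean hasNext() { return names.hasNext(); }
          @Override public String next() {
            // Before the fix this used p; a relative p failed here, lazily,
            // long after listFiles() itself had returned successfully.
            return makeQualified(absF + "/" + names.next());
          }
          @Override public void remove() { throw new UnsupportedOperationException(); }
        };
      }

      public static void main(String[] args) {
        Iterator<String> it = listFiles("relative", "/user/alice",
                                        Arrays.asList("foo"));
        while (it.hasNext()) {
          System.out.println(it.next()); // hdfs://nn/user/alice/relative/foo
        }
      }
    }

The general design point the sketch illustrates: any state a lazily evaluated
iterator needs should be computed before the iterator is constructed, because
by the time next() runs, the caller's context (here, the working directory)
may no longer be available.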