diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 795a34eef3e..554edeae330 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -438,6 +438,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5582. hdfs getconf -excludeFile or -includeFile always failed (sathish via cmccabe)
 
+    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index c3a4b359a90..3909b3e1c0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1188,11 +1188,21 @@ protected BlockReader getBlockReader(InetSocketAddress dnAddr,
     }
     // Try to create a new remote peer.
     Peer peer = newTcpPeer(dnAddr);
-    return BlockReaderFactory.newBlockReader(
-        dfsClient.getConf(), file, block, blockToken, startOffset,
-        len, verifyChecksum, clientName, peer, chosenNode,
-        dsFactory, peerCache, fileInputStreamCache, false,
+    try {
+      reader = BlockReaderFactory.newBlockReader(dfsClient.getConf(), file,
+          block, blockToken, startOffset, len, verifyChecksum, clientName,
+          peer, chosenNode, dsFactory, peerCache, fileInputStreamCache, false,
         curCachingStrategy);
+      return reader;
+    } catch (IOException ex) {
+      DFSClient.LOG.debug(
+          "Exception while getting block reader, closing stale " + peer, ex);
+      throw ex;
+    } finally {
+      if (reader == null) {
+        IOUtils.closeQuietly(peer);
+      }
+    }
   }
 
 