HDFS-3754. BlockSender doesn't shutdown ReadaheadPool threads. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1370496 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-08-07 20:23:11 +00:00
parent faf54e5b2a
commit 84314d99e5
3 changed files with 10 additions and 6 deletions

View File

@@ -410,6 +410,8 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-3579. libhdfs: fix exception handling. (Colin Patrick McCabe via atm)
+    HDFS-3754. BlockSender doesn't shutdown ReadaheadPool threads. (eli)
     BREAKDOWN OF HDFS-3042 SUBTASKS
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

View File

@@ -20,7 +20,6 @@
 import java.io.BufferedInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
-import java.io.EOFException;
 import java.io.FileDescriptor;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -38,7 +37,6 @@
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.LongWritable;
@@ -163,8 +161,6 @@ class BlockSender implements java.io.Closeable {
    */
   private static final long LONG_READ_THRESHOLD_BYTES = 256 * 1024;
-  private static ReadaheadPool readaheadPool =
-      ReadaheadPool.getInstance();

   /**
    * Constructor
@@ -691,8 +687,8 @@ private void manageOsCache() throws IOException {
     }
     // Perform readahead if necessary
-    if (readaheadLength > 0 && readaheadPool != null) {
-      curReadahead = readaheadPool.readaheadStream(
+    if (readaheadLength > 0 && datanode.readaheadPool != null) {
+      curReadahead = datanode.readaheadPool.readaheadStream(
           clientTraceFmt, blockInFd,
           offset, readaheadLength, Long.MAX_VALUE,
           curReadahead);

View File

@@ -146,6 +146,7 @@
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -278,6 +279,7 @@ public static InetSocketAddress createSocketAddr(String target) {
   private Configuration conf;
   private final String userWithLocalPathAccess;
+  ReadaheadPool readaheadPool;

   /**
    * Create the DataNode given a configuration and an array of dataDirs.
@@ -673,6 +675,10 @@ void startDataNode(Configuration conf,
     blockPoolManager = new BlockPoolManager(this);
     blockPoolManager.refreshNamenodes(conf);

+    // Create the ReadaheadPool from the DataNode context so we can
+    // exit without having to explicitly shutdown its thread pool.
+    readaheadPool = ReadaheadPool.getInstance();
   }

   /**