HDFS-2568. svn merge -c 1204122 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1204123 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2011-11-20 04:15:18 +00:00
parent 2910bd53a6
commit f1d346bf80
3 changed files with 9 additions and 6 deletions

CHANGES.txt

@@ -19,6 +19,9 @@ Release 0.23.1 - UNRELEASED
     HDFS-2563. Some cleanup in BPOfferService. (todd)
+    HDFS-2568. Use a set to manage child sockets in XceiverServer.
+    (harsh via eli)
   OPTIMIZATIONS
     HDFS-2130. Switch default checksum to CRC32C. (todd)

DataXceiver.java

@@ -128,7 +128,7 @@ class DataXceiver extends Receiver implements Runnable {
   public void run() {
     int opsProcessed = 0;
     Op op = null;
-    dataXceiverServer.childSockets.put(s, s);
+    dataXceiverServer.childSockets.add(s);
     try {
       int stdTimeout = s.getSoTimeout();
DataXceiverServer.java

@@ -23,9 +23,9 @@ import java.net.Socket;
 import java.net.SocketTimeoutException;
 import java.nio.channels.AsynchronousCloseException;
 import java.util.Collections;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
-import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
@@ -48,8 +48,8 @@ class DataXceiverServer implements Runnable {
   ServerSocket ss;
   DataNode datanode;
   // Record all sockets opened for data transfer
-  Map<Socket, Socket> childSockets = Collections.synchronizedMap(
-      new HashMap<Socket, Socket>());
+  Set<Socket> childSockets = Collections.synchronizedSet(
+      new HashSet<Socket>());
 
   /**
    * Maximal number of concurrent xceivers per node.
@@ -184,7 +184,7 @@ class DataXceiverServer implements Runnable {
     // close all the sockets that were accepted earlier
     synchronized (childSockets) {
-      for (Iterator<Socket> it = childSockets.values().iterator();
+      for (Iterator<Socket> it = childSockets.iterator();
           it.hasNext();) {
         Socket thissock = it.next();
         try {
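One detail worth noting about the replacement: Collections.synchronizedSet only makes individual calls such as add and remove thread-safe; iterating is a compound operation and still requires holding the set's monitor, which is why the shutdown loop above keeps its synchronized (childSockets) block. A minimal standalone sketch of that pattern, using a hypothetical registry class rather than the actual DataXceiverServer:

import java.io.IOException;
import java.net.Socket;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

class SocketRegistry {
  // Individual operations are guarded by the synchronized wrapper.
  private final Set<Socket> childSockets =
      Collections.synchronizedSet(new HashSet<Socket>());

  void register(Socket s) {
    childSockets.add(s);       // no extra locking needed for a single call
  }

  void unregister(Socket s) {
    childSockets.remove(s);
  }

  // Iteration must hold the set's own lock, mirroring the
  // synchronized (childSockets) block in DataXceiverServer.
  void closeAll() {
    synchronized (childSockets) {
      for (Iterator<Socket> it = childSockets.iterator(); it.hasNext();) {
        Socket sock = it.next();
        try {
          sock.close();
        } catch (IOException e) {
          // best effort: keep closing the remaining sockets
        }
      }
    }
  }
}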