svn merge -c 1309103 from trunk for HDFS-3166. Add timeout to Hftp connections.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1309104 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-04-03 19:28:47 +00:00
parent 8b39213000
commit e53d39489c
6 changed files with 196 additions and 29 deletions
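The pattern is uniform across the patch: every direct url.openConnection() call on the Hftp/Hsftp path is routed through a new URLUtils helper that applies connect and read timeouts. For background, here is a minimal standalone sketch, not taken from this commit and using a placeholder URL, of the problem being fixed: java.net.URLConnection defaults both timeouts to 0, meaning infinite, so a stalled or unreachable server can hang a client thread forever.

import java.net.HttpURLConnection;
import java.net.URL;

public class TimeoutDemo {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; 50070 is the namenode's usual HTTP port.
    URL url = new URL("http://127.0.0.1:50070/data/somefile");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // The defaults are 0 (wait forever); these two calls are what the
    // commit centralizes in URLUtils.openConnection().
    conn.setConnectTimeout(60 * 1000); // fail connect() after one minute
    conn.setReadTimeout(60 * 1000);    // fail reads after one minute
    conn.connect();
    System.out.println("HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}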

CHANGES.txt

@@ -625,6 +625,8 @@ Release 0.23.3 - UNRELEASED

   BUG FIXES

+    HDFS-3166. Add timeout to Hftp connections. (Daryn Sharp via szetszwo)
+
 Release 0.23.2 - UNRELEASED

   INCOMPATIBLE CHANGES

HftpFileSystem.java

@@ -50,6 +50,7 @@
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -294,15 +295,6 @@ private String getEncodedUgiParameter() {
     return ugiParamenter.toString();
   }

-  static Void throwIOExceptionFromConnection(
-      final HttpURLConnection connection, final IOException ioe
-      ) throws IOException {
-    final int code = connection.getResponseCode();
-    final String s = connection.getResponseMessage();
-    throw s == null? ioe:
-        new IOException(s + " (error code=" + code + ")", ioe);
-  }
-
   /**
    * Open an HTTP connection to the namenode to read file data and metadata.
    * @param path The path component of the URL
@@ -312,13 +304,10 @@ protected HttpURLConnection openConnection(String path, String query)
       throws IOException {
     query = addDelegationTokenParam(query);
     final URL url = getNamenodeURL(path, query);
-    final HttpURLConnection connection = (HttpURLConnection)url.openConnection();
-    try {
-      connection.setRequestMethod("GET");
-      connection.connect();
-    } catch (IOException ioe) {
-      throwIOExceptionFromConnection(connection, ioe);
-    }
+    final HttpURLConnection connection =
+        (HttpURLConnection)URLUtils.openConnection(url);
+    connection.setRequestMethod("GET");
+    connection.connect();
     return connection;
   }
@@ -342,7 +331,7 @@ static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {

     @Override
     protected HttpURLConnection openConnection() throws IOException {
-      return (HttpURLConnection)url.openConnection();
+      return (HttpURLConnection)URLUtils.openConnection(url);
     }

     /** Use HTTP Range header for specifying offset. */
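Dropping throwIOExceptionFromConnection also changes how connection failures surface: they are no longer rewrapped with the HTTP response message, so with the new timeouts a dead namenode shows up as a bare java.net.SocketTimeoutException. A sketch of what a caller now sees, assuming URLUtils from this commit and a placeholder URL:

import java.net.HttpURLConnection;
import java.net.SocketTimeoutException;
import java.net.URL;

import org.apache.hadoop.hdfs.web.URLUtils;

public class ConnectFailureDemo {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://127.0.0.1:50070/data/foo"); // placeholder
    HttpURLConnection conn = (HttpURLConnection) URLUtils.openConnection(url);
    conn.setRequestMethod("GET");
    try {
      conn.connect();
    } catch (SocketTimeoutException ste) {
      // "connect timed out" here; "Read timed out" on a later read
      System.err.println("namenode unresponsive: " + ste.getMessage());
    }
  }
}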

HsftpFileSystem.java

@@ -41,6 +41,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.URLUtils;

 /**
  * An implementation of a protocol for accessing filesystems over HTTPS. The
@@ -137,15 +138,11 @@ protected HttpURLConnection openConnection(String path, String query)
     query = addDelegationTokenParam(query);
     final URL url = new URL("https", nnAddr.getHostName(),
         nnAddr.getPort(), path + '?' + query);
-    HttpsURLConnection conn = (HttpsURLConnection)url.openConnection();
+    HttpsURLConnection conn = (HttpsURLConnection)URLUtils.openConnection(url);
     // bypass hostname verification
-    try {
-      conn.setHostnameVerifier(new DummyHostnameVerifier());
-      conn.setRequestMethod("GET");
-      conn.connect();
-    } catch (IOException ioe) {
-      throwIOExceptionFromConnection(conn, ioe);
-    }
+    conn.setHostnameVerifier(new DummyHostnameVerifier());
+    conn.setRequestMethod("GET");
+    conn.connect();

     // check cert expiration date
     final int warnDays = ExpWarnDays;
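One wrinkle for hsftp: the SSL handshake performs reads inside connect(), so a server that accepts the TCP connection but never negotiates fails with "Read timed out" rather than "connect timed out". The new tests below account for exactly this asymmetry. A standalone sketch of the behavior, with a placeholder host and the default secure port as an assumption:

import java.net.URL;

import javax.net.ssl.HttpsURLConnection;

public class SslTimeoutDemo {
  public static void main(String[] args) throws Exception {
    URL url = new URL("https://127.0.0.1:50470/"); // placeholder endpoint
    HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
    conn.setConnectTimeout(60 * 1000);
    conn.setReadTimeout(60 * 1000);
    // TLS negotiation happens during connect(); a silent peer trips the
    // read timeout here, not the connect timeout.
    conn.connect();
  }
}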

DelegationTokenFetcher.java

@@ -48,6 +48,7 @@
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -216,8 +217,7 @@ static public Credentials getDTfromRemote(String nnAddr,
     }
     URL remoteURL = new URL(url.toString());
     SecurityUtil.fetchServiceTicket(remoteURL);
-    URLConnection connection = remoteURL.openConnection();
-
+    URLConnection connection = URLUtils.openConnection(remoteURL);
     InputStream in = connection.getInputStream();
     Credentials ts = new Credentials();
     dis = new DataInputStream(in);
@@ -257,7 +257,7 @@ static public long renewDelegationToken(String nnAddr,
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " +
             connection.getResponseMessage());
@@ -351,7 +351,7 @@ static public void cancelDelegationToken(String nnAddr,
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error cancelling token: " +
             connection.getResponseMessage());

URLUtils.java (new file)

@@ -0,0 +1,51 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;

import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Utilities for handling URLs
 */
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
public class URLUtils {
  /**
   * Timeout for socket connects and reads
   */
  public static int SOCKET_TIMEOUT = 1*60*1000; // 1 minute

  /**
   * Opens a url with read and connect timeouts
   * @param url to open
   * @return URLConnection
   * @throws IOException
   */
  public static URLConnection openConnection(URL url) throws IOException {
    URLConnection connection = url.openConnection();
    connection.setConnectTimeout(SOCKET_TIMEOUT);
    connection.setReadTimeout(SOCKET_TIMEOUT);
    return connection;
  }
}
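Call sites swap url.openConnection() for the helper and keep their existing casts, since the helper returns the same URLConnection it opened. A usage sketch mirroring the changed call sites (the URL is a placeholder):

import java.net.HttpURLConnection;
import java.net.URL;

import org.apache.hadoop.hdfs.web.URLUtils;

public class URLUtilsUsage {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://127.0.0.1:50070/listPaths/"); // placeholder
    // Both timeouts are already set when the connection comes back.
    HttpURLConnection conn = (HttpURLConnection) URLUtils.openConnection(url);
    conn.setRequestMethod("GET");
    conn.connect();
    System.out.println("HTTP " + conn.getResponseCode());
    conn.disconnect();
  }
}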

TestHftpURLTimeouts.java (new file)

@@ -0,0 +1,128 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URLConnection;
import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.web.URLUtils;
import org.junit.BeforeClass;
import org.junit.Test;

public class TestHftpURLTimeouts {
  @BeforeClass
  public static void setup() {
    URLUtils.SOCKET_TIMEOUT = 1;
  }
  @Test
  public void testHftpSocketTimeout() throws Exception {
    Configuration conf = new Configuration();
    ServerSocket socket = new ServerSocket(0, 1);
    URI uri = new URI("hftp", null,
        InetAddress.getByName(null).getHostAddress(),
        socket.getLocalPort(),
        null, null, null);
    boolean timedout = false;

    HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
    HttpURLConnection conn = fs.openConnection("/", "");
    timedout = false;
    try {
      // this will consume the only slot in the backlog
      conn.getInputStream();
    } catch (SocketTimeoutException ste) {
      timedout = true;
      assertEquals("Read timed out", ste.getMessage());
    } finally {
      if (conn != null) conn.disconnect();
    }
    assertTrue("read timedout", timedout);
    assertTrue("connect timedout", checkConnectTimeout(fs, false));
  }
  @Test
  public void testHsftpSocketTimeout() throws Exception {
    Configuration conf = new Configuration();
    ServerSocket socket = new ServerSocket(0, 1);
    URI uri = new URI("hsftp", null,
        InetAddress.getByName(null).getHostAddress(),
        socket.getLocalPort(),
        null, null, null);
    boolean timedout = false;

    HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
    HttpURLConnection conn = null;
    timedout = false;
    try {
      // this will consume the only slot in the backlog
      conn = fs.openConnection("/", "");
    } catch (SocketTimeoutException ste) {
      // SSL expects a negotiation, so it will timeout on read, unlike hftp
      timedout = true;
      assertEquals("Read timed out", ste.getMessage());
    } finally {
      if (conn != null) conn.disconnect();
    }
    assertTrue("ssl read connect timedout", timedout);
    assertTrue("connect timedout", checkConnectTimeout(fs, true));
  }
  private boolean checkConnectTimeout(HftpFileSystem fs, boolean ignoreReadTimeout)
      throws IOException {
    boolean timedout = false;
    List<HttpURLConnection> conns = new LinkedList<HttpURLConnection>();
    try {
      // with a listen backlog of 1, should only have to make one connection
      // to trigger a connection timeout. however... linux doesn't honor the
      // socket's listen backlog so we have to try a bunch of times
      for (int n=32; !timedout && n > 0; n--) {
        try {
          conns.add(fs.openConnection("/", ""));
        } catch (SocketTimeoutException ste) {
          String message = ste.getMessage();
          // https will get a read timeout due to SSL negotiation, but
          // a normal http will not, so need to ignore SSL read timeouts
          // until a connect timeout occurs
          if (!(ignoreReadTimeout && message.equals("Read timed out"))) {
            timedout = true;
            assertEquals("connect timed out", message);
          }
        }
      }
    } finally {
      for (HttpURLConnection conn : conns) {
        conn.disconnect();
      }
    }
    return timedout;
  }
}
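The backlog trick the test leans on deserves a note: a ServerSocket created with backlog 1 and never accept()ed lets one TCP connection through, after which further connects should block until the connect timeout fires; since Linux admits more connections than the nominal backlog, the test retries up to 32 times. A self-contained sketch of the same trick, not part of the commit:

import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.List;

public class BacklogDemo {
  public static void main(String[] args) throws Exception {
    // Backlog of 1 and no accept() calls, so the queue fills immediately.
    ServerSocket server = new ServerSocket(0, 1);
    InetSocketAddress addr =
        new InetSocketAddress("127.0.0.1", server.getLocalPort());
    List<Socket> held = new ArrayList<Socket>(); // keep connections open
    try {
      for (int n = 32; n > 0; n--) { // linux may admit extra connections
        Socket s = new Socket();
        held.add(s);
        try {
          s.connect(addr, 1); // 1 ms connect timeout, like the test
        } catch (SocketTimeoutException expected) {
          System.out.println("connect timed out: " + expected.getMessage());
          return;
        }
      }
      System.out.println("backlog never filled (platform-dependent)");
    } finally {
      for (Socket s : held) s.close();
      server.close();
    }
  }
}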