From e53d39489c9655015641b324de9ea7334cd3ea9e Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 3 Apr 2012 19:28:47 +0000
Subject: [PATCH] svn merge -c 1309103 from trunk for HDFS-3166. Add timeout
 to Hftp connections.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1309104 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   2 +
 .../apache/hadoop/hdfs/HftpFileSystem.java    |  23 +---
 .../apache/hadoop/hdfs/HsftpFileSystem.java   |  13 +-
 .../hdfs/tools/DelegationTokenFetcher.java    |   8 +-
 .../org/apache/hadoop/hdfs/web/URLUtils.java  |  51 +++++++
 .../hadoop/hdfs/TestHftpURLTimeouts.java      | 128 ++++++++++++++++++
 6 files changed, 196 insertions(+), 29 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c5822d8bc0..c30ea8b9f41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -625,6 +625,8 @@ Release 0.23.3 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-3166. Add timeout to Hftp connections. (Daryn Sharp via szetszwo)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
index 8fe8cba60d8..829190623a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -293,15 +294,6 @@ public class HftpFileSystem extends FileSystem
     }
     return ugiParamenter.toString();
   }
-
-  static Void throwIOExceptionFromConnection(
-      final HttpURLConnection connection, final IOException ioe
-      ) throws IOException {
-    final int code = connection.getResponseCode();
-    final String s = connection.getResponseMessage();
-    throw s == null? ioe:
-        new IOException(s + " (error code=" + code + ")", ioe);
-  }
 
   /**
    * Open an HTTP connection to the namenode to read file data and metadata.
@@ -312,13 +304,10 @@ public class HftpFileSystem extends FileSystem
       throws IOException {
     query = addDelegationTokenParam(query);
     final URL url = getNamenodeURL(path, query);
-    final HttpURLConnection connection = (HttpURLConnection)url.openConnection();
-    try {
-      connection.setRequestMethod("GET");
-      connection.connect();
-    } catch (IOException ioe) {
-      throwIOExceptionFromConnection(connection, ioe);
-    }
+    final HttpURLConnection connection =
+        (HttpURLConnection)URLUtils.openConnection(url);
+    connection.setRequestMethod("GET");
+    connection.connect();
     return connection;
   }
 
@@ -342,7 +331,7 @@ public class HftpFileSystem extends FileSystem
 
     @Override
     protected HttpURLConnection openConnection() throws IOException {
-      return (HttpURLConnection)url.openConnection();
+      return (HttpURLConnection)URLUtils.openConnection(url);
     }
 
     /** Use HTTP Range header for specifying offset. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
index 97e3b2414a3..b6fe12d66b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
@@ -41,6 +41,7 @@ import javax.net.ssl.X509TrustManager;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.URLUtils;
 
 /**
  * An implementation of a protocol for accessing filesystems over HTTPS. The
@@ -137,15 +138,11 @@ public class HsftpFileSystem extends HftpFileSystem {
     query = addDelegationTokenParam(query);
     final URL url = new URL("https", nnAddr.getHostName(), nnAddr.getPort(),
        path + '?' + query);
-    HttpsURLConnection conn = (HttpsURLConnection)url.openConnection();
+    HttpsURLConnection conn = (HttpsURLConnection)URLUtils.openConnection(url);
     // bypass hostname verification
-    try {
-      conn.setHostnameVerifier(new DummyHostnameVerifier());
-      conn.setRequestMethod("GET");
-      conn.connect();
-    } catch (IOException ioe) {
-      throwIOExceptionFromConnection(conn, ioe);
-    }
+    conn.setHostnameVerifier(new DummyHostnameVerifier());
+    conn.setRequestMethod("GET");
+    conn.connect();
 
     // check cert expiration date
     final int warnDays = ExpWarnDays;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 0d062959c0b..c1889c5cc29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
+import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -216,8 +217,7 @@ public class DelegationTokenFetcher {
       }
       URL remoteURL = new URL(url.toString());
       SecurityUtil.fetchServiceTicket(remoteURL);
-      URLConnection connection = remoteURL.openConnection();
-
+      URLConnection connection = URLUtils.openConnection(remoteURL);
       InputStream in = connection.getInputStream();
       Credentials ts = new Credentials();
       dis = new DataInputStream(in);
@@ -257,7 +257,7 @@ public class DelegationTokenFetcher {
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error renewing token: " +
             connection.getResponseMessage());
@@ -351,7 +351,7 @@ public class DelegationTokenFetcher {
     try {
       URL url = new URL(buf.toString());
       SecurityUtil.fetchServiceTicket(url);
-      connection = (HttpURLConnection) url.openConnection();
+      connection = (HttpURLConnection)URLUtils.openConnection(url);
       if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
         throw new IOException("Error cancelling token: " +
             connection.getResponseMessage());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java
new file mode 100644
index 00000000000..7e4edd2c81a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.net.URL;
+import java.net.URLConnection;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Utilities for handling URLs
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
+public class URLUtils {
+  /**
+   * Timeout for socket connects and reads
+   */
+  public static int SOCKET_TIMEOUT = 1*60*1000; // 1 minute
+
+  /**
+   * Opens a url with read and connect timeouts
+   * @param url to open
+   * @return URLConnection
+   * @throws IOException
+   */
+  public static URLConnection openConnection(URL url) throws IOException {
+    URLConnection connection = url.openConnection();
+    connection.setConnectTimeout(SOCKET_TIMEOUT);
+    connection.setReadTimeout(SOCKET_TIMEOUT);
+    return connection;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
new file mode 100644
index 00000000000..483d184d928
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URLConnection;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.web.URLUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestHftpURLTimeouts {
+  @BeforeClass
+  public static void setup() {
+    URLUtils.SOCKET_TIMEOUT = 1;
+  }
+
+  @Test
+  public void testHftpSocketTimeout() throws Exception {
+    Configuration conf = new Configuration();
+    ServerSocket socket = new ServerSocket(0,1);
+    URI uri = new URI("hftp", null,
+        InetAddress.getByName(null).getHostAddress(),
+        socket.getLocalPort(),
+        null, null, null);
+    boolean timedout = false;
+
+    HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
+    HttpURLConnection conn = fs.openConnection("/", "");
+    timedout = false;
+    try {
+      // this will consume the only slot in the backlog
+      conn.getInputStream();
+    } catch (SocketTimeoutException ste) {
+      timedout = true;
+      assertEquals("Read timed out", ste.getMessage());
+    } finally {
+      if (conn != null) conn.disconnect();
+    }
+    assertTrue("read timedout", timedout);
+    assertTrue("connect timedout", checkConnectTimeout(fs, false));
+  }
+
+  @Test
+  public void testHsftpSocketTimeout() throws Exception {
+    Configuration conf = new Configuration();
+    ServerSocket socket = new ServerSocket(0,1);
+    URI uri = new URI("hsftp", null,
+        InetAddress.getByName(null).getHostAddress(),
+        socket.getLocalPort(),
+        null, null, null);
+    boolean timedout = false;
+
+    HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
+    HttpURLConnection conn = null;
+    timedout = false;
+    try {
+      // this will consume the only slot in the backlog
+      conn = fs.openConnection("/", "");
+    } catch (SocketTimeoutException ste) {
+      // SSL expects a negotiation, so it will timeout on read, unlike hftp
+      timedout = true;
+      assertEquals("Read timed out", ste.getMessage());
+    } finally {
+      if (conn != null) conn.disconnect();
+    }
+    assertTrue("ssl read connect timedout", timedout);
+    assertTrue("connect timedout", checkConnectTimeout(fs, true));
+  }
+
+  private boolean checkConnectTimeout(HftpFileSystem fs, boolean ignoreReadTimeout)
+      throws IOException {
+    boolean timedout = false;
+    List<HttpURLConnection> conns = new LinkedList<HttpURLConnection>();
+    try {
+      // with a listen backlog of 1, should only have to make one connection
+      // to trigger a connection timeout.  however... linux doesn't honor the
+      // socket's listen backlog so we have to try a bunch of times
+      for (int n=32; !timedout && n > 0; n--) {
+        try {
+          conns.add(fs.openConnection("/", ""));
+        } catch (SocketTimeoutException ste) {
+          String message = ste.getMessage();
+          // https will get a read timeout due to SSL negotiation, but
+          // a normal http will not, so need to ignore SSL read timeouts
+          // until a connect timeout occurs
+          if (!(ignoreReadTimeout && message.equals("Read timed out"))) {
+            timedout = true;
+            assertEquals("connect timed out", message);
+          }
+        }
+      }
+    } finally {
+      for (HttpURLConnection conn : conns) {
+        conn.disconnect();
+      }
+    }
+    return timedout;
+  }
+}
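
Editorial note (not part of the patch): the change routes every Hftp/Hsftp and
DelegationTokenFetcher connection through URLUtils.openConnection(), so each request
picks up a 60-second connect and read timeout instead of the JDK default of blocking
indefinitely on a hung namenode. A minimal caller-side sketch follows; the class name
HftpTimeoutExample and the namenode URL are hypothetical, and URLUtils is annotated
LimitedPrivate({"HDFS"}), so code outside HDFS should not depend on it.

import java.net.HttpURLConnection;
import java.net.SocketTimeoutException;
import java.net.URL;

import org.apache.hadoop.hdfs.web.URLUtils;

public class HftpTimeoutExample {
  public static void main(String[] args) throws Exception {
    // hypothetical namenode HTTP address (50070 is the default HTTP port)
    URL url = new URL("http://namenode.example.com:50070/listPaths/");
    // URLUtils.openConnection() sets both connect and read timeouts
    HttpURLConnection conn = (HttpURLConnection) URLUtils.openConnection(url);
    conn.setRequestMethod("GET");
    try {
      conn.connect();
      System.out.println("HTTP " + conn.getResponseCode());
    } catch (SocketTimeoutException ste) {
      // with this patch, an unreachable or unresponsive namenode surfaces
      // here after SOCKET_TIMEOUT (1 minute) rather than hanging forever
      System.err.println("timed out: " + ste.getMessage());
    } finally {
      conn.disconnect();
    }
  }
}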