diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index cf12793c868..e6af2dbecc5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -261,6 +261,14 @@ public interface HdfsClientConfigKeys {
String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
+ String DFS_CLIENT_FSCK_CONNECT_TIMEOUT =
+ "dfs.client.fsck.connect.timeout";
+ int DFS_CLIENT_FSCK_CONNECT_TIMEOUT_DEFAULT = 60 * 1000;
+
+ String DFS_CLIENT_FSCK_READ_TIMEOUT =
+ "dfs.client.fsck.read.timeout";
+ int DFS_CLIENT_FSCK_READ_TIMEOUT_DEFAULT = 60 * 1000;
+
/**
* These are deprecated config keys to client code.
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 8a2ef8b5920..33a117f314e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -27,6 +27,7 @@ import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.UserGroupInformation;
@@ -137,8 +139,17 @@ public class DFSck extends Configured implements Tool {
super(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.out = out;
+ int connectTimeout = (int) conf.getTimeDuration(
+ HdfsClientConfigKeys.DFS_CLIENT_FSCK_CONNECT_TIMEOUT,
+ HdfsClientConfigKeys.DFS_CLIENT_FSCK_CONNECT_TIMEOUT_DEFAULT,
+ TimeUnit.MILLISECONDS);
+ int readTimeout = (int) conf.getTimeDuration(
+ HdfsClientConfigKeys.DFS_CLIENT_FSCK_READ_TIMEOUT,
+ HdfsClientConfigKeys.DFS_CLIENT_FSCK_READ_TIMEOUT_DEFAULT,
+ TimeUnit.MILLISECONDS);
+
this.connectionFactory = URLConnectionFactory
- .newDefaultURLConnectionFactory(conf);
+ .newDefaultURLConnectionFactory(connectTimeout, readTimeout, conf);
this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0b573ddaf64..f8abb59daa0 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -6174,4 +6174,23 @@
set on fs.protected.directories.
+
+<property>
+  <name>dfs.client.fsck.connect.timeout</name>
+  <value>60000ms</value>
+  <description>
+    The amount of time the fsck client will wait to connect to the namenode
+    before timing out.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.fsck.read.timeout</name>
+  <value>60000ms</value>
+  <description>
+    The amount of time the fsck client will wait to read from the namenode
+    before timing out. If the namenode does not report progress more
+    frequently than this time, the client will give up waiting.
+  </description>
+</property>