HDFS-16610. Make fsck read timeout configurable (#4384)

(cherry picked from commit 34a973a90e)

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
This commit is contained in:
Stephen O'Donnell 2022-06-01 20:36:01 +01:00 committed by S O'Donnell
parent de4c975710
commit 7d6b133af3
3 changed files with 39 additions and 1 deletions

View File

@@ -261,6 +261,14 @@ public interface HdfsClientConfigKeys {
String DFS_LEASE_HARDLIMIT_KEY = "dfs.namenode.lease-hard-limit-sec";
long DFS_LEASE_HARDLIMIT_DEFAULT = 20 * 60;
String DFS_CLIENT_FSCK_CONNECT_TIMEOUT =
"dfs.client.fsck.connect.timeout";
int DFS_CLIENT_FSCK_CONNECT_TIMEOUT_DEFAULT = 60 * 1000;
String DFS_CLIENT_FSCK_READ_TIMEOUT =
"dfs.client.fsck.read.timeout";
int DFS_CLIENT_FSCK_READ_TIMEOUT_DEFAULT = 60 * 1000;
/**
 * These are deprecated config keys to client code.
 */

View File

@@ -27,6 +27,7 @@ import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.UserGroupInformation;
@@ -137,8 +139,17 @@ public class DFSck extends Configured implements Tool {
super(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.out = out;
int connectTimeout = (int) conf.getTimeDuration(
HdfsClientConfigKeys.DFS_CLIENT_FSCK_CONNECT_TIMEOUT,
HdfsClientConfigKeys.DFS_CLIENT_FSCK_CONNECT_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
int readTimeout = (int) conf.getTimeDuration(
HdfsClientConfigKeys.DFS_CLIENT_FSCK_READ_TIMEOUT,
HdfsClientConfigKeys.DFS_CLIENT_FSCK_READ_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
this.connectionFactory = URLConnectionFactory
    .newDefaultURLConnectionFactory(connectTimeout, readTimeout, conf);
this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
}

View File

@@ -6174,4 +6174,23 @@
set on fs.protected.directories.
</description>
</property>
<property>
<name>dfs.client.fsck.connect.timeout</name>
<value>60000ms</value>
<description>
The amount of time the fsck client will wait to connect to the namenode
before timing out.
</description>
</property>
<property>
<name>dfs.client.fsck.read.timeout</name>
<value>60000ms</value>
<description>
The amount of time the fsck client will wait to read from the namenode
before timing out. If the namenode does not report progress more
frequently than this time, the client will give up waiting.
</description>
</property>
</configuration>