diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 604d60e20f0..7316e3b8238 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -60,4 +60,21 @@ public interface HdfsClientConfigKeys {
     public static final int WINDOW_BASE_DEFAULT = 3000;
   }
 
+  // WebHDFS retry policy configuration
+  interface WebHdfsRetry {
+    String PREFIX = "dfs.http.client.";
+
+    String  RETRY_POLICY_ENABLED_KEY = PREFIX + "retry.policy.enabled";
+    boolean RETRY_POLICY_ENABLED_DEFAULT = false;
+    String  RETRY_POLICY_SPEC_KEY = PREFIX + "retry.policy.spec";
+    String  RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
+    String  FAILOVER_MAX_ATTEMPTS_KEY = PREFIX + "failover.max.attempts";
+    int     FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
+    String  RETRY_MAX_ATTEMPTS_KEY = PREFIX + "retry.max.attempts";
+    int     RETRY_MAX_ATTEMPTS_DEFAULT = 10;
+    String  FAILOVER_SLEEPTIME_BASE_KEY = PREFIX + "failover.sleep.base.millis";
+    int     FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
+    String  FAILOVER_SLEEPTIME_MAX_KEY = PREFIX + "failover.sleep.max.millis";
+    int     FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
+  }
 }
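Note: the prefix is the literal "dfs.http.client." and each key appends only its suffix, so the resolved strings are identical to the legacy DFSConfigKeys values and existing XML configurations keep working. A minimal usage sketch, not part of the patch (the class name WebHdfsRetryConfigExample is hypothetical; Configuration is the standard Hadoop class):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class WebHdfsRetryConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Enable the WebHDFS retry policy and override the spec. The spec format
    // t1,n1,t2,n2,... means: sleep t1 ms for the first n1 retries, then
    // t2 ms for the next n2 retries, and so on.
    conf.setBoolean(
        HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY, true);
    conf.set(
        HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_KEY,
        "10000,6,60000,10");
    // Prints "dfs.http.client.retry.policy.enabled", the same string the
    // old DFSConfigKeys constant carried.
    System.out.println(
        HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY);
  }
}
```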
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 695dc368e39..e091a65eb10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -412,6 +412,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8099. Change "DFSInputStream has been closed already" message to
     debug log level (Charles Lamb via Colin P. McCabe)
 
+    HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d0ca12516b6..ce0807506fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -601,19 +601,43 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT = 60000;
 
   // WebHDFS retry policy
-  public static final String DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.http.client.retry.policy.enabled";
-  public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false;
-  public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.http.client.retry.policy.spec";
-  public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
-  public static final String DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts";
-  public static final int DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
-  public static final String DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.http.client.retry.max.attempts";
-  public static final int DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
-  public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis";
-  public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
-  public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis";
-  public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
-
+  @Deprecated
+  public static final String DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY;
+  @Deprecated
+  public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_DEFAULT;
+  @Deprecated
+  public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_KEY;
+  @Deprecated
+  public static final String DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_DEFAULT;
+  @Deprecated
+  public static final String DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_KEY;
+  @Deprecated
+  public static final int DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_DEFAULT;
+  @Deprecated
+  public static final String DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_KEY;
+  @Deprecated
+  public static final int DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_DEFAULT;
+  @Deprecated
+  public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_KEY;
+  @Deprecated
+  public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_DEFAULT;
+  @Deprecated
+  public static final String DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_KEY;
+  @Deprecated
+  public static final int DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_DEFAULT;
+
   // Handling unresolved DN topology mapping
   public static final String DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY =
       "dfs.namenode.reject-unresolved-dn-topology-mapping";
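Because the deprecated DFSConfigKeys constants are now plain aliases of the new HdfsClientConfigKeys constants, the two spellings are interchangeable at runtime. A hypothetical compatibility check, not part of the patch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class DeprecatedKeyAliasCheck {
  @SuppressWarnings("deprecation")
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Write under the deprecated alias...
    conf.setInt(DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 20);
    // ...and read back under the new constant; both name the same key.
    int attempts = conf.getInt(
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_DEFAULT);
    System.out.println(attempts); // prints 20
  }
}
```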
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 383f2e49485..044403e9c31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -172,25 +173,25 @@ public class WebHdfsFileSystem extends FileSystem
       this.retryPolicy =
           RetryUtils.getDefaultRetryPolicy(
               conf,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_DEFAULT,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_KEY,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_DEFAULT,
               SafeModeException.class);
     } else {
       int maxFailoverAttempts = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_DEFAULT);
       int maxRetryAttempts = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_DEFAULT);
       int failoverSleepBaseMillis = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_DEFAULT);
       int failoverSleepMaxMillis = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_DEFAULT);
 
       this.retryPolicy = RetryPolicies
           .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index abbcd4d5ea7..5d95a8b79f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -875,7 +875,7 @@ public class TestDFSClientRetries {
     final Path dir = new Path("/testNamenodeRestart");
 
     if (isWebHDFS) {
-      conf.setBoolean(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+      conf.setBoolean(HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY, true);
     } else {
       conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
     }
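For context, a standalone sketch approximating the patched failover branch above, not a verbatim excerpt from WebHdfsFileSystem (the class and method names here are hypothetical). The five-argument RetryPolicies.failoverOnNetworkException overload takes the fallback policy, the failover and retry attempt caps, and the base and maximum sleep times in milliseconds:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class WebHdfsFailoverPolicySketch {
  // Builds a failover retry policy from the relocated configuration keys,
  // mirroring the non-retry-policy branch of the patched code.
  static RetryPolicy buildFailoverPolicy(Configuration conf) {
    int maxFailoverAttempts = conf.getInt(
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_DEFAULT);
    int failoverSleepBaseMillis = conf.getInt(
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_KEY,
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int failoverSleepMaxMillis = conf.getInt(
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_KEY,
        HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_DEFAULT);
    return RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
        maxRetryAttempts, failoverSleepBaseMillis, failoverSleepMaxMillis);
  }
}
```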