From 54f83d9bd917e8641e902c5f0695e65ded472f9a Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Wed, 3 Jun 2015 12:11:46 +0530
Subject: [PATCH] HDFS-8270. create() always retried with hardcoded timeout
 when file already exists with open lease (Contributed by J.Andreina)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../org/apache/hadoop/hdfs/DFSClient.java     |  5 +--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 ----
 .../apache/hadoop/hdfs/NameNodeProxies.java   | 43 +------------------
 .../apache/hadoop/hdfs/TestFileCreation.java  |  6 ---
 5 files changed, 5 insertions(+), 60 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index abf6452a07b..402a5479d2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -936,6 +936,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
     McCabe)
 
+    HDFS-8270. create() always retried with hardcoded timeout when file already
+    exists with open lease (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index fc1cd26e21f..f4ceab36ee4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -342,13 +342,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       this.namenode = rpcNamenode;
       dtService = null;
     } else {
-      boolean noRetries = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES,
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT);
       Preconditions.checkArgument(nameNodeUri != null,
           "null URI");
       proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
-          ClientProtocol.class, nnFallbackToSimpleAuth, !noRetries);
+          ClientProtocol.class, nnFallbackToSimpleAuth);
       this.dtService = proxyInfo.getDelegationTokenService();
       this.namenode = proxyInfo.getProxy();
     }
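
For illustration, a minimal sketch (not part of the patch) of the client-visible effect: with the proxy-level retry removed, create() on a file that is still open under another client's lease surfaces AlreadyBeingCreatedException to the caller right away instead of retrying inside the proxy. The path and the handling below are hypothetical.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public class CreateBehaviorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/already-open-file"); // hypothetical path
    try {
      fs.create(file, false).close();
    } catch (RemoteException re) {
      // After HDFS-8270 the failure reaches the caller without the
      // hardcoded client-side retries, so the caller decides what to do.
      IOException unwrapped = re.unwrapRemoteException(
          AlreadyBeingCreatedException.class);
      if (unwrapped instanceof AlreadyBeingCreatedException) {
        System.err.println("File is still leased by another client: " + file);
      } else {
        throw unwrapped;
      }
    }
  }
}
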
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9c19f91908a..5bb6e5380cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -999,13 +998,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
       "dfs.client.test.drop.namenode.response.number";
   public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
-  // Create a NN proxy without retries for testing.
-  @VisibleForTesting
-  public static final String DFS_CLIENT_TEST_NO_PROXY_RETRIES =
-      "dfs.client.test.no.proxy.retries";
-  @VisibleForTesting
-  public static final boolean DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT = false;
-
   public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
       "dfs.client.slow.io.warning.threshold.ms";
   public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index bafc76f49de..d8735265d67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
@@ -43,7 +42,6 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
@@ -161,31 +159,6 @@ public class NameNodeProxies {
   public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
       URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
       throws IOException {
-    return createProxy(conf, nameNodeUri, xface, fallbackToSimpleAuth, true);
-  }
-
-  /**
-   * Creates the namenode proxy with the passed protocol. This will handle
-   * creation of either HA- or non-HA-enabled proxy objects, depending upon
-   * if the provided URI is a configured logical URI.
-   *
-   * @param conf the configuration containing the required IPC
-   *        properties, client failover configurations, etc.
-   * @param nameNodeUri the URI pointing either to a specific NameNode
-   *        or to a logical nameservice.
-   * @param xface the IPC interface which should be created
-   * @param fallbackToSimpleAuth set to true or false during calls to
-   *        indicate if a secure client falls back to simple auth
-   * @param withRetries certain interfaces have a non-standard retry policy
-   * @return an object containing both the proxy and the associated
-   *         delegation token service it corresponds to
-   * @throws IOException if there is an error creating the proxy
-   **/
-  @SuppressWarnings("unchecked")
-  public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
-      URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth,
-      boolean withRetries)
-      throws IOException {
     AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
         createFailoverProxyProvider(conf, nameNodeUri, xface, true,
           fallbackToSimpleAuth);
@@ -193,7 +166,7 @@ public class NameNodeProxies {
     if (failoverProxyProvider == null) {
       // Non-HA case
       return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), withRetries,
+          UserGroupInformation.getCurrentUser(), true,
           fallbackToSimpleAuth);
     } else {
       // HA case
@@ -442,22 +415,8 @@ public class NameNodeProxies {
 
     if (withRetries) { // create the proxy with retries
 
-      RetryPolicy createPolicy = RetryPolicies
-          .retryUpToMaximumCountWithFixedSleep(5,
-              HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-
-      Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
-          = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-          createPolicy);
-
-      RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
-          defaultPolicy, remoteExceptionToPolicyMap);
       Map<String, RetryPolicy> methodNameToPolicyMap
           = new HashMap<String, RetryPolicy>();
-
-      methodNameToPolicyMap.put("create", methodPolicy);
-
       ClientProtocol translatorProxy =
           new ClientNamenodeProtocolTranslatorPB(proxy);
       return (ClientProtocol) RetryProxy.create(
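
For context, here is the retry wiring that the hunks above delete, reconstructed as a standalone sketch (the class and method names are ours; the policy values mirror the deleted lines). HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD is 60 seconds, so the old proxy could silently retry a failing create() five times, roughly five minutes in total, before reporting AlreadyBeingCreatedException.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RemovedRetryWiringSketch {
  static RetryPolicy buildOldCreatePolicy(RetryPolicy defaultPolicy) {
    // Retry create() up to 5 times, sleeping one lease soft-limit
    // period (60s) between attempts.
    RetryPolicy createPolicy = RetryPolicies
        .retryUpToMaximumCountWithFixedSleep(5,
            HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);

    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
        createPolicy);

    // Anything other than AlreadyBeingCreatedException falls through
    // to the default policy.
    return RetryPolicies.retryByRemoteException(defaultPolicy,
        remoteExceptionToPolicyMap);
  }
}
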
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 49770152f67..525d84ea218 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -74,7 +73,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -380,10 +378,6 @@ public class TestFileCreation {
     SimulatedFSDataset.setFactory(conf);
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
 
-    // Force NameNodeProxies' createNNProxyWithClientProtocol to give
-    // up file creation after one failure.
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES, true);
-
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
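
Applications that depended on the old implicit retry (for example, waiting out a dead writer's soft limit so lease recovery can run) can reproduce it explicitly at the call site instead. A hypothetical helper, with the name, attempt count, and sleep interval chosen purely for illustration:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public final class CreateWithRetrySketch {
  /** Retries create() while the target file is held by another lease. */
  static FSDataOutputStream createWithRetry(FileSystem fs, Path path,
      int maxAttempts, long sleepMs) throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        return fs.create(path, false);
      } catch (RemoteException re) {
        IOException e = re.unwrapRemoteException(
            AlreadyBeingCreatedException.class);
        if (!(e instanceof AlreadyBeingCreatedException)
            || attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(sleepMs); // wait for the old lease to expire or recover
      }
    }
  }
}

This keeps the timeout policy in the application, where the caller can choose values appropriate to its workload, rather than in a hardcoded proxy-level policy.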