diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 813de66366a..4b89e1d6de3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -599,6 +599,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
     McCabe)
 
+    HDFS-8270. create() always retried with hardcoded timeout when file already
+    exists with open lease (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index b228b119e1e..46a12354ce2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -344,13 +344,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       this.namenode = rpcNamenode;
       dtService = null;
     } else {
-      boolean noRetries = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES,
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT);
       Preconditions.checkArgument(nameNodeUri != null,
           "null URI");
       proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
-          ClientProtocol.class, nnFallbackToSimpleAuth, !noRetries);
+          ClientProtocol.class, nnFallbackToSimpleAuth);
       this.dtService = proxyInfo.getDelegationTokenService();
       this.namenode = proxyInfo.getProxy();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f78c8703d81..dc0ecca7a9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -1000,13 +999,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
       "dfs.client.test.drop.namenode.response.number";
   public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
-  // Create a NN proxy without retries for testing.
-  @VisibleForTesting
-  public static final String DFS_CLIENT_TEST_NO_PROXY_RETRIES =
-      "dfs.client.test.no.proxy.retries";
-  @VisibleForTesting
-  public static final boolean DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT = false;
-
   public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
       "dfs.client.slow.io.warning.threshold.ms";
   public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index a7dda7bf330..8706f01343c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
@@ -43,7 +42,6 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
@@ -161,31 +159,6 @@ public class NameNodeProxies {
   public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
       URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
       throws IOException {
-    return createProxy(conf, nameNodeUri, xface, fallbackToSimpleAuth, true);
-  }
-
-  /**
-   * Creates the namenode proxy with the passed protocol. This will handle
-   * creation of either HA- or non-HA-enabled proxy objects, depending upon
-   * if the provided URI is a configured logical URI.
-   *
-   * @param conf the configuration containing the required IPC
-   *        properties, client failover configurations, etc.
-   * @param nameNodeUri the URI pointing either to a specific NameNode
-   *        or to a logical nameservice.
-   * @param xface the IPC interface which should be created
-   * @param fallbackToSimpleAuth set to true or false during calls to
-   *        indicate if a secure client falls back to simple auth
-   * @param withRetries certain interfaces have a non-standard retry policy
-   * @return an object containing both the proxy and the associated
-   *         delegation token service it corresponds to
-   * @throws IOException if there is an error creating the proxy
-   **/
-  @SuppressWarnings("unchecked")
-  public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
-      URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth,
-      boolean withRetries)
-      throws IOException {
     AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
         createFailoverProxyProvider(conf, nameNodeUri, xface, true,
           fallbackToSimpleAuth);
@@ -193,7 +166,7 @@ public class NameNodeProxies {
     if (failoverProxyProvider == null) {
       // Non-HA case
       return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), withRetries,
+          UserGroupInformation.getCurrentUser(), true,
           fallbackToSimpleAuth);
     } else {
       // HA case
@@ -442,22 +415,8 @@
 
     if (withRetries) { // create the proxy with retries
 
-      RetryPolicy createPolicy = RetryPolicies
-          .retryUpToMaximumCountWithFixedSleep(5,
-              HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-
-      Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
-          = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-          createPolicy);
-
-      RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
-          defaultPolicy, remoteExceptionToPolicyMap);
       Map<String, RetryPolicy> methodNameToPolicyMap
           = new HashMap<String, RetryPolicy>();
-
-      methodNameToPolicyMap.put("create", methodPolicy);
-
       ClientProtocol translatorProxy =
         new ClientNamenodeProtocolTranslatorPB(proxy);
       return (ClientProtocol) RetryProxy.create(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 49770152f67..525d84ea218 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -74,7 +73,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -380,10 +378,6 @@ public class TestFileCreation {
     SimulatedFSDataset.setFactory(conf);
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
 
-    // Force NameNodeProxies' createNNProxyWithClientProtocol to give
-    // up file creation after one failure.
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES, true);
-
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
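
Note on caller impact (not part of the patch above): with the proxy-level retry removed, a create() on a file whose lease is still held by another client now fails fast with AlreadyBeingCreatedException instead of being silently retried five times at the lease soft-limit interval. Callers that relied on the old behavior can retry at the application level under a policy they control. The sketch below is a hypothetical illustration, not part of the Hadoop API: openForWrite, MAX_ATTEMPTS, and RETRY_INTERVAL_MS are made-up names, the constants merely mirror the removed policy, and it assumes the exception arrives wrapped in a RemoteException, as it does over RPC.

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
    import org.apache.hadoop.ipc.RemoteException;

    public class CreateRetryExample {
      // Mirrors the removed policy: 5 attempts, one lease soft-limit
      // period (60 seconds) apart. Illustrative values, now tunable by the caller.
      private static final int MAX_ATTEMPTS = 5;
      private static final long RETRY_INTERVAL_MS = 60 * 1000L;

      static FSDataOutputStream openForWrite(FileSystem fs, Path path)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            // overwrite=false: fails while another client holds the file's lease
            return fs.create(path, false);
          } catch (RemoteException re) {
            IOException unwrapped =
                re.unwrapRemoteException(AlreadyBeingCreatedException.class);
            if (!(unwrapped instanceof AlreadyBeingCreatedException)
                || attempt >= MAX_ATTEMPTS) {
              throw unwrapped; // a different failure, or out of attempts
            }
            // Lease still held; wait for the soft limit to expire and retry.
            Thread.sleep(RETRY_INTERVAL_MS);
          }
        }
      }
    }

Keeping the retry in application code makes the attempt count and wait interval visible and configurable, which is the point of removing the hardcoded proxy policy.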