HDFS-8270. create() always retried with hardcoded timeout when file already exists with open lease (Contributed by J.Andreina)

(cherry picked from commit 54f83d9bd9)
Vinayakumar B 2015-06-03 12:11:46 +05:30
parent 84cae6c101
commit 066e45bcb6
5 changed files with 5 additions and 60 deletions
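Context for the revert (an explanatory note, not part of the commit): before this change, NameNodeProxies attached a dedicated retry policy to create(), so a create() that failed with AlreadyBeingCreatedException (the file already exists and another client still holds its lease) was silently retried up to 5 times with a fixed sleep of HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds) between attempts — roughly five minutes of hardcoded waiting that callers could not turn off except through the test-only knob this commit also deletes. A minimal sketch of that reverted policy, reconstructed from the removed hunks in NameNodeProxies.java below (the literal 60 * 1000 stands in for LEASE_SOFTLIMIT_PERIOD):

// Sketch only: reconstructed from the removed hunks below, not shipped code.
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RevertedCreateRetrySketch {
  // Equivalent of the policy the removed code attached to create():
  // retry AlreadyBeingCreatedException 5 times, sleeping 60 s each time.
  static RetryPolicy oldCreatePolicy(RetryPolicy defaultPolicy) {
    RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        5, 60 * 1000, TimeUnit.MILLISECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(AlreadyBeingCreatedException.class, createPolicy);
    // every other remote exception keeps the default policy
    return RetryPolicies.retryByRemoteException(defaultPolicy, exceptionToPolicyMap);
  }
}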

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -599,6 +599,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
     McCabe)
 
+    HDFS-8270. create() always retried with hardcoded timeout when file already
+    exists with open lease (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -344,13 +344,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       this.namenode = rpcNamenode;
       dtService = null;
     } else {
-      boolean noRetries = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES,
-          DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT);
       Preconditions.checkArgument(nameNodeUri != null,
           "null URI");
       proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
-          ClientProtocol.class, nnFallbackToSimpleAuth, !noRetries);
+          ClientProtocol.class, nnFallbackToSimpleAuth);
       this.dtService = proxyInfo.getDelegationTokenService();
       this.namenode = proxyInfo.getProxy();
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -1000,13 +999,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
   public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
 
-  // Create a NN proxy without retries for testing.
-  @VisibleForTesting
-  public static final String DFS_CLIENT_TEST_NO_PROXY_RETRIES =
-      "dfs.client.test.no.proxy.retries";
-  @VisibleForTesting
-  public static final boolean DFS_CLIENT_TEST_NO_PROXY_RETRIES_DEFAULT = false;
-
   public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
     "dfs.client.slow.io.warning.threshold.ms";
   public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
@@ -43,7 +42,6 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
@@ -161,31 +159,6 @@ public class NameNodeProxies {
   public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
       URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
       throws IOException {
-    return createProxy(conf, nameNodeUri, xface, fallbackToSimpleAuth, true);
-  }
-
-  /**
-   * Creates the namenode proxy with the passed protocol. This will handle
-   * creation of either HA- or non-HA-enabled proxy objects, depending upon
-   * if the provided URI is a configured logical URI.
-   *
-   * @param conf the configuration containing the required IPC
-   *        properties, client failover configurations, etc.
-   * @param nameNodeUri the URI pointing either to a specific NameNode
-   *        or to a logical nameservice.
-   * @param xface the IPC interface which should be created
-   * @param fallbackToSimpleAuth set to true or false during calls to
-   *        indicate if a secure client falls back to simple auth
-   * @param withRetries certain interfaces have a non-standard retry policy
-   * @return an object containing both the proxy and the associated
-   *         delegation token service it corresponds to
-   * @throws IOException if there is an error creating the proxy
-   **/
-  @SuppressWarnings("unchecked")
-  public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
-      URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth,
-      boolean withRetries)
-      throws IOException {
     AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
         createFailoverProxyProvider(conf, nameNodeUri, xface, true,
             fallbackToSimpleAuth);
@@ -193,7 +166,7 @@ public class NameNodeProxies {
     if (failoverProxyProvider == null) {
       // Non-HA case
       return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), withRetries,
+          UserGroupInformation.getCurrentUser(), true,
           fallbackToSimpleAuth);
     } else {
       // HA case
@@ -442,22 +415,8 @@
     if (withRetries) { // create the proxy with retries
-      RetryPolicy createPolicy = RetryPolicies
-          .retryUpToMaximumCountWithFixedSleep(5,
-              HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-
-      Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
-          = new HashMap<Class<? extends Exception>, RetryPolicy>();
-      remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-          createPolicy);
-
-      RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
-          defaultPolicy, remoteExceptionToPolicyMap);
       Map<String, RetryPolicy> methodNameToPolicyMap
           = new HashMap<String, RetryPolicy>();
-      methodNameToPolicyMap.put("create", methodPolicy);
-
       ClientProtocol translatorProxy =
           new ClientNamenodeProtocolTranslatorPB(proxy);
       return (ClientProtocol) RetryProxy.create(
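
Net effect of the hunks above: createProxy() always builds the proxy with the standard retry policies and create() gets no special casing, so an AlreadyBeingCreatedException reaches the application immediately instead of being swallowed by up to five 60-second sleeps. A hypothetical caller-side sketch (class and method names are illustrative, not from this commit) of handling the exception explicitly:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public class CreateWithOpenLeaseSketch {
  static FSDataOutputStream createNoOverwrite(FileSystem fs, Path path)
      throws IOException {
    try {
      return fs.create(path, false); // overwrite = false
    } catch (AlreadyBeingCreatedException abce) {
      // the client layer may deliver the exception already unwrapped
      throw abce; // or: back off / recover the lease, per application policy
    } catch (RemoteException re) {
      // otherwise unwrap it from the RPC RemoteException
      throw re.unwrapRemoteException(AlreadyBeingCreatedException.class);
    }
  }
}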

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -65,7 +65,6 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -74,7 +73,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -380,10 +378,6 @@ public class TestFileCreation {
     SimulatedFSDataset.setFactory(conf);
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
-    // Force NameNodeProxies' createNNProxyWithClientProtocol to give
-    // up file creation after one failure.
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_TEST_NO_PROXY_RETRIES, true);
-
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
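
With the retry policy itself gone, the test no longer needs a knob to disable it. A condensed, hypothetical sketch of the scenario this setup feeds into: a second create() on a file whose lease is still held should now fail fast rather than stall in proxy-level retries.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OpenLeaseScenarioSketch {
  // fs comes from a running MiniDFSCluster, as in the test above.
  static void secondCreateFailsFast(FileSystem fs) throws IOException {
    Path p = new Path("/test/open-lease");
    FSDataOutputStream out = fs.create(p); // first writer holds the lease
    try {
      fs.create(p, false); // second create, no overwrite
      throw new AssertionError("create() should fail while the lease is held");
    } catch (IOException expected) {
      // AlreadyBeingCreatedException now surfaces immediately,
      // without the old 5 x 60 s proxy-level retry loop
    } finally {
      out.close();
    }
  }
}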