From c5602ea124986a1d5b133464881f174f48256c7a Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Sat, 24 Feb 2018 14:25:56 -0800
Subject: [PATCH] HDFS-12865. RequestHedgingProxyProvider should handle case
 when none of the proxies are available. Contributed by Mukul Kumar Singh.

---
 .../ha/RequestHedgingProxyProvider.java       |  6 +++
 .../ha/TestRequestHedgingProxyProvider.java   | 45 +++++++++++++++++++
 2 files changed, 51 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 010e9e5020c..7b9cd64dc5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -87,6 +88,11 @@ public class RequestHedgingProxyProvider<T> extends
       // Optimization : if only 2 proxies are configured and one had failed
       // over, then we dont need to create a threadpool etc.
       targetProxies.remove(toIgnore);
+      if (targetProxies.size() == 0) {
+        LOG.trace("No valid proxies left");
+        throw new RemoteException(IOException.class.getName(),
+            "No valid proxies left. All NameNode proxies have failed over.");
+      }
       if (targetProxies.size() == 1) {
         ProxyInfo<T> proxyInfo = targetProxies.values().iterator().next();
         try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 65fbbf8948e..8d6b02dc158 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -28,6 +28,7 @@ import java.util.Iterator;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -289,6 +290,50 @@ public class TestRequestHedgingProxyProvider {
     }
   }
 
+  @Test
+  public void testSingleProxyFailover() throws Exception {
+    String singleNS = "mycluster-" + Time.monotonicNow();
+    URI singleNNUri = new URI("hdfs://" + singleNS);
+    Configuration singleConf = new Configuration();
+    singleConf.set(HdfsClientConfigKeys.DFS_NAMESERVICES, singleNS);
+    singleConf.set(HdfsClientConfigKeys.
+        DFS_HA_NAMENODES_KEY_PREFIX + "." + singleNS, "nn1");
+
+    singleConf.set(HdfsClientConfigKeys.
+        DFS_NAMENODE_RPC_ADDRESS_KEY + "." + singleNS + ".nn1",
+        RandomStringUtils.randomAlphabetic(8) + ".foo.bar:9820");
+    ClientProtocol active = Mockito.mock(ClientProtocol.class);
+    Mockito
+        .when(active.getBlockLocations(Matchers.anyString(),
+            Matchers.anyLong(), Matchers.anyLong()))
+        .thenThrow(new RemoteException("java.io.FileNotFoundException",
+            "File does not exist!"));
+
+    RequestHedgingProxyProvider<ClientProtocol> provider =
+        new RequestHedgingProxyProvider<>(singleConf, singleNNUri,
+            ClientProtocol.class, createFactory(active));
+    try {
+      provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
+      Assert.fail("Should fail since the active namenode throws"
+          + " FileNotFoundException!");
+    } catch (RemoteException ex) {
+      Exception rEx = ex.unwrapRemoteException();
+      Assert.assertTrue(rEx instanceof FileNotFoundException);
+    }
+    //Perform failover now, there will be no active proxies now
+    provider.performFailover(active);
+    try {
+      provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
+      Assert.fail("Should fail since the active namenode throws"
+          + " FileNotFoundException!");
+    } catch (RemoteException ex) {
+      Exception rEx = ex.unwrapRemoteException();
+      Assert.assertTrue(rEx instanceof IOException);
+      Assert.assertTrue(rEx.getMessage().equals("No valid proxies left."
+          + " All NameNode proxies have failed over."));
+    }
+  }
+
   @Test
   public void testPerformFailoverWith3Proxies() throws Exception {
     conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
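
For context only, and not part of the patch itself: a minimal sketch of how the new guard surfaces to a caller. It mirrors the testSingleProxyFailover flow above; the provider is assumed to be configured with a single NameNode exactly as in that test, and the class and method names below (HedgingFailoverSketch, demo) are illustrative placeholders.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider;
import org.apache.hadoop.ipc.RemoteException;

public class HedgingFailoverSketch {
  // 'provider' is assumed to be built with a single configured NameNode,
  // exactly as in testSingleProxyFailover above (the test's createFactory
  // helper supplies the underlying proxies there).
  static void demo(RequestHedgingProxyProvider<ClientProtocol> provider)
      throws IOException {
    ClientProtocol namenode = provider.getProxy().proxy;
    try {
      // First attempt goes to the only configured NameNode.
      namenode.getBlockLocations("/tmp/test.file", 0L, 20L);
    } catch (RemoteException first) {
      // Failing over away from the only proxy leaves the provider with
      // nothing left to try.
      provider.performFailover(namenode);
      try {
        provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
      } catch (RemoteException second) {
        // With this patch the call fails fast; the wrapped exception is an
        // IOException carrying:
        // "No valid proxies left. All NameNode proxies have failed over."
        IOException cause = second.unwrapRemoteException();
        System.err.println(cause.getMessage());
      }
    }
  }
}

The point of the change, as exercised here, is that once every configured proxy has been failed over away from, the next invocation fails fast with a descriptive RemoteException instead of proceeding with an empty proxy map.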