MAPREDUCE-6240. Hadoop client displays confusing error message. (gera)

Author: Gera Shegalov
Date:   2016-05-28 22:01:07 -07:00
Commit: 0af96a1c08 (parent 1ee9ea0026)

2 changed files with 32 additions and 9 deletions

org/apache/hadoop/mapreduce/Cluster.java

@@ -102,6 +102,10 @@ public class Cluster {
       throws IOException {
 
     initProviderList();
+    final IOException initEx = new IOException(
+        "Cannot initialize Cluster. Please check your configuration for "
+            + MRConfig.FRAMEWORK_NAME
+            + " and the correspond server addresses.");
     for (ClientProtocolProvider provider : providerList) {
       LOG.debug("Trying ClientProtocolProvider : "
           + provider.getClass().getName());
@@ -124,16 +128,15 @@ public class Cluster {
               + " as the ClientProtocolProvider - returned null protocol");
         }
       } catch (Exception e) {
-        LOG.info("Failed to use " + provider.getClass().getName()
-            + " due to error: ", e);
+        final String errMsg = "Failed to use " + provider.getClass().getName()
+            + " due to error: ";
+        initEx.addSuppressed(new IOException(errMsg, e));
+        LOG.info(errMsg, e);
       }
     }
 
     if (null == clientProtocolProvider || null == client) {
-      throw new IOException(
-          "Cannot initialize Cluster. Please check your configuration for "
-              + MRConfig.FRAMEWORK_NAME
-              + " and the correspond server addresses.");
+      throw initEx;
     }
   }
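The aggregation above relies on Throwable#addSuppressed (available since Java 7): each provider failure is attached to the single summary IOException instead of only being logged, so the exception that finally reaches the user lists every root cause under "Suppressed:" in its stack trace, and full per-provider stack traces are preserved rather than flattened into one message. Below is a minimal standalone sketch of that pattern; it is not Hadoop code, and the SuppressedCauseDemo class and the provider-name strings are illustrative only.

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;

    public class SuppressedCauseDemo {
      public static void main(String[] args) {
        // One summary error, analogous to initEx in Cluster#initialize.
        final IOException summary = new IOException(
            "Cannot initialize Cluster. Please check your configuration.");
        // Stand-in for the ClientProtocolProvider loop; names are illustrative.
        final List<String> providers = Arrays.asList(
            "YarnClientProtocolProvider", "LocalClientProtocolProvider");
        for (String provider : providers) {
          try {
            // Stand-in for provider.create(conf) failing.
            throw new UnsupportedOperationException(provider + " failed");
          } catch (Exception e) {
            // Same pattern as initEx.addSuppressed(new IOException(errMsg, e)):
            // keep the individual failure instead of only logging it.
            summary.addSuppressed(new IOException("Failed to use " + provider, e));
          }
        }
        // The stack trace now lists each attached failure under "Suppressed:",
        // so the real root cause reaches the user.
        summary.printStackTrace();
      }
    }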

org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java

@@ -18,17 +18,20 @@
 package org.apache.hadoop.mapreduce;
 
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.mapred.LocalJobRunner;
 import org.apache.hadoop.mapred.YARNRunner;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 public class TestClientProtocolProviderImpls {
 
   @Test
@@ -76,4 +79,21 @@ public class TestClientProtocolProviderImpls {
           "Cannot initialize Cluster. Please check"));
     }
   }
+
+  @Test
+  public void testClusterExceptionRootCause() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
+    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "nosuchfs:///");
+    conf.set(JTConfig.JT_IPC_ADDRESS, "local");
+    try {
+      new Cluster(conf);
+      fail("Cluster init should fail because of non-existing FileSystem");
+    } catch (IOException ioEx) {
+      final String stackTrace = StringUtils.stringifyException(ioEx);
+      assertTrue("No root cause detected",
+          stackTrace.contains(UnsupportedFileSystemException.class.getName())
+              && stackTrace.contains("nosuchfs"));
+    }
+  }
 }
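The new test asserts against the stringified stack trace, but code that catches the IOException thrown by new Cluster(conf) can also inspect the aggregated failures directly through Throwable#getSuppressed. The sketch below is a hypothetical caller (ClusterInitDiagnostics is not part of this change), and whether initialization actually fails depends on the configuration in use.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Cluster;

    public class ClusterInitDiagnostics {
      public static void main(String[] args) {
        final Configuration conf = new Configuration();
        try {
          new Cluster(conf);
          System.out.println("Cluster initialized");
        } catch (IOException summary) {
          // The summary message is the generic "Cannot initialize Cluster ..." text.
          System.err.println(summary.getMessage());
          // Each suppressed entry is one "Failed to use <provider> due to error:"
          // wrapper added in Cluster#initialize; its cause is the real root cause.
          for (Throwable perProvider : summary.getSuppressed()) {
            System.err.println("  " + perProvider.getMessage());
            if (perProvider.getCause() != null) {
              System.err.println("    caused by: " + perProvider.getCause());
            }
          }
        }
      }
    }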