diff --git a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index c056d2112d4..c43f1d5b5d6 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -409,6 +409,13 @@
+
+
+
+
+
+
+
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index f62a715a102..8a9aee6dbdc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -69,6 +69,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
+import javax.annotation.Nullable;
 import javax.xml.parsers.DocumentBuilderFactory;
 import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.stream.XMLInputFactory;
@@ -3445,7 +3446,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     writeXml(new OutputStreamWriter(out, "UTF-8"));
   }
 
-  public void writeXml(Writer out) throws IOException {
+  public void writeXml(@Nullable Writer out) throws IOException {
     writeXml(null, out);
   }
 
@@ -3473,7 +3474,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * </pre>
    *
    * @param out the writer to write to.
    */
-  public void writeXml(String propertyName, Writer out)
+  public void writeXml(@Nullable String propertyName, Writer out)
       throws IOException, IllegalArgumentException {
     Document doc = asXmlDocument(propertyName);
@@ -3495,7 +3496,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /**
    * Return the XML DOM corresponding to this Configuration.
    */
-  private synchronized Document asXmlDocument(String propertyName)
+  private synchronized Document asXmlDocument(@Nullable String propertyName)
       throws IOException, IllegalArgumentException {
     Document doc;
     try {
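Note: the Configuration.java hunks above change no behavior; they add JSR-305 @Nullable annotations recording that a null propertyName is legal and means "serialize the whole configuration". A minimal sketch of that call path (WriteXmlDemo is a hypothetical class, not part of the patch; assumes hadoop-common on the classpath):

    import java.io.StringWriter;
    import org.apache.hadoop.conf.Configuration;

    public class WriteXmlDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        StringWriter sink = new StringWriter();
        // propertyName is @Nullable: null means "write every property",
        // which is exactly what the one-argument writeXml(Writer) delegates to.
        conf.writeXml(null, sink);
        System.out.println(sink);
      }
    }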
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
index 63ec9a5d29e..b29278bd207 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
@@ -375,7 +375,7 @@ public class Groups {
           backgroundRefreshException.incrementAndGet();
           backgroundRefreshRunning.decrementAndGet();
         }
-      });
+      }, MoreExecutors.directExecutor());
       return listenableFuture;
     }
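The Groups.java change above is the pattern repeated throughout this patch: Guava 27 no longer ships the two-argument Futures.addCallback overload, so every call site must name the executor that runs the callback, and MoreExecutors.directExecutor() preserves the old same-thread semantics. A self-contained sketch of the new signature (AddCallbackDemo is a hypothetical class, not part of the patch):

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;
    import java.util.concurrent.Executors;

    public class AddCallbackDemo {
      public static void main(String[] args) {
        ListeningExecutorService pool =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<String> f = pool.submit(() -> "groups refreshed");
        // Guava 27 makes the executor argument mandatory; directExecutor()
        // runs the callback on whichever thread completes the future.
        Futures.addCallback(f, new FutureCallback<String>() {
          @Override public void onSuccess(String result) { System.out.println(result); }
          @Override public void onFailure(Throwable t) { t.printStackTrace(); }
        }, MoreExecutors.directExecutor());
        pool.shutdown();
      }
    }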
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java
index bcc19e35e85..4ec77e75ba5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java
@@ -107,7 +107,7 @@ public class SemaphoredDelegatingExecutor extends
       queueingPermits.acquire();
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
-      return Futures.immediateFailedCheckedFuture(e);
+      return Futures.immediateFailedFuture(e);
     }
     return super.submit(new CallableWithPermitRelease<>(task));
   }
@@ -118,7 +118,7 @@ public class SemaphoredDelegatingExecutor extends
       queueingPermits.acquire();
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
-      return Futures.immediateFailedCheckedFuture(e);
+      return Futures.immediateFailedFuture(e);
     }
     return super.submit(new RunnableWithPermitRelease(task), result);
   }
@@ -129,7 +129,7 @@ public class SemaphoredDelegatingExecutor extends
       queueingPermits.acquire();
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
-      return Futures.immediateFailedCheckedFuture(e);
+      return Futures.immediateFailedFuture(e);
     }
     return super.submit(new RunnableWithPermitRelease(task));
   }
@@ -173,10 +173,10 @@ public class SemaphoredDelegatingExecutor extends
   public String toString() {
     final StringBuilder sb = new StringBuilder(
         "SemaphoredDelegatingExecutor{");
-    sb.append("permitCount=").append(getPermitCount());
-    sb.append(", available=").append(getAvailablePermits());
-    sb.append(", waiting=").append(getWaitingCount());
-    sb.append('}');
+    sb.append("permitCount=").append(getPermitCount())
+        .append(", available=").append(getAvailablePermits())
+        .append(", waiting=").append(getWaitingCount())
+        .append('}');
     return sb.toString();
   }
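Futures.immediateFailedCheckedFuture went away together with Guava's CheckedFuture API; immediateFailedFuture is the replacement and yields a ListenableFuture that is already in the failed state. A small sketch of what callers observe (FailedFutureDemo is a hypothetical class, not part of the patch):

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import java.util.concurrent.ExecutionException;

    public class FailedFutureDemo {
      public static void main(String[] args) throws InterruptedException {
        ListenableFuture<Void> failed =
            Futures.immediateFailedFuture(new InterruptedException("queue wait aborted"));
        try {
          failed.get();
        } catch (ExecutionException e) {
          // The original exception is wrapped as the cause, exactly as with
          // a future that failed asynchronously.
          System.out.println("cause: " + e.getCause());
        }
      }
    }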
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java
index d23df79d27f..c6f8a959b1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java
@@ -172,7 +172,7 @@ public class ZKUtil {
       return valInConf;
     }
     String path = valInConf.substring(1).trim();
-    return Files.toString(new File(path), Charsets.UTF_8).trim();
+    return Files.asCharSource(new File(path), Charsets.UTF_8).read().trim();
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
index a93f9ea5e4e..4b8b02f0171 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
@@ -41,8 +41,8 @@ public class TestTableMapping {
   public void testResolve() throws IOException {
     File mapFile = File.createTempFile(getClass().getSimpleName() +
         ".testResolve", ".txt");
-    Files.write(hostName1 + " /rack1\n" +
-        hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
+    Files.asCharSink(mapFile, Charsets.UTF_8).write(
+        hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n");
     mapFile.deleteOnExit();
     TableMapping mapping = new TableMapping();
@@ -64,8 +64,8 @@ public class TestTableMapping {
   public void testTableCaching() throws IOException {
     File mapFile = File.createTempFile(getClass().getSimpleName() +
         ".testTableCaching", ".txt");
-    Files.write(hostName1 + " /rack1\n" +
-        hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
+    Files.asCharSink(mapFile, Charsets.UTF_8).write(
+        hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n");
     mapFile.deleteOnExit();
     TableMapping mapping = new TableMapping();
@@ -128,8 +128,8 @@ public class TestTableMapping {
   public void testClearingCachedMappings() throws IOException {
     File mapFile = File.createTempFile(getClass().getSimpleName() +
         ".testClearingCachedMappings", ".txt");
-    Files.write(hostName1 + " /rack1\n" +
-        hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
+    Files.asCharSink(mapFile, Charsets.UTF_8).write(
+        hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n");
     mapFile.deleteOnExit();
     TableMapping mapping = new TableMapping();
@@ -147,7 +147,7 @@ public class TestTableMapping {
     assertEquals("/rack1", result.get(0));
     assertEquals("/rack2", result.get(1));
 
-    Files.write("", mapFile, Charsets.UTF_8);
+    Files.asCharSink(mapFile, Charsets.UTF_8).write("");
 
     mapping.reloadCachedMappings();
@@ -166,7 +166,7 @@ public class TestTableMapping {
   public void testBadFile() throws IOException {
     File mapFile = File.createTempFile(getClass().getSimpleName() +
         ".testBadFile", ".txt");
-    Files.write("bad contents", mapFile, Charsets.UTF_8);
+    Files.asCharSink(mapFile, Charsets.UTF_8).write("bad contents");
     mapFile.deleteOnExit();
     TableMapping mapping = new TableMapping();
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
index f7a7f8b2ddf..016c589ae3a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
@@ -434,7 +434,8 @@ public class TestSecurityUtil {
     Configuration conf = new Configuration();
     File passwordTxtFile = File.createTempFile(
         getClass().getSimpleName() + ".testAuthAtPathNotation-", ".txt");
-    Files.write(ZK_AUTH_VALUE, passwordTxtFile, StandardCharsets.UTF_8);
+    Files.asCharSink(passwordTxtFile, StandardCharsets.UTF_8)
+        .write(ZK_AUTH_VALUE);
     try {
       conf.set(CommonConfigurationKeys.ZK_AUTH,
           "@" + passwordTxtFile.getAbsolutePath());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java
index 0e39ca94dea..3d985e40fb8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java
@@ -131,7 +131,7 @@ public class TestZKUtil {
     assertEquals("x", ZKUtil.resolveConfIndirection("x"));
 
     TEST_FILE.getParentFile().mkdirs();
-    Files.write("hello world", TEST_FILE, Charsets.UTF_8);
+    Files.asCharSink(TEST_FILE, Charsets.UTF_8).write("hello world");
     assertEquals("hello world", ZKUtil.resolveConfIndirection(
         "@" + TEST_FILE.getAbsolutePath()));
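The four files above migrate off Files.toString and the CharSequence overload of Files.write, both deprecated in recent Guava in favor of the asCharSource/asCharSink views. A sketch pairing the two new idioms (CharSourceSinkDemo is a hypothetical class, not part of the patch; java.nio StandardCharsets and Guava's Charsets constants are interchangeable here):

    import com.google.common.io.Files;
    import java.io.File;
    import java.nio.charset.StandardCharsets;

    public class CharSourceSinkDemo {
      public static void main(String[] args) throws Exception {
        File f = File.createTempFile("demo", ".txt");
        f.deleteOnExit();
        // Files.write(CharSequence, File, Charset)  ->  asCharSink(...).write(...)
        Files.asCharSink(f, StandardCharsets.UTF_8).write("host1 /rack1\n");
        // Files.toString(File, Charset)  ->  asCharSource(...).read()
        String contents = Files.asCharSource(f, StandardCharsets.UTF_8).read();
        System.out.print(contents);
      }
    }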
".testAuthAtPathNotation-", ".txt"); - Files.write(ZK_AUTH_VALUE, passwordTxtFile, StandardCharsets.UTF_8); + Files.asCharSink(passwordTxtFile, StandardCharsets.UTF_8) + .write(ZK_AUTH_VALUE); try { conf.set(CommonConfigurationKeys.ZK_AUTH, "@" + passwordTxtFile.getAbsolutePath()); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java index 0e39ca94dea..3d985e40fb8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java @@ -131,7 +131,7 @@ public class TestZKUtil { assertEquals("x", ZKUtil.resolveConfIndirection("x")); TEST_FILE.getParentFile().mkdirs(); - Files.write("hello world", TEST_FILE, Charsets.UTF_8); + Files.asCharSink(TEST_FILE, Charsets.UTF_8).write("hello world"); assertEquals("hello world", ZKUtil.resolveConfIndirection( "@" + TEST_FILE.getAbsolutePath())); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java index afc49c76f09..11c5cea215a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java @@ -196,7 +196,7 @@ public class LocalResolver extends RouterResolver { try { String nsId = nn.getNameserviceId(); String rpcAddress = nn.getRpcAddress(); - String hostname = HostAndPort.fromString(rpcAddress).getHostText(); + String hostname = HostAndPort.fromString(rpcAddress).getHost(); ret.put(hostname, nsId); if (hostname.equals(localHostname)) { ret.put(localIp, nsId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index f9d834ee434..746df6908f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -192,6 +192,12 @@ + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 30367357b82..53141eaea87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -446,7 +446,7 @@ public class IPCLoggerChannel implements AsyncLogger { public void onSuccess(Void t) { unreserveQueueSpace(data.length); } - }); + }, MoreExecutors.directExecutor()); } } return ret; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java index dee74e6fcfd..ef32eb11c35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java @@ -22,6 +22,7 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 30367357b82..53141eaea87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -446,7 +446,7 @@ public class IPCLoggerChannel implements AsyncLogger {
           public void onSuccess(Void t) {
             unreserveQueueSpace(data.length);
           }
-        });
+        }, MoreExecutors.directExecutor());
       }
     }
     return ret;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
index dee74e6fcfd..ef32eb11c35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
@@ -22,6 +22,7 @@ import java.util.Map.Entry;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.util.concurrent.MoreExecutors;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Timer;
@@ -80,7 +81,7 @@ class QuorumCall<KEY, RESULT> {
         public void onSuccess(RESULT res) {
           qr.addResult(e.getKey(), res);
         }
-      });
+      }, MoreExecutors.directExecutor());
     }
     return qr;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 382d827d9e9..b4922875bdd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -24,6 +24,7 @@ import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -224,12 +225,12 @@ public class DatasetVolumeChecker {
       Futures.addCallback(olf.get(),
           new ResultHandler(reference, healthyVolumes, failedVolumes,
               numVolumes, new Callback() {
-            @Override
-            public void call(Set<FsVolumeSpi> ignored1,
-                Set<FsVolumeSpi> ignored2) {
-              latch.countDown();
-            }
-          }));
+                @Override
+                public void call(Set<FsVolumeSpi> ignored1,
+                    Set<FsVolumeSpi> ignored2) {
+                  latch.countDown();
+                }
+              }), MoreExecutors.directExecutor());
     } else {
       IOUtils.cleanup(null, reference);
       if (numVolumes.decrementAndGet() == 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index bb1ed469696..88444539337 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -182,7 +182,7 @@ public class ThrottledAsyncChecker implements AsyncChecker {
               t, timer.monotonicNow()));
         }
       }
-    });
+    }, MoreExecutors.directExecutor());
   }
 
   /**
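In all of the hunks above, directExecutor() is the right choice because the callbacks only do bookkeeping (counting down a latch, bumping a counter). It does, however, run the callback on whichever thread completes the future, so a heavyweight callback would be better served by a dedicated executor. A sketch of that alternative (CallbackExecutorChoice is a hypothetical class; the pool sizes are illustrative):

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class CallbackExecutorChoice {
      public static void main(String[] args) throws InterruptedException {
        ListeningExecutorService checker =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        ExecutorService callbackPool = Executors.newSingleThreadExecutor();
        CountDownLatch done = new CountDownLatch(1);
        ListenableFuture<Long> check = checker.submit(() -> 42L);
        // A callback that does real work gets its own executor rather than
        // borrowing the completing thread, which directExecutor() would do.
        Futures.addCallback(check, new FutureCallback<Long>() {
          @Override public void onSuccess(Long v) {
            System.out.println("result " + v);
            done.countDown();
          }
          @Override public void onFailure(Throwable t) {
            t.printStackTrace();
            done.countDown();
          }
        }, callbackPool);
        done.await();
        checker.shutdown();
        callbackPool.shutdown();
      }
    }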
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
index 5336b7b3b42..ba83852702a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestThrottledAsyncCheckerTimeout.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
 import org.apache.hadoop.util.FakeTimer;
 import org.junit.Before;
 import org.junit.Rule;
@@ -101,7 +102,7 @@ public class TestThrottledAsyncCheckerTimeout {
         numCallbackInvocationsFailure.incrementAndGet();
         callbackResult.set(true);
       }
-    });
+    }, MoreExecutors.directExecutor());
 
     while (!callbackResult.get()) {
       // Wait for the callback
@@ -133,7 +134,8 @@ public class TestThrottledAsyncCheckerTimeout {
         .schedule(target, true);
     assertTrue(olf1.isPresent());
-    Futures.addCallback(olf1.get(), futureCallback);
+    Futures.addCallback(olf1.get(), futureCallback,
+        MoreExecutors.directExecutor());
 
     // Verify that timeout results in only 1 onFailure call and 0 onSuccess
     // calls.
@@ -149,7 +151,8 @@ public class TestThrottledAsyncCheckerTimeout {
         .schedule(target, true);
     assertTrue(olf2.isPresent());
-    Futures.addCallback(olf2.get(), futureCallback);
+    Futures.addCallback(olf2.get(), futureCallback,
+        MoreExecutors.directExecutor());
 
     // Verify that normal check (dummy) results in only 1 onSuccess call.
     // Number of times onFailure is invoked should remain the same i.e. 1.
@@ -187,7 +190,7 @@ public class TestThrottledAsyncCheckerTimeout {
         throwable[0] = t;
         callbackResult.set(true);
       }
-    });
+    }, MoreExecutors.directExecutor());
 
     while (!callbackResult.get()) {
       // Wait for the callback
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index a21a31d9e0f..1d3fa455288 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -163,7 +163,7 @@ public class TestDFSHAAdminMiniCluster {
     assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
 
     // Fencer has not run yet, since none of the above required fencing
-    assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
+    assertEquals("", Files.asCharSource(tmpFile, Charsets.UTF_8).read());
 
     // Test failover with fencer and forcefence option
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
@@ -171,8 +171,8 @@ public class TestDFSHAAdminMiniCluster {
     // The fence script should run with the configuration from the target
     // node, rather than the configuration from the fencing node. Strip
     // out any trailing spaces and CR/LFs which may be present on Windows.
-    String fenceCommandOutput =Files.toString(tmpFile, Charsets.UTF_8).
-        replaceAll(" *[\r\n]+", "");
+    String fenceCommandOutput = Files.asCharSource(tmpFile, Charsets.UTF_8)
+        .read().replaceAll(" *[\r\n]+", "");
     assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1", fenceCommandOutput);
     tmpFile.delete();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
index 1b1025e9da9..3869c493a06 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LocatedFileStatusFetcher.java
@@ -120,7 +120,8 @@ public class LocatedFileStatusFetcher {
       runningTasks.incrementAndGet();
       ListenableFuture future = exec
           .submit(new ProcessInitialInputPathCallable(p, conf, inputFilter));
-      Futures.addCallback(future, processInitialInputPathCallback);
+      Futures.addCallback(future, processInitialInputPathCallback,
+          MoreExecutors.directExecutor());
     }
 
     runningTasks.decrementAndGet();
@@ -267,7 +268,8 @@ public class LocatedFileStatusFetcher {
             ListenableFuture future = exec
                 .submit(new ProcessInputDirCallable(result.fs, fileStatus,
                     recursive, inputFilter));
-            Futures.addCallback(future, processInputDirCallback);
+            Futures.addCallback(future, processInputDirCallback,
+                MoreExecutors.directExecutor());
           }
         }
         decrementRunningAndCheckCompletion();
@@ -353,7 +355,8 @@ public class LocatedFileStatusFetcher {
           ListenableFuture future = exec
               .submit(new ProcessInputDirCallable(result.fs, matched,
                   recursive, inputFilter));
-          Futures.addCallback(future, processInputDirCallback);
+          Futures.addCallback(future, processInputDirCallback,
+              MoreExecutors.directExecutor());
         }
       }
       decrementRunningAndCheckCompletion();
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 4c03a23548e..2d8a06f73fb 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -88,7 +88,7 @@
     3.1.0-RC1
     2.1.7
-    <guava.version>11.0.2</guava.version>
+    <guava.version>27.0-jre</guava.version>
     4.0
     2.9.9
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 7d40c7041c3..fbea50b9d51 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -669,4 +669,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
index c6e85252593..60eb9b4019f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ZookeeperUtils.java
@@ -87,7 +87,7 @@ public class ZookeeperUtils {
   public static String buildHostsOnlyList(List<HostAndPort> hostAndPorts) {
     StringBuilder sb = new StringBuilder();
     for (HostAndPort hostAndPort : hostAndPorts) {
-      sb.append(hostAndPort.getHostText()).append(",");
+      sb.append(hostAndPort.getHost()).append(",");
     }
     if (sb.length() > 0) {
       sb.delete(sb.length() - 1, sb.length());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
index b4859af869e..bdf734114a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/ServiceTestUtils.java
@@ -386,7 +386,8 @@ public class ServiceTestUtils {
       fs = new SliderFileSystem(conf);
       fs.setAppDir(new Path(serviceBasePath.toString()));
     } catch (IOException e) {
-      Throwables.propagate(e);
+      Throwables.throwIfUnchecked(e);
+      throw new RuntimeException(e);
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index 0083f40a860..4f2fb1ac06d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -1172,7 +1172,8 @@ public class TestAMRestart extends ParameterizedSchedulerTestBase {
           return true;
         }
       } catch (Exception e) {
-        Throwables.propagate(e);
+        Throwables.throwIfUnchecked(e);
+        throw new RuntimeException(e);
       }
       return false;
     }, 2000, 200000);
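Throwables.propagate is deprecated in current Guava, and its documented replacement is exactly the two-line idiom used in the last two files: throwIfUnchecked rethrows RuntimeExceptions and Errors unchanged, and anything checked gets wrapped in a RuntimeException. A sketch of the idiom in isolation (PropagateIdiom and its methods are hypothetical, not part of the patch):

    import com.google.common.base.Throwables;
    import java.io.IOException;

    public class PropagateIdiom {
      static void run() {
        try {
          doIo();
        } catch (IOException e) {
          // Replacement for Throwables.propagate(e): rethrow unchanged if
          // unchecked, otherwise wrap in a RuntimeException.
          Throwables.throwIfUnchecked(e);
          throw new RuntimeException(e);
        }
      }

      static void doIo() throws IOException {
        throw new IOException("simulated failure");
      }

      public static void main(String[] args) {
        try {
          run();
        } catch (RuntimeException e) {
          System.out.println("wrapped: " + e.getCause());
        }
      }
    }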