HADOOP-16213. Update guava to 27.0-jre. Contributed by Gabor Bota.
This commit is contained in:
parent
a9b8310584
commit
fee1e67453
|
@ -409,6 +409,13 @@
|
|||
<Bug pattern="NP_NULL_PARAM_DEREF"/>
|
||||
</Match>
|
||||
|
||||
<!-- propertyName is checked with isNullOrEmpty (fix after guava 27) -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.conf.Configuration"/>
|
||||
<Method name="asXmlDocument"/>
|
||||
<Bug pattern="NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE"/>
|
||||
</Match>
|
||||
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.ipc.ExternalCall"/>
|
||||
<Field name="done"/>
|
||||
|
|
|
@ -69,6 +69,7 @@ import java.util.concurrent.TimeUnit;
|
|||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import javax.annotation.Nullable;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import javax.xml.stream.XMLInputFactory;
|
||||
|
@ -3445,7 +3446,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
|
|||
writeXml(new OutputStreamWriter(out, "UTF-8"));
|
||||
}
|
||||
|
||||
public void writeXml(Writer out) throws IOException {
|
||||
public void writeXml(@Nullable Writer out) throws IOException {
|
||||
writeXml(null, out);
|
||||
}
|
||||
|
||||
|
@ -3473,7 +3474,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
|
|||
* <p>
|
||||
* @param out the writer to write to.
|
||||
*/
|
||||
public void writeXml(String propertyName, Writer out)
|
||||
public void writeXml(@Nullable String propertyName, Writer out)
|
||||
throws IOException, IllegalArgumentException {
|
||||
Document doc = asXmlDocument(propertyName);
|
||||
|
||||
|
@ -3495,7 +3496,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
|
|||
/**
|
||||
* Return the XML DOM corresponding to this Configuration.
|
||||
*/
|
||||
private synchronized Document asXmlDocument(String propertyName)
|
||||
private synchronized Document asXmlDocument(@Nullable String propertyName)
|
||||
throws IOException, IllegalArgumentException {
|
||||
Document doc;
|
||||
try {
|
||||
|
|
|
@ -375,7 +375,7 @@ public class Groups {
|
|||
backgroundRefreshException.incrementAndGet();
|
||||
backgroundRefreshRunning.decrementAndGet();
|
||||
}
|
||||
});
|
||||
}, MoreExecutors.directExecutor());
|
||||
return listenableFuture;
|
||||
}
|
||||
|
||||
|
|
|
@ -107,7 +107,7 @@ public class SemaphoredDelegatingExecutor extends
|
|||
queueingPermits.acquire();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return Futures.immediateFailedCheckedFuture(e);
|
||||
return Futures.immediateFailedFuture(e);
|
||||
}
|
||||
return super.submit(new CallableWithPermitRelease<>(task));
|
||||
}
|
||||
|
@ -118,7 +118,7 @@ public class SemaphoredDelegatingExecutor extends
|
|||
queueingPermits.acquire();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return Futures.immediateFailedCheckedFuture(e);
|
||||
return Futures.immediateFailedFuture(e);
|
||||
}
|
||||
return super.submit(new RunnableWithPermitRelease(task), result);
|
||||
}
|
||||
|
@ -129,7 +129,7 @@ public class SemaphoredDelegatingExecutor extends
|
|||
queueingPermits.acquire();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return Futures.immediateFailedCheckedFuture(e);
|
||||
return Futures.immediateFailedFuture(e);
|
||||
}
|
||||
return super.submit(new RunnableWithPermitRelease(task));
|
||||
}
|
||||
|
@ -173,10 +173,10 @@ public class SemaphoredDelegatingExecutor extends
|
|||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder(
|
||||
"SemaphoredDelegatingExecutor{");
|
||||
sb.append("permitCount=").append(getPermitCount());
|
||||
sb.append(", available=").append(getAvailablePermits());
|
||||
sb.append(", waiting=").append(getWaitingCount());
|
||||
sb.append('}');
|
||||
sb.append("permitCount=").append(getPermitCount())
|
||||
.append(", available=").append(getAvailablePermits())
|
||||
.append(", waiting=").append(getWaitingCount())
|
||||
.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
|
|
|
@ -172,7 +172,7 @@ public class ZKUtil {
|
|||
return valInConf;
|
||||
}
|
||||
String path = valInConf.substring(1).trim();
|
||||
return Files.toString(new File(path), Charsets.UTF_8).trim();
|
||||
return Files.asCharSource(new File(path), Charsets.UTF_8).read().trim();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -41,8 +41,8 @@ public class TestTableMapping {
|
|||
public void testResolve() throws IOException {
|
||||
File mapFile = File.createTempFile(getClass().getSimpleName() +
|
||||
".testResolve", ".txt");
|
||||
Files.write(hostName1 + " /rack1\n" +
|
||||
hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
|
||||
Files.asCharSink(mapFile, Charsets.UTF_8).write(
|
||||
hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n");
|
||||
mapFile.deleteOnExit();
|
||||
TableMapping mapping = new TableMapping();
|
||||
|
||||
|
@ -64,8 +64,8 @@ public class TestTableMapping {
|
|||
public void testTableCaching() throws IOException {
|
||||
File mapFile = File.createTempFile(getClass().getSimpleName() +
|
||||
".testTableCaching", ".txt");
|
||||
Files.write(hostName1 + " /rack1\n" +
|
||||
hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
|
||||
Files.asCharSink(mapFile, Charsets.UTF_8).write(
|
||||
hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n");
|
||||
mapFile.deleteOnExit();
|
||||
TableMapping mapping = new TableMapping();
|
||||
|
||||
|
@ -128,8 +128,8 @@ public class TestTableMapping {
|
|||
public void testClearingCachedMappings() throws IOException {
|
||||
File mapFile = File.createTempFile(getClass().getSimpleName() +
|
||||
".testClearingCachedMappings", ".txt");
|
||||
Files.write(hostName1 + " /rack1\n" +
|
||||
hostName2 + "\t/rack2\n", mapFile, Charsets.UTF_8);
|
||||
Files.asCharSink(mapFile, Charsets.UTF_8).write(
|
||||
hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n");
|
||||
mapFile.deleteOnExit();
|
||||
|
||||
TableMapping mapping = new TableMapping();
|
||||
|
@ -147,7 +147,7 @@ public class TestTableMapping {
|
|||
assertEquals("/rack1", result.get(0));
|
||||
assertEquals("/rack2", result.get(1));
|
||||
|
||||
Files.write("", mapFile, Charsets.UTF_8);
|
||||
Files.asCharSink(mapFile, Charsets.UTF_8).write("");
|
||||
|
||||
mapping.reloadCachedMappings();
|
||||
|
||||
|
@ -166,7 +166,7 @@ public class TestTableMapping {
|
|||
public void testBadFile() throws IOException {
|
||||
File mapFile = File.createTempFile(getClass().getSimpleName() +
|
||||
".testBadFile", ".txt");
|
||||
Files.write("bad contents", mapFile, Charsets.UTF_8);
|
||||
Files.asCharSink(mapFile, Charsets.UTF_8).write("bad contents");
|
||||
mapFile.deleteOnExit();
|
||||
TableMapping mapping = new TableMapping();
|
||||
|
||||
|
|
|
@ -434,7 +434,8 @@ public class TestSecurityUtil {
|
|||
Configuration conf = new Configuration();
|
||||
File passwordTxtFile = File.createTempFile(
|
||||
getClass().getSimpleName() + ".testAuthAtPathNotation-", ".txt");
|
||||
Files.write(ZK_AUTH_VALUE, passwordTxtFile, StandardCharsets.UTF_8);
|
||||
Files.asCharSink(passwordTxtFile, StandardCharsets.UTF_8)
|
||||
.write(ZK_AUTH_VALUE);
|
||||
try {
|
||||
conf.set(CommonConfigurationKeys.ZK_AUTH,
|
||||
"@" + passwordTxtFile.getAbsolutePath());
|
||||
|
|
|
@ -131,7 +131,7 @@ public class TestZKUtil {
|
|||
assertEquals("x", ZKUtil.resolveConfIndirection("x"));
|
||||
|
||||
TEST_FILE.getParentFile().mkdirs();
|
||||
Files.write("hello world", TEST_FILE, Charsets.UTF_8);
|
||||
Files.asCharSink(TEST_FILE, Charsets.UTF_8).write("hello world");
|
||||
assertEquals("hello world", ZKUtil.resolveConfIndirection(
|
||||
"@" + TEST_FILE.getAbsolutePath()));
|
||||
|
||||
|
|
|
@ -196,7 +196,7 @@ public class LocalResolver extends RouterResolver<String, String> {
|
|||
try {
|
||||
String nsId = nn.getNameserviceId();
|
||||
String rpcAddress = nn.getRpcAddress();
|
||||
String hostname = HostAndPort.fromString(rpcAddress).getHostText();
|
||||
String hostname = HostAndPort.fromString(rpcAddress).getHost();
|
||||
ret.put(hostname, nsId);
|
||||
if (hostname.equals(localHostname)) {
|
||||
ret.put(localIp, nsId);
|
||||
|
|
|
@ -192,6 +192,12 @@
|
|||
<Bug pattern="NP_NULL_PARAM_DEREF" />
|
||||
</Match>
|
||||
|
||||
<!-- guava 27.0 update: @Nullable is not detected, however it's there -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.hdfs.qjournal.server.Journal" />
|
||||
<Method name="getPersistedPaxosData" />
|
||||
<Bug pattern="NP_NULL_PARAM_DEREF" />
|
||||
</Match>
|
||||
|
||||
<!-- guava 27.0 update: @Nullable is not detected, however it's there -->
|
||||
<Match>
|
||||
|
|
|
@ -446,7 +446,7 @@ public class IPCLoggerChannel implements AsyncLogger {
|
|||
public void onSuccess(Void t) {
|
||||
unreserveQueueSpace(data.length);
|
||||
}
|
||||
});
|
||||
}, MoreExecutors.directExecutor());
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
|
|
|
@ -22,6 +22,7 @@ import java.util.Map.Entry;
|
|||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import com.google.common.util.concurrent.MoreExecutors;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.util.StopWatch;
|
||||
import org.apache.hadoop.util.Timer;
|
||||
|
@ -80,7 +81,7 @@ class QuorumCall<KEY, RESULT> {
|
|||
public void onSuccess(RESULT res) {
|
||||
qr.addResult(e.getKey(), res);
|
||||
}
|
||||
});
|
||||
}, MoreExecutors.directExecutor());
|
||||
}
|
||||
return qr;
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ import com.google.common.collect.Sets;
|
|||
import com.google.common.util.concurrent.FutureCallback;
|
||||
import com.google.common.util.concurrent.Futures;
|
||||
import com.google.common.util.concurrent.ListenableFuture;
|
||||
import com.google.common.util.concurrent.MoreExecutors;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
|
@ -229,7 +230,7 @@ public class DatasetVolumeChecker {
|
|||
Set<FsVolumeSpi> ignored2) {
|
||||
latch.countDown();
|
||||
}
|
||||
}));
|
||||
}), MoreExecutors.directExecutor());
|
||||
} else {
|
||||
IOUtils.cleanup(null, reference);
|
||||
if (numVolumes.decrementAndGet() == 0) {
|
||||
|
|
|
@ -182,7 +182,7 @@ public class ThrottledAsyncChecker<K, V> implements AsyncChecker<K, V> {
|
|||
t, timer.monotonicNow()));
|
||||
}
|
||||
}
|
||||
});
|
||||
}, MoreExecutors.directExecutor());
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
|
|||
import com.google.common.util.concurrent.FutureCallback;
|
||||
import com.google.common.util.concurrent.Futures;
|
||||
import com.google.common.util.concurrent.ListenableFuture;
|
||||
import com.google.common.util.concurrent.MoreExecutors;
|
||||
import org.apache.hadoop.util.FakeTimer;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
|
@ -101,7 +102,7 @@ public class TestThrottledAsyncCheckerTimeout {
|
|||
numCallbackInvocationsFailure.incrementAndGet();
|
||||
callbackResult.set(true);
|
||||
}
|
||||
});
|
||||
}, MoreExecutors.directExecutor());
|
||||
|
||||
while (!callbackResult.get()) {
|
||||
// Wait for the callback
|
||||
|
@ -133,7 +134,8 @@ public class TestThrottledAsyncCheckerTimeout {
|
|||
.schedule(target, true);
|
||||
|
||||
assertTrue(olf1.isPresent());
|
||||
Futures.addCallback(olf1.get(), futureCallback);
|
||||
Futures.addCallback(olf1.get(), futureCallback,
|
||||
MoreExecutors.directExecutor());
|
||||
|
||||
// Verify that timeout results in only 1 onFailure call and 0 onSuccess
|
||||
// calls.
|
||||
|
@ -149,7 +151,8 @@ public class TestThrottledAsyncCheckerTimeout {
|
|||
.schedule(target, true);
|
||||
|
||||
assertTrue(olf2.isPresent());
|
||||
Futures.addCallback(olf2.get(), futureCallback);
|
||||
Futures.addCallback(olf2.get(), futureCallback,
|
||||
MoreExecutors.directExecutor());
|
||||
|
||||
// Verify that normal check (dummy) results in only 1 onSuccess call.
|
||||
// Number of times onFailure is invoked should remain the same i.e. 1.
|
||||
|
@ -187,7 +190,7 @@ public class TestThrottledAsyncCheckerTimeout {
|
|||
throwable[0] = t;
|
||||
callbackResult.set(true);
|
||||
}
|
||||
});
|
||||
}, MoreExecutors.directExecutor());
|
||||
|
||||
while (!callbackResult.get()) {
|
||||
// Wait for the callback
|
||||
|
|
|
@ -163,7 +163,7 @@ public class TestDFSHAAdminMiniCluster {
|
|||
assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
|
||||
|
||||
// Fencer has not run yet, since none of the above required fencing
|
||||
assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
|
||||
assertEquals("", Files.asCharSource(tmpFile, Charsets.UTF_8).read());
|
||||
|
||||
// Test failover with fencer and forcefence option
|
||||
assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
|
||||
|
@ -171,8 +171,8 @@ public class TestDFSHAAdminMiniCluster {
|
|||
// The fence script should run with the configuration from the target
|
||||
// node, rather than the configuration from the fencing node. Strip
|
||||
// out any trailing spaces and CR/LFs which may be present on Windows.
|
||||
String fenceCommandOutput =Files.toString(tmpFile, Charsets.UTF_8).
|
||||
replaceAll(" *[\r\n]+", "");
|
||||
String fenceCommandOutput = Files.asCharSource(tmpFile, Charsets.UTF_8)
|
||||
.read().replaceAll(" *[\r\n]+", "");
|
||||
assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1", fenceCommandOutput);
|
||||
tmpFile.delete();
|
||||
|
||||
|
|
|
@ -120,7 +120,8 @@ public class LocatedFileStatusFetcher {
|
|||
runningTasks.incrementAndGet();
|
||||
ListenableFuture<ProcessInitialInputPathCallable.Result> future = exec
|
||||
.submit(new ProcessInitialInputPathCallable(p, conf, inputFilter));
|
||||
Futures.addCallback(future, processInitialInputPathCallback);
|
||||
Futures.addCallback(future, processInitialInputPathCallback,
|
||||
MoreExecutors.directExecutor());
|
||||
}
|
||||
|
||||
runningTasks.decrementAndGet();
|
||||
|
@ -267,7 +268,8 @@ public class LocatedFileStatusFetcher {
|
|||
ListenableFuture<ProcessInputDirCallable.Result> future = exec
|
||||
.submit(new ProcessInputDirCallable(result.fs, fileStatus,
|
||||
recursive, inputFilter));
|
||||
Futures.addCallback(future, processInputDirCallback);
|
||||
Futures.addCallback(future, processInputDirCallback,
|
||||
MoreExecutors.directExecutor());
|
||||
}
|
||||
}
|
||||
decrementRunningAndCheckCompletion();
|
||||
|
@ -353,7 +355,8 @@ public class LocatedFileStatusFetcher {
|
|||
ListenableFuture<ProcessInputDirCallable.Result> future = exec
|
||||
.submit(new ProcessInputDirCallable(result.fs, matched,
|
||||
recursive, inputFilter));
|
||||
Futures.addCallback(future, processInputDirCallback);
|
||||
Futures.addCallback(future, processInputDirCallback,
|
||||
MoreExecutors.directExecutor());
|
||||
}
|
||||
}
|
||||
decrementRunningAndCheckCompletion();
|
||||
|
|
|
@ -88,7 +88,7 @@
|
|||
<spotbugs.version>3.1.0-RC1</spotbugs.version>
|
||||
<dnsjava.version>2.1.7</dnsjava.version>
|
||||
|
||||
<guava.version>11.0.2</guava.version>
|
||||
<guava.version>27.0-jre</guava.version>
|
||||
<guice.version>4.0</guice.version>
|
||||
<joda-time.version>2.9.9</joda-time.version>
|
||||
|
||||
|
|
|
@ -669,4 +669,39 @@
|
|||
<Bug pattern="EI_EXPOSE_REP" />
|
||||
</Match>
|
||||
|
||||
<!-- The called method signature is String emptyToNull(@Nullable String string) in guava 27, so this should be ignored -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService"/>
|
||||
<Method name="getHealthReport" />
|
||||
<Bug pattern="NP_NULL_PARAM_DEREF"/>
|
||||
</Match>
|
||||
|
||||
<!-- The variable is not used, but it's defined for the document model. -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.document.entity.TimelineEventSubDoc"/>
|
||||
<Method name="setValid" />
|
||||
<Bug pattern="URF_UNREAD_FIELD"/>
|
||||
</Match>
|
||||
|
||||
<!-- The variable is not used, but it's defined for the document model. -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.yarn.server.timelineservice.documentstore.collection.document.entity.TimelineMetricSubDoc"/>
|
||||
<Method name="setValid" />
|
||||
<Bug pattern="URF_UNREAD_FIELD"/>
|
||||
</Match>
|
||||
|
||||
<!-- The called method signature is public boolean set(@Nullable V value) in guava 27, so this should be ignored -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore$UpdateAppTransition"/>
|
||||
<Method name="transition" />
|
||||
<Bug pattern="NP_NONNULL_PARAM_VIOLATION"/>
|
||||
</Match>
|
||||
|
||||
<!-- The called method signature is public boolean set(@Nullable V value) in guava 27, so this should be ignored -->
|
||||
<Match>
|
||||
<Class name="org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler"/>
|
||||
<Method name="updateApplicationPriority" />
|
||||
<Bug pattern="NP_NONNULL_PARAM_VIOLATION"/>
|
||||
</Match>
|
||||
|
||||
</FindBugsFilter>
|
||||
|
|
|
@ -87,7 +87,7 @@ public class ZookeeperUtils {
|
|||
public static String buildHostsOnlyList(List<HostAndPort> hostAndPorts) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (HostAndPort hostAndPort : hostAndPorts) {
|
||||
sb.append(hostAndPort.getHostText()).append(",");
|
||||
sb.append(hostAndPort.getHost()).append(",");
|
||||
}
|
||||
if (sb.length() > 0) {
|
||||
sb.delete(sb.length() - 1, sb.length());
|
||||
|
|
|
@ -386,7 +386,8 @@ public class ServiceTestUtils {
|
|||
fs = new SliderFileSystem(conf);
|
||||
fs.setAppDir(new Path(serviceBasePath.toString()));
|
||||
} catch (IOException e) {
|
||||
Throwables.propagate(e);
|
||||
Throwables.throwIfUnchecked(e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1172,7 +1172,8 @@ public class TestAMRestart extends ParameterizedSchedulerTestBase {
|
|||
return true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
Throwables.propagate(e);
|
||||
Throwables.throwIfUnchecked(e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
return false;
|
||||
}, 2000, 200000);
|
||||
|
|
Loading…
Reference in New Issue