fix concurrency bug when getting the host for a given request

It can happen that the list of healthy hosts is empty; in that case we pick one from the blacklist. But some other operation might have sneaked in and emptied the blacklist in the meantime, so we have to retry until we manage to get some host, either from the healthy list or from the blacklist.
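
For illustration only, here is a minimal sketch of the retry pattern this change introduces. The names (HostSelectorSketch, String hosts, a plain deadline map) are simplified assumptions and not the actual RestClient fields; the real change is in the diff below.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class HostSelectorSketch {
    private final Set<String> hosts;                      // all configured hosts (assumed non-empty)
    private final ConcurrentMap<String, Long> blacklist = new ConcurrentHashMap<>(); // host -> deadUntilNanos

    HostSelectorSketch(Set<String> hosts) {
        this.hosts = hosts;
    }

    Collection<String> nextHosts() {
        Collection<String> next = Collections.emptySet();
        do {
            // healthy = configured hosts minus the currently blacklisted ones
            List<String> healthy = new ArrayList<>(hosts);
            healthy.removeAll(blacklist.keySet());
            if (healthy.isEmpty()) {
                // fall back to a dead host, but another thread may have just
                // emptied the blacklist: if so, next stays empty and we loop
                List<String> dead = new ArrayList<>(blacklist.keySet());
                if (!dead.isEmpty()) {
                    next = Collections.singleton(dead.get(0));
                }
            } else {
                next = healthy;
            }
        } while (next.isEmpty());
        return next;
    }
}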
javanna 2016-07-22 19:03:45 +02:00 committed by Luca Cavanna
parent e6054a931e
commit 46cb3f36ff
1 changed file with 29 additions and 23 deletions

@@ -46,6 +46,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
@@ -373,30 +374,35 @@ public final class RestClient implements Closeable {
      * In case there are no healthy hosts available, or dead ones to be be retried, one dead host gets returned.
      */
     private Iterable<HttpHost> nextHost() {
-        Set<HttpHost> filteredHosts = new HashSet<>(hosts);
-        for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
-            if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
-                filteredHosts.remove(entry.getKey());
-            }
-        }
-        if (filteredHosts.isEmpty()) {
-            //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
-            List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
-            Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
-                @Override
-                public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
-                    return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
-                }
-            });
-            HttpHost deadHost = sortedHosts.get(0).getKey();
-            logger.trace("resurrecting host [" + deadHost + "]");
-            return Collections.singleton(deadHost);
-        }
-
-        List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
-        Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
-        return rotatedHosts;
+        Collection<HttpHost> nextHosts = Collections.emptySet();
+        do {
+            Set<HttpHost> filteredHosts = new HashSet<>(hosts);
+            for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
+                if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
+                    filteredHosts.remove(entry.getKey());
+                }
+            }
+            if (filteredHosts.isEmpty()) {
+                //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
+                List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
+                if (sortedHosts.size() > 0) {
+                    Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
+                        @Override
+                        public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
+                            return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
+                        }
+                    });
+                    HttpHost deadHost = sortedHosts.get(0).getKey();
+                    logger.trace("resurrecting host [" + deadHost + "]");
+                    nextHosts = Collections.singleton(deadHost);
+                }
+            } else {
+                List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
+                Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
+                nextHosts = rotatedHosts;
+            }
+        } while(nextHosts.isEmpty());
+        return nextHosts;
     }
     /**