HADOOP-17101. Replace Guava Function with Java8+ Function

Signed-off-by: Jonathan Eagles <jeagles@gmail.com>
This commit is contained in:
Ahmed Hussein 2020-07-15 09:53:18 -05:00 committed by Jonathan Eagles
parent 80046d1c8a
commit 98fcffe93f
7 changed files with 44 additions and 71 deletions

View File

@@ -123,7 +123,7 @@
<property name="regexp" value="true"/>
<property name="illegalPkgs" value="^sun\.[^.]+"/>
<property name="illegalClasses"
value="^com\.google\.common\.base\.(Optional)"/>
value="^com\.google\.common\.base\.(Optional|Function), ^com\.google\.common\.collect\.(ImmutableListMultimap)"/>
</module>
<module name="RedundantImport"/>
<module name="UnusedImports"/>

View File

@@ -17,15 +17,12 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Multimap;
import com.google.common.collect.UnmodifiableIterator;
import javax.annotation.Nullable;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Collection;
@@ -101,14 +98,16 @@ public class HostSet implements Iterable<InetSocketAddress> {
@Override
public String toString() {
StringBuilder sb = new StringBuilder("HostSet(");
Joiner.on(",").appendTo(sb, Iterators.transform(iterator(),
new Function<InetSocketAddress, String>() {
@Override
public String apply(@Nullable InetSocketAddress addr) {
assert addr != null;
return addr.getAddress().getHostAddress() + ":" + addr.getPort();
}
}));
return sb.append(")").toString();
Iterator<InetSocketAddress> iter = iterator();
String sep = "";
while (iter.hasNext()) {
InetSocketAddress addr = iter.next();
sb.append(sep);
sb.append(addr.getAddress().getHostAddress());
sb.append(':');
sb.append(addr.getPort());
sep = ",";
}
return sb.append(')').toString();
}
}

View File

@@ -24,8 +24,10 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.SortedSet;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -38,13 +40,9 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimaps;
import com.google.common.collect.Sets;
/**
@@ -634,7 +632,7 @@ public class JournalSet implements JournalManager {
*/
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
// Collect RemoteEditLogs available from each FileJournalManager
List<RemoteEditLog> allLogs = Lists.newArrayList();
List<RemoteEditLog> allLogs = new ArrayList<>();
for (JournalAndStream j : journals) {
if (j.getManager() instanceof FileJournalManager) {
FileJournalManager fjm = (FileJournalManager)j.getManager();
@@ -645,15 +643,17 @@ public class JournalSet implements JournalManager {
}
}
}
// Group logs by their starting txid
ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
final Map<Long, List<RemoteEditLog>> logsByStartTxId = new HashMap<>();
allLogs.forEach(input -> {
long key = RemoteEditLog.GET_START_TXID.apply(input);
logsByStartTxId.computeIfAbsent(key, k-> new ArrayList<>()).add(input);
});
long curStartTxId = fromTxId;
List<RemoteEditLog> logs = Lists.newArrayList();
List<RemoteEditLog> logs = new ArrayList<>();
while (true) {
ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
List<RemoteEditLog> logGroup =
logsByStartTxId.getOrDefault(curStartTxId, Collections.emptyList());
if (logGroup.isEmpty()) {
// we have a gap in logs - for example because we recovered some old
// storage directory with ancient logs. Clear out any logs we've

View File

@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.protocol;
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
import java.util.function.Function;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
public class RemoteEditLog implements Comparable<RemoteEditLog> {
@@ -82,16 +82,13 @@ public class RemoteEditLog implements Comparable<RemoteEditLog> {
}
/**
* Guava <code>Function</code> which applies {@link #getStartTxId()}
* Java <code>Function</code> which applies {@link #getStartTxId()}
*/
public static final Function<RemoteEditLog, Long> GET_START_TXID =
new Function<RemoteEditLog, Long>() {
@Override
public Long apply(RemoteEditLog log) {
log -> {
if (null == log) {
return HdfsServerConstants.INVALID_TXID;
}
return log.getStartTxId();
}
};
};
}

View File

@@ -37,9 +37,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.LongAccumulator;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -304,15 +302,11 @@ public abstract class HATestUtil {
public static <P extends FailoverProxyProvider<?>> void
setFailoverConfigurations(Configuration conf, String logicalName,
List<InetSocketAddress> nnAddresses, Class<P> classFPP) {
setFailoverConfigurations(conf, logicalName,
Iterables.transform(nnAddresses, new Function<InetSocketAddress, String>() {
// transform the inet address to a simple string
@Override
public String apply(InetSocketAddress addr) {
return "hdfs://" + addr.getHostName() + ":" + addr.getPort();
}
}), classFPP);
final List<String> addresses = new ArrayList();
nnAddresses.forEach(
addr -> addresses.add(
"hdfs://" + addr.getHostName() + ":" + addr.getPort()));
setFailoverConfigurations(conf, logicalName, addresses, classFPP);
}
public static <P extends FailoverProxyProvider<?>>

View File

@@ -23,8 +23,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import javax.annotation.Nullable;
import java.util.stream.Collectors;
import org.junit.Assert;
@@ -55,8 +54,6 @@ import org.junit.runners.Parameterized.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
@@ -403,13 +400,10 @@ public class TestFileInputFormat {
List<FileStatus> fetchedStatuses, final FileSystem localFs) {
Assert.assertEquals(expectedPaths.size(), fetchedStatuses.size());
Iterable<Path> fqExpectedPaths = Iterables.transform(expectedPaths,
new Function<Path, Path>() {
@Override
public Path apply(Path input) {
return localFs.makeQualified(input);
}
});
Iterable<Path> fqExpectedPaths =
expectedPaths.stream().map(
input -> localFs.makeQualified(input)).collect(Collectors.toList());
Set<Path> expectedPathSet = Sets.newHashSet(fqExpectedPaths);
for (FileStatus fileStatus : fetchedStatuses) {
@@ -424,13 +418,10 @@ public class TestFileInputFormat {
private void verifySplits(List<String> expected, List<InputSplit> splits) {
Iterable<String> pathsFromSplits = Iterables.transform(splits,
new Function<InputSplit, String>() {
@Override
public String apply(@Nullable InputSplit input) {
return ((FileSplit) input).getPath().toString();
}
});
Iterable<String> pathsFromSplits =
splits.stream().map(
input-> ((FileSplit) input).getPath().toString())
.collect(Collectors.toList());
Set<String> expectedSet = Sets.newHashSet(expected);
for (String splitPathString : pathsFromSplits) {

View File

@@ -22,7 +22,6 @@ import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.Range;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -35,8 +34,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProtoOrBuilder;
import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import org.apache.hadoop.thirdparty.protobuf.TextFormat;
@Private
@@ -88,13 +85,8 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
}
if (applicationStates != null && !applicationStates.isEmpty()) {
builder.clearApplicationStates();
builder.addAllApplicationStates(Iterables.transform(applicationStates,
new Function<YarnApplicationState, YarnApplicationStateProto>() {
@Override
public YarnApplicationStateProto apply(YarnApplicationState input) {
return ProtoUtils.convertToProtoFormat(input);
}
}));
applicationStates.forEach(input ->
builder.addApplicationStates(ProtoUtils.convertToProtoFormat(input)));
}
if (applicationTags != null && !applicationTags.isEmpty()) {
builder.clearApplicationTags();