HADOOP-18266. Using HashSet/TreeSet Constructor for hadoop-common (#4365)

* HADOOP-18266. Using HashSet/TreeSet Constructor for hadoop-common

Co-authored-by: Deb <dbsamrat@3c22fba1b03f.ant.amazon.com>

parent efc2761d32
commit 477b67a335
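The pattern applied across every file below is the same: explicit type arguments on HashSet, TreeSet, and ArrayList constructor calls are replaced with the Java 7 diamond operator, and the Guava-derived org.apache.hadoop.util.Sets.newHashSet()/Sets.newTreeSet() helpers give way to plain JDK constructors. A minimal standalone sketch of the before/after (class and variable names here are illustrative, not taken from the patch):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.TreeSet;

    public class DiamondOperatorExample {
      public static void main(String[] args) {
        // Before: the element type is spelled out on both sides.
        Set<String> verbose = new HashSet<String>();

        // After: the diamond operator infers <String> from the
        // declaration; runtime behavior is identical.
        Set<String> concise = new HashSet<>();

        // Sets.newHashSet("k1", "k2") has a JDK-only equivalent:
        Set<String> fromElements = new HashSet<>(Arrays.asList("k1", "k2"));

        // Sets.newTreeSet(collection) likewise becomes:
        Set<String> sorted = new TreeSet<>(Arrays.asList("k2", "k1"));

        System.out.println(verbose.equals(concise)); // true: both empty
        System.out.println(fromElements);            // iteration order not guaranteed
        System.out.println(sorted);                  // [k1, k2]
      }
    }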
@@ -236,7 +236,7 @@ public class KerberosUtil {
    */
   static final String[] getPrincipalNames(String keytabFileName) throws IOException {
     Keytab keytab = Keytab.loadKeytab(new File(keytabFileName));
-    Set<String> principals = new HashSet<String>();
+    Set<String> principals = new HashSet<>();
     List<PrincipalName> entries = keytab.getPrincipals();
     for (PrincipalName entry : entries) {
       principals.add(entry.getName().replace("\\", "/"));
@@ -108,9 +108,9 @@ public class KerberosTestUtils {
   public static <T> T doAs(String principal, final Callable<T> callable) throws Exception {
     LoginContext loginContext = null;
     try {
-      Set<Principal> principals = new HashSet<Principal>();
+      Set<Principal> principals = new HashSet<>();
       principals.add(new KerberosPrincipal(KerberosTestUtils.getClientPrincipal()));
-      Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());
+      Subject subject = new Subject(false, principals, new HashSet<>(), new HashSet<>());
       loginContext = new LoginContext("", subject, null, new KerberosConfiguration(principal));
       loginContext.login();
       subject = loginContext.getSubject();
@@ -774,7 +774,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   private void handleDeprecation() {
     LOG.debug("Handling deprecation for all properties in config...");
     DeprecationContext deprecations = deprecationContext.get();
-    Set<Object> keys = new HashSet<Object>();
+    Set<Object> keys = new HashSet<>();
     keys.addAll(getProps().keySet());
     for (Object item: keys) {
       LOG.debug("Handling deprecation for " + (String)item);
@@ -2372,8 +2372,7 @@ public class FileContext implements PathCapabilities {
   Set<AbstractFileSystem> resolveAbstractFileSystems(final Path f)
       throws IOException {
     final Path absF = fixRelativePart(f);
-    final HashSet<AbstractFileSystem> result
-        = new HashSet<AbstractFileSystem>();
+    final HashSet<AbstractFileSystem> result = new HashSet<>();
     new FSLinkResolver<Void>() {
       @Override
       public Void next(final AbstractFileSystem fs, final Path p)
@@ -76,7 +76,7 @@ class SFTPConnectionPool {
     ConnectionInfo info = con2infoMap.get(channel);
     HashSet<ChannelSftp> cons = idleConnections.get(info);
     if (cons == null) {
-      cons = new HashSet<ChannelSftp>();
+      cons = new HashSet<>();
       idleConnections.put(info, cons);
     }
     cons.add(channel);
@@ -94,7 +94,7 @@ class SFTPConnectionPool {
     Set<ChannelSftp> cons = con2infoMap.keySet();
     if (cons != null && cons.size() > 0) {
       // make a copy since we need to modify the underlying Map
-      Set<ChannelSftp> copy = new HashSet<ChannelSftp>(cons);
+      Set<ChannelSftp> copy = new HashSet<>(cons);
       // Initiate disconnect from all outstanding connections
       for (ChannelSftp con : copy) {
         try {
@@ -165,7 +165,7 @@ public class CommandFormat {
    * @return Set{@literal <}String{@literal >} of the enabled options
    */
   public Set<String> getOpts() {
-    Set<String> optSet = new HashSet<String>();
+    Set<String> optSet = new HashSet<>();
     for (Map.Entry<String, Boolean> entry : options.entrySet()) {
       if (entry.getValue()) {
         optSet.add(entry.getKey());
@@ -96,7 +96,7 @@ public class Find extends FsCommand {
   private Expression rootExpression;
 
   /** Set of path items returning a {@link Result#STOP} result. */
-  private HashSet<Path> stopPaths = new HashSet<Path>();
+  private HashSet<Path> stopPaths = new HashSet<>();
 
   /** Register the expressions with the expression factory. */
   private static void registerExpressions(ExpressionFactory factory) {
@@ -1037,7 +1037,7 @@ public class ViewFileSystem extends FileSystem {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
         fsState.getMountPoints();
     Map<String, FileSystem> fsMap = initializeMountedFileSystems(mountPoints);
-    Set<FileSystem> children = new HashSet<FileSystem>();
+    Set<FileSystem> children = new HashSet<>();
     for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) {
       FileSystem targetFs = fsMap.get(mountPoint.src);
       children.addAll(Arrays.asList(targetFs.getChildFileSystems()));
@@ -109,7 +109,7 @@ public class CodecPool {
     synchronized (pool) {
       codecSet = pool.get(codecClass);
       if (codecSet == null) {
-        codecSet = new HashSet<T>();
+        codecSet = new HashSet<>();
         pool.put(codecClass, codecSet);
       }
     }
@@ -64,7 +64,7 @@ public class AvroReflectSerialization extends AvroSerialization<Object>{
 
   private void getPackages() {
     String[] pkgList = getConf().getStrings(AVRO_REFLECT_PACKAGES);
-    packages = new HashSet<String>();
+    packages = new HashSet<>();
     if (pkgList != null) {
       for (String pkg : pkgList) {
         packages.add(pkg.trim());
@@ -69,7 +69,7 @@ public class ProtocolProxy<T> {
     }
     int[] serverMethodsCodes = serverInfo.getMethods();
     if (serverMethodsCodes != null) {
-      serverMethods = new HashSet<Integer>(serverMethodsCodes.length);
+      serverMethods = new HashSet<>(serverMethodsCodes.length);
       for (int m : serverMethodsCodes) {
         this.serverMethods.add(Integer.valueOf(m));
       }
@@ -19,11 +19,10 @@
 package org.apache.hadoop.metrics2.lib;
 
 import java.lang.reflect.Method;
+import java.util.HashSet;
 import java.util.Set;
 
 import static org.apache.hadoop.util.Preconditions.*;
-import org.apache.hadoop.util.Sets;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -44,7 +43,7 @@ import org.slf4j.LoggerFactory;
 public class MutableRates extends MutableMetric {
   static final Logger LOG = LoggerFactory.getLogger(MutableRates.class);
   private final MetricsRegistry registry;
-  private final Set<Class<?>> protocolCache = Sets.newHashSet();
+  private final Set<Class<?>> protocolCache = new HashSet<>();
 
   MutableRates(MetricsRegistry registry) {
     this.registry = checkNotNull(registry, "metrics registry");
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.metrics2.lib;
 
-import org.apache.hadoop.util.Sets;
 import java.lang.ref.WeakReference;
 import java.lang.reflect.Method;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
@@ -52,7 +52,7 @@ public class MutableRatesWithAggregation extends MutableMetric {
       LoggerFactory.getLogger(MutableRatesWithAggregation.class);
   private final Map<String, MutableRate> globalMetrics =
       new ConcurrentHashMap<>();
-  private final Set<Class<?>> protocolCache = Sets.newHashSet();
+  private final Set<Class<?>> protocolCache = new HashSet<>();
 
   private final ConcurrentLinkedDeque<WeakReference<ConcurrentMap<String, ThreadSafeSampleStat>>>
       weakReferenceQueue = new ConcurrentLinkedDeque<>();
@@ -115,7 +115,7 @@ public abstract class AbstractDNSToSwitchMapping
     builder.append("Mapping: ").append(toString()).append("\n");
     if (rack != null) {
       builder.append("Map:\n");
-      Set<String> switches = new HashSet<String>();
+      Set<String> switches = new HashSet<>();
       for (Map.Entry<String, String> entry : rack.entrySet()) {
         builder.append(" ")
             .append(entry.getKey())
@@ -1086,7 +1086,7 @@ public class NetworkTopology {
     String rackname = node.getNetworkLocation();
     Set<String> nodes = rackMap.get(rackname);
     if (nodes == null) {
-      nodes = new HashSet<String>();
+      nodes = new HashSet<>();
     }
     if (!decommissionNodes.contains(node.getName())) {
       nodes.add(node.getName());
@@ -109,7 +109,7 @@ public class CompositeGroupsMapping
 
   @Override
   public synchronized Set<String> getGroupsSet(String user) throws IOException {
-    Set<String> groupSet = new HashSet<String>();
+    Set<String> groupSet = new HashSet<>();
 
     Set<String> groups = null;
     for (GroupMappingServiceProvider provider : providersList) {
@@ -65,7 +65,7 @@ public class NetgroupCache {
   }
 
   private static Set<String> getGroups() {
-    Set<String> allGroups = new HashSet<String> ();
+    Set<String> allGroups = new HashSet<>();
     for (Set<String> userGroups : userToNetgroupsMap.values()) {
       allGroups.addAll(userGroups);
     }
@@ -105,8 +105,8 @@ public class AccessControlList implements Writable {
    * @param userGroupStrings build ACL from array of Strings
    */
   private void buildACL(String[] userGroupStrings) {
-    users = new HashSet<String>();
-    groups = new HashSet<String>();
+    users = new HashSet<>();
+    groups = new HashSet<>();
     for (String aclPart : userGroupStrings) {
       if (aclPart != null && isWildCardACLValue(aclPart)) {
         allAllowed = true;
@@ -33,7 +33,7 @@ public class ProxyServers {
   }
 
   public static void refresh(Configuration conf){
-    Collection<String> tempServers = new HashSet<String>();
+    Collection<String> tempServers = new HashSet<>();
     // trusted proxy servers such as http proxies
     for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
       InetSocketAddress addr = new InetSocketAddress(host, 0);
@@ -94,7 +94,7 @@ public class RestCsrfPreventionFilter implements Filter {
 
   void parseBrowserUserAgents(String userAgents) {
     String[] agentsArray = userAgents.split(",");
-    browserUserAgents = new HashSet<Pattern>();
+    browserUserAgents = new HashSet<>();
     for (String patternString : agentsArray) {
       browserUserAgents.add(Pattern.compile(patternString));
     }
@@ -102,7 +102,7 @@ public class RestCsrfPreventionFilter implements Filter {
 
   void parseMethodsToIgnore(String mti) {
     String[] methods = mti.split(",");
-    methodsToIgnore = new HashSet<String>();
+    methodsToIgnore = new HashSet<>();
     for (int i = 0; i < methods.length; i++) {
       methodsToIgnore.add(methods[i]);
     }
@@ -716,7 +716,7 @@ extends AbstractDelegationTokenIdentifier>
   /** Remove expired delegation tokens from cache */
   private void removeExpiredToken() throws IOException {
     long now = Time.now();
-    Set<TokenIdent> expiredTokens = new HashSet<TokenIdent>();
+    Set<TokenIdent> expiredTokens = new HashSet<>();
     synchronized (this) {
       Iterator<Map.Entry<TokenIdent, DelegationTokenInformation>> i =
           currentTokens.entrySet().iterator();
@@ -89,7 +89,7 @@ public abstract class DelegationTokenAuthenticationHandler
 
   public static final String TOKEN_KIND = PREFIX + "token-kind";
 
-  private static final Set<String> DELEGATION_TOKEN_OPS = new HashSet<String>();
+  private static final Set<String> DELEGATION_TOKEN_OPS = new HashSet<>();
 
   public static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
       "hadoop.security.delegation-token.ugi";
@@ -58,7 +58,7 @@ public class FileBasedIPList implements IPList {
       lines = null;
     }
     if (lines != null) {
-      addressList = new MachineList(new HashSet<String>(Arrays.asList(lines)));
+      addressList = new MachineList(new HashSet<>(Arrays.asList(lines)));
     } else {
       addressList = null;
     }
@@ -135,7 +135,7 @@ public class HostsFileReader {
     if (xmlInput) {
       readXmlFileToMapWithFileInputStream(type, filename, inputStream, map);
     } else {
-      HashSet<String> nodes = new HashSet<String>();
+      HashSet<String> nodes = new HashSet<>();
       readFileToSetWithFileInputStream(type, filename, inputStream, nodes);
       for (String node : nodes) {
         map.put(node, null);
@@ -249,7 +249,7 @@ public final class ShutdownHookManager {
   }
 
   private final Set<HookEntry> hooks =
-      Collections.synchronizedSet(new HashSet<HookEntry>());
+      Collections.synchronizedSet(new HashSet<>());
 
   private AtomicBoolean shutdownInProgress = new AtomicBoolean(false);
 
@@ -80,9 +80,9 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     };
 
     // Initialize used variables
-    xmlPropsToSkipCompare = new HashSet<String>();
-    xmlPrefixToSkipCompare = new HashSet<String>();
-    configurationPropsToSkipCompare = new HashSet<String>();
+    xmlPropsToSkipCompare = new HashSet<>();
+    xmlPrefixToSkipCompare = new HashSet<>();
+    configurationPropsToSkipCompare = new HashSet<>();
 
     // Set error modes
     errorIfMissingConfigProps = true;
@@ -18,6 +18,8 @@
 package org.apache.hadoop.crypto.key;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Queue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeoutException;
@@ -32,7 +34,6 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
-import org.apache.hadoop.util.Sets;
 
 public class TestValueQueue {
   Logger LOG = LoggerFactory.getLogger(TestValueQueue.class);
@@ -103,10 +104,10 @@ public class TestValueQueue {
     Assert.assertEquals(5, fillInfos[0].num);
     Assert.assertEquals(5, fillInfos[1].num);
     Assert.assertEquals(5, fillInfos[2].num);
-    Assert.assertEquals(Sets.newHashSet("k1", "k2", "k3"),
-        Sets.newHashSet(fillInfos[0].key,
+    Assert.assertEquals(new HashSet<>(Arrays.asList("k1", "k2", "k3")),
+        new HashSet<>(Arrays.asList(fillInfos[0].key,
             fillInfos[1].key,
-            fillInfos[2].key));
+            fillInfos[2].key)));
     vq.shutdown();
   }
 
@@ -39,6 +39,8 @@ import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 
@@ -65,7 +67,6 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
-import org.apache.hadoop.util.Sets;
 
 public class TestLoadBalancingKMSClientProvider {
 
@@ -86,8 +87,8 @@ public class TestLoadBalancingKMSClientProvider {
     KMSClientProvider[] providers =
         ((LoadBalancingKMSClientProvider) kp).getProviders();
     assertEquals(1, providers.length);
-    assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/"),
-        Sets.newHashSet(providers[0].getKMSUrl()));
+    assertEquals(new HashSet<>(Collections.singleton("http://host1:9600/kms/foo/v1/")),
+        new HashSet<>(Collections.singleton(providers[0].getKMSUrl())));
 
     kp = new KMSClientProvider.Factory().createProvider(new URI(
         "kms://http@host1;host2;host3:9600/kms/foo"), conf);
@@ -95,12 +96,12 @@ public class TestLoadBalancingKMSClientProvider {
     providers =
         ((LoadBalancingKMSClientProvider) kp).getProviders();
     assertEquals(3, providers.length);
-    assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/",
+    assertEquals(new HashSet<>(Arrays.asList("http://host1:9600/kms/foo/v1/",
         "http://host2:9600/kms/foo/v1/",
-        "http://host3:9600/kms/foo/v1/"),
-        Sets.newHashSet(providers[0].getKMSUrl(),
+        "http://host3:9600/kms/foo/v1/")),
+        new HashSet<>(Arrays.asList(providers[0].getKMSUrl(),
            providers[1].getKMSUrl(),
-           providers[2].getKMSUrl()));
+           providers[2].getKMSUrl())));
 
     kp = new KMSClientProvider.Factory().createProvider(new URI(
         "kms://http@host1;host2;host3:9600/kms/foo"), conf);
@@ -108,12 +109,12 @@ public class TestLoadBalancingKMSClientProvider {
     providers =
         ((LoadBalancingKMSClientProvider) kp).getProviders();
     assertEquals(3, providers.length);
-    assertEquals(Sets.newHashSet("http://host1:9600/kms/foo/v1/",
+    assertEquals(new HashSet<>(Arrays.asList("http://host1:9600/kms/foo/v1/",
        "http://host2:9600/kms/foo/v1/",
-        "http://host3:9600/kms/foo/v1/"),
-        Sets.newHashSet(providers[0].getKMSUrl(),
+        "http://host3:9600/kms/foo/v1/")),
+        new HashSet<>(Arrays.asList(providers[0].getKMSUrl(),
            providers[1].getKMSUrl(),
-           providers[2].getKMSUrl()));
+           providers[2].getKMSUrl())));
   }
 
   @Test
@@ -43,9 +43,9 @@ public class TestCommandFormat {
 
   @Before
   public void setUp() {
-    args = new ArrayList<String>();
-    expectedOpts = new HashSet<String>();
-    expectedArgs = new ArrayList<String>();
+    args = new ArrayList<>();
+    expectedOpts = new HashSet<>();
+    expectedArgs = new ArrayList<>();
   }
 
   @Test
@@ -205,6 +205,6 @@ public class TestCommandFormat {
   }
 
   private static Set<String> setOf(String ... objects) {
-    return new HashSet<String>(listOf(objects));
+    return new HashSet<>(listOf(objects));
   }
 }
@@ -246,7 +246,7 @@ public class TestHarFileSystemBasics {
     // test.har has the following contents:
     //   dir1/1.txt
     //   dir1/2.txt
-    Set<String> expectedFileNames = new HashSet<String>();
+    Set<String> expectedFileNames = new HashSet<>();
     expectedFileNames.add("1.txt");
     expectedFileNames.add("2.txt");
 
@@ -152,7 +152,7 @@ public class TestListFiles {
     writeFile(fs, FILE1, FILE_LEN);
     writeFile(fs, FILE3, FILE_LEN);
 
-    Set<Path> filesToFind = new HashSet<Path>();
+    Set<Path> filesToFind = new HashSet<>();
     filesToFind.add(fs.makeQualified(FILE1));
     filesToFind.add(fs.makeQualified(FILE2));
     filesToFind.add(fs.makeQualified(FILE3));
@@ -747,7 +747,7 @@ public class TestTrash {
     Path myPath = new Path(TEST_DIR, "test/mkdirs");
     mkdir(fs, myPath);
     int fileIndex = 0;
-    Set<String> checkpoints = new HashSet<String>();
+    Set<String> checkpoints = new HashSet<>();
     while (true) {
       // Create a file with a new name
       Path myFile = new Path(TEST_DIR, "test/mkdirs/myFile" + fileIndex++);
@@ -275,7 +275,7 @@ public class TestIOUtils {
     File dir = new File("testListDirectory");
     Files.createDirectory(dir.toPath());
     try {
-      Set<String> entries = new HashSet<String>();
+      Set<String> entries = new HashSet<>();
       entries.add("entry1");
       entries.add("entry2");
       entries.add("entry3");
@@ -69,7 +69,7 @@ public class TestCodecPool {
     Compressor comp = CodecPool.getCompressor(codec);
     CodecPool.returnCompressor(comp);
     CodecPool.returnCompressor(comp);
-    Set<Compressor> compressors = new HashSet<Compressor>();
+    Set<Compressor> compressors = new HashSet<>();
     for (int i = 0; i < 10; ++i) {
       compressors.add(CodecPool.getCompressor(codec));
     }
@@ -180,7 +180,7 @@ public class TestCodecPool {
     Decompressor decomp = CodecPool.getDecompressor(codec);
     CodecPool.returnDecompressor(decomp);
     CodecPool.returnDecompressor(decomp);
-    Set<Decompressor> decompressors = new HashSet<Decompressor>();
+    Set<Decompressor> decompressors = new HashSet<>();
     for (int i = 0; i < 10; ++i) {
       decompressors.add(CodecPool.getDecompressor(codec));
     }
@@ -39,6 +39,7 @@ import java.util.Objects;
 import java.util.Random;
 import java.util.Set;
 import java.util.Enumeration;
+import java.util.TreeSet;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -72,7 +73,6 @@ import org.mockito.stubbing.Answer;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.util.Sets;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.util.functional.CommonCallableSupplier.submit;
@@ -344,13 +344,13 @@ public abstract class GenericTestUtils {
   public static void assertGlobEquals(File dir, String pattern,
       String ... expectedMatches) throws IOException {
 
-    Set<String> found = Sets.newTreeSet();
+    Set<String> found = new TreeSet<>();
     for (File f : FileUtil.listFiles(dir)) {
       if (f.getName().matches(pattern)) {
         found.add(f.getName());
       }
     }
-    Set<String> expectedSet = Sets.newTreeSet(
+    Set<String> expectedSet = new TreeSet<>(
         Arrays.asList(expectedMatches));
     Assert.assertEquals("Bad files matching " + pattern + " in " + dir,
         Joiner.on(",").join(expectedSet),
@@ -70,8 +70,8 @@ public abstract class MultithreadedTestUtil {
   public static class TestContext {
     private Throwable err = null;
    private boolean stopped = false;
-    private Set<TestingThread> testThreads = new HashSet<TestingThread>();
-    private Set<TestingThread> finishedThreads = new HashSet<TestingThread>();
+    private Set<TestingThread> testThreads = new HashSet<>();
+    private Set<TestingThread> finishedThreads = new HashSet<>();
 
     /**
      * Check if the context can run threads.
@@ -36,9 +36,9 @@ import org.apache.hadoop.thirdparty.com.google.common.cache.Cache;
 import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
 import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalListener;
 import org.apache.hadoop.thirdparty.com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -56,10 +56,10 @@ import java.util.Set;
  */
 public class KMSAudit {
   @VisibleForTesting
-  static final Set<KMS.KMSOp> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
+  static final Set<KMS.KMSOp> AGGREGATE_OPS_WHITELIST = new HashSet<>(Arrays.asList(
     KMS.KMSOp.GET_KEY_VERSION, KMS.KMSOp.GET_CURRENT_KEY,
     KMS.KMSOp.DECRYPT_EEK, KMS.KMSOp.GENERATE_EEK, KMS.KMSOp.REENCRYPT_EEK
-  );
+  ));
 
   private Cache<String, AuditEvent> cache;
 