diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index ad5201841c2..81b2c2aeb5c 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -184,6 +184,9 @@ Build
 * LUCENE-5511: "ant precommit" / "ant check-svn-working-copy" now work again
   with any working copy format (thanks to svnkit 1.8.4). (Uwe Schindler)
 
+* LUCENE-5512: Remove redundant typing (use diamond operator) throughout
+  the codebase. (Furkan KAMACI via Robert Muir)
+
 ======================= Lucene 4.7.0 =======================
 
 New Features
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLCharacterEntities.jflex b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLCharacterEntities.jflex
index c717b03489e..b4249942ddc 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLCharacterEntities.jflex
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLCharacterEntities.jflex
@@ -63,7 +63,7 @@ CharacterEntities = ( "AElig" | "Aacute" | "Acirc" | "Agrave" | "Alpha"
                       | "zwj" | "zwnj" )
 %{
   private static final Map<String,String> upperCaseVariantsAccepted
-      = new HashMap<String,String>();
+      = new HashMap<>();
   static {
     upperCaseVariantsAccepted.put("quot", "QUOT");
     upperCaseVariantsAccepted.put("copy", "COPY");
@@ -73,7 +73,7 @@ CharacterEntities = ( "AElig" | "Aacute" | "Acirc" | "Agrave" | "Alpha"
     upperCaseVariantsAccepted.put("amp", "AMP");
   }
   private static final CharArrayMap<Character> entityValues
-      = new CharArrayMap<Character>(Version.LUCENE_CURRENT, 253, false);
+      = new CharArrayMap<>(Version.LUCENE_CURRENT, 253, false);
   static {
     String[] entities = {
       "AElig", "\u00C6", "Aacute", "\u00C1", "Acirc", "\u00C2",
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
index f39f4ffa084..611b7523a11 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilter.java
@@ -30663,7 +30663,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
 
   /* user code: */
   private static final Map<String,String> upperCaseVariantsAccepted
-      = new HashMap<String,String>();
+      = new HashMap<>();
   static {
     upperCaseVariantsAccepted.put("quot", "QUOT");
     upperCaseVariantsAccepted.put("copy", "COPY");
@@ -30673,7 +30673,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
     upperCaseVariantsAccepted.put("amp", "AMP");
   }
   private static final CharArrayMap<Character> entityValues
-      = new CharArrayMap<Character>(Version.LUCENE_CURRENT, 253, false);
+      = new CharArrayMap<>(Version.LUCENE_CURRENT, 253, false);
   static {
     String[] entities = {
       "AElig", "\u00C6", "Aacute", "\u00C1", "Acirc", "\u00C2",
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilter.java
index 5bd456bbc43..095d0039ce7 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilter.java
@@ -43,7 +43,7 @@ public class MappingCharFilter extends BaseCharFilter {
   private final FST<CharsRef> map;
   private final FST.BytesReader fstReader;
   private final RollingCharBuffer buffer = new RollingCharBuffer();
-  private final FST.Arc<CharsRef> scratchArc = new FST.Arc<CharsRef>();
+  private final FST.Arc<CharsRef> scratchArc = new FST.Arc<>();
   private final Map<Character,FST.Arc<CharsRef>> cachedRootArcs;
 
   private CharsRef replacement;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java
index 80b7b1f76c6..29c115270db 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java
@@ -69,7 +69,7 @@ public class MappingCharFilterFactory extends CharFilterFactory implements
       wlist = getLines(loader, mapping);
     } else {
       List<String> files = splitFileNames(mapping);
-      wlist = new ArrayList<String>();
+      wlist = new ArrayList<>();
       for (String file : files) {
         List<String> lines = getLines(loader, file.trim());
         wlist.addAll(lines);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/NormalizeCharMap.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/NormalizeCharMap.java
index 9203784101c..499fdf0808a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/NormalizeCharMap.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/NormalizeCharMap.java
@@ -40,7 +40,7 @@ import org.apache.lucene.util.fst.Util;
 public class NormalizeCharMap {
 
   final FST<CharsRef> map;
-  final Map<Character,FST.Arc<CharsRef>> cachedRootArcs = new HashMap<Character,FST.Arc<CharsRef>>();
+  final Map<Character,FST.Arc<CharsRef>> cachedRootArcs = new HashMap<>();
 
   // Use the builder to create:
   private NormalizeCharMap(FST<CharsRef> map) {
@@ -48,7 +48,7 @@ public class NormalizeCharMap {
     if (map != null) {
       try {
         // Pre-cache root arcs:
-        final FST.Arc<CharsRef> scratchArc = new FST.Arc<CharsRef>();
+        final FST.Arc<CharsRef> scratchArc = new FST.Arc<>();
         final FST.BytesReader fstReader = map.getBytesReader();
         map.getFirstArc(scratchArc);
         if (FST.targetHasArcs(scratchArc)) {
@@ -78,7 +78,7 @@ public class NormalizeCharMap {
    */
   public static class Builder {
 
-    private final Map<String,String> pendingPairs = new TreeMap<String,String>();
+    private final Map<String,String> pendingPairs = new TreeMap<>();
 
     /** Records a replacement to be applied to the input
      *  stream.  Whenever singleMatch occurs in
@@ -108,7 +108,7 @@ public class NormalizeCharMap {
       final FST<CharsRef> map;
       try {
         final Outputs<CharsRef> outputs = CharSequenceOutputs.getSingleton();
-        final org.apache.lucene.util.fst.Builder<CharsRef> builder = new org.apache.lucene.util.fst.Builder<CharsRef>(FST.INPUT_TYPE.BYTE2, outputs);
+        final org.apache.lucene.util.fst.Builder<CharsRef> builder = new org.apache.lucene.util.fst.Builder<>(FST.INPUT_TYPE.BYTE2, outputs);
         final IntsRef scratch = new IntsRef();
         for(Map.Entry<String,String> ent : pendingPairs.entrySet()) {
           builder.add(Util.toUTF16(ent.getKey(), scratch),
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java
index d85b64a2598..255a95a3360 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/CompoundWordTokenFilterBase.java
@@ -84,7 +84,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
   protected CompoundWordTokenFilterBase(Version matchVersion, TokenStream input, CharArraySet dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
     super(input);
     this.matchVersion = matchVersion;
-    this.tokens=new LinkedList<CompoundToken>();
+    this.tokens=new LinkedList<>();
     if (minWordSize < 0) {
       throw new IllegalArgumentException("minWordSize cannot be negative");
     }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
index 119794eaf05..4ba5f27ae50 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
@@ -54,7 +54,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer {
   private transient TernaryTree ivalues;
 
   public HyphenationTree() {
-    stoplist = new HashMap<String,ArrayList<Object>>(23); // usually a small table
+    stoplist = new HashMap<>(23); // usually a small table
     classmap = new TernaryTree();
     vspace = new ByteVector();
     vspace.alloc(1); // this reserves index 0, which we don't use
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
index 3cf35cb9c75..d9901f10cc0 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
@@ -188,7 +188,7 @@ public class PatternParser extends DefaultHandler {
   }
 
   protected ArrayList normalizeException(ArrayList ex) {
-    ArrayList<Object> res = new ArrayList<Object>();
+    ArrayList<Object> res = new ArrayList<>();
     for (int i = 0; i < ex.size(); i++) {
       Object item = ex.get(i);
       if (item instanceof String) {
@@ -287,7 +287,7 @@ public class PatternParser extends DefaultHandler {
       currElement = ELEM_PATTERNS;
     } else if (local.equals("exceptions")) {
       currElement = ELEM_EXCEPTIONS;
-      exception = new ArrayList<Object>();
+      exception = new ArrayList<>();
     } else if (local.equals("hyphen")) {
       if (token.length() > 0) {
         exception.add(token.toString());
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
index a48c571e486..c2651341a67 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
@@ -503,7 +503,7 @@ public class TernaryTree implements Cloneable {
 
     public Iterator() {
       cur = -1;
-      ns = new Stack<Item>();
+      ns = new Stack<>();
       ks = new StringBuilder();
       rewind();
     }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
index 790de6cfc81..2d9cf17d018 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java
@@ -52,6 +52,6 @@ public class LowerCaseTokenizerFactory extends TokenizerFactory implements Multi
 
   @Override
   public AbstractAnalysisFactory getMultiTermComponent() {
-    return new LowerCaseFilterFactory(new HashMap<String,String>(getOriginalArgs()));
+    return new LowerCaseFilterFactory(new HashMap<>(getOriginalArgs()));
   }
 }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java
index 2dd2fa644c5..0545d754133 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java
@@ -58,7 +58,7 @@ public class TypeTokenFilterFactory extends TokenFilterFactory implements Resour
   public void inform(ResourceLoader loader) throws IOException {
     List<String> files = splitFileNames(stopTypesFiles);
     if (files.size() > 0) {
-      stopTypes = new HashSet<String>();
+      stopTypes = new HashSet<>();
       for (String file : files) {
         List<String> typesLines = getLines(loader, file.trim());
         stopTypes.addAll(typesLines);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java
index 8f87d91dee9..cdb397b93bf 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemmer.java
@@ -280,7 +280,7 @@ public class KStemmer {
     DictEntry defaultEntry;
     DictEntry entry;
 
-    CharArrayMap<DictEntry> d = new CharArrayMap<DictEntry>(Version.LUCENE_CURRENT, 1000, false);
+    CharArrayMap<DictEntry> d = new CharArrayMap<>(Version.LUCENE_CURRENT, 1000, false);
     for (int i = 0; i < exceptionWords.length; i++) {
       if (!d.containsKey(exceptionWords[i])) {
         entry = new DictEntry(exceptionWords[i], true);
@@ -574,7 +574,7 @@ public class KStemmer {
     return matchedEntry != null;
   }
 
-  // Set<String> lookups = new HashSet<String>();
+  // Set<String> lookups = new HashSet<>();
 
   /* convert past tense (-ed) to present, and `-ied' to `y' */
   private void pastTense() {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
index a345f20fee5..01af90f3c4d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Dictionary.java
@@ -189,7 +189,7 @@ public class Dictionary {
 
     // read dictionary entries
     IntSequenceOutputs o = IntSequenceOutputs.getSingleton();
-    Builder<IntsRef> b = new Builder<IntsRef>(FST.INPUT_TYPE.BYTE4, o);
+    Builder<IntsRef> b = new Builder<>(FST.INPUT_TYPE.BYTE4, o);
     readDictionaryFiles(dictionaries, decoder, b);
     words = b.finish();
     aliases = null; // no longer needed
@@ -502,7 +502,7 @@ public class Dictionary {
 
     List<Character> list = affixes.get(affixArg);
     if (list == null) {
-      list = new ArrayList<Character>();
+      list = new ArrayList<>();
       affixes.put(affixArg, list);
     }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
index 8e9706925fd..a4f24e5dbba 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java
@@ -82,10 +82,10 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
     String dicts[] = dictionaryFiles.split(",");
 
     InputStream affix = null;
-    List<InputStream> dictionaries = new ArrayList<InputStream>();
+    List<InputStream> dictionaries = new ArrayList<>();
 
     try {
-      dictionaries = new ArrayList<InputStream>();
+      dictionaries = new ArrayList<>();
       for (String file : dicts) {
         dictionaries.add(loader.openResource(file));
       }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
index 622a9a4ddc3..cb33ab48fee 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Stemmer.java
@@ -84,7 +84,7 @@ final class Stemmer {
       word = scratchBuffer;
     }
 
-    List<CharsRef> stems = new ArrayList<CharsRef>();
+    List<CharsRef> stems = new ArrayList<>();
     IntsRef forms = dictionary.lookupWord(word, 0, length);
     if (forms != null) {
       // TODO: some forms should not be added, e.g. ONLYINCOMPOUND
@@ -158,7 +158,7 @@ final class Stemmer {
   private List<CharsRef> stem(char word[], int length, int previous, int prevFlag, int prefixFlag, int recursionDepth, boolean doPrefix, boolean doSuffix, boolean previousWasPrefix, boolean circumfix) {
 
     // TODO: allow this stuff to be reused by tokenfilter
-    List<CharsRef> stems = new ArrayList<CharsRef>();
+    List<CharsRef> stems = new ArrayList<>();
 
     if (doPrefix && dictionary.prefixes != null) {
       for (int i = length - 1; i >= 0; i--) {
@@ -323,7 +323,7 @@ final class Stemmer {
     condition >>>= 1;
     char append = (char) (affixReader.readShort() & 0xffff);
 
-    List<CharsRef> stems = new ArrayList<CharsRef>();
+    List<CharsRef> stems = new ArrayList<>();
 
     IntsRef forms = dictionary.lookupWord(strippedWord, 0, length);
     if (forms != null) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java
index bfef661b435..81bf4ed5936 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizer.java
@@ -43,7 +43,7 @@ public class IndicNormalizer {
   }
 
   private static final IdentityHashMap<Character.UnicodeBlock,ScriptData> scripts =
-    new IdentityHashMap<Character.UnicodeBlock,ScriptData>(9);
+    new IdentityHashMap<>(9);
 
   private static int flag(Character.UnicodeBlock ub) {
     return scripts.get(ub).flag;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
index 7d6ea7a9720..4c9743caf2b 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java
@@ -88,7 +88,7 @@ public class CapitalizationFilterFactory extends TokenFilterFactory {
 
     k = getSet(args, OK_PREFIX);
     if (k != null) {
-      okPrefix = new ArrayList<char[]>();
+      okPrefix = new ArrayList<>();
       for (String item : k) {
         okPrefix.add(item.toCharArray());
       }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java
index 2aac745a0df..4badea1aac1 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java
@@ -33,7 +33,7 @@ import java.util.Map;
  *
 * <pre class="prettyprint">
  * {@code
- * Map<String,Analyzer> analyzerPerField = new HashMap<String,Analyzer>();
+ * Map<String,Analyzer> analyzerPerField = new HashMap<>();
  * analyzerPerField.put("firstname", new KeywordAnalyzer());
  * analyzerPerField.put("lastname", new KeywordAnalyzer());
  *
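
Every hunk in this patch applies the same mechanical rewrite: when the declared type already spells out the type arguments, the JDK 7 diamond operator lets the constructor omit them and have the compiler infer them from the declaration. A minimal, self-contained sketch of the before/after, using illustrative names rather than code from the Lucene sources:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondDemo {
      public static void main(String[] args) {
        // Before JDK 7: type arguments repeated on both sides of the assignment.
        Map<String, List<String>> verbose = new HashMap<String, List<String>>();

        // With the diamond operator the compiler infers <String, List<String>>
        // from the declared type; this is the rewrite LUCENE-5512 applies.
        Map<String, List<String>> concise = new HashMap<>();

        List<String> stems = new ArrayList<>(); // inferred as ArrayList<String>
        stems.add("run");
        concise.put("stems", stems);
        verbose.putAll(concise);
        System.out.println(verbose); // prints {stems=[run]}
      }
    }

The two forms compile to identical bytecode; only the source gets shorter.
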
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.java
index 2042c84d0a5..53b4ecdf057 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.java
@@ -44,7 +44,7 @@ public final class StemmerOverrideFilter extends TokenFilter {
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
   private final BytesReader fstReader;
-  private final Arc<BytesRef> scratchArc = new FST.Arc<BytesRef>();
+  private final Arc<BytesRef> scratchArc = new FST.Arc<>();
   private final CharsRef spare = new CharsRef();
   
   /**
@@ -145,7 +145,7 @@ public final class StemmerOverrideFilter extends TokenFilter {
   public static class Builder {
     private final BytesRefHash hash = new BytesRefHash();
     private final BytesRef spare = new BytesRef();
-    private final ArrayList<CharsRef> outputValues = new ArrayList<CharsRef>();
+    private final ArrayList<CharsRef> outputValues = new ArrayList<>();
     private final boolean ignoreCase;
     private final CharsRef charsSpare = new CharsRef();
     
@@ -200,7 +200,7 @@ public final class StemmerOverrideFilter extends TokenFilter {
      */
     public StemmerOverrideMap build() throws IOException {
       ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
-      org.apache.lucene.util.fst.Builder<BytesRef> builder = new org.apache.lucene.util.fst.Builder<BytesRef>(
+      org.apache.lucene.util.fst.Builder<BytesRef> builder = new org.apache.lucene.util.fst.Builder<>(
           FST.INPUT_TYPE.BYTE4, outputs);
       final int[] sort = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
       IntsRef intsSpare = new IntsRef();
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
index b06e53e6ac2..cc66970a54e 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java
@@ -104,7 +104,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
     }
     if (types != null) {
      List<String> files = splitFileNames( types );
-      List<String> wlist = new ArrayList<String>();
+      List<String> wlist = new ArrayList<>();
       for( String file : files ){
         List lines = getLines(loader, file.trim());
         wlist.addAll( lines );
@@ -124,7 +124,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
   
   // parses a list of MappingCharFilter style rules into a custom byte[] type table
  private byte[] parseTypes(List<String> rules) {
-    SortedMap<Character,Byte> typeMap = new TreeMap<Character,Byte>();
+    SortedMap<Character,Byte> typeMap = new TreeMap<>();
     for( String rule : rules ){
       Matcher m = typePattern.matcher(rule);
       if( !m.find() )
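
For context on the hunk above: parseTypes consumes MappingCharFilter-style rules, one per line, and folds them into a table keyed by character. A rough, hypothetical sketch of that shape (the rule grammar, the String type names, and the error handling below are stand-ins, not the factory's actual code, which maps characters to WordDelimiterFilter's byte type constants):

    import java.util.SortedMap;
    import java.util.TreeMap;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    class TypeRuleSketch {
      // Accepts rules shaped like:  "<char>" => <TYPE>   e.g.  "$" => DIGIT
      private static final Pattern RULE = Pattern.compile("\"(.)\"\\s*=>\\s*(\\w+)");

      static SortedMap<Character, String> parse(Iterable<String> rules) {
        // Diamond operator on the right-hand side, as in the hunk above.
        SortedMap<Character, String> typeMap = new TreeMap<>();
        for (String rule : rules) {
          Matcher m = RULE.matcher(rule);
          if (!m.find()) {
            throw new IllegalArgumentException("Invalid rule: " + rule);
          }
          typeMap.put(m.group(1).charAt(0), m.group(2));
        }
        return typeMap;
      }

      public static void main(String[] args) {
        // prints {$=DIGIT, %=ALPHA}
        System.out.println(parse(java.util.Arrays.asList("\"$\" => DIGIT", "\"%\" => ALPHA")));
      }
    }
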
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java
index 3fbab34f58b..3904919139d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java
@@ -82,7 +82,7 @@ public final class DutchAnalyzer extends Analyzer {
         throw new RuntimeException("Unable to load default stopword set");
       }
       
-      DEFAULT_STEM_DICT = new CharArrayMap<String>(Version.LUCENE_CURRENT, 4, false);
+      DEFAULT_STEM_DICT = new CharArrayMap<>(Version.LUCENE_CURRENT, 4, false);
       DEFAULT_STEM_DICT.put("fiets", "fiets"); //otherwise fiet
       DEFAULT_STEM_DICT.put("bromfiets", "bromfiets"); //otherwise bromfiet
       DEFAULT_STEM_DICT.put("ei", "eier");
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/ReversePathHierarchyTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/ReversePathHierarchyTokenizer.java
index 5ae5ae2102b..71db68d4633 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/ReversePathHierarchyTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/path/ReversePathHierarchyTokenizer.java
@@ -98,7 +98,7 @@ public class ReversePathHierarchyTokenizer extends Tokenizer {
     this.skip = skip;
     resultToken = new StringBuilder(bufferSize);
     resultTokenBuffer = new char[bufferSize];
-    delimiterPositions = new ArrayList<Integer>(bufferSize/10);
+    delimiterPositions = new ArrayList<>(bufferSize/10);
   }
 
   private static final int DEFAULT_BUFFER_SIZE = 1024;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java
index 0915d536fb0..f8da03451c3 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/RSLPStemmerBase.java
@@ -248,7 +248,7 @@ public abstract class RSLPStemmerBase {
     try {
       InputStream is = clazz.getResourceAsStream(resource);
       LineNumberReader r = new LineNumberReader(new InputStreamReader(is, "UTF-8"));
-      Map<String,Step> steps = new HashMap<String,Step>();
+      Map<String,Step> steps = new HashMap<>();
       String step;
       while ((step = readLine(r)) != null) {
         Step s = parseStep(r, step);
@@ -285,7 +285,7 @@ public abstract class RSLPStemmerBase {
   }
   
   private static Rule[] parseRules(LineNumberReader r, int type) throws IOException {
-    List<Rule> rules = new ArrayList<Rule>();
+    List<Rule> rules = new ArrayList<>();
     String line;
     while ((line = readLine(r)) != null) {
       Matcher matcher = stripPattern.matcher(line);
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
index 9dfdf7d4fdc..8a4b8aa52ad 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
@@ -46,7 +46,7 @@ import org.apache.lucene.util.Version;
 public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
 
   private final Analyzer delegate;
-  private final Map<String,Set<String>> stopWordsPerField = new HashMap<String,Set<String>>();
+  private final Map<String,Set<String>> stopWordsPerField = new HashMap<>();
   //The default maximum percentage (40%) of index documents which
   //can contain a term, after which the term is considered to be a stop word.
   public static final float defaultMaxDocFreqPercent = 0.4f;
@@ -153,7 +153,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
     this.delegate = delegate;
     
     for (String field : fields) {
-      Set<String> stopWords = new HashSet<String>();
+      Set<String> stopWords = new HashSet<>();
       Terms terms = MultiFields.getTerms(indexReader, field);
       CharsRef spare = new CharsRef();
       if (terms != null) {
@@ -204,7 +204,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
    * @return the stop words (as terms)
    */
   public Term[] getStopWords() {
-    List<Term> allStopWords = new ArrayList<Term>();
+    List<Term> allStopWords = new ArrayList<>();
     for (String fieldName : stopWordsPerField.keySet()) {
      Set<String> stopWords = stopWordsPerField.get(fieldName);
       for (String text : stopWords) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
index 507a9d9d27f..fb105f58868 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
@@ -74,7 +74,7 @@ public final class ShingleFilter extends TokenFilter {
    * that will be composed to form output shingles.
    */
  private LinkedList<InputWindowToken> inputWindow
-    = new LinkedList<InputWindowToken>();
+    = new LinkedList<>();
   
   /**
    * The number of input tokens in the next output token.  This is the "n" in
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java
index 80a0ce22277..69e90a9ed09 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/sinks/TeeSinkTokenFilter.java
@@ -75,7 +75,7 @@ sink2.consumeAllTokens();
 * </pre>
 * Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
 */
 public final class TeeSinkTokenFilter extends TokenFilter {
-  private final List<WeakReference<SinkTokenStream>> sinks = new LinkedList<WeakReference<SinkTokenStream>>();
+  private final List<WeakReference<SinkTokenStream>> sinks = new LinkedList<>();
 
   /**
    * Instantiates a new TeeSinkTokenFilter.
@@ -98,7 +98,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
    */
   public SinkTokenStream newSinkTokenStream(SinkFilter filter) {
     SinkTokenStream sink = new SinkTokenStream(this.cloneAttributes(), filter);
-    this.sinks.add(new WeakReference<SinkTokenStream>(sink));
+    this.sinks.add(new WeakReference<>(sink));
     return sink;
   }
 
@@ -116,7 +116,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
     for (Iterator<AttributeImpl> it = this.cloneAttributes().getAttributeImplsIterator(); it.hasNext(); ) {
       sink.addAttributeImpl(it.next());
     }
-    this.sinks.add(new WeakReference<SinkTokenStream>(sink));
+    this.sinks.add(new WeakReference<>(sink));
   }
 
   /**
@@ -186,7 +186,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
    * TokenStream output from a tee with optional filtering.
    */
   public static final class SinkTokenStream extends TokenStream {
-    private final List<AttributeSource.State> cachedStates = new LinkedList<AttributeSource.State>();
+    private final List<AttributeSource.State> cachedStates = new LinkedList<>();
     private AttributeSource.State finalState;
     private Iterator<AttributeSource.State> it = null;
     private SinkFilter filter;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
index 7afa491b0c0..1817055af4d 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SolrSynonymParser.java
@@ -130,7 +130,7 @@ public class SolrSynonymParser extends SynonymMap.Parser {
   }
 
   private static String[] split(String s, String separator) {
-    ArrayList<String> list = new ArrayList<String>(2);
+    ArrayList<String> list = new ArrayList<>(2);
     StringBuilder sb = new StringBuilder();
     int pos=0, end=s.length();
     while (pos < end) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
index a83366739c0..df87946b2d8 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
@@ -282,7 +282,7 @@ public final class SynonymFilter extends TokenFilter {
 
     //System.out.println("FSTFilt maxH=" + synonyms.maxHorizontalContext);
 
-    scratchArc = new FST.Arc<BytesRef>();
+    scratchArc = new FST.Arc<>();
   }
 
   private void capture() {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
index 4b962b56b53..c06b247815c 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java
@@ -83,7 +83,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
   private final String format;
   private final boolean expand;
   private final String analyzerName;
-  private final Map<String,String> tokArgs = new HashMap<String,String>();
+  private final Map<String,String> tokArgs = new HashMap<>();
 
   private SynonymMap map;
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
index 8b455c5ba75..b748c6f7d72 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
@@ -67,7 +67,7 @@ public class SynonymMap {
    * @lucene.experimental
    */
  public static class Builder {
-    private final HashMap<CharsRef,MapEntry> workingSet = new HashMap<CharsRef,MapEntry>();
+    private final HashMap<CharsRef,MapEntry> workingSet = new HashMap<>();
     private final BytesRefHash words = new BytesRefHash();
     private final BytesRef utf8Scratch = new BytesRef(8);
     private int maxHorizontalContext;
@@ -82,7 +82,7 @@ public class SynonymMap {
     private static class MapEntry {
       boolean includeOrig;
       // we could sort for better sharing ultimately, but it could confuse people
-      ArrayList<Integer> ords = new ArrayList<Integer>();
+      ArrayList<Integer> ords = new ArrayList<>();
     }
 
     /** Sugar: just joins the provided terms with {@link
@@ -210,7 +210,7 @@ public class SynonymMap {
       ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
       // TODO: are we using the best sharing options?
       org.apache.lucene.util.fst.Builder<BytesRef> builder = 
-        new org.apache.lucene.util.fst.Builder<BytesRef>(FST.INPUT_TYPE.BYTE4, outputs);
+        new org.apache.lucene.util.fst.Builder<>(FST.INPUT_TYPE.BYTE4, outputs);
       
       BytesRef scratch = new BytesRef(64);
       ByteArrayDataOutput scratchOutput = new ByteArrayDataOutput();
@@ -218,7 +218,7 @@ public class SynonymMap {
 
       final Set<Integer> dedupSet;
       if (dedup) {
-        dedupSet = new HashSet<Integer>();
+        dedupSet = new HashSet<>();
       } else {
         dedupSet = null;
       }
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AbstractAnalysisFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AbstractAnalysisFactory.java
index 534c16651fd..4ad1473ed84 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AbstractAnalysisFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AbstractAnalysisFactory.java
@@ -65,7 +65,7 @@ public abstract class AbstractAnalysisFactory {
    * Initialize this factory via a set of key-value pairs.
    */
   protected AbstractAnalysisFactory(Map<String,String> args) {
-    originalArgs = Collections.unmodifiableMap(new HashMap<String,String>(args));
+    originalArgs = Collections.unmodifiableMap(new HashMap<>(args));
     String version = get(args, LUCENE_MATCH_VERSION_PARAM);
     luceneMatchVersion = version == null ? null : Version.parseLeniently(version);
     args.remove(CLASS_NAME); // consume the class arg
@@ -202,7 +202,7 @@ public abstract class AbstractAnalysisFactory {
     Set<String> set = null;
     Matcher matcher = ITEM_PATTERN.matcher(s);
     if (matcher.find()) {
-      set = new HashSet<String>();
+      set = new HashSet<>();
       set.add(matcher.group(0));
       while (matcher.find()) {
         set.add(matcher.group(0));
@@ -296,7 +296,7 @@ public abstract class AbstractAnalysisFactory {
     if (fileNames == null)
       return Collections.emptyList();
 
-    List<String> result = new ArrayList<String>();
+    List<String> result = new ArrayList<>();
     for (String file : fileNames.split("(?
{
   */
  public synchronized void reload(ClassLoader classloader) {
    final LinkedHashMap<String,Class<? extends S>> services =
-      new LinkedHashMap<String,Class<? extends S>>(this.services);
+      new LinkedHashMap<>(this.services);
    final SPIClassIterator<S> loader = SPIClassIterator.get(clazz, classloader);
    while (loader.hasNext()) {
      final Class<? extends S> service = loader.next();
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
index 442bf92f8f0..f867cf7ea88 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArrayMap.java
@@ -52,7 +52,7 @@ import org.apache.lucene.util.Version;
  */
 public class CharArrayMap<V> extends AbstractMap<Object,V> {
   // private only because missing generics
-  private static final CharArrayMap<?> EMPTY_MAP = new EmptyCharArrayMap<Object>();
+  private static final CharArrayMap<?> EMPTY_MAP = new EmptyCharArrayMap<>();
 
   private final static int INIT_SIZE = 8;
   private final CharacterUtils charUtils;
@@ -559,7 +559,7 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
       return emptyMap();
     if (map instanceof UnmodifiableCharArrayMap)
       return map;
-    return new UnmodifiableCharArrayMap<V>(map);
+    return new UnmodifiableCharArrayMap<>(map);
   }
 
   /**
@@ -595,12 +595,12 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
       System.arraycopy(m.keys, 0, keys, 0, keys.length);
       final V[] values = (V[]) new Object[m.values.length];
       System.arraycopy(m.values, 0, values, 0, values.length);
-      m = new CharArrayMap<V>(m);
+      m = new CharArrayMap<>(m);
       m.keys = keys;
       m.values = values;
       return m;
     }
-    return new CharArrayMap<V>(matchVersion, map, false);
+    return new CharArrayMap<>(matchVersion, map, false);
   }
 
   /** Returns an empty, unmodifiable map. */
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java
index 1c73b783916..109f2472867 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharArraySet.java
@@ -74,7 +74,7 @@ public class CharArraySet extends AbstractSet<Object> {
    * otherwise true.
    */
  public CharArraySet(Version matchVersion, int startSize, boolean ignoreCase) {
-    this(new CharArrayMap<Object>(matchVersion, startSize, ignoreCase));
+    this(new CharArrayMap<>(matchVersion, startSize, ignoreCase));
  }
 
  /**
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharFilterFactory.java
index b2d079901bb..fb832886247 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharFilterFactory.java
@@ -30,7 +30,7 @@ import org.apache.lucene.analysis.CharFilter;
 public abstract class CharFilterFactory extends AbstractAnalysisFactory {
 
   private static final AnalysisSPILoader<CharFilterFactory> loader =
-      new AnalysisSPILoader<CharFilterFactory>(CharFilterFactory.class);
+      new AnalysisSPILoader<>(CharFilterFactory.class);
 
   /** looks up a charfilter by name from context classpath */
   public static CharFilterFactory forName(String name, Map<String,String> args) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenFilterFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenFilterFactory.java
index 0015114e32e..12cb556e725 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenFilterFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenFilterFactory.java
@@ -29,7 +29,7 @@ import org.apache.lucene.analysis.TokenStream;
 public abstract class TokenFilterFactory extends AbstractAnalysisFactory {
 
   private static final AnalysisSPILoader<TokenFilterFactory> loader =
-      new AnalysisSPILoader<TokenFilterFactory>(TokenFilterFactory.class,
+      new AnalysisSPILoader<>(TokenFilterFactory.class,
           new String[] { "TokenFilterFactory", "FilterFactory" });
 
   /** looks up a tokenfilter by name from context classpath */
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java
index 6d4bbadc0c5..3436930bfc4 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java
@@ -31,7 +31,7 @@ import java.util.Set;
 public abstract class TokenizerFactory extends AbstractAnalysisFactory {
 
   private static final AnalysisSPILoader<TokenizerFactory> loader =
-      new AnalysisSPILoader<TokenizerFactory>(TokenizerFactory.class);
+      new AnalysisSPILoader<>(TokenizerFactory.class);
 
   /** looks up a tokenizer by name from context classpath */
   public static TokenizerFactory forName(String name, Map<String,String> args) {
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java
index 5f97fc772cf..8fec2c00d19 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/util/WordlistLoader.java
@@ -219,7 +219,7 @@ public class WordlistLoader {
     try {
       input = getBufferedReader(IOUtils.getDecodingReader(stream, charset));
 
-      lines = new ArrayList<String>();
+      lines = new ArrayList<>();
       for (String word=null; (word=input.readLine())!=null;) {
         // skip initial bom marker
         if (lines.isEmpty() && word.length() > 0 && word.charAt(0) == '\uFEFF')
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java
index 6fbd8070a76..b168b8a416a 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java
@@ -215,7 +215,7 @@ public final class WikipediaTokenizer extends Tokenizer {
     int lastPos = theStart + numAdded;
     int tmpTokType;
     int numSeen = 0;
-    List<AttributeSource.State> tmp = new ArrayList<AttributeSource.State>();
+    List<AttributeSource.State> tmp = new ArrayList<>();
     setupSavedToken(0, type);
     tmp.add(captureState());
     //while we can get a token and that token is the same type and we have not transitioned to a new wiki-item of the same type
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
index 9c6c7304cb7..85e4d69119f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
@@ -114,7 +114,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
   public void testGamma() throws Exception {
     String test = "&Gamma;";
     String gold = "\u0393";
-    Set<String> set = new HashSet<String>();
+    Set<String> set = new HashSet<>();
     set.add("reserved");
     Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
     StringBuilder builder = new StringBuilder();
@@ -129,7 +129,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
   public void testEntities() throws Exception {
     String test = "  <foo> Übermensch = Γ bar Γ";
     String gold = " \u00DCbermensch = \u0393 bar \u0393";
-    Set<String> set = new HashSet<String>();
+    Set<String> set = new HashSet<>();
     set.add("reserved");
     Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
     StringBuilder builder = new StringBuilder();
@@ -144,7 +144,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
   public void testMoreEntities() throws Exception {
     String test = "  <junk/>   ! @ and ’";
     String gold = " ! @ and ’";
-    Set<String> set = new HashSet<String>();
+    Set<String> set = new HashSet<>();
     set.add("reserved");
     Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
     StringBuilder builder = new StringBuilder();
@@ -158,7 +158,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
   public void testReserved() throws Exception {
     String test = "aaa bbb eeee ffff ";
-    Set<String> set = new HashSet<String>();
+    Set<String> set = new HashSet<>();
     set.add("reserved");
     Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
     StringBuilder builder = new StringBuilder();
@@ -588,7 +588,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
   public void testEscapeScript() throws Exception {
     String test = "onetwo";
     String gold = "onetwo";
-    Set<String> escapedTags = new HashSet<String>(Arrays.asList("SCRIPT"));
+    Set<String> escapedTags = new HashSet<>(Arrays.asList("SCRIPT"));
     Reader reader = new HTMLStripCharFilter
         (new StringReader(test), escapedTags);
     int ch = 0;
@@ -628,7 +628,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
   public void testEscapeStyle() throws Exception {
     String test = "onetwo";
     String gold = "onetwo";
-    Set<String> escapedTags = new HashSet<String>(Arrays.asList("STYLE"));
+    Set<String> escapedTags = new HashSet<>(Arrays.asList("STYLE"));
     Reader reader = new HTMLStripCharFilter
         (new StringReader(test), escapedTags);
     int ch = 0;
@@ -668,7 +668,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
   public void testEscapeBR() throws Exception {
     String test = "onetwo";
     String gold = "onetwo";
-    Set<String> escapedTags = new HashSet<String>(Arrays.asList("BR"));
+    Set<String> escapedTags = new HashSet<>(Arrays.asList("BR"));
     Reader reader = new HTMLStripCharFilter
         (new StringReader(test), escapedTags);
     int ch = 0;
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
index 55975a30c12..e6f5e95c847 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
@@ -270,7 +270,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
     Random random = random();
     NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
     // we can't add duplicate keys, or NormalizeCharMap gets angry
-    Set<String> keys = new HashSet<String>();
+    Set<String> keys = new HashSet<>();
     int num = random.nextInt(5);
     //System.out.println("NormalizeCharMap=");
     for (int i = 0; i < num; i++) {
@@ -296,7 +296,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
     final char endLetter = (char) TestUtil.nextInt(random, 'b', 'z');
 
-    final Map<String,String> map = new HashMap<String,String>();
+    final Map<String,String> map = new HashMap<>();
     final NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
     final int numMappings = atLeast(5);
     if (VERBOSE) {
@@ -333,7 +333,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
     final StringBuilder output = new StringBuilder();
 
     // Maps output offset to input offset:
-    final List<Integer> inputOffsets = new ArrayList<Integer>();
+    final List<Integer> inputOffsets = new ArrayList<>();
 
     int cumDiff = 0;
     int charIdx = 0;
@@ -416,7 +416,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
     final MappingCharFilter mapFilter = new MappingCharFilter(charMap, new StringReader(content));
 
     final StringBuilder actualBuilder = new StringBuilder();
-    final List<Integer> actualInputOffsets = new ArrayList<Integer>();
+    final List<Integer> actualInputOffsets = new ArrayList<>();
 
     // Now consume the actual mapFilter, somewhat randomly:
     while (true) {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAllAnalyzersHaveFactories.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAllAnalyzersHaveFactories.java
index 4c69d00a23d..b410fab9be0 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAllAnalyzersHaveFactories.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAllAnalyzersHaveFactories.java
@@ -117,7 +117,7 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
       continue;
     }
 
-    Map<String,String> args = new HashMap<String,String>();
+    Map<String,String> args = new HashMap<>();
     args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
 
     if (Tokenizer.class.isAssignableFrom(c)) {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
index 97da57fbdce..f1708ea37a0 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java
@@ -122,7 +122,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
 
   /** tries to initialize a factory with no arguments */
   private AbstractAnalysisFactory initialize(Class<? extends AbstractAnalysisFactory> factoryClazz) throws IOException {
-    Map<String,String> args = new HashMap<String,String>();
+    Map<String,String> args = new HashMap<>();
     args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
     Constructor<? extends AbstractAnalysisFactory> ctor;
     try {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
index 617e7523b69..8a91a9e4875 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestRandomChains.java
@@ -110,7 +110,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
     };
   };
 
-  private static final Map<Class<?>,Predicate<Object[]>> brokenConstructors = new HashMap<Class<?>, Predicate<Object[]>>();
+  private static final Map<Class<?>,Predicate<Object[]>> brokenConstructors = new HashMap<>();
   static {
     try {
       brokenConstructors.put(
@@ -158,7 +158,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
 
   // TODO: also fix these and remove (maybe):
   // Classes/options that don't produce consistent graph offsets:
-  private static final Map<Class<?>,Predicate<Object[]>> brokenOffsetsConstructors = new HashMap<Class<?>, Predicate<Object[]>>();
+  private static final Map<Class<?>,Predicate<Object[]>> brokenOffsetsConstructors = new HashMap<>();
   static {
     try {
       for (Class<?> c : Arrays.<Class<?>>asList(
@@ -188,9 +188,9 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
   @BeforeClass
   public static void beforeClass() throws Exception {
     List<Class<?>> analysisClasses = getClassesForPackage("org.apache.lucene.analysis");
-    tokenizers = new ArrayList<Constructor<? extends Tokenizer>>();
-    tokenfilters = new ArrayList<Constructor<? extends TokenFilter>>();
-    charfilters = new ArrayList<Constructor<? extends CharFilter>>();
+    tokenizers = new ArrayList<>();
+    tokenfilters = new ArrayList<>();
+    charfilters = new ArrayList<>();
     for (final Class<?> c : analysisClasses) {
       final int modifiers = c.getModifiers();
       if (
@@ -257,7 +257,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
   }
 
   public static List<Class<?>> getClassesForPackage(String pckgname) throws Exception {
-    final List<Class<?>> classes = new ArrayList<Class<?>>();
+    final List<Class<?>> classes = new ArrayList<>();
     collectClassesForPackage(pckgname, classes);
     assertFalse("No classes found in package '"+pckgname+"'; maybe your test classes are packaged as JAR file?", classes.isEmpty());
     return classes;
@@ -358,7 +358,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
     put(Set.class, new ArgProducer() {
       @Override public Object create(Random random) {
         // TypeTokenFilter
-        Set<String> set = new HashSet<String>();
+        Set<String> set = new HashSet<>();
         int num = random.nextInt(5);
         for (int i = 0; i < num; i++) {
           set.add(StandardTokenizer.TOKEN_TYPES[random.nextInt(StandardTokenizer.TOKEN_TYPES.length)]);
@@ -369,7 +369,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
     put(Collection.class, new ArgProducer() {
       @Override public Object create(Random random) {
         // CapitalizationFilter
-        Collection<char[]> col = new ArrayList<char[]>();
+        Collection<char[]> col = new ArrayList<>();
         int num = random.nextInt(5);
         for (int i = 0; i < num; i++) {
           col.add(TestUtil.randomSimpleString(random).toCharArray());
@@ -459,7 +459,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
       @Override public Object create(Random random) {
         NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
         // we can't add duplicate keys, or NormalizeCharMap gets angry
-        Set<String> keys = new HashSet<String>();
+        Set<String> keys = new HashSet<>();
         int num = random.nextInt(5);
         //System.out.println("NormalizeCharMap=");
         for (int i = 0; i < num; i++) {
@@ -489,7 +489,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
     put(CharArrayMap.class, new ArgProducer() {
       @Override public Object create(Random random) {
         int num = random.nextInt(10);
-        CharArrayMap<String> map = new CharArrayMap<String>(TEST_VERSION_CURRENT, num, random.nextBoolean());
+        CharArrayMap<String> map = new CharArrayMap<>(TEST_VERSION_CURRENT, num, random.nextBoolean());
         for (int i = 0; i < num; i++) {
           // TODO: make nastier
           map.put(TestUtil.randomSimpleString(random), TestUtil.randomSimpleString(random));
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
index 0656c2832ae..9838fe1f8a2 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
@@ -32,7 +32,7 @@ import java.util.HashSet;
 public class TestStopAnalyzer extends BaseTokenStreamTestCase {
 
   private StopAnalyzer stop = new StopAnalyzer(TEST_VERSION_CURRENT);
-  private Set<Object> inValidTokens = new HashSet<Object>();
+  private Set<Object> inValidTokens = new HashSet<>();
 
   @Override
   public void setUp() throws Exception {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopFilter.java
index 0d6c26d7c43..8f43c011891 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopFilter.java
@@ -59,7 +59,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
    */
   public void testStopPositons() throws IOException {
     StringBuilder sb = new StringBuilder();
-    ArrayList<String> a = new ArrayList<String>();
+    ArrayList<String> a = new ArrayList<>();
     for (int i=0; i<20; i++) {
       String w = English.intToEnglish(i).trim();
       sb.append(w).append(" ");
@@ -76,8 +76,8 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
     StopFilter stpf = new StopFilter(Version.LUCENE_40, in, stopSet);
     doTestStopPositons(stpf);
     // with increments, concatenating two stop filters
-    ArrayList<String> a0 = new ArrayList<String>();
-    ArrayList<String> a1 = new ArrayList<String>();
+    ArrayList<String> a0 = new ArrayList<>();
+    ArrayList<String> a1 = new ArrayList<>();
     for (int i=0; i
-      List<String> urlList = new ArrayList<String>();
+      List<String> urlList = new ArrayList<>();
       bufferedReader = new BufferedReader(new InputStreamReader
           (getClass().getResourceAsStream("LuceneResourcesWikiPageURLs.txt"), "UTF-8"));
       String line;
@@ -331,7 +331,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
     BufferedReader bufferedReader = null;
     String[] emails;
     try {
-      List<String> emailList = new ArrayList<String>();
+      List<String> emailList = new ArrayList<>();
       bufferedReader = new BufferedReader(new InputStreamReader
           (getClass().getResourceAsStream
               ("email.addresses.from.random.text.with.email.addresses.txt"), "UTF-8"));
@@ -401,7 +401,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
     BufferedReader bufferedReader = null;
     String[] urls;
     try {
-      List<String> urlList = new ArrayList<String>();
+      List<String> urlList = new ArrayList<>();
       bufferedReader = new BufferedReader(new InputStreamReader
           (getClass().getResourceAsStream
               ("urls.from.random.text.with.urls.txt"), "UTF-8"));
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java
index c982b651a5a..0b07d3c9543 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java
@@ -73,7 +73,7 @@ public class TestKStemmer extends BaseTokenStreamTestCase {
     //  tf = new KStemFilter(tf);
 
     KStemmer kstem = new KStemmer();
-    Map<String,String> map = new TreeMap<String,String>();
+    Map<String,String> map = new TreeMap<>();
     for(;;) {
       Token t = tf.next();
       if (t==null) break;
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java
index 81495b14af6..19ceeccf2e8 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java
@@ -1888,8 +1888,8 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
     };
 
     // Construct input text and expected output tokens
-    List<String> expectedUnfoldedTokens = new ArrayList<String>();
-    List<String> expectedFoldedTokens = new ArrayList<String>();
+    List<String> expectedUnfoldedTokens = new ArrayList<>();
+    List<String> expectedFoldedTokens = new ArrayList<>();
     StringBuilder inputText = new StringBuilder();
     for (int n = 0 ; n < foldings.length ; n += 2) {
       if (n > 0) {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java
index a6d78e63942..00ef72e1a63 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java
@@ -78,7 +78,7 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase {
         true, keep, true, null, 0, DEFAULT_MAX_WORD_COUNT, DEFAULT_MAX_TOKEN_LENGTH);
 
     // Now try some prefixes
-    List<char[]> okPrefix = new ArrayList<char[]>();
+    List<char[]> okPrefix = new ArrayList<>();
     okPrefix.add("McK".toCharArray());
 
     assertCapitalizesTo("McKinley",
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java
index d0df8b8c076..465c54c31f1 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java
@@ -32,7 +32,7 @@ public class TestKeepWordFilter extends BaseTokenStreamTestCase {
 
   public void testStopAndGo() throws Exception {
-    Set<String> words = new HashSet<String>();
+    Set<String> words = new HashSet<>();
     words.add( "aaa" );
     words.add( "bbb" );
 
@@ -51,7 +51,7 @@ public class TestKeepWordFilter extends BaseTokenStreamTestCase {
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    final Set<String> words = new HashSet<String>();
+    final Set<String> words = new HashSet<>();
     words.add( "a" );
     words.add( "b" );
 
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java
index 0bf5b0483dd..91f929b274d 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalyzerWrapper.java
@@ -31,7 +31,7 @@ public class TestPerFieldAnalyzerWrapper extends BaseTokenStreamTestCase {
   public void testPerField() throws Exception {
     String text = "Qwerty";
 
-    Map<String,Analyzer> analyzerPerField = new HashMap<String,Analyzer>();
+    Map<String,Analyzer> analyzerPerField = new HashMap<>();
     analyzerPerField.put("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));
 
     PerFieldAnalyzerWrapper analyzer =
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
index 17f2fbbc931..a75bfa038cd 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
@@ -78,7 +78,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
   }
 
   public void testRandomRealisticWhiteSpace() throws IOException {
-    Map<String,String> map = new HashMap<String,String>();
+    Map<String,String> map = new HashMap<>();
     int numTerms = atLeast(50);
     for (int i = 0; i < numTerms; i++) {
       String randomRealisticUnicodeString = TestUtil
@@ -105,7 +105,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
     StemmerOverrideFilter.Builder builder = new StemmerOverrideFilter.Builder(random().nextBoolean());
     Set<Entry<String,String>> entrySet = map.entrySet();
     StringBuilder input = new StringBuilder();
-    List<String> output = new ArrayList<String>();
+    List<String> output = new ArrayList<>();
     for (Entry<String,String> entry : entrySet) {
       builder.add(entry.getKey(), entry.getValue());
       if (random().nextBoolean() || output.isEmpty()) {
@@ -121,7 +121,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
   }
 
   public void testRandomRealisticKeyword() throws IOException {
-    Map<String,String> map = new HashMap<String,String>();
+    Map<String,String> map = new HashMap<>();
     int numTerms = atLeast(50);
     for (int i = 0; i < numTerms; i++) {
       String randomRealisticUnicodeString = TestUtil
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
index c1c58296fb9..45ba5d5d21b 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
@@ -210,7 +210,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
   @Test
   public void testPositionIncrements() throws Exception {
     final int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
-    final CharArraySet protWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<String>(Arrays.asList("NUTCH")), false);
+    final CharArraySet protWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("NUTCH")), false);
 
     /* analyzer that uses whitespace + wdf */
     Analyzer a = new Analyzer() {
@@ -332,7 +332,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
     final int flags = random().nextInt(512);
     final CharArraySet protectedWords;
     if (random().nextBoolean()) {
-      protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<String>(Arrays.asList("a", "b", "cd")), false);
+      protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("a", "b", "cd")), false);
     } else {
       protectedWords = null;
     }
@@ -355,7 +355,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
       final int flags = i;
       final CharArraySet protectedWords;
       if (random.nextBoolean()) {
-        protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<String>(Arrays.asList("a", "b", "cd")), false);
+        protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("a", "b", "cd")), false);
       } else {
        protectedWords = null;
      }
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java
index e83ecb0e38a..ba1d4a5351d 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java
@@ -78,7 +78,7 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase 
     final String INPUT = "G&uuml;nther G&uuml;nther is here";
 
     // create MappingCharFilter
-    List<String> mappingRules = new ArrayList<String>();
+    List<String> mappingRules = new ArrayList<>();
     mappingRules.add( "\"&uuml;\" => \"ü\"" );
     NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
     builder.add("&uuml;", "ü");
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java
index ceabc4a32c2..b495e8ed40f 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java
@@ -396,8 +396,8 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
     final int numSyn = atLeast(5);
     //final int numSyn = 2;
 
-    final Map<String,OneSyn> synMap = new HashMap<String,OneSyn>();
-    final List<OneSyn> syns = new ArrayList<OneSyn>();
+    final Map<String,OneSyn> synMap = new HashMap<>();
+    final List<OneSyn> syns = new ArrayList<>();
     final boolean dedup = random().nextBoolean();
     if (VERBOSE) {
       System.out.println("  dedup=" + dedup);
@@ -410,7 +410,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
       s = new OneSyn();
       s.in = synIn;
       syns.add(s);
-      s.out = new ArrayList<String>();
+      s.out = new ArrayList<>();
       synMap.put(synIn, s);
       s.keepOrig = random().nextBoolean();
     }
@@ -453,7 +453,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
   }
 
   private void pruneDups(List<OneSyn> syns) {
-    Set<String> seen = new HashSet<String>();
+    Set<String> seen = new HashSet<>();
     for(OneSyn syn : syns) {
       int idx = 0;
       while(idx < syn.out.size()) {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
index 746d1fde3ff..8c366894e26 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/BaseTokenStreamFactoryTestCase.java
@@ -47,7 +47,7 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest
     if (keysAndValues.length % 2 == 1) {
       throw new IllegalArgumentException("invalid keysAndValues map");
     }
-    Map<String,String> args = new HashMap<String,String>();
+    Map<String,String> args = new HashMap<>();
     for (int i = 0; i < keysAndValues.length; i += 2) {
       String previous = args.put(keysAndValues[i], keysAndValues[i+1]);
       assertNull("duplicate values for key: " + keysAndValues[i], previous);
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
index ce8db402dd6..9c137c21372 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java
@@ -25,8 +25,8 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestCharArrayMap extends LuceneTestCase {
   public void doRandom(int iter, boolean ignoreCase) {
-    CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 1, ignoreCase);
-    HashMap<String,Integer> hmap = new HashMap<String,Integer>();
+    CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 1, ignoreCase);
+    HashMap<String,Integer> hmap = new HashMap<>();
     char[] key;
     for (int i=0; i
-    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
-    HashMap<String,Integer> hm = new HashMap<String,Integer>();
+    CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
+    HashMap<String,Integer> hm = new HashMap<>();
     hm.put("foo",1);
     hm.put("bar",2);
     cm.putAll(hm);
@@ -133,7 +133,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testModifyOnUnmodifiable(){
-    CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
+    CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
     map.put("foo",1);
     map.put("bar",2);
     final int size = map.size();
@@ -230,7 +230,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testToString() {
-    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
+    CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
     assertEquals("[test]",cm.keySet().toString());
     assertEquals("[1]",cm.values().toString());
     assertEquals("[test=1]",cm.entrySet().toString());
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java
index db4d30bfb34..9af7447588e 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArraySet.java
@@ -256,7 +256,7 @@ public class TestCharArraySet extends LuceneTestCase {
     CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
 
     List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
-    List<String> stopwordsUpper = new ArrayList<String>();
+    List<String> stopwordsUpper = new ArrayList<>();
     for (String string : stopwords) {
       stopwordsUpper.add(string.toUpperCase(Locale.ROOT));
     }
@@ -278,7 +278,7 @@ public class TestCharArraySet extends LuceneTestCase {
       assertFalse(copyCaseSens.contains(string));
     }
     // test adding terms to the copy
-    List<String> newWords = new ArrayList<String>();
+    List<String> newWords = new ArrayList<>();
     for (String string : stopwords) {
       newWords.add(string+"_1");
     }
@@ -303,7 +303,7 @@ public class TestCharArraySet extends LuceneTestCase {
     CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
 
     List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
-    List<String> stopwordsUpper = new ArrayList<String>();
+    List<String> stopwordsUpper = new ArrayList<>();
     for (String string : stopwords) {
       stopwordsUpper.add(string.toUpperCase(Locale.ROOT));
     }
@@ -325,7 +325,7 @@ public class TestCharArraySet extends LuceneTestCase {
       assertFalse(copyCaseSens.contains(string));
     }
     // test adding terms to the copy
-    List<String> newWords = new ArrayList<String>();
+    List<String> newWords = new ArrayList<>();
     for (String string : stopwords) {
       newWords.add(string+"_1");
     }
@@ -346,10 +346,10 @@ public class TestCharArraySet extends LuceneTestCase {
    * Test the static #copy() function with a JDK {@link Set} as a source
    */
   public void testCopyJDKSet() {
-    Set
set = new HashSet(); + Set set = new HashSet<>(); List stopwords = Arrays.asList(TEST_STOP_WORDS); - List stopwordsUpper = new ArrayList(); + List stopwordsUpper = new ArrayList<>(); for (String string : stopwords) { stopwordsUpper.add(string.toUpperCase(Locale.ROOT)); } @@ -365,7 +365,7 @@ public class TestCharArraySet extends LuceneTestCase { assertFalse(copy.contains(string)); } - List newWords = new ArrayList(); + List newWords = new ArrayList<>(); for (String string : stopwords) { newWords.add(string+"_1"); } diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java index bca3d3f25e0..1d19d21097e 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestElision.java @@ -51,7 +51,7 @@ public class TestElision extends BaseTokenStreamTestCase { } private List filter(TokenFilter filter) throws IOException { - List tas = new ArrayList(); + List tas = new ArrayList<>(); CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class); filter.reset(); while (filter.incrementToken()) { diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java index fe7c9165c3a..5459556a5d7 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java @@ -130,7 +130,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase { } public void testLucene1133() throws Exception { - Set untoks = new HashSet(); + Set untoks = new HashSet<>(); untoks.add(WikipediaTokenizer.CATEGORY); untoks.add(WikipediaTokenizer.ITALICS); //should be exactly the same, regardless of untoks @@ -150,7 +150,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase { } public void testBoth() throws Exception { - Set untoks = new HashSet(); + Set untoks = new HashSet<>(); untoks.add(WikipediaTokenizer.CATEGORY); untoks.add(WikipediaTokenizer.ITALICS); String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h i j]]"; diff --git a/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java b/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java index 0cfea3fafd8..69b4b392f94 100644 --- a/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java +++ b/lucene/analysis/common/src/tools/java/org/apache/lucene/analysis/standard/GenerateJflexTLDMacros.java @@ -111,7 +111,7 @@ public class GenerateJflexTLDMacros { * @throws java.io.IOException if there is a problem downloading the database */ private SortedSet getIANARootZoneDatabase() throws IOException { - final SortedSet TLDs = new TreeSet(); + final SortedSet TLDs = new TreeSet<>(); final URLConnection connection = tldFileURL.openConnection(); connection.setUseCaches(false); connection.addRequestProperty("Cache-Control", "no-cache"); diff --git a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java 
b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java index b9c5981f72e..307becc0cc9 100644 --- a/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java +++ b/lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java @@ -84,7 +84,7 @@ public class ICUTokenizerFactory extends TokenizerFactory implements ResourceLoa /** Creates a new ICUTokenizerFactory */ public ICUTokenizerFactory(Map args) { super(args); - tailored = new HashMap(); + tailored = new HashMap<>(); String rulefilesArg = get(args, RULEFILES); if (rulefilesArg != null) { List scriptAndResourcePaths = splitFileNames(rulefilesArg); diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilterFactory.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilterFactory.java index 9660be7a873..720570c1d5f 100644 --- a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilterFactory.java +++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilterFactory.java @@ -32,7 +32,7 @@ public class TestICUTransformFilterFactory extends BaseTokenStreamTestCase { /** ensure the transform is working */ public void test() throws Exception { Reader reader = new StringReader("簡化字"); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("id", "Traditional-Simplified"); ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args); TokenStream stream = whitespaceMockTokenizer(reader); @@ -44,7 +44,7 @@ public class TestICUTransformFilterFactory extends BaseTokenStreamTestCase { public void testForwardDirection() throws Exception { // forward Reader reader = new StringReader("Российская Федерация"); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("id", "Cyrillic-Latin"); ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args); TokenStream stream = whitespaceMockTokenizer(reader); @@ -55,7 +55,7 @@ public class TestICUTransformFilterFactory extends BaseTokenStreamTestCase { public void testReverseDirection() throws Exception { // backward (invokes Latin-Cyrillic) Reader reader = new StringReader("Rossijskaâ Federaciâ"); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("id", "Cyrillic-Latin"); args.put("direction", "reverse"); ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args); diff --git a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerFactory.java b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerFactory.java index 1ac528fd560..033da1c1dae 100644 --- a/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerFactory.java +++ b/lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerFactory.java @@ -44,7 +44,7 @@ public class TestICUTokenizerFactory extends BaseTokenStreamTestCase { // “ U+201C LEFT DOUBLE QUOTATION MARK; ” U+201D RIGHT DOUBLE QUOTATION MARK Reader reader = new StringReader (" Don't,break.at?/(punct)! 
\u201Cnice\u201D\r\n\r\n85_At:all; `really\" +2=3$5,&813 !@#%$^)(*@#$ "); - final Map args = new HashMap(); + final Map args = new HashMap<>(); args.put(ICUTokenizerFactory.RULEFILES, "Latn:Latin-break-only-on-whitespace.rbbi"); ICUTokenizerFactory factory = new ICUTokenizerFactory(args); factory.inform(new ClasspathResourceLoader(this.getClass())); @@ -58,7 +58,7 @@ public class TestICUTokenizerFactory extends BaseTokenStreamTestCase { public void testTokenizeLatinDontBreakOnHyphens() throws Exception { Reader reader = new StringReader ("One-two punch. Brang-, not brung-it. This one--not that one--is the right one, -ish."); - final Map args = new HashMap(); + final Map args = new HashMap<>(); args.put(ICUTokenizerFactory.RULEFILES, "Latn:Latin-dont-break-on-hyphens.rbbi"); ICUTokenizerFactory factory = new ICUTokenizerFactory(args); factory.inform(new ClasspathResourceLoader(getClass())); @@ -78,7 +78,7 @@ public class TestICUTokenizerFactory extends BaseTokenStreamTestCase { public void testKeywordTokenizeCyrillicAndThai() throws Exception { Reader reader = new StringReader ("Some English. Немного русский. ข้อความภาษาไทยเล็ก ๆ น้อย ๆ More English."); - final Map args = new HashMap(); + final Map args = new HashMap<>(); args.put(ICUTokenizerFactory.RULEFILES, "Cyrl:KeywordTokenizer.rbbi,Thai:KeywordTokenizer.rbbi"); ICUTokenizerFactory factory = new ICUTokenizerFactory(args); factory.inform(new ClasspathResourceLoader(getClass())); diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateHTMLStripCharFilterSupplementaryMacros.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateHTMLStripCharFilterSupplementaryMacros.java index e50abbdccc0..b211cc27b3c 100644 --- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateHTMLStripCharFilterSupplementaryMacros.java +++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateHTMLStripCharFilterSupplementaryMacros.java @@ -76,7 +76,7 @@ public class GenerateHTMLStripCharFilterSupplementaryMacros { System.out.println("\t []"); } - HashMap utf16ByLead = new HashMap(); + HashMap utf16ByLead = new HashMap<>(); for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) { char utf16[] = Character.toChars(it.codepoint); UnicodeSet trails = utf16ByLead.get(utf16[0]); @@ -87,7 +87,7 @@ public class GenerateHTMLStripCharFilterSupplementaryMacros { trails.add(utf16[1]); } - Map utf16ByTrail = new HashMap(); + Map utf16ByTrail = new HashMap<>(); for (Map.Entry entry : utf16ByLead.entrySet()) { String trail = entry.getValue().getRegexEquivalent(); UnicodeSet leads = utf16ByTrail.get(trail); diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateJFlexSupplementaryMacros.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateJFlexSupplementaryMacros.java index 2b0ba48cfbb..3abcd2ce8c9 100644 --- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateJFlexSupplementaryMacros.java +++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateJFlexSupplementaryMacros.java @@ -95,7 +95,7 @@ public class GenerateJFlexSupplementaryMacros { System.out.println("\t []"); } - HashMap utf16ByLead = new HashMap(); + HashMap utf16ByLead = new HashMap<>(); for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) { char utf16[] = Character.toChars(it.codepoint); UnicodeSet trails = utf16ByLead.get(utf16[0]); diff --git 
a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java index 9fb5cee5c89..7f1bdfe1269 100644 --- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java +++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java @@ -188,7 +188,7 @@ public class GenerateUTR30DataFiles { if (matcher.matches()) { final String leftHandSide = matcher.group(1); final String rightHandSide = matcher.group(2).trim(); - List diacritics = new ArrayList(); + List diacritics = new ArrayList<>(); for (String outputCodePoint : rightHandSide.split("\\s+")) { int ch = Integer.parseInt(outputCodePoint, 16); if (UCharacter.hasBinaryProperty(ch, UProperty.DIACRITIC) diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/GraphvizFormatter.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/GraphvizFormatter.java index cb8999f9e49..ac5ba306889 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/GraphvizFormatter.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/GraphvizFormatter.java @@ -48,7 +48,7 @@ public class GraphvizFormatter { public GraphvizFormatter(ConnectionCosts costs) { this.costs = costs; - this.bestPathMap = new HashMap(); + this.bestPathMap = new HashMap<>(); sb.append(formatHeader()); sb.append(" init [style=invis]\n"); sb.append(" init -> 0.0 [label=\"" + BOS_LABEL + "\"]\n"); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java index 0e129ad34be..f738e4a10b3 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseAnalyzer.java @@ -73,7 +73,7 @@ public class JapaneseAnalyzer extends StopwordAnalyzerBase { try { DEFAULT_STOP_SET = loadStopwordSet(true, JapaneseAnalyzer.class, "stopwords.txt", "#"); // ignore case final CharArraySet tagset = loadStopwordSet(false, JapaneseAnalyzer.class, "stoptags.txt", "#"); - DEFAULT_STOP_TAGS = new HashSet(); + DEFAULT_STOP_TAGS = new HashSet<>(); for (Object element : tagset) { char chars[] = (char[]) element; DEFAULT_STOP_TAGS.add(new String(chars)); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java index 2a550aedc1f..18cc27a36da 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java @@ -58,7 +58,7 @@ public class JapanesePartOfSpeechStopFilterFactory extends TokenFilterFactory im stopTags = null; CharArraySet cas = getWordSet(loader, stopTagFiles, false); if (cas != null) { - stopTags = new HashSet(); + stopTags = new HashSet<>(); for (Object element : cas) { char chars[] = (char[]) element; stopTags.add(new String(chars)); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java index b99ba31a052..ede01ccdae0 100644 --- 
a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizer.java @@ -132,7 +132,7 @@ public final class JapaneseTokenizer extends Tokenizer { private static final int MAX_UNKNOWN_WORD_LENGTH = 1024; private static final int MAX_BACKTRACE_GAP = 1024; - private final EnumMap dictionaryMap = new EnumMap(Type.class); + private final EnumMap dictionaryMap = new EnumMap<>(Type.class); private final TokenInfoFST fst; private final TokenInfoDictionary dictionary; @@ -141,7 +141,7 @@ public final class JapaneseTokenizer extends Tokenizer { private final UserDictionary userDictionary; private final CharacterDefinition characterDefinition; - private final FST.Arc arc = new FST.Arc(); + private final FST.Arc arc = new FST.Arc<>(); private final FST.BytesReader fstReader; private final IntsRef wordIdRef = new IntsRef(); @@ -174,7 +174,7 @@ public final class JapaneseTokenizer extends Tokenizer { private int pos; // Already parsed, but not yet passed to caller, tokens: - private final List pending = new ArrayList(); + private final List pending = new ArrayList<>(); private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java index 6edcf345b03..b6283438d22 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionary.java @@ -44,7 +44,7 @@ public final class TokenInfoDictionary extends BinaryDictionary { try { is = getResource(FST_FILENAME_SUFFIX); is = new BufferedInputStream(is); - fst = new FST(new InputStreamDataInput(is), PositiveIntOutputs.getSingleton()); + fst = new FST<>(new InputStreamDataInput(is), PositiveIntOutputs.getSingleton()); } catch (IOException ioe) { priorE = ioe; } finally { diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java index c386910044c..dfeae8ccb02 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoFST.java @@ -51,9 +51,9 @@ public final class TokenInfoFST { @SuppressWarnings({"rawtypes","unchecked"}) private FST.Arc[] cacheRootArcs() throws IOException { FST.Arc rootCache[] = new FST.Arc[1+(cacheCeiling-0x3040)]; - FST.Arc firstArc = new FST.Arc(); + FST.Arc firstArc = new FST.Arc<>(); fst.getFirstArc(firstArc); - FST.Arc arc = new FST.Arc(); + FST.Arc arc = new FST.Arc<>(); final FST.BytesReader fstReader = fst.getBytesReader(); // TODO: jump to 3040, readNextRealArc to ceiling? 
(just be careful we don't add bugs) for (int i = 0; i < rootCache.length; i++) { diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java index 10df235c5c7..2b76d2c24c0 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/UserDictionary.java @@ -60,7 +60,7 @@ public final class UserDictionary implements Dictionary { BufferedReader br = new BufferedReader(reader); String line = null; int wordId = CUSTOM_DICTIONARY_WORD_ID_OFFSET; - List featureEntries = new ArrayList(); + List featureEntries = new ArrayList<>(); // text, segmentation, readings, POS while ((line = br.readLine()) != null) { @@ -85,11 +85,11 @@ public final class UserDictionary implements Dictionary { } }); - List data = new ArrayList(featureEntries.size()); - List segmentations = new ArrayList(featureEntries.size()); + List data = new ArrayList<>(featureEntries.size()); + List segmentations = new ArrayList<>(featureEntries.size()); PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton(); - Builder fstBuilder = new Builder(FST.INPUT_TYPE.BYTE2, fstOutput); + Builder fstBuilder = new Builder<>(FST.INPUT_TYPE.BYTE2, fstOutput); IntsRef scratch = new IntsRef(); long ord = 0; @@ -136,12 +136,12 @@ public final class UserDictionary implements Dictionary { */ public int[][] lookup(char[] chars, int off, int len) throws IOException { // TODO: can we avoid this treemap/toIndexArray? - TreeMap result = new TreeMap(); // index, [length, length...] + TreeMap result = new TreeMap<>(); // index, [length, length...] boolean found = false; // true if we found any results final FST.BytesReader fstReader = fst.getBytesReader(); - FST.Arc arc = new FST.Arc(); + FST.Arc arc = new FST.Arc<>(); int end = off + len; for (int startOffset = off; startOffset < end; startOffset++) { arc = fst.getFirstArc(arc); @@ -175,7 +175,7 @@ public final class UserDictionary implements Dictionary { * @return array of {wordId, index, length} */ private int[][] toIndexArray(Map input) { - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); for (int i : input.keySet()) { int[] wordIdAndLength = input.get(i); int wordId = wordIdAndLength[0]; diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/CSVUtil.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/CSVUtil.java index 6e35b0259e1..4ce4fb002aa 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/CSVUtil.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/CSVUtil.java @@ -42,7 +42,7 @@ public final class CSVUtil { */ public static String[] parse(String line) { boolean insideQuote = false; - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); int quoteCount = 0; StringBuilder sb = new StringBuilder(); for(int i = 0; i < line.length(); i++) { diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/ToStringUtil.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/ToStringUtil.java index 1821003f0bf..d5a62119bc5 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/ToStringUtil.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/util/ToStringUtil.java @@ -26,7 +26,7 @@ import java.util.HashMap; */ public 
class ToStringUtil { // a translation map for parts of speech, only used for reflectWith - private static final HashMap posTranslations = new HashMap(); + private static final HashMap posTranslations = new HashMap<>(); static { posTranslations.put("名詞", "noun"); posTranslations.put("名詞-一般", "noun-common"); @@ -127,7 +127,7 @@ public class ToStringUtil { } // a translation map for inflection types, only used for reflectWith - private static final HashMap inflTypeTranslations = new HashMap(); + private static final HashMap inflTypeTranslations = new HashMap<>(); static { inflTypeTranslations.put("*", "*"); inflTypeTranslations.put("形容詞・アウオ段", "adj-group-a-o-u"); @@ -197,7 +197,7 @@ public class ToStringUtil { } // a translation map for inflection forms, only used for reflectWith - private static final HashMap inflFormTranslations = new HashMap(); + private static final HashMap inflFormTranslations = new HashMap<>(); static { inflFormTranslations.put("*", "*"); inflFormTranslations.put("基本形", "base"); diff --git a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilterFactory.java b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilterFactory.java index 4dbf4f461e2..9008f86fe64 100644 --- a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilterFactory.java +++ b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilterFactory.java @@ -59,7 +59,7 @@ public class TestJapaneseIterationMarkCharFilterFactory extends BaseTokenStreamT JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(new HashMap()); tokenizerFactory.inform(new StringMockResourceLoader("")); - Map filterArgs = new HashMap(); + Map filterArgs = new HashMap<>(); filterArgs.put("normalizeKanji", "true"); filterArgs.put("normalizeKana", "false"); JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(filterArgs); @@ -76,7 +76,7 @@ public class TestJapaneseIterationMarkCharFilterFactory extends BaseTokenStreamT JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(new HashMap()); tokenizerFactory.inform(new StringMockResourceLoader("")); - Map filterArgs = new HashMap(); + Map filterArgs = new HashMap<>(); filterArgs.put("normalizeKanji", "false"); filterArgs.put("normalizeKana", "true"); JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(filterArgs); diff --git a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapanesePartOfSpeechStopFilterFactory.java b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapanesePartOfSpeechStopFilterFactory.java index aae9e64eab6..23161e1ef65 100644 --- a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapanesePartOfSpeechStopFilterFactory.java +++ b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapanesePartOfSpeechStopFilterFactory.java @@ -39,7 +39,7 @@ public class TestJapanesePartOfSpeechStopFilterFactory extends BaseTokenStreamTe tokenizerFactory.inform(new StringMockResourceLoader("")); TokenStream ts = tokenizerFactory.create(); ((Tokenizer)ts).setReader(new StringReader("私は制限スピードを超える。")); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString()); args.put("tags", "stoptags.txt"); JapanesePartOfSpeechStopFilterFactory factory = new 
JapanesePartOfSpeechStopFilterFactory(args); diff --git a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizerFactory.java b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizerFactory.java index 8962d6e6c3e..48bca5564ad 100644 --- a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizerFactory.java +++ b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizerFactory.java @@ -60,7 +60,7 @@ public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase { * Test mode parameter: specifying normal mode */ public void testMode() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("mode", "normal"); JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args); factory.inform(new StringMockResourceLoader("")); @@ -81,7 +81,7 @@ public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase { "関西国際空港,関西 国際 空港,カンサイ コクサイ クウコウ,テスト名詞\n" + "# Custom reading for sumo wrestler\n" + "朝青龍,朝青龍,アサショウリュウ,カスタム人名\n"; - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("userDictionary", "userdict.txt"); JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args); factory.inform(new StringMockResourceLoader(userDict)); @@ -96,7 +96,7 @@ public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase { * Test preserving punctuation */ public void testPreservePunctuation() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("discardPunctuation", "false"); JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args); factory.inform(new StringMockResourceLoader("")); diff --git a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestTokenInfoDictionary.java b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestTokenInfoDictionary.java index 77f25bed139..d08809d2415 100644 --- a/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestTokenInfoDictionary.java +++ b/lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/dict/TestTokenInfoDictionary.java @@ -37,7 +37,7 @@ public class TestTokenInfoDictionary extends LuceneTestCase { TokenInfoDictionary tid = TokenInfoDictionary.getInstance(); ConnectionCosts matrix = ConnectionCosts.getInstance(); FST fst = tid.getFST().getInternalFST(); - IntsRefFSTEnum fstEnum = new IntsRefFSTEnum(fst); + IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); InputOutput mapping; IntsRef scratch = new IntsRef(); while ((mapping = fstEnum.next()) != null) { diff --git a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/BinaryDictionaryWriter.java b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/BinaryDictionaryWriter.java index 3e5ed728f38..5c6a260a081 100644 --- a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/BinaryDictionaryWriter.java +++ b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/BinaryDictionaryWriter.java @@ -40,7 +40,7 @@ public abstract class BinaryDictionaryWriter { private int targetMapEndOffset = 0, lastWordId = -1, lastSourceId = -1; private int[] targetMap = new int[8192]; private int[] targetMapOffsets = new int[8192]; - private final ArrayList posDict = new ArrayList(); + private final ArrayList posDict = new ArrayList<>(); public BinaryDictionaryWriter(Class implClazz, int size) { this.implClazz = implClazz; diff --git 
a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java index 253bc87ed53..26ed58484af 100644 --- a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java +++ b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/TokenInfoDictionaryBuilder.java @@ -69,7 +69,7 @@ public class TokenInfoDictionaryBuilder { return name.endsWith(".csv"); } }; - ArrayList csvFiles = new ArrayList(); + ArrayList csvFiles = new ArrayList<>(); for (File file : new File(dirname).listFiles(filter)) { csvFiles.add(file); } @@ -82,7 +82,7 @@ public class TokenInfoDictionaryBuilder { // all lines in the file System.out.println(" parse..."); - List lines = new ArrayList(400000); + List lines = new ArrayList<>(400000); for (File file : csvFiles){ FileInputStream inputStream = new FileInputStream(file); Charset cs = Charset.forName(encoding); @@ -132,7 +132,7 @@ public class TokenInfoDictionaryBuilder { System.out.println(" encode..."); PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton(); - Builder fstBuilder = new Builder(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true, PackedInts.DEFAULT, true, 15); + Builder fstBuilder = new Builder<>(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true, PackedInts.DEFAULT, true, 15); IntsRef scratch = new IntsRef(); long ord = -1; // first ord will be 0 String lastValue = null; diff --git a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/UnknownDictionaryBuilder.java b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/UnknownDictionaryBuilder.java index 5676799e1fe..d1dced97fea 100644 --- a/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/UnknownDictionaryBuilder.java +++ b/lucene/analysis/kuromoji/src/tools/java/org/apache/lucene/analysis/ja/util/UnknownDictionaryBuilder.java @@ -66,7 +66,7 @@ public class UnknownDictionaryBuilder { dictionary.put(CSVUtil.parse(NGRAM_DICTIONARY_ENTRY)); - List lines = new ArrayList(); + List lines = new ArrayList<>(); String line = null; while ((line = lineReader.readLine()) != null) { // note: unk.def only has 10 fields, it simplifies the writer to just append empty reading and pronunciation, diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java index 5ac14cdeb55..6bb08036400 100644 --- a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java +++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilter.java @@ -56,7 +56,7 @@ public class MorfologikFilter extends TokenFilter { private final IStemmer stemmer; private List lemmaList; - private final ArrayList tagsList = new ArrayList(); + private final ArrayList tagsList = new ArrayList<>(); private int lemmaListIndex; diff --git a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java index 700e3f27df6..aad39bcbca3 100644 --- 
a/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java +++ b/lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorphosyntacticTagsAttributeImpl.java @@ -82,7 +82,7 @@ public class MorphosyntacticTagsAttributeImpl extends AttributeImpl public void copyTo(AttributeImpl target) { List cloned = null; if (tags != null) { - cloned = new ArrayList(tags.size()); + cloned = new ArrayList<>(tags.size()); for (StringBuilder b : tags) { cloned.add(new StringBuilder(b)); } diff --git a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java index d038c66624c..3624071c93f 100644 --- a/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java +++ b/lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java @@ -124,8 +124,8 @@ public class TestMorfologikAnalyzer extends BaseTokenStreamTestCase { ts.incrementToken(); assertEquals(term, ts.getAttribute(CharTermAttribute.class).toString()); - TreeSet actual = new TreeSet(); - TreeSet expected = new TreeSet(); + TreeSet actual = new TreeSet<>(); + TreeSet expected = new TreeSet<>(); for (StringBuilder b : ts.getAttribute(MorphosyntacticTagsAttribute.class).getTags()) { actual.add(b.toString()); } diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java index ef04430005a..ea6d36255af 100644 --- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java +++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilter.java @@ -32,7 +32,7 @@ public final class DoubleMetaphoneFilter extends TokenFilter { private static final String TOKEN_TYPE = "DoubleMetaphone"; - private final LinkedList remainingTokens = new LinkedList(); + private final LinkedList remainingTokens = new LinkedList<>(); private final DoubleMetaphone encoder = new DoubleMetaphone(); private final boolean inject; private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); diff --git a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java index 4979e520665..b2620d9bb13 100644 --- a/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java +++ b/lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java @@ -73,7 +73,7 @@ public class PhoneticFilterFactory extends TokenFilterFactory implements Resourc private static final String PACKAGE_CONTAINING_ENCODERS = "org.apache.commons.codec.language."; //Effectively constants; uppercase keys - private static final Map> registry = new HashMap>(6); + private static final Map> registry = new HashMap<>(6); static { registry.put("DoubleMetaphone".toUpperCase(Locale.ROOT), DoubleMetaphone.class); diff --git a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilterFactory.java b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilterFactory.java index 17a94a6d177..b6151d02e41 100644 --- 
a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilterFactory.java +++ b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilterFactory.java @@ -38,7 +38,7 @@ public class TestBeiderMorseFilterFactory extends BaseTokenStreamTestCase { } public void testLanguageSet() throws Exception { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("languageSet", "polish"); BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(args); TokenStream ts = factory.create(whitespaceMockTokenizer("Weinberg")); @@ -50,7 +50,7 @@ public class TestBeiderMorseFilterFactory extends BaseTokenStreamTestCase { } public void testOptions() throws Exception { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("nameType", "ASHKENAZI"); args.put("ruleType", "EXACT"); BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(args); diff --git a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilterFactory.java b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilterFactory.java index 07334083c75..ca1ba05a55b 100644 --- a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilterFactory.java +++ b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilterFactory.java @@ -39,7 +39,7 @@ public class TestDoubleMetaphoneFilterFactory extends BaseTokenStreamTestCase { } public void testSettingSizeAndInject() throws Exception { - Map parameters = new HashMap(); + Map parameters = new HashMap<>(); parameters.put("inject", "false"); parameters.put("maxCodeLength", "8"); DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(parameters); diff --git a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java index 83e4e1adcea..c919da4b0f6 100644 --- a/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java +++ b/lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java @@ -36,7 +36,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { * Case: default */ public void testFactoryDefaults() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(PhoneticFilterFactory.ENCODER, "Metaphone"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); factory.inform(new ClasspathResourceLoader(factory.getClass())); @@ -45,7 +45,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { } public void testInjectFalse() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(PhoneticFilterFactory.ENCODER, "Metaphone"); args.put(PhoneticFilterFactory.INJECT, "false"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); @@ -54,7 +54,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { } public void testMaxCodeLength() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(PhoneticFilterFactory.ENCODER, "Metaphone"); args.put(PhoneticFilterFactory.MAX_CODE_LENGTH, "2"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); @@ -76,7 +76,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { public void 
testUnknownEncoder() throws IOException { try { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("encoder", "XXX"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); factory.inform(new ClasspathResourceLoader(factory.getClass())); @@ -88,7 +88,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { public void testUnknownEncoderReflection() throws IOException { try { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("encoder", "org.apache.commons.codec.language.NonExistence"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); factory.inform(new ClasspathResourceLoader(factory.getClass())); @@ -102,7 +102,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { * Case: Reflection */ public void testFactoryReflection() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(PhoneticFilterFactory.ENCODER, "org.apache.commons.codec.language.Metaphone"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); factory.inform(new ClasspathResourceLoader(factory.getClass())); @@ -115,7 +115,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { * so this effectively tests reflection without package name */ public void testFactoryReflectionCaverphone2() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(PhoneticFilterFactory.ENCODER, "Caverphone2"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); factory.inform(new ClasspathResourceLoader(factory.getClass())); @@ -124,7 +124,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { } public void testFactoryReflectionCaverphone() throws IOException { - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(PhoneticFilterFactory.ENCODER, "Caverphone"); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); factory.inform(new ClasspathResourceLoader(factory.getClass())); @@ -182,7 +182,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { static void assertAlgorithm(String algName, String inject, String input, String[] expected) throws Exception { Tokenizer tokenizer = whitespaceMockTokenizer(input); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("encoder", algName); args.put("inject", inject); PhoneticFilterFactory factory = new PhoneticFilterFactory(args); diff --git a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java index e71f20f4d1f..c3b91869386 100644 --- a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java +++ b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/BiSegGraph.java @@ -34,7 +34,7 @@ import org.apache.lucene.analysis.cn.smart.Utility; */ class BiSegGraph { - private Map> tokenPairListTable = new HashMap>(); + private Map> tokenPairListTable = new HashMap<>(); private List segTokenList; @@ -144,7 +144,7 @@ class BiSegGraph { public void addSegTokenPair(SegTokenPair tokenPair) { int to = tokenPair.to; if (!isToExist(to)) { - ArrayList newlist = new ArrayList(); + ArrayList newlist = new ArrayList<>(); newlist.add(tokenPair); tokenPairListTable.put(to, newlist); } else { @@ -168,7 +168,7 @@ class BiSegGraph { public List getShortPath() { int current; int nodeCount = getToCount(); - List path = new ArrayList(); + List path = new 
ArrayList<>(); PathNode zeroPath = new PathNode(); zeroPath.weight = 0; zeroPath.preNode = 0; @@ -197,8 +197,8 @@ class BiSegGraph { int preNode, lastNode; lastNode = path.size() - 1; current = lastNode; - List rpath = new ArrayList(); - List resultPath = new ArrayList(); + List rpath = new ArrayList<>(); + List resultPath = new ArrayList<>(); rpath.add(current); while (current != 0) { diff --git a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/SegGraph.java b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/SegGraph.java index 93b06779bde..9c1f95a1935 100644 --- a/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/SegGraph.java +++ b/lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/hhmm/SegGraph.java @@ -34,7 +34,7 @@ class SegGraph { /** * Map of start offsets to ArrayList of tokens at that position */ - private Map> tokenListTable = new HashMap>(); + private Map> tokenListTable = new HashMap<>(); private int maxStart = -1; @@ -72,7 +72,7 @@ class SegGraph { * @return a {@link List} of these ordered tokens. */ public List makeIndex() { - List result = new ArrayList(); + List result = new ArrayList<>(); int s = -1, count = 0, size = tokenListTable.size(); List tokenList; int index = 0; @@ -98,7 +98,7 @@ class SegGraph { public void addToken(SegToken token) { int s = token.startOffset; if (!isStartExist(s)) { - ArrayList newlist = new ArrayList(); + ArrayList newlist = new ArrayList<>(); newlist.add(token); tokenListTable.put(s, newlist); } else { @@ -115,7 +115,7 @@ class SegGraph { * @return {@link List} of all tokens in the map. */ public List toTokenList() { - List result = new ArrayList(); + List result = new ArrayList<>(); int s = -1, count = 0, size = tokenListTable.size(); List tokenList; diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java index a0734261aee..983c67f1950 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Gener.java @@ -78,7 +78,7 @@ public class Gener extends Reduce { @Override public Trie optimize(Trie orig) { List cmds = orig.cmds; - List rows = new ArrayList(); + List rows = new ArrayList<>(); List orows = orig.rows; int remap[] = new int[orows.size()]; diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java index 11c869dd4b5..16da8c8149a 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Lift.java @@ -88,7 +88,7 @@ public class Lift extends Reduce { @Override public Trie optimize(Trie orig) { List cmds = orig.cmds; - List rows = new ArrayList(); + List rows = new ArrayList<>(); List orows = orig.rows; int remap[] = new int[orows.size()]; diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java index e2e9173736a..e0d9376df6d 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie.java @@ -70,7 +70,7 @@ public class MultiTrie extends Trie { final char EOM = '*'; final String EOM_NODE = "" + EOM; - List tries = new ArrayList(); + List tries = new ArrayList<>(); int BY = 1; @@ -186,7 +186,7 @@ public class MultiTrie extends Trie { */ @Override public 
Trie reduce(Reduce by) { - List h = new ArrayList(); + List h = new ArrayList<>(); for (Trie trie : tries) h.add(trie.reduce(by)); diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java index be9faa2d83b..cfe3181ad23 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/MultiTrie2.java @@ -277,7 +277,7 @@ public class MultiTrie2 extends MultiTrie { */ @Override public Trie reduce(Reduce by) { - List h = new ArrayList(); + List h = new ArrayList<>(); for (Trie trie : tries) h.add(trie.reduce(by)); diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java index 3bd612e2242..25b72353380 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Optimizer.java @@ -81,7 +81,7 @@ public class Optimizer extends Reduce { @Override public Trie optimize(Trie orig) { List cmds = orig.cmds; - List rows = new ArrayList(); + List rows = new ArrayList<>(); List orows = orig.rows; int remap[] = new int[orows.size()]; diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java index 6ebb595418d..2cd2decc26b 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Reduce.java @@ -78,7 +78,7 @@ public class Reduce { */ public Trie optimize(Trie orig) { List cmds = orig.cmds; - List rows = new ArrayList(); + List rows = new ArrayList<>(); List orows = orig.rows; int remap[] = new int[orows.size()]; diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Row.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Row.java index 1489a57f4b6..600b0081663 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Row.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Row.java @@ -65,7 +65,7 @@ import java.util.TreeMap; * The Row class represents a row in a matrix representation of a trie. */ public class Row { - TreeMap cells = new TreeMap(); + TreeMap cells = new TreeMap<>(); int uniformCnt = 0; int uniformSkip = 0; diff --git a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java index b330e83d1d2..3830746ff01 100644 --- a/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java +++ b/lucene/analysis/stempel/src/java/org/egothor/stemmer/Trie.java @@ -70,8 +70,8 @@ import java.util.List; * for which a Trie is constructed. 
*/ public class Trie { - List rows = new ArrayList(); - List cmds = new ArrayList(); + List rows = new ArrayList<>(); + List cmds = new ArrayList<>(); int root; boolean forward = false; diff --git a/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java b/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java index 2649950ad54..8a1990c7a15 100644 --- a/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java +++ b/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java @@ -31,7 +31,7 @@ public class UIMAAnnotationsTokenizerFactory extends TokenizerFactory { private String descriptorPath; private String tokenType; - private final Map configurationParameters = new HashMap(); + private final Map configurationParameters = new HashMap<>(); /** Creates a new UIMAAnnotationsTokenizerFactory */ public UIMAAnnotationsTokenizerFactory(Map args) { diff --git a/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java b/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java index 674e27a4df7..b78788675b9 100644 --- a/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java +++ b/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java @@ -32,7 +32,7 @@ public class UIMATypeAwareAnnotationsTokenizerFactory extends TokenizerFactory { private String descriptorPath; private String tokenType; private String featurePath; - private final Map configurationParameters = new HashMap(); + private final Map configurationParameters = new HashMap<>(); /** Creates a new UIMATypeAwareAnnotationsTokenizerFactory */ public UIMATypeAwareAnnotationsTokenizerFactory(Map args) { diff --git a/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/ae/AEProviderFactory.java b/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/ae/AEProviderFactory.java index d08c85044d9..1a3e2bec8e6 100644 --- a/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/ae/AEProviderFactory.java +++ b/lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/ae/AEProviderFactory.java @@ -27,7 +27,7 @@ public class AEProviderFactory { private static final AEProviderFactory instance = new AEProviderFactory(); - private final Map providerCache = new HashMap(); + private final Map providerCache = new HashMap<>(); private AEProviderFactory() { // Singleton diff --git a/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java b/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java index 702fff495e3..2b88463c3cd 100644 --- a/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java +++ b/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java @@ -127,7 +127,7 @@ public class UIMABaseAnalyzerTest extends BaseTokenStreamTestCase { @Test public void testRandomStringsWithConfigurationParameters() throws Exception { - Map cp = new HashMap(); + Map cp = new HashMap<>(); cp.put("line-end", "\r"); checkRandomData(random(), new UIMABaseAnalyzer("/uima/TestWSTokenizerAE.xml", "org.apache.lucene.uima.ts.TokenAnnotation", cp), 100 * RANDOM_MULTIPLIER); diff --git 
a/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/ae/OverridingParamsAEProviderTest.java b/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/ae/OverridingParamsAEProviderTest.java index 0922184a273..ee0f4b5c26b 100644 --- a/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/ae/OverridingParamsAEProviderTest.java +++ b/lucene/analysis/uima/src/test/org/apache/lucene/analysis/uima/ae/OverridingParamsAEProviderTest.java @@ -53,7 +53,7 @@ public class OverridingParamsAEProviderTest { @Test public void testOverridingParamsInitialization() throws Exception { - Map runtimeParameters = new HashMap(); + Map runtimeParameters = new HashMap<>(); runtimeParameters.put("ngramsize", "3"); AEProvider aeProvider = new OverridingParamsAEProvider("/uima/AggregateSentenceAE.xml", runtimeParameters); AnalysisEngine analysisEngine = aeProvider.getAE(); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java index 5d26625e92f..cf11edce6f4 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/PerfRunData.java @@ -81,7 +81,7 @@ public class PerfRunData implements Closeable { // directory, analyzer, docMaker - created at startup. // reader, writer, searcher - maintained by basic tasks. private Directory directory; - private Map analyzerFactories = new HashMap(); + private Map analyzerFactories = new HashMap<>(); private Analyzer analyzer; private DocMaker docMaker; private ContentSource contentSource; @@ -102,7 +102,7 @@ private Config config; private long startTimeMillis; - private final HashMap perfObjects = new HashMap(); + private final HashMap perfObjects = new HashMap<>(); // constructor public PerfRunData (Config config) throws Exception { @@ -125,7 +125,7 @@ "org.apache.lucene.benchmark.byTask.feeds.RandomFacetSource")).asSubclass(FacetSource.class).newInstance(); facetSource.setConfig(config); // query makers - readTaskQueryMaker = new HashMap<Class<? extends ReadTask>,QueryMaker>(); + readTaskQueryMaker = new HashMap<>(); qmkrClass = Class.forName(config.get("query.maker","org.apache.lucene.benchmark.byTask.feeds.SimpleQueryMaker")).asSubclass(QueryMaker.class); // index stuff @@ -147,7 +147,7 @@ docMaker, facetSource, contentSource); // close all perf objects that are closeable. - ArrayList perfObjectsToClose = new ArrayList(); + ArrayList perfObjectsToClose = new ArrayList<>(); for (Object obj : perfObjects.values()) { if (obj instanceof Closeable) { perfObjectsToClose.add((Closeable) obj); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java index f2850d7f45a..1f76f8cebfe 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DemoHTMLParser.java @@ -147,7 +147,7 @@ public class DemoHTMLParser implements HTMLParser { } private static final Set createElementNameSet(String... 
names) { - return Collections.unmodifiableSet(new HashSet(Arrays.asList(names))); + return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(names))); } /** HTML elements that cause a line break (they are block-elements) */ diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DirContentSource.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DirContentSource.java index 9bf7cc81be5..031a0f0b5bf 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DirContentSource.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DirContentSource.java @@ -82,7 +82,7 @@ public class DirContentSource extends ContentSource { int count = 0; - Stack stack = new Stack(); + Stack stack = new Stack<>(); /* this seems silly ... there must be a better way ... not that this is good, but can it matter? */ @@ -156,7 +156,7 @@ public class DirContentSource extends ContentSource { } - private ThreadLocal dateFormat = new ThreadLocal(); + private ThreadLocal dateFormat = new ThreadLocal<>(); private File dataDir = null; private int iteration = 0; private Iterator inputFiles = null; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java index 5e471b18884..96768779817 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java @@ -108,8 +108,8 @@ public class DocMaker implements Closeable { this.reuseFields = reuseFields; if (reuseFields) { - fields = new HashMap(); - numericFields = new HashMap(); + fields = new HashMap<>(); + numericFields = new HashMap<>(); // Initialize the map with the default fields. fields.put(BODY_FIELD, new Field(BODY_FIELD, "", bodyFt)); @@ -192,9 +192,9 @@ public class DocMaker implements Closeable { } // leftovers are thread local, because it is unsafe to share residues between threads - private ThreadLocal leftovr = new ThreadLocal(); - private ThreadLocal docState = new ThreadLocal(); - private ThreadLocal dateParsers = new ThreadLocal(); + private ThreadLocal leftovr = new ThreadLocal<>(); + private ThreadLocal docState = new ThreadLocal<>(); + private ThreadLocal dateParsers = new ThreadLocal<>(); public static final String BODY_FIELD = "body"; public static final String TITLE_FIELD = "doctitle"; @@ -459,7 +459,7 @@ public class DocMaker implements Closeable { // In a multi-rounds run, it is important to reset DocState since settings // of fields may change between rounds, and this is the only way to reset // the cache of all threads. 
- docState = new ThreadLocal(); + docState = new ThreadLocal<>(); indexProperties = config.get("doc.index.props", false); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSource.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSource.java index 8c82628d0f8..86a2efafb07 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSource.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiContentSource.java @@ -254,7 +254,7 @@ public class EnwikiContentSource extends ContentSource { } - private static final Map ELEMENTS = new HashMap(); + private static final Map ELEMENTS = new HashMap<>(); private static final int TITLE = 0; private static final int DATE = TITLE + 1; private static final int BODY = DATE + 1; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java index aaf12b238d2..bcc9b601c92 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java @@ -94,7 +94,7 @@ public class EnwikiQueryMaker extends AbstractQueryMaker implements */ private static Query[] createQueries(List qs, Analyzer a) { QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, DocMaker.BODY_FIELD, a); - List queries = new ArrayList(); + List queries = new ArrayList<>(); for (int i = 0; i < qs.size(); i++) { try { @@ -127,7 +127,7 @@ public class EnwikiQueryMaker extends AbstractQueryMaker implements // analyzer (default is standard analyzer) Analyzer anlzr = NewAnalyzerTask.createAnalyzer(config.get("analyzer", StandardAnalyzer.class.getName())); - List queryList = new ArrayList(20); + List queryList = new ArrayList<>(20); queryList.addAll(Arrays.asList(STANDARD_QUERIES)); if(!config.get("enwikiQueryMaker.disableSpanQueries", false)) queryList.addAll(Arrays.asList(getPrebuiltQueries(DocMaker.BODY_FIELD))); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java index 79b25f9700a..d7e3378c048 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java @@ -54,7 +54,7 @@ public class FileBasedQueryMaker extends AbstractQueryMaker implements QueryMake QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, defaultField, anlzr); qp.setAllowLeadingWildcard(true); - List qq = new ArrayList(); + List qq = new ArrayList<>(); String fileName = config.get("file.query.maker.file", null); if (fileName != null) { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersContentSource.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersContentSource.java index a6ddcfd905e..b3106af48b1 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersContentSource.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersContentSource.java @@ -49,9 +49,9 @@ public class ReutersContentSource extends ContentSource { ParsePosition pos; } - private ThreadLocal dateFormat = new ThreadLocal(); + private ThreadLocal dateFormat = new 
ThreadLocal<>(); private File dataDir = null; - private ArrayList inputFiles = new ArrayList(); + private ArrayList inputFiles = new ArrayList<>(); private int nextFile = 0; private int iteration = 0; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java index 2275c282f4a..259928d270c 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java @@ -74,7 +74,7 @@ public class ReutersQueryMaker extends AbstractQueryMaker implements QueryMaker */ private static Query[] createQueries(List qs, Analyzer a) { QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, DocMaker.BODY_FIELD, a); - List queries = new ArrayList(); + List queries = new ArrayList<>(); for (int i = 0; i < qs.size(); i++) { try { @@ -108,7 +108,7 @@ public class ReutersQueryMaker extends AbstractQueryMaker implements QueryMaker Analyzer anlzr= NewAnalyzerTask.createAnalyzer(config.get("analyzer", "org.apache.lucene.analysis.standard.StandardAnalyzer")); - List queryList = new ArrayList(20); + List queryList = new ArrayList<>(20); queryList.addAll(Arrays.asList(STANDARD_QUERIES)); queryList.addAll(Arrays.asList(getPrebuiltQueries(DocMaker.BODY_FIELD))); return createQueries(queryList, anlzr); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java index 28aa733bc4d..840d2dc3a6e 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java @@ -49,7 +49,7 @@ public class SimpleQueryMaker extends AbstractQueryMaker implements QueryMaker { "org.apache.lucene.analysis.standard.StandardAnalyzer")); QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, DocMaker.BODY_FIELD,anlzr); - ArrayList qq = new ArrayList(); + ArrayList qq = new ArrayList<>(); Query q1 = new TermQuery(new Term(DocMaker.ID_FIELD,"doc2")); qq.add(q1); Query q2 = new TermQuery(new Term(DocMaker.BODY_FIELD,"simple")); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java index c8b6e7deb2c..6a5730c0bfa 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleSloppyPhraseQueryMaker.java @@ -36,7 +36,7 @@ public class SimpleSloppyPhraseQueryMaker extends SimpleQueryMaker { protected Query[] prepareQueries() throws Exception { // extract some 100 words from doc text to an array String words[]; - ArrayList w = new ArrayList(); + ArrayList w = new ArrayList<>(); StringTokenizer st = new StringTokenizer(SingleDocSource.DOC_TEXT); while (st.hasMoreTokens() && w.size()<100) { w.add(st.nextToken()); @@ -44,7 +44,7 @@ public class SimpleSloppyPhraseQueryMaker extends SimpleQueryMaker { words = w.toArray(new String[0]); // create queries (that would find stuff) with varying slops - ArrayList queries = new ArrayList(); + ArrayList queries = new ArrayList<>(); for (int slop=0; slop<8; slop++) { for (int qlen=2; qlen<6; qlen++) { for (int 
wd=0; wd spatialStrategyCache = new HashMap(); + private static Map spatialStrategyCache = new HashMap<>(); private SpatialStrategy strategy; private ShapeConverter shapeConverter; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java index 7d9ce5f4cf1..1942684b379 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecContentSource.java @@ -79,10 +79,10 @@ public class TrecContentSource extends ContentSource { "hhmm z.z.z. MMM dd, yyyy", // 0901 u.t.c. April 28, 1994 }; - private ThreadLocal dateFormats = new ThreadLocal(); - private ThreadLocal trecDocBuffer = new ThreadLocal(); + private ThreadLocal dateFormats = new ThreadLocal<>(); + private ThreadLocal trecDocBuffer = new ThreadLocal<>(); private File dataDir = null; - private ArrayList inputFiles = new ArrayList(); + private ArrayList inputFiles = new ArrayList<>(); private int nextFile = 0; // Use to synchronize threads on reading from the TREC documents. private Object lock = new Object(); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java index 8e24f72983b..24b9801a6b7 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/TrecDocParser.java @@ -35,7 +35,7 @@ public abstract class TrecDocParser { /** trec parser type used for unknown extensions */ public static final ParsePathType DEFAULT_PATH_TYPE = ParsePathType.GOV2; - static final Map pathType2parser = new HashMap(); + static final Map pathType2parser = new HashMap<>(); static { pathType2parser.put(ParsePathType.GOV2, new TrecGov2Parser()); pathType2parser.put(ParsePathType.FBIS, new TrecFBISParser()); @@ -44,7 +44,7 @@ public abstract class TrecDocParser { pathType2parser.put(ParsePathType.LATIMES, new TrecLATimesParser()); } - static final Map pathName2Type = new HashMap(); + static final Map pathName2Type = new HashMap<>(); static { for (ParsePathType ppt : ParsePathType.values()) { pathName2Type.put(ppt.name().toUpperCase(Locale.ROOT),ppt); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java index 52cf68b2f58..a12b6fff5f9 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java @@ -32,7 +32,7 @@ public class Points { // stat points ordered by their start time. // for now we collect points as TaskStats objects. // later might optimize to collect only native data. 
- private ArrayList points = new ArrayList(); + private ArrayList points = new ArrayList<>(); private int nextTaskRunNum = 0; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddFacetedDocTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddFacetedDocTask.java index 87f2e12f1c5..5063e0af99d 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddFacetedDocTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddFacetedDocTask.java @@ -74,7 +74,7 @@ public class AddFacetedDocTask extends AddDocTask { @Override public int doLogic() throws Exception { if (config != null) { - List facets = new ArrayList(); + List facets = new ArrayList<>(); getRunData().getFacetSource().getNextFacets(facets); for(FacetField ff : facets) { doc.add(ff); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java index 9ad49df1ea8..4a376ab3022 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AnalyzerFactoryTask.java @@ -88,9 +88,9 @@ public class AnalyzerFactoryTask extends PerfTask { String factoryName = null; Integer positionIncrementGap = null; Integer offsetGap = null; - private List charFilterFactories = new ArrayList(); + private List charFilterFactories = new ArrayList<>(); private TokenizerFactory tokenizerFactory = null; - private List tokenFilterFactories = new ArrayList(); + private List tokenFilterFactories = new ArrayList<>(); public AnalyzerFactoryTask(PerfRunData runData) { super(runData); @@ -287,7 +287,7 @@ public class AnalyzerFactoryTask extends PerfTask { */ private void createAnalysisPipelineComponent (StreamTokenizer stok, Class clazz) { - Map argMap = new HashMap(); + Map argMap = new HashMap<>(); boolean parenthetical = false; try { WHILE_LOOP: while (stok.nextToken() != StreamTokenizer.TT_EOF) { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java index ae953c66795..d6b2e9ebb01 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CommitIndexTask.java @@ -42,7 +42,7 @@ public class CommitIndexTask extends PerfTask { @Override public void setParams(String params) { super.setParams(params); - commitUserData = new HashMap(); + commitUserData = new HashMap<>(); commitUserData.put(OpenReaderTask.USER_DATA, params); } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ConsumeContentSourceTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ConsumeContentSourceTask.java index f8c260db159..f1395193859 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ConsumeContentSourceTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ConsumeContentSourceTask.java @@ -25,7 +25,7 @@ import org.apache.lucene.benchmark.byTask.feeds.DocData; public class ConsumeContentSourceTask extends PerfTask { private final ContentSource source; - private ThreadLocal dd = new ThreadLocal(); + private ThreadLocal dd = new ThreadLocal<>(); public ConsumeContentSourceTask(PerfRunData 
runData) { super(runData); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java index 08543944b20..cb8ea744cd3 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewAnalyzerTask.java @@ -39,7 +39,7 @@ public class NewAnalyzerTask extends PerfTask { public NewAnalyzerTask(PerfRunData runData) { super(runData); - analyzerNames = new ArrayList(); + analyzerNames = new ArrayList<>(); } public static final Analyzer createAnalyzer(String className) throws Exception{ diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java index c768b98789e..4480f1c0360 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java @@ -300,7 +300,7 @@ public abstract class ReadTask extends PerfTask { */ protected Collection getFieldsToHighlight(StoredDocument document) { List fields = document.getFields(); - Set result = new HashSet(fields.size()); + Set result = new HashSet<>(fields.size()); for (final StorableField f : fields) { result.add(f.name()); } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameRoundTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameRoundTask.java index 2c933f141c4..731e3ef308e 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameRoundTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameRoundTask.java @@ -53,7 +53,7 @@ public class RepSumByNameRoundTask extends ReportTask { */ protected Report reportSumByNameRound(List taskStats) { // aggregate by task name and round - LinkedHashMap p2 = new LinkedHashMap(); + LinkedHashMap p2 = new LinkedHashMap<>(); int reported = 0; for (final TaskStats stat1 : taskStats) { if (stat1.getElapsed()>=0) { // consider only tasks that ended diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameTask.java index 1aa5c513f50..2da03ec5051 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByNameTask.java @@ -54,7 +54,7 @@ public class RepSumByNameTask extends ReportTask { protected Report reportSumByName(List taskStats) { // aggregate by task name int reported = 0; - LinkedHashMap p2 = new LinkedHashMap(); + LinkedHashMap p2 = new LinkedHashMap<>(); for (final TaskStats stat1: taskStats) { if (stat1.getElapsed()>=0) { // consider only tasks that ended reported++; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefRoundTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefRoundTask.java index e5e047b5ccb..7cebb1ecaec 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefRoundTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefRoundTask.java @@ -50,7 +50,7 @@ public class RepSumByPrefRoundTask extends 
RepSumByPrefTask { protected Report reportSumByPrefixRound(List taskStats) { // aggregate by task name and by round int reported = 0; - LinkedHashMap p2 = new LinkedHashMap(); + LinkedHashMap p2 = new LinkedHashMap<>(); for (final TaskStats stat1 : taskStats) { if (stat1.getElapsed()>=0 && stat1.getTask().getName().startsWith(prefix)) { // only ended tasks with proper name reported++; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefTask.java index 3a4a961a52a..241f252b2b9 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/RepSumByPrefTask.java @@ -52,7 +52,7 @@ public class RepSumByPrefTask extends ReportTask { protected Report reportSumByPrefix (List taskStats) { // aggregate by task name int reported = 0; - LinkedHashMap p2 = new LinkedHashMap(); + LinkedHashMap p2 = new LinkedHashMap<>(); for (final TaskStats stat1 : taskStats) { if (stat1.getElapsed()>=0 && stat1.getTask().getName().startsWith(prefix)) { // only ended tasks with proper name reported++; diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java index 8e3d87e57af..63c5f3369bb 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java @@ -138,7 +138,7 @@ public class SearchTravRetHighlightTask extends SearchTravTask { } else if (splits[i].startsWith("mergeContiguous[") == true){ mergeContiguous = Boolean.valueOf(splits[i].substring("mergeContiguous[".length(),splits[i].length() - 1)).booleanValue(); } else if (splits[i].startsWith("fields[") == true){ - paramFields = new HashSet(); + paramFields = new HashSet<>(); String fieldNames = splits[i].substring("fields[".length(), splits[i].length() - 1); String [] fieldSplits = fieldNames.split(";"); for (int j = 0; j < fieldSplits.length; j++) { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java index 406e994685b..70167de1a30 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetLoadFieldSelectorTask.java @@ -68,7 +68,7 @@ public class SearchTravRetLoadFieldSelectorTask extends SearchTravTask { @Override public void setParams(String params) { this.params = params; // cannot just call super.setParams(), b/c it's params differ. 
- fieldsToLoad = new HashSet(); + fieldsToLoad = new HashSet<>(); for (StringTokenizer tokenizer = new StringTokenizer(params, ","); tokenizer.hasMoreTokens();) { String s = tokenizer.nextToken(); fieldsToLoad.add(s); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java index 4993aff7134..6d9eeb37773 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetVectorHighlightTask.java @@ -135,7 +135,7 @@ public class SearchTravRetVectorHighlightTask extends SearchTravTask { } else if (splits[i].startsWith("fragSize[") == true){ fragSize = (int)Float.parseFloat(splits[i].substring("fragSize[".length(),splits[i].length() - 1)); } else if (splits[i].startsWith("fields[") == true){ - paramFields = new HashSet(); + paramFields = new HashSet<>(); String fieldNames = splits[i].substring("fields[".length(), splits[i].length() - 1); String [] fieldSplits = fieldNames.split(";"); for (int j = 0; j < fieldSplits.length; j++) { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java index 10922204369..9ee2ab11180 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/TaskSequence.java @@ -58,7 +58,7 @@ public class TaskSequence extends PerfTask { setSequenceName(); this.parent = parent; this.parallel = parallel; - tasks = new ArrayList(); + tasks = new ArrayList<>(); logByTimeMsec = runData.getConfig().get("report.time.step.msec", 0); } @@ -186,7 +186,7 @@ public class TaskSequence extends PerfTask { final PerfTask task = tasksArray[l]; if (task.getRunInBackground()) { if (bgTasks == null) { - bgTasks = new ArrayList(); + bgTasks = new ArrayList<>(); } RunBackgroundTask bgTask = new RunBackgroundTask(task, letChildReport); bgTask.setPriority(task.getBackgroundDeltaPriority() + Thread.currentThread().getPriority()); @@ -518,7 +518,7 @@ public class TaskSequence extends PerfTask { @Override protected TaskSequence clone() throws CloneNotSupportedException { TaskSequence res = (TaskSequence) super.clone(); - res.tasks = new ArrayList(); + res.tasks = new ArrayList<>(); for (int i = 0; i < tasks.size(); i++) { res.tasks.add(tasks.get(i).clone()); } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java index 6ea37b6c855..d0f1c5292ed 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTask.java @@ -86,8 +86,8 @@ public class WriteLineDocTask extends PerfTask { protected final String fname; private final PrintWriter lineFileOut; private final DocMaker docMaker; - private final ThreadLocal threadBuffer = new ThreadLocal(); - private final ThreadLocal threadNormalizer = new ThreadLocal(); + private final ThreadLocal threadBuffer = new ThreadLocal<>(); + private final ThreadLocal threadNormalizer = new ThreadLocal<>(); private final String[] fieldsToWrite; private final boolean[] 
sufficientFields; private final boolean checkSufficientFields; @@ -122,7 +122,7 @@ public class WriteLineDocTask extends PerfTask { checkSufficientFields = false; } else { checkSufficientFields = true; - HashSet sf = new HashSet(Arrays.asList(suff.split(","))); + HashSet sf = new HashSet<>(Arrays.asList(suff.split(","))); for (int i=0; i pkgs = new ArrayList(); + ArrayList pkgs = new ArrayList<>(); pkgs.add(dfltPkg); for (String alt : alts.split(",")) { pkgs.add(alt); @@ -339,7 +339,7 @@ public class Algorithm { * @return all tasks participating in this algorithm. */ public ArrayList extractTasks() { - ArrayList res = new ArrayList(); + ArrayList res = new ArrayList<>(); extractTasks(res, sequence); return res; } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java index 8d615f93e97..f9456a38789 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java @@ -50,8 +50,8 @@ public class Config { private int roundNumber = 0; private Properties props; - private HashMap valByRound = new HashMap(); - private HashMap colForValByRound = new HashMap(); + private HashMap valByRound = new HashMap<>(); + private HashMap colForValByRound = new HashMap<>(); private String algorithmText; /** @@ -62,7 +62,7 @@ public class Config { */ public Config(Reader algReader) throws IOException { // read alg file to array of lines - ArrayList lines = new ArrayList(); + ArrayList lines = new ArrayList<>(); BufferedReader r = new BufferedReader(algReader); int lastConfigLine = 0; for (String line = r.readLine(); line != null; line = r.readLine()) { @@ -314,7 +314,7 @@ public class Config { return new String[]{s}; } - ArrayList a = new ArrayList(); + ArrayList a = new ArrayList<>(); StringTokenizer st = new StringTokenizer(s, ":"); while (st.hasMoreTokens()) { String t = st.nextToken(); @@ -329,7 +329,7 @@ public class Config { return new int[]{Integer.parseInt(s)}; } - ArrayList a = new ArrayList(); + ArrayList a = new ArrayList<>(); StringTokenizer st = new StringTokenizer(s, ":"); while (st.hasMoreTokens()) { String t = st.nextToken(); @@ -348,7 +348,7 @@ public class Config { return new double[]{Double.parseDouble(s)}; } - ArrayList a = new ArrayList(); + ArrayList a = new ArrayList<>(); StringTokenizer st = new StringTokenizer(s, ":"); while (st.hasMoreTokens()) { String t = st.nextToken(); @@ -367,7 +367,7 @@ public class Config { return new boolean[]{Boolean.valueOf(s).booleanValue()}; } - ArrayList a = new ArrayList(); + ArrayList a = new ArrayList<>(); StringTokenizer st = new StringTokenizer(s, ":"); while (st.hasMoreTokens()) { String t = st.nextToken(); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StreamUtils.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StreamUtils.java index 9f20d48ef6d..6a3dd3cfb81 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StreamUtils.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/StreamUtils.java @@ -71,7 +71,7 @@ public class StreamUtils { } } - private static final Map extensionToType = new HashMap(); + private static final Map extensionToType = new HashMap<>(); static { // these in are lower case, we will lower case at the test as well extensionToType.put(".bz2", Type.BZIP2); diff --git 
a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java index dcafbb4352d..60a031a85fe 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/QualityStats.java @@ -68,7 +68,7 @@ public class QualityStats { public QualityStats(double maxGoodPoints, long searchTime) { this.maxGoodPoints = maxGoodPoints; this.searchTime = searchTime; - this.recallPoints = new ArrayList(); + this.recallPoints = new ArrayList<>(); pAt = new double[MAX_POINTS+1]; // pAt[0] unused. } diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java index cc4f3226ffa..b3bcb5544b1 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/QueryDriver.java @@ -74,7 +74,7 @@ public class QueryDriver { // validate topics & judgments match each other judge.validateData(qqs, logger); - Set fieldSet = new HashSet(); + Set fieldSet = new HashSet<>(); if (fieldSpec.indexOf('T') >= 0) fieldSet.add("title"); if (fieldSpec.indexOf('D') >= 0) fieldSet.add("description"); if (fieldSpec.indexOf('N') >= 0) fieldSet.add("narrative"); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java index 632d93da770..a1346e7b968 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/Trec1MQReader.java @@ -55,7 +55,7 @@ public class Trec1MQReader { * @throws IOException if cannot read the queries. */ public QualityQuery[] readQueries(BufferedReader reader) throws IOException { - ArrayList res = new ArrayList(); + ArrayList res = new ArrayList<>(); String line; try { while (null!=(line=reader.readLine())) { @@ -69,7 +69,7 @@ public class Trec1MQReader { // qtext String qtext = line.substring(k+1).trim(); // we got a topic! - HashMap fields = new HashMap(); + HashMap fields = new HashMap<>(); fields.put(name,qtext); //System.out.println("id: "+id+" qtext: "+qtext+" line: "+line); QualityQuery topic = new QualityQuery(id,fields); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecJudge.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecJudge.java index a82af995923..156b0d595eb 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecJudge.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/trec/TrecJudge.java @@ -49,7 +49,7 @@ public class TrecJudge implements Judge { * @throws IOException If there is a low-level I/O error. 
*/ public TrecJudge (BufferedReader reader) throws IOException { - judgements = new HashMap(); + judgements = new HashMap<>(); QRelJudgement curr = null; String zero = "0"; String line; @@ -96,7 +96,7 @@ public class TrecJudge implements Judge { QRelJudgement(String queryID) { this.queryID = queryID; - relevantDocs = new HashMap(); + relevantDocs = new HashMap<>(); } public void addRelevandDoc(String docName) { @@ -115,8 +115,8 @@ public class TrecJudge implements Judge { // inherit javadocs @Override public boolean validateData(QualityQuery[] qq, PrintWriter logger) { - HashMap missingQueries = new HashMap(judgements); - ArrayList missingJudgements = new ArrayList(); + HashMap missingQueries = new HashMap<>(judgements); + ArrayList missingJudgements = new ArrayList<>(); for (int i=0; i res = new ArrayList(); + ArrayList res = new ArrayList<>(); StringBuilder sb; try { while (null!=(sb=read(reader,"",null,false,false))) { - HashMap fields = new HashMap(); + HashMap fields = new HashMap<>(); // id sb = read(reader,"",null,true,false); int k = sb.indexOf(":"); diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java index 25b48dfcf90..6f133f428f1 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/DocNameExtractor.java @@ -47,7 +47,7 @@ public class DocNameExtractor { * @throws IOException if cannot extract the doc name from the index. */ public String docName(IndexSearcher searcher, int docid) throws IOException { - final List name = new ArrayList(); + final List name = new ArrayList<>(); searcher.getIndexReader().document(docid, new StoredFieldVisitor() { @Override public void stringField(FieldInfo fieldInfo, String value) { diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java index 1add4ff0d96..a0b33c5422c 100644 --- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java +++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java @@ -35,7 +35,7 @@ public class SimpleQQParser implements QualityQueryParser { private String qqNames[]; private String indexField; - ThreadLocal queryParser = new ThreadLocal(); + ThreadLocal queryParser = new ThreadLocal<>(); /** * Constructor of a simple qq parser. 
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java index 81d251365e5..006fbba0d05 100644 --- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java +++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java @@ -360,7 +360,7 @@ public class TrecContentSourceTest extends LuceneTestCase { DocData dd = new DocData(); int n = 0; boolean gotExpectedException = false; - HashSet unseenTypes = new HashSet(Arrays.asList(ParsePathType.values())); + HashSet unseenTypes = new HashSet<>(Arrays.asList(ParsePathType.values())); try { while (n<100) { // arbiterary limit to prevent looping forever in case of test failure dd = tcs.getNextDocData(dd); diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java index 439fa967f77..47aa48213b6 100644 --- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java +++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java @@ -344,7 +344,7 @@ public class WriteLineDocTaskTest extends BenchmarkTestCase { wldt.close(); - Set ids = new HashSet(); + Set ids = new HashSet<>(); BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(file), "utf-8")); try { String line = br.readLine(); diff --git a/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java index 22d530ac87d..f4d7347ae47 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/KNearestNeighborClassifier.java @@ -97,7 +97,7 @@ public class KNearestNeighborClassifier implements Classifier { private ClassificationResult selectClassFromNeighbors(TopDocs topDocs) throws IOException { // TODO : improve the nearest neighbor selection - Map classCounts = new HashMap(); + Map classCounts = new HashMap<>(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { BytesRef cl = new BytesRef(indexSearcher.doc(scoreDoc.doc).getField(classFieldName).stringValue()); Integer count = classCounts.get(cl); @@ -117,7 +117,7 @@ public class KNearestNeighborClassifier implements Classifier { } } double score = max / (double) k; - return new ClassificationResult(assignedClass, score); + return new ClassificationResult<>(assignedClass, score); } /** diff --git a/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java index f039e70f844..cf11e5cf0e4 100644 --- a/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java +++ b/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java @@ -109,7 +109,7 @@ public class SimpleNaiveBayesClassifier implements Classifier { } private String[] tokenizeDoc(String doc) throws IOException { - Collection result = new LinkedList(); + Collection result = new LinkedList<>(); for (String textFieldName : textFieldNames) { try (TokenStream tokenStream = 
analyzer.tokenStream(textFieldName, doc)) { CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class); @@ -146,7 +146,7 @@ public class SimpleNaiveBayesClassifier implements Classifier { } } double score = 10 / Math.abs(max); - return new ClassificationResult(foundClass, score); + return new ClassificationResult<>(foundClass, score); } diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java index bb81e8b49fe..0a75f893b6f 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java @@ -66,7 +66,7 @@ public class BlockTermsReader extends FieldsProducer { // produce DocsEnum on demand private final PostingsReaderBase postingsReader; - private final TreeMap fields = new TreeMap(); + private final TreeMap fields = new TreeMap<>(); // Reads the terms index private TermsIndexReaderBase indexReader; diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java index 6ac103b92df..579cd02273a 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java @@ -96,7 +96,7 @@ public class BlockTermsWriter extends FieldsConsumer implements Closeable { } } - private final List fields = new ArrayList(); + private final List fields = new ArrayList<>(); // private final String segment; diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java index 621d9cea5af..b13966b34dc 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexReader.java @@ -61,7 +61,7 @@ public class FixedGapTermsIndexReader extends TermsIndexReaderBase { private final PagedBytes termBytes = new PagedBytes(PAGED_BYTES_BITS); private PagedBytes.Reader termBytesReader; - final HashMap fields = new HashMap(); + final HashMap fields = new HashMap<>(); // start of the field info data private long dirOffset; diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexWriter.java index 789300ef785..4787b7b6c78 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/FixedGapTermsIndexWriter.java @@ -58,7 +58,7 @@ public class FixedGapTermsIndexWriter extends TermsIndexWriterBase { final private int termIndexInterval; public static final int DEFAULT_TERM_INDEX_INTERVAL = 32; - private final List fields = new ArrayList(); + private final List fields = new ArrayList<>(); public FixedGapTermsIndexWriter(SegmentWriteState state) throws IOException { this(state, DEFAULT_TERM_INDEX_INTERVAL); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java index 7da8bd96be2..914d661a11f 100644 --- 
a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexReader.java @@ -45,7 +45,7 @@ public class VariableGapTermsIndexReader extends TermsIndexReaderBase { private final PositiveIntOutputs fstOutputs = PositiveIntOutputs.getSingleton(); - final HashMap fields = new HashMap(); + final HashMap fields = new HashMap<>(); // start of the field info data private long dirOffset; @@ -104,7 +104,7 @@ public class VariableGapTermsIndexReader extends TermsIndexReaderBase { private BytesRefFSTEnum.InputOutput current; public IndexEnum(FST fst) { - fstEnum = new BytesRefFSTEnum(fst); + fstEnum = new BytesRefFSTEnum<>(fst); } @Override @@ -158,7 +158,7 @@ public class VariableGapTermsIndexReader extends TermsIndexReaderBase { public FieldIndexData(IndexInput in, FieldInfo fieldInfo, long indexStart) throws IOException { IndexInput clone = in.clone(); clone.seek(indexStart); - fst = new FST(clone, fstOutputs); + fst = new FST<>(clone, fstOutputs); clone.close(); /* diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java index 6d3f6ba1b8c..4b9be3672e0 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/VariableGapTermsIndexWriter.java @@ -55,7 +55,7 @@ public class VariableGapTermsIndexWriter extends TermsIndexWriterBase { final static int VERSION_APPEND_ONLY = 1; final static int VERSION_CURRENT = VERSION_APPEND_ONLY; - private final List fields = new ArrayList(); + private final List fields = new ArrayList<>(); @SuppressWarnings("unused") private final FieldInfos fieldInfos; // unread private final IndexTermSelector policy; @@ -236,7 +236,7 @@ public class VariableGapTermsIndexWriter extends TermsIndexWriterBase { public FSTFieldWriter(FieldInfo fieldInfo, long termsFilePointer) throws IOException { this.fieldInfo = fieldInfo; fstOutputs = PositiveIntOutputs.getSingleton(); - fstBuilder = new Builder(FST.INPUT_TYPE.BYTE1, fstOutputs); + fstBuilder = new Builder<>(FST.INPUT_TYPE.BYTE1, fstOutputs); indexStart = out.getFilePointer(); ////System.out.println("VGW: field=" + fieldInfo.name); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java index 69603876bc6..b06c8a216d5 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java @@ -150,7 +150,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat { public class BloomFilteredFieldsProducer extends FieldsProducer { private FieldsProducer delegateFieldsProducer; - HashMap bloomsByFieldName = new HashMap(); + HashMap bloomsByFieldName = new HashMap<>(); public BloomFilteredFieldsProducer(SegmentReadState state) throws IOException { @@ -394,7 +394,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat { class BloomFilteredFieldsConsumer extends FieldsConsumer { private FieldsConsumer delegateFieldsConsumer; - private Map bloomFilters = new HashMap(); + private Map bloomFilters = new HashMap<>(); private SegmentWriteState state; public 
BloomFilteredFieldsConsumer(FieldsConsumer fieldsConsumer, @@ -454,7 +454,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat { public void close() throws IOException { // Now we are done accumulating values for these fields - List<Entry<FieldInfo,FuzzySet>> nonSaturatedBlooms = new ArrayList<Map.Entry<FieldInfo,FuzzySet>>(); + List<Entry<FieldInfo,FuzzySet>> nonSaturatedBlooms = new ArrayList<>(); for (Entry entry : bloomFilters.entrySet()) { FuzzySet bloomFilter = entry.getValue(); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java index bf7c5789d9c..cbca82eb374 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectDocValuesProducer.java @@ -46,22 +46,22 @@ import org.apache.lucene.util.RamUsageEstimator; class DirectDocValuesProducer extends DocValuesProducer { // metadata maps (just file pointers and minimal stuff) - private final Map numerics = new HashMap(); - private final Map binaries = new HashMap(); - private final Map sorteds = new HashMap(); - private final Map sortedSets = new HashMap(); + private final Map numerics = new HashMap<>(); + private final Map binaries = new HashMap<>(); + private final Map sorteds = new HashMap<>(); + private final Map sortedSets = new HashMap<>(); private final IndexInput data; // ram instances we have already loaded private final Map numericInstances = - new HashMap(); + new HashMap<>(); private final Map binaryInstances = - new HashMap(); + new HashMap<>(); private final Map sortedInstances = - new HashMap(); + new HashMap<>(); private final Map sortedSetInstances = - new HashMap(); - private final Map docsWithFieldInstances = new HashMap(); + new HashMap<>(); + private final Map docsWithFieldInstances = new HashMap<>(); private final int maxDoc; private final AtomicLong ramBytesUsed; diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java index 0e02e7e6fbf..d0e04f2177d 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java @@ -121,7 +121,7 @@ public final class DirectPostingsFormat extends PostingsFormat { } private static final class DirectFields extends FieldsProducer { - private final Map fields = new TreeMap(); + private final Map fields = new TreeMap<>(); public DirectFields(SegmentReadState state, Fields fields, int minSkipCount, int lowFreqCutoff) throws IOException { for (String field : fields) { diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java index 4403a308cdc..7a90867359f 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java @@ -69,7 +69,7 @@ import org.apache.lucene.codecs.memory.FSTTermsReader.TermsReader; */ public class FSTOrdTermsReader extends FieldsProducer { static final int INTERVAL = FSTOrdTermsWriter.SKIP_INTERVAL; - final TreeMap fields = new TreeMap(); + final TreeMap fields = new TreeMap<>(); final PostingsReaderBase postingsReader; IndexInput indexIn = null; IndexInput blockIn = null; @@ -98,7 +98,7 @@ public class FSTOrdTermsReader extends FieldsProducer { 
long sumDocFreq = blockIn.readVLong(); int docCount = blockIn.readVInt(); int longsSize = blockIn.readVInt(); - FST index = new FST(indexIn, PositiveIntOutputs.getSingleton()); + FST index = new FST<>(indexIn, PositiveIntOutputs.getSingleton()); TermsReader current = new TermsReader(fieldInfo, numTerms, sumTotalTermFreq, sumDocFreq, docCount, longsSize, index); TermsReader previous = fields.put(fieldInfo.name, current); @@ -427,7 +427,7 @@ public class FSTOrdTermsReader extends FieldsProducer { boolean seekPending; SegmentTermsEnum() throws IOException { - this.fstEnum = new BytesRefFSTEnum(index); + this.fstEnum = new BytesRefFSTEnum<>(index); this.decoded = false; this.seekPending = false; } @@ -521,7 +521,7 @@ public class FSTOrdTermsReader extends FieldsProducer { int state; Frame() { - this.arc = new FST.Arc(); + this.arc = new FST.Arc<>(); this.state = -1; } @@ -781,7 +781,7 @@ public class FSTOrdTermsReader extends FieldsProducer { } static <T> void walk(FST<T> fst) throws IOException { - final ArrayList<FST.Arc<T>> queue = new ArrayList<FST.Arc<T>>(); + final ArrayList<FST.Arc<T>> queue = new ArrayList<>(); final BitSet seen = new BitSet(); final FST.BytesReader reader = fst.getBytesReader(); final FST.Arc<T> startArc = fst.getFirstArc(new FST.Arc<T>()); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java index f1eec364bab..d854c36029a 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java @@ -154,7 +154,7 @@ public class FSTOrdTermsWriter extends FieldsConsumer { final PostingsWriterBase postingsWriter; final FieldInfos fieldInfos; final int maxDoc; - final List fields = new ArrayList(); + final List fields = new ArrayList<>(); IndexOutput blockOut = null; IndexOutput indexOut = null; @@ -305,7 +305,7 @@ public class FSTOrdTermsWriter extends FieldsConsumer { this.fieldInfo = fieldInfo; this.longsSize = postingsWriter.setField(fieldInfo); this.outputs = PositiveIntOutputs.getSingleton(); - this.builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + this.builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); this.lastBlockStatsFP = 0; this.lastBlockMetaLongsFP = 0; diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java index f88ae9ff214..cfa4d03f326 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java @@ -65,7 +65,7 @@ import org.apache.lucene.codecs.CodecUtil; */ public class FSTTermsReader extends FieldsProducer { - final TreeMap fields = new TreeMap(); + final TreeMap fields = new TreeMap<>(); final PostingsReaderBase postingsReader; final IndexInput in; //static boolean TEST = false; @@ -172,7 +172,7 @@ public class FSTTermsReader extends FieldsProducer { this.sumDocFreq = sumDocFreq; this.docCount = docCount; this.longsSize = longsSize; - this.dict = new FST(in, new FSTTermOutputs(fieldInfo, longsSize)); + this.dict = new FST<>(in, new FSTTermOutputs(fieldInfo, longsSize)); } @Override @@ -307,7 +307,7 @@ public class FSTTermsReader extends FieldsProducer { SegmentTermsEnum() throws IOException { super(); - this.fstEnum = new BytesRefFSTEnum(dict); + this.fstEnum = new BytesRefFSTEnum<>(dict); this.decoded = false; this.seekPending = false; this.meta = null; @@ 
-411,7 +411,7 @@ public class FSTTermsReader extends FieldsProducer { int fsaState; Frame() { - this.fstArc = new FST.Arc(); + this.fstArc = new FST.Arc<>(); this.fsaState = -1; } @@ -697,7 +697,7 @@ public class FSTTermsReader extends FieldsProducer { } static <T> void walk(FST<T> fst) throws IOException { - final ArrayList<FST.Arc<T>> queue = new ArrayList<FST.Arc<T>>(); + final ArrayList<FST.Arc<T>> queue = new ArrayList<>(); final BitSet seen = new BitSet(); final FST.BytesReader reader = fst.getBytesReader(); final FST.Arc<T> startArc = fst.getFirstArc(new FST.Arc<T>()); diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java index 4d3b9f9ed98..433a24007f0 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java @@ -130,7 +130,7 @@ public class FSTTermsWriter extends FieldsConsumer { final FieldInfos fieldInfos; final IndexOutput out; final int maxDoc; - final List fields = new ArrayList(); + final List fields = new ArrayList<>(); public FSTTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWriter) throws IOException { final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION); @@ -259,7 +259,7 @@ public class FSTTermsWriter extends FieldsConsumer { this.fieldInfo = fieldInfo; this.longsSize = postingsWriter.setField(fieldInfo); this.outputs = new FSTTermOutputs(fieldInfo, longsSize); - this.builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + this.builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); } public void finishTerm(BytesRef text, BlockTermState state) throws IOException { diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java index 2b3b9901fc5..5365a813f64 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesConsumer.java @@ -158,7 +158,7 @@ class MemoryDocValuesConsumer extends DocValuesConsumer { } else { meta.writeByte(TABLE_COMPRESSED); // table-compressed Long[] decode = uniqueValues.toArray(new Long[uniqueValues.size()]); - final HashMap encode = new HashMap(); + final HashMap encode = new HashMap<>(); data.writeVInt(decode.length); for (int i = 0; i < decode.length; i++) { data.writeLong(decode[i]); @@ -281,7 +281,7 @@ class MemoryDocValuesConsumer extends DocValuesConsumer { meta.writeByte(FST); meta.writeLong(data.getFilePointer()); PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - Builder builder = new Builder(INPUT_TYPE.BYTE1, outputs); + Builder builder = new Builder<>(INPUT_TYPE.BYTE1, outputs); IntsRef scratch = new IntsRef(); long ord = 0; for (BytesRef v : values) { diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java index 0264b1e3e88..4b75e88332e 100644 --- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java +++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java @@ -68,12 +68,12 @@ class MemoryDocValuesProducer extends DocValuesProducer { // ram instances we have already loaded private final Map numericInstances = - new HashMap(); + new HashMap<>(); 
   private final Map binaryInstances =
-      new HashMap();
+      new HashMap<>();
   private final Map> fstInstances =
-      new HashMap>();
-  private final Map docsWithFieldInstances = new HashMap();
+      new HashMap<>();
+  private final Map docsWithFieldInstances = new HashMap<>();
   private final int maxDoc;
   private final AtomicLong ramBytesUsed;
@@ -104,9 +104,9 @@ class MemoryDocValuesProducer extends DocValuesProducer {
       version = CodecUtil.checkHeader(in, metaCodec, VERSION_START, VERSION_CURRENT);
-      numerics = new HashMap();
-      binaries = new HashMap();
-      fsts = new HashMap();
+      numerics = new HashMap<>();
+      binaries = new HashMap<>();
+      fsts = new HashMap<>();
       readFields(in, state.fieldInfos);
       ramBytesUsed = new AtomicLong(RamUsageEstimator.shallowSizeOfInstance(getClass()));
       success = true;
@@ -314,7 +314,7 @@ class MemoryDocValuesProducer extends DocValuesProducer {
       instance = fstInstances.get(field.number);
       if (instance == null) {
         data.seek(entry.offset);
-        instance = new FST(data, PositiveIntOutputs.getSingleton());
+        instance = new FST<>(data, PositiveIntOutputs.getSingleton());
         ramBytesUsed.addAndGet(instance.sizeInBytes());
         fstInstances.put(field.number, instance);
       }
@@ -324,10 +324,10 @@ class MemoryDocValuesProducer extends DocValuesProducer {
     // per-thread resources
     final BytesReader in = fst.getBytesReader();
-    final Arc firstArc = new Arc();
-    final Arc scratchArc = new Arc();
+    final Arc firstArc = new Arc<>();
+    final Arc scratchArc = new Arc<>();
     final IntsRef scratchInts = new IntsRef();
-    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst);
+    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(fst);
     return new SortedDocValues() {
       @Override
@@ -389,7 +389,7 @@ class MemoryDocValuesProducer extends DocValuesProducer {
       instance = fstInstances.get(field.number);
       if (instance == null) {
         data.seek(entry.offset);
-        instance = new FST(data, PositiveIntOutputs.getSingleton());
+        instance = new FST<>(data, PositiveIntOutputs.getSingleton());
         ramBytesUsed.addAndGet(instance.sizeInBytes());
         fstInstances.put(field.number, instance);
       }
@@ -399,10 +399,10 @@ class MemoryDocValuesProducer extends DocValuesProducer {
     // per-thread resources
     final BytesReader in = fst.getBytesReader();
-    final Arc firstArc = new Arc();
-    final Arc scratchArc = new Arc();
+    final Arc firstArc = new Arc<>();
+    final Arc scratchArc = new Arc<>();
     final IntsRef scratchInts = new IntsRef();
-    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst);
+    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(fst);
     final BytesRef ref = new BytesRef();
     final ByteArrayDataInput input = new ByteArrayDataInput();
     return new SortedSetDocValues() {
@@ -546,14 +546,14 @@ class MemoryDocValuesProducer extends DocValuesProducer {
     // maybe we should add a FSTEnum that supports this operation?
     final FST fst;
     final FST.BytesReader bytesReader;
-    final Arc firstArc = new Arc();
-    final Arc scratchArc = new Arc();
+    final Arc firstArc = new Arc<>();
+    final Arc scratchArc = new Arc<>();
     final IntsRef scratchInts = new IntsRef();
     final BytesRef scratchBytes = new BytesRef();
     FSTTermsEnum(FST fst) {
       this.fst = fst;
-      in = new BytesRefFSTEnum(fst);
+      in = new BytesRefFSTEnum<>(fst);
       bytesReader = fst.getBytesReader();
     }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index 728e1eb69a8..0614c9520a8 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -119,7 +119,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
       this.field = field;
       this.doPackFST = doPackFST;
       this.acceptableOverheadRatio = acceptableOverheadRatio;
-      builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doPackFST, acceptableOverheadRatio, true, 15);
+      builder = new Builder<>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doPackFST, acceptableOverheadRatio, true, 15);
     }
     private class PostingsWriter {
@@ -740,7 +740,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
     public FSTTermsEnum(FieldInfo field, FST fst) {
       this.field = field;
-      fstEnum = new BytesRefFSTEnum(fst);
+      fstEnum = new BytesRefFSTEnum<>(fst);
     }
     private void decodeMetaData() {
@@ -895,7 +895,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
       sumDocFreq = in.readVLong();
       docCount = in.readVInt();
-      fst = new FST(in, outputs);
+      fst = new FST<>(in, outputs);
     }
     @Override
@@ -953,7 +953,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
     final String fileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
     final IndexInput in = state.directory.openInput(fileName, IOContext.READONCE);
-    final SortedMap fields = new TreeMap();
+    final SortedMap fields = new TreeMap<>();
     try {
       while(true) {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java
index af85c4a71bb..4a2e295f0fa 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsReader.java
@@ -76,7 +76,7 @@ public class PulsingPostingsReader extends PostingsReaderBase {
         version < PulsingPostingsWriter.VERSION_META_ARRAY) {
       fields = null;
     } else {
-      fields = new TreeMap();
+      fields = new TreeMap<>();
       String summaryFileName = IndexFileNames.segmentFileName(segmentState.segmentInfo.name, segmentState.segmentSuffix, PulsingPostingsWriter.SUMMARY_EXTENSION);
       IndexInput in = null;
       try {
@@ -628,7 +628,7 @@ public class PulsingPostingsReader extends PostingsReaderBase {
     // another pulsing, because this is just stupid and wasteful.
     // we still have to be careful in case someone does Pulsing(Stomping(Pulsing(...
     private final Map enums =
-      new IdentityHashMap();
+      new IdentityHashMap<>();
     @Override
     public Map enums() {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java
index 4a3c214fbf2..c9b8863c6a0 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/pulsing/PulsingPostingsWriter.java
@@ -125,7 +125,7 @@ public final class PulsingPostingsWriter extends PostingsWriterBase {
    *  for this term) is <= maxPositions, then the postings are
    *  inlined into terms dict */
   public PulsingPostingsWriter(SegmentWriteState state, int maxPositions, PostingsWriterBase wrappedPostingsWriter) {
-    fields = new ArrayList();
+    fields = new ArrayList<>();
     this.maxPositions = maxPositions;
     // We simply wrap another postings writer, but only call
     // on it when tot positions is >= the cutoff:
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java
index 37952fb4449..7d1798b84a5 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesReader.java
@@ -67,7 +67,7 @@ class SimpleTextDocValuesReader extends DocValuesProducer {
   final int maxDoc;
   final IndexInput data;
   final BytesRef scratch = new BytesRef();
-  final Map fields = new HashMap();
+  final Map fields = new HashMap<>();
   public SimpleTextDocValuesReader(SegmentReadState state, String ext) throws IOException {
     // System.out.println("dir=" + state.directory + " seg=" + state.segmentInfo.name + " file=" + IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, ext));
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesWriter.java
index 78c6ea0e6a0..70ad8973c53 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesWriter.java
@@ -52,7 +52,7 @@ class SimpleTextDocValuesWriter extends DocValuesConsumer {
   final IndexOutput data;
   final BytesRef scratch = new BytesRef();
   final int numDocs;
-  private final Set fieldsSeen = new HashSet(); // for asserting
+  private final Set fieldsSeen = new HashSet<>(); // for asserting
   public SimpleTextDocValuesWriter(SegmentWriteState state, String ext) throws IOException {
     // System.out.println("WRITE: " + IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, ext) + " " + state.segmentInfo.getDocCount() + " docs");
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosReader.java
index caa8d39fe74..79206a79a3c 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldInfosReader.java
@@ -111,7 +111,7 @@ public class SimpleTextFieldInfosReader extends FieldInfosReader {
       SimpleTextUtil.readLine(input, scratch);
       assert StringHelper.startsWith(scratch, NUM_ATTS);
       int numAtts = Integer.parseInt(readString(NUM_ATTS.length, scratch));
-      Map atts = new HashMap();
+      Map atts = new HashMap<>();
       for (int j = 0; j < numAtts; j++) {
         SimpleTextUtil.readLine(input, scratch);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
index 9968377f514..17b6014de9a 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
@@ -83,7 +83,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
   private TreeMap readFields(IndexInput in) throws IOException {
     BytesRef scratch = new BytesRef(10);
-    TreeMap fields = new TreeMap();
+    TreeMap fields = new TreeMap<>();
     while (true) {
       SimpleTextUtil.readLine(in, scratch);
@@ -106,7 +106,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
     public SimpleTextTermsEnum(FST>> fst, IndexOptions indexOptions) {
       this.indexOptions = indexOptions;
-      fstEnum = new BytesRefFSTEnum>>(fst);
+      fstEnum = new BytesRefFSTEnum<>(fst);
     }
     @Override
@@ -513,10 +513,10 @@ class SimpleTextFieldsReader extends FieldsProducer {
     private void loadTerms() throws IOException {
       PositiveIntOutputs posIntOutputs = PositiveIntOutputs.getSingleton();
       final Builder>> b;
-      final PairOutputs outputsInner = new PairOutputs(posIntOutputs, posIntOutputs);
-      final PairOutputs> outputs = new PairOutputs>(posIntOutputs,
+      final PairOutputs outputsInner = new PairOutputs<>(posIntOutputs, posIntOutputs);
+      final PairOutputs> outputs = new PairOutputs<>(posIntOutputs,
           outputsInner);
-      b = new Builder>>(FST.INPUT_TYPE.BYTE1, outputs);
+      b = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
       IndexInput in = SimpleTextFieldsReader.this.in.clone();
       in.seek(termsStart);
       final BytesRef lastTerm = new BytesRef(10);
@@ -633,7 +633,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
     return Collections.unmodifiableSet(fields.keySet()).iterator();
   }
-  private final Map termsCache = new HashMap();
+  private final Map termsCache = new HashMap<>();
   @Override
   synchronized public Terms terms(String field) throws IOException {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoReader.java
index 0bc6c3d8ea9..e117155a77d 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextSegmentInfoReader.java
@@ -72,7 +72,7 @@ public class SimpleTextSegmentInfoReader extends SegmentInfoReader {
     SimpleTextUtil.readLine(input, scratch);
     assert StringHelper.startsWith(scratch, SI_NUM_DIAG);
     int numDiag = Integer.parseInt(readString(SI_NUM_DIAG.length, scratch));
-    Map diagnostics = new HashMap();
+    Map diagnostics = new HashMap<>();
     for (int i = 0; i < numDiag; i++) {
       SimpleTextUtil.readLine(input, scratch);
@@ -88,7 +88,7 @@ public class SimpleTextSegmentInfoReader extends SegmentInfoReader {
     SimpleTextUtil.readLine(input, scratch);
     assert StringHelper.startsWith(scratch, SI_NUM_FILES);
     int numFiles = Integer.parseInt(readString(SI_NUM_FILES.length, scratch));
-    Set files = new HashSet();
+    Set files = new HashSet<>();
     for (int i = 0; i < numFiles; i++) {
       SimpleTextUtil.readLine(input, scratch);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
index d415177b1ed..c9e9c9e7b4d 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
@@ -96,7 +96,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
   @Override
   public Fields get(int doc) throws IOException {
-    SortedMap fields = new TreeMap();
+    SortedMap fields = new TreeMap<>();
     in.seek(offsets[doc]);
     readLine();
     assert StringHelper.startsWith(scratch, NUMFIELDS);
@@ -261,7 +261,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
       this.hasOffsets = hasOffsets;
       this.hasPositions = hasPositions;
       this.hasPayloads = hasPayloads;
-      terms = new TreeMap();
+      terms = new TreeMap<>();
     }
     @Override
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
index a0e3282e3bb..b5de9169f96 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
@@ -56,7 +56,7 @@ public class TestPulsingReuse extends LuceneTestCase {
     AtomicReader segment = getOnlySegmentReader(ir);
     DocsEnum reuse = null;
-    Map allEnums = new IdentityHashMap();
+    Map allEnums = new IdentityHashMap<>();
     TermsEnum te = segment.terms("foo").iterator(null);
     while (te.next() != null) {
       reuse = te.docs(null, reuse, DocsEnum.FLAG_NONE);
@@ -97,7 +97,7 @@ public class TestPulsingReuse extends LuceneTestCase {
     AtomicReader segment = getOnlySegmentReader(ir);
     DocsEnum reuse = null;
-    Map allEnums = new IdentityHashMap();
+    Map allEnums = new IdentityHashMap<>();
     TermsEnum te = segment.terms("foo").iterator(null);
     while (te.next() != null) {
       reuse = te.docs(null, reuse, DocsEnum.FLAG_NONE);
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java b/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
index a5cf57609e0..da56a5e064a 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/Analyzer.java
@@ -73,7 +73,7 @@ public abstract class Analyzer implements Closeable {
   private final ReuseStrategy reuseStrategy;
   // non final as it gets nulled if closed; pkg private for access by ReuseStrategy's final helper methods:
-  CloseableThreadLocal storedValue = new CloseableThreadLocal();
+  CloseableThreadLocal storedValue = new CloseableThreadLocal<>();
   /**
    * Create a new Analyzer, reusing the same set of components per-thread
@@ -417,7 +417,7 @@ public abstract class Analyzer implements Closeable {
     public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components) {
       Map componentsPerField = (Map) getStoredValue(analyzer);
       if (componentsPerField == null) {
-        componentsPerField = new HashMap();
+        componentsPerField = new HashMap<>();
         setStoredValue(analyzer, componentsPerField);
       }
       componentsPerField.put(fieldName, components);
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java b/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java
index e71452af165..aef4a70feb9 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/CachingTokenFilter.java
@@ -51,7 +51,7 @@ public final class CachingTokenFilter extends TokenFilter {
   public final boolean incrementToken() throws IOException {
     if (cache == null) {
       // fill cache lazily
-      cache = new LinkedList();
+      cache = new LinkedList<>();
       fillCache();
       iterator = cache.iterator();
     }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java
index 0b41ac37d5a..396315bb492 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsReader.java
@@ -96,7 +96,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
   // produce DocsEnum on demand
   private final PostingsReaderBase postingsReader;
-  private final TreeMap fields = new TreeMap();
+  private final TreeMap fields = new TreeMap<>();
   /** File offset where the directory starts in the terms file. */
   private long dirOffset;
@@ -474,7 +474,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
       final IndexInput clone = indexIn.clone();
       //System.out.println("start=" + indexStartFP + " field=" + fieldInfo.name);
       clone.seek(indexStartFP);
-      index = new FST(clone, ByteSequenceOutputs.getSingleton());
+      index = new FST<>(clone, ByteSequenceOutputs.getSingleton());
       /*
       if (false) {
@@ -848,7 +848,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
         stack[idx] = new Frame(idx);
       }
       for(int arcIdx=0;arcIdx<arcs.length;arcIdx++) {
-        arcs[arcIdx] = new FST.Arc();
+        arcs[arcIdx] = new FST.Arc<>();
       }
       if (index == null) {
@@ -917,7 +917,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
           new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
         System.arraycopy(arcs, 0, next, 0, arcs.length);
         for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) {
-          next[arcOrd] = new FST.Arc();
+          next[arcOrd] = new FST.Arc<>();
         }
         arcs = next;
       }
@@ -1299,7 +1299,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
       // Init w/ root block; don't use index since it may
       // not (and need not) have been loaded
       for(int arcIdx=0;arcIdx<arcs.length;arcIdx++) {
-        arcs[arcIdx] = new FST.Arc();
+        arcs[arcIdx] = new FST.Arc<>();
       }
       currentFrame = staticFrame;
@@ -1441,7 +1441,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
          new FST.Arc[ArrayUtil.oversize(1+ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
         System.arraycopy(arcs, 0, next, 0, arcs.length);
         for(int arcOrd=arcs.length;arcOrd<next.length;arcOrd++) {
-          next[arcOrd] = new FST.Arc();
+          next[arcOrd] = new FST.Arc<>();
         }
         arcs = next;
       }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java
index 55290012dd7..f363ba0052c 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTreeTermsWriter.java
@@ -264,7 +264,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer implements Closeable {
     }
   }
-  private final List fields = new ArrayList();
+  private final List fields = new ArrayList<>();
   // private final String segment;
   /** Create a new writer.  The number of items (terms or
@@ -462,7 +462,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer implements Closeable {
       }
       final ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
-      final Builder indexBuilder = new Builder(FST.INPUT_TYPE.BYTE1,
+      final Builder indexBuilder = new Builder<>(FST.INPUT_TYPE.BYTE1,
           0, 0, true, false, Integer.MAX_VALUE, outputs, null, false, PackedInts.COMPACT, true, 15);
@@ -510,7 +510,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer implements Closeable {
     // Builder?  Takes FST and unions it w/ current
     // FST.
     private void append(Builder builder, FST subIndex) throws IOException {
-      final BytesRefFSTEnum subIndexEnum = new BytesRefFSTEnum(subIndex);
+      final BytesRefFSTEnum subIndexEnum = new BytesRefFSTEnum<>(subIndex);
       BytesRefFSTEnum.InputOutput indexEnt;
       while((indexEnt = subIndexEnum.next()) != null) {
         //if (DEBUG) {
@@ -538,7 +538,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer implements Closeable {
     private final Builder blockBuilder;
     // PendingTerm or PendingBlock:
-    private final List pending = new ArrayList();
+    private final List pending = new ArrayList<>();
     // Index into pending of most recently written block
     private int lastBlockIndex = -1;
@@ -593,7 +593,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer implements Closeable {
           // stragglers!  carry count upwards
           node.inputCount = totCount;
         }
-        frontier[idx] = new Builder.UnCompiledNode(blockBuilder, idx);
+        frontier[idx] = new Builder.UnCompiledNode<>(blockBuilder, idx);
       }
     }
   }
@@ -743,7 +743,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer implements Closeable {
         int curStart = count;
         subCount = 0;
-        final List floorBlocks = new ArrayList();
+        final List floorBlocks = new ArrayList<>();
         PendingBlock firstBlock = null;
         for(int sub=0;sub
-          subIndices = new ArrayList>();
+          subIndices = new ArrayList<>();
         termCount = 0;
         for (PendingEntry ent : slice) {
           if (ent.isTerm) {
@@ -1042,7 +1042,7 @@ public class BlockTreeTermsWriter extends FieldsConsumer implements Closeable {
       // This Builder is just used transiently to fragment
       // terms into "good" blocks; we don't save the
       // resulting FST:
-      blockBuilder = new Builder(FST.INPUT_TYPE.BYTE1,
+      blockBuilder = new Builder<>(FST.INPUT_TYPE.BYTE1,
           0, 0, true, true, Integer.MAX_VALUE,
          noOutputs,
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
index 72b2e1e0a0b..757f3439e6a 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/Codec.java
@@ -39,7 +39,7 @@ import org.apache.lucene.util.NamedSPILoader;
 public abstract class Codec implements NamedSPILoader.NamedSPI {
   private static final NamedSPILoader loader =
-    new NamedSPILoader(Codec.class);
+    new NamedSPILoader<>(Codec.class);
   private final String name;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesFormat.java
index 2e47bb101c7..1b556c0895a 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/DocValuesFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/DocValuesFormat.java
@@ -41,7 +41,7 @@ import org.apache.lucene.util.NamedSPILoader;
 public abstract class DocValuesFormat implements NamedSPILoader.NamedSPI {
   private static final NamedSPILoader loader =
-    new NamedSPILoader(DocValuesFormat.class);
+    new NamedSPILoader<>(DocValuesFormat.class);
   /** Unique name that's used to retrieve this format when
    *  reading the index.
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsFormat.java
index f9b676f62f9..2b145c689b3 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsFormat.java
@@ -42,7 +42,7 @@ import org.apache.lucene.util.NamedSPILoader;
 public abstract class PostingsFormat implements NamedSPILoader.NamedSPI {
   private static final NamedSPILoader loader =
-    new NamedSPILoader(PostingsFormat.class);
+    new NamedSPILoader<>(PostingsFormat.class);
   /** Zero-length {@code PostingsFormat} array. */
   public static final PostingsFormat[] EMPTY = new PostingsFormat[0];
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
index b3b466276d3..9b05c14a34e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
@@ -92,7 +92,7 @@ public final class CompressingTermVectorsWriter extends TermVectorsWriter {
     final int posStart, offStart, payStart;
     DocData(int numFields, int posStart, int offStart, int payStart) {
       this.numFields = numFields;
-      this.fields = new ArrayDeque(numFields);
+      this.fields = new ArrayDeque<>(numFields);
       this.posStart = posStart;
       this.offStart = offStart;
       this.payStart = payStart;
@@ -214,7 +214,7 @@ public final class CompressingTermVectorsWriter extends TermVectorsWriter {
     this.chunkSize = chunkSize;
     numDocs = 0;
-    pendingDocs = new ArrayDeque();
+    pendingDocs = new ArrayDeque<>();
     termSuffixes = new GrowableByteArrayDataOutput(ArrayUtil.oversize(chunkSize, 1));
     payloadBytes = new GrowableByteArrayDataOutput(ArrayUtil.oversize(1, 1));
     lastTerm = new BytesRef(ArrayUtil.oversize(30, 1));
@@ -393,7 +393,7 @@ public final class CompressingTermVectorsWriter extends TermVectorsWriter {
   /** Returns a sorted array containing unique field numbers */
   private int[] flushFieldNums() throws IOException {
-    SortedSet fieldNums = new TreeSet();
+    SortedSet fieldNums = new TreeSet<>();
     for (DocData dd : pendingDocs) {
       for (FieldData fd : dd.fields) {
         fieldNums.add(fd.fieldNum);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesReader.java
index 9913da98419..a5b545266c5 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesReader.java
@@ -57,11 +57,11 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
   // ram instances we have already loaded
   private final Map numericInstances =
-      new HashMap();
+      new HashMap<>();
   private final Map binaryInstances =
-      new HashMap();
+      new HashMap<>();
   private final Map sortedInstances =
-      new HashMap();
+      new HashMap<>();
   private final AtomicLong ramBytesUsed;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java
index 8de01a94aac..fc830ce869b 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsReader.java
@@ -221,7 +221,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader implements Clos
   private class TVFields extends Fields {
     private final int[] fieldNumbers;
     private final long[] fieldFPs;
-    private final Map fieldNumberToIndex = new HashMap();
+    private final Map fieldNumberToIndex = new HashMap<>();
     public TVFields(int docID) throws IOException {
       seekTvx(docID);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
index ff555690aec..2a28eeb5936 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesProducer.java
@@ -67,11 +67,11 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
   // ram instances we have already loaded
   private final Map numericInstances =
-      new HashMap();
+      new HashMap<>();
   private final Map binaryInstances =
-      new HashMap();
+      new HashMap<>();
   private final Map> fstInstances =
-      new HashMap>();
+      new HashMap<>();
   private final int maxDoc;
   private final AtomicLong ramBytesUsed;
@@ -103,9 +103,9 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
       version = CodecUtil.checkHeader(in, metaCodec, VERSION_START, VERSION_CURRENT);
-      numerics = new HashMap();
-      binaries = new HashMap();
-      fsts = new HashMap();
+      numerics = new HashMap<>();
+      binaries = new HashMap<>();
+      fsts = new HashMap<>();
       readFields(in, state.fieldInfos);
       success = true;
@@ -297,7 +297,7 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
       instance = fstInstances.get(field.number);
       if (instance == null) {
         data.seek(entry.offset);
-        instance = new FST(data, PositiveIntOutputs.getSingleton());
+        instance = new FST<>(data, PositiveIntOutputs.getSingleton());
         ramBytesUsed.addAndGet(instance.sizeInBytes());
         fstInstances.put(field.number, instance);
       }
@@ -307,10 +307,10 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
     // per-thread resources
     final BytesReader in = fst.getBytesReader();
-    final Arc firstArc = new Arc();
-    final Arc scratchArc = new Arc();
+    final Arc firstArc = new Arc<>();
+    final Arc scratchArc = new Arc<>();
     final IntsRef scratchInts = new IntsRef();
-    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst);
+    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(fst);
     return new SortedDocValues() {
       @Override
@@ -372,7 +372,7 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
       instance = fstInstances.get(field.number);
       if (instance == null) {
         data.seek(entry.offset);
-        instance = new FST(data, PositiveIntOutputs.getSingleton());
+        instance = new FST<>(data, PositiveIntOutputs.getSingleton());
         ramBytesUsed.addAndGet(instance.sizeInBytes());
         fstInstances.put(field.number, instance);
       }
@@ -382,10 +382,10 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
     // per-thread resources
     final BytesReader in = fst.getBytesReader();
-    final Arc firstArc = new Arc();
-    final Arc scratchArc = new Arc();
+    final Arc firstArc = new Arc<>();
+    final Arc scratchArc = new Arc<>();
     final IntsRef scratchInts = new IntsRef();
-    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst);
+    final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(fst);
     final BytesRef ref = new BytesRef();
     final ByteArrayDataInput input = new ByteArrayDataInput();
     return new SortedSetDocValues() {
@@ -493,14 +493,14 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
     // maybe we should add a FSTEnum that supports this operation?
     final FST fst;
     final FST.BytesReader bytesReader;
-    final Arc firstArc = new Arc();
-    final Arc scratchArc = new Arc();
+    final Arc firstArc = new Arc<>();
+    final Arc scratchArc = new Arc<>();
     final IntsRef scratchInts = new IntsRef();
     final BytesRef scratchBytes = new BytesRef();
     FSTTermsEnum(FST fst) {
       this.fst = fst;
-      in = new BytesRefFSTEnum(fst);
+      in = new BytesRefFSTEnum<>(fst);
       bytesReader = fst.getBytesReader();
     }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsConsumer.java
index 797dd807992..4c87bce59a9 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene42/Lucene42NormsConsumer.java
@@ -131,7 +131,7 @@ class Lucene42NormsConsumer extends DocValuesConsumer {
     } else {
       meta.writeByte(TABLE_COMPRESSED); // table-compressed
       Long[] decode = uniqueValues.toArray(new Long[uniqueValues.size()]);
-      final HashMap encode = new HashMap();
+      final HashMap encode = new HashMap<>();
       data.writeVInt(decode.length);
       for (int i = 0; i < decode.length; i++) {
         data.writeLong(decode[i]);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesConsumer.java
index c6652cf3ef5..aa6cdb85634 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesConsumer.java
@@ -189,7 +189,7 @@ public class Lucene45DocValuesConsumer extends DocValuesConsumer implements Clos
         break;
       case TABLE_COMPRESSED:
         final Long[] decode = uniqueValues.toArray(new Long[uniqueValues.size()]);
-        final HashMap encode = new HashMap();
+        final HashMap encode = new HashMap<>();
         meta.writeVInt(decode.length);
         for (int i = 0; i < decode.length; i++) {
           meta.writeLong(decode[i]);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java
index 25c3842424f..d1acdafbfbb 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene45/Lucene45DocValuesProducer.java
@@ -73,8 +73,8 @@ public class Lucene45DocValuesProducer extends DocValuesProducer implements Clos
   private final int version;
   // memory-resident structures
-  private final Map addressInstances = new HashMap();
-  private final Map ordIndexInstances = new HashMap();
+  private final Map addressInstances = new HashMap<>();
+  private final Map ordIndexInstances = new HashMap<>();
   /** expert: instantiates a new reader */
   protected Lucene45DocValuesProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
@@ -87,11 +87,11 @@ public class Lucene45DocValuesProducer extends DocValuesProducer implements Clos
       version = CodecUtil.checkHeader(in, metaCodec, Lucene45DocValuesFormat.VERSION_START, Lucene45DocValuesFormat.VERSION_CURRENT);
-      numerics = new HashMap();
-      ords = new HashMap();
-      ordIndexes = new HashMap();
-      binaries = new HashMap();
-      sortedSets = new HashMap();
+      numerics = new HashMap<>();
+      ords = new HashMap<>();
+      ordIndexes = new HashMap<>();
+      binaries = new HashMap<>();
+      sortedSets = new HashMap<>();
       readFields(in, state.fieldInfos);
       success = true;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java
index 2021bce0ecf..cf960ac63d4 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldDocValuesFormat.java
@@ -92,8 +92,8 @@ public abstract class PerFieldDocValuesFormat extends DocValuesFormat {
   private class FieldsWriter extends DocValuesConsumer {
-    private final Map formats = new HashMap();
-    private final Map suffixes = new HashMap();
+    private final Map formats = new HashMap<>();
+    private final Map suffixes = new HashMap<>();
     private final SegmentWriteState segmentWriteState;
@@ -209,8 +209,8 @@ public abstract class PerFieldDocValuesFormat extends DocValuesFormat {
   private class FieldsReader extends DocValuesProducer {
-    private final Map fields = new TreeMap();
-    private final Map formats = new HashMap();
+    private final Map fields = new TreeMap<>();
+    private final Map formats = new HashMap<>();
     public FieldsReader(final SegmentReadState readState) throws IOException {
@@ -245,7 +245,7 @@ public abstract class PerFieldDocValuesFormat extends DocValuesFormat {
     private FieldsReader(FieldsReader other) {
-      Map oldToNew = new IdentityHashMap();
+      Map oldToNew = new IdentityHashMap<>();
       // First clone all formats
       for(Map.Entry ent : other.formats.entrySet()) {
         DocValuesProducer values = ent.getValue();
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
index a472af77633..0091a9ed1a3 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
@@ -75,7 +75,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
   /** Group of fields written by one PostingsFormat */
   static class FieldsGroup {
-    final Set fields = new TreeSet();
+    final Set fields = new TreeSet<>();
     int suffix;
     /** Custom SegmentWriteState for this group of fields,
@@ -111,10 +111,10 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
     // Maps a PostingsFormat instance to the suffix it
     // should use
-    Map formatToGroups = new HashMap();
+    Map formatToGroups = new HashMap<>();
     // Holds last suffix of each PostingFormat name
-    Map suffixes = new HashMap();
+    Map suffixes = new HashMap<>();
     // First pass: assign field -> PostingsFormat
     for(String field : fields) {
@@ -182,8 +182,8 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
   private class FieldsReader extends FieldsProducer {
-    private final Map fields = new TreeMap();
-    private final Map formats = new HashMap();
+    private final Map fields = new TreeMap<>();
+    private final Map formats = new HashMap<>();
     public FieldsReader(final SegmentReadState readState) throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/document/Document.java b/lucene/core/src/java/org/apache/lucene/document/Document.java
index 1ce27aca6a0..d63fd09acec 100644
--- a/lucene/core/src/java/org/apache/lucene/document/Document.java
+++ b/lucene/core/src/java/org/apache/lucene/document/Document.java
@@ -44,7 +44,7 @@ import org.apache.lucene.util.FilterIterator;
 public final class Document implements IndexDocument {
-  private final List fields = new ArrayList();
+  private final List fields = new ArrayList<>();
   /** Constructs a new document with no fields. */
   public Document() {}
@@ -140,7 +140,7 @@ public final class Document implements IndexDocument {
    * @return a BytesRef[] of binary field values
    */
   public final BytesRef[] getBinaryValues(String name) {
-    final List result = new ArrayList();
+    final List result = new ArrayList<>();
     for (Iterator it = storedFieldsIterator(); it.hasNext(); ) {
       StorableField field = it.next();
@@ -199,7 +199,7 @@ public final class Document implements IndexDocument {
    * @return a Field[] array
    */
   public Field[] getFields(String name) {
-    List result = new ArrayList();
+    List result = new ArrayList<>();
     for (Field field : fields) {
       if (field.name().equals(name)) {
         result.add(field);
@@ -234,7 +234,7 @@ public final class Document implements IndexDocument {
    * @return a String[] of field values
    */
   public final String[] getValues(String name) {
-    List result = new ArrayList();
+    List result = new ArrayList<>();
     for (Iterator it = storedFieldsIterator(); it.hasNext(); ) {
       StorableField field = it.next();
diff --git a/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java b/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java
index b9ed951f898..c01b5ce0245 100644
--- a/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java
+++ b/lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java
@@ -49,7 +49,7 @@ public class DocumentStoredFieldVisitor extends StoredFieldVisitor {
   /** Load only fields named in the provided fields. */
   public DocumentStoredFieldVisitor(String... fields) {
-    fieldsToAdd = new HashSet(fields.length);
+    fieldsToAdd = new HashSet<>(fields.length);
     for(String field : fields) {
       fieldsToAdd.add(field);
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
index 8600a9193ae..245ff8ef22f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java
@@ -95,9 +95,9 @@ class BufferedUpdates {
   final AtomicInteger numTermDeletes = new AtomicInteger();
   final AtomicInteger numNumericUpdates = new AtomicInteger();
-  final Map terms = new HashMap();
-  final Map queries = new HashMap();
-  final List docIDs = new ArrayList();
+  final Map terms = new HashMap<>();
+  final Map queries = new HashMap<>();
+  final List docIDs = new ArrayList<>();
   // Map>
   // For each field we keep an ordered list of NumericUpdates, key'd by the
   // one that came in wins), and helps us detect faster if the same Term is
   // used to update the same field multiple times (so we later traverse it
   // only once).
-  final Map> numericUpdates = new HashMap>();
+  final Map> numericUpdates = new HashMap<>();
   public static final Integer MAX_INT = Integer.valueOf(Integer.MAX_VALUE);
@@ -187,7 +187,7 @@ class BufferedUpdates {
   public void addNumericUpdate(NumericUpdate update, int docIDUpto) {
     LinkedHashMap fieldUpdates = numericUpdates.get(update.field);
     if (fieldUpdates == null) {
-      fieldUpdates = new LinkedHashMap();
+      fieldUpdates = new LinkedHashMap<>();
       numericUpdates.put(update.field, fieldUpdates);
       bytesUsed.addAndGet(BYTES_PER_NUMERIC_FIELD_ENTRY);
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
index 009b98203a7..b20c5a9f559 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
@@ -54,7 +54,7 @@ import org.apache.lucene.util.InfoStream;
 class BufferedUpdatesStream {
   // TODO: maybe linked list?
-  private final List updates = new ArrayList();
+  private final List updates = new ArrayList<>();
   // Starts at 1 so that SegmentInfos that have never had
   // deletes applied (whose bufferedDelGen defaults to 0)
@@ -167,7 +167,7 @@ class BufferedUpdatesStream {
     final long gen = nextGen++;
-    List infos2 = new ArrayList();
+    List infos2 = new ArrayList<>();
     infos2.addAll(infos);
     Collections.sort(infos2, sortSegInfoByDelGen);
@@ -240,7 +240,7 @@ class BufferedUpdatesStream {
           if (segAllDeletes) {
             if (allDeleted == null) {
-              allDeleted = new ArrayList();
+              allDeleted = new ArrayList<>();
             }
             allDeleted.add(info);
           }
@@ -290,7 +290,7 @@ class BufferedUpdatesStream {
           if (segAllDeletes) {
             if (allDeleted == null) {
-              allDeleted = new ArrayList();
+              allDeleted = new ArrayList<>();
             }
             allDeleted.add(info);
           }
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index 4b915f1635f..fc3951a5aab 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -92,13 +92,13 @@ public class CheckIndex {
     /** Empty unless you passed specific segments list to check as optional 3rd argument.
      *  @see CheckIndex#checkIndex(List) */
-    public List segmentsChecked = new ArrayList();
+    public List segmentsChecked = new ArrayList<>();
     /** True if the index was created with a newer version of Lucene than the CheckIndex tool. */
     public boolean toolOutOfDate;
     /** List of {@link SegmentInfoStatus} instances, detailing status of each segment. */
-    public List segmentInfos = new ArrayList();
+    public List segmentInfos = new ArrayList<>();
    /** Directory index is in. */
    public Directory dir;
@@ -1069,7 +1069,7 @@ public class CheckIndex {
         final BlockTreeTermsReader.Stats stats = ((BlockTreeTermsReader.FieldReader) fieldTerms).computeStats();
         assert stats != null;
         if (status.blockTreeStats == null) {
-          status.blockTreeStats = new HashMap();
+          status.blockTreeStats = new HashMap<>();
         }
         status.blockTreeStats.put(field, stats);
       }
@@ -1831,7 +1831,7 @@ public class CheckIndex {
     boolean doFix = false;
     boolean doCrossCheckTermVectors = false;
     boolean verbose = false;
-    List onlySegments = new ArrayList();
+    List onlySegments = new ArrayList<>();
     String indexPath = null;
     String dirImpl = null;
     int i = 0;
diff --git a/lucene/core/src/java/org/apache/lucene/index/CoalescedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/CoalescedUpdates.java
index 687386cea25..8a0bbd35b18 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CoalescedUpdates.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CoalescedUpdates.java
@@ -28,9 +28,9 @@ import org.apache.lucene.index.BufferedUpdatesStream.QueryAndLimit;
 import org.apache.lucene.util.MergedIterator;
 class CoalescedUpdates {
-  final Map queries = new HashMap();
-  final List> iterables = new ArrayList>();
-  final List numericDVUpdates = new ArrayList();
+  final Map queries = new HashMap<>();
+  final List> iterables = new ArrayList<>();
+  final List numericDVUpdates = new ArrayList<>();
   @Override
   public String toString() {
@@ -62,7 +62,7 @@ class CoalescedUpdates {
         for (int i = 0; i < iterables.size(); i++) {
           subs[i] = iterables.get(i).iterator();
         }
-        return new MergedIterator(subs);
+        return new MergedIterator<>(subs);
       }
     };
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java b/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java
index 0c51872085a..d892e184c79 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CompositeReaderContext.java
@@ -80,7 +80,7 @@ public final class CompositeReaderContext extends IndexReaderContext {
   private static final class Builder {
     private final CompositeReader reader;
-    private final List leaves = new ArrayList();
+    private final List leaves = new ArrayList<>();
     private int leafDocBase = 0;
     public Builder(CompositeReader reader) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
index 73e726155ec..06615894eb3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
@@ -47,7 +47,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
   private int mergeThreadPriority = -1;
   /** List of currently active {@link MergeThread}s. */
-  protected List mergeThreads = new ArrayList();
+  protected List mergeThreads = new ArrayList<>();
   /**
    * Default {@code maxThreadCount}.
@@ -171,7 +171,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
     // Only look at threads that are alive & not in the
     // process of stopping (ie have an active merge):
-    final List activeMerges = new ArrayList();
+    final List activeMerges = new ArrayList<>();
     int threadIdx = 0;
     while (threadIdx < mergeThreads.size()) {
@@ -571,7 +571,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
     ConcurrentMergeScheduler clone = (ConcurrentMergeScheduler) super.clone();
     clone.writer = null;
     clone.dir = null;
-    clone.mergeThreads = new ArrayList();
+    clone.mergeThreads = new ArrayList<>();
     return clone;
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
index e2a61213594..84c721d5199 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -224,7 +224,7 @@ public abstract class DirectoryReader extends BaseCompositeReader
   public static List listCommits(Directory dir) throws IOException {
     final String[] files = dir.listAll();
-    List commits = new ArrayList();
+    List commits = new ArrayList<>();
     SegmentInfos latest = new SegmentInfos();
     latest.read(dir);
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
index 23c60a09c25..0ecfcdb1687 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
@@ -69,7 +69,7 @@ final class DocFieldProcessor extends DocConsumer {
   @Override
   public void flush(SegmentWriteState state) throws IOException {
-    Map childFields = new HashMap();
+    Map childFields = new HashMap<>();
     Collection fields = fields();
     for (DocFieldConsumerPerField f : fields) {
       childFields.put(f.getFieldInfo().name, f);
@@ -132,7 +132,7 @@ final class DocFieldProcessor extends DocConsumer {
   }
   public Collection fields() {
-    Collection fields = new HashSet();
+    Collection fields = new HashSet<>();
     for(int i=0;i
 fieldsToFlush, SegmentWriteState state) throws IOException {
-    Map childFieldsToFlush = new HashMap();
-    Map endChildFieldsToFlush = new HashMap();
+    Map childFieldsToFlush = new HashMap<>();
+    Map endChildFieldsToFlush = new HashMap<>();
     for (Map.Entry fieldToFlush : fieldsToFlush.entrySet()) {
       DocInverterPerField perField = (DocInverterPerField) fieldToFlush.getValue();
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java b/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
index 42f1b2164f7..8358312c8dc 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocTermOrds.java
@@ -342,7 +342,7 @@ public class DocTermOrds {
     } catch (UnsupportedOperationException uoe) {
       // Reader cannot provide ord support, so we wrap
       // our own support by creating our own terms index:
-      indexedTerms = new ArrayList();
+      indexedTerms = new ArrayList<>();
       indexedTermsBytes = new PagedBytes(15);
       //System.out.println("NO ORDS");
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocValuesProcessor.java b/lucene/core/src/java/org/apache/lucene/index/DocValuesProcessor.java
index cb1b30154dd..29e5570550b 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocValuesProcessor.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocValuesProcessor.java
@@ -33,7 +33,7 @@ final class DocValuesProcessor extends StoredFieldsConsumer {
   // TODO: somewhat wasteful we also keep a map here; would
   // be more efficient if we could "reuse" the map/hash
   // lookup DocFieldProcessor already did "above"
-  private final Map writers = new HashMap();
+  private final Map writers = new HashMap<>();
   private final Counter bytesUsed;
   public DocValuesProcessor(Counter bytesUsed) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
index 2c40549384f..4ad6748ca7e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriter.java
@@ -135,7 +135,7 @@ final class DocumentsWriter {
     this.perThreadPool = config.getIndexerThreadPool();
     flushPolicy = config.getFlushPolicy();
     this.writer = writer;
-    this.events = new ConcurrentLinkedQueue();
+    this.events = new ConcurrentLinkedQueue<>();
     flushControl = new DocumentsWriterFlushControl(this, config, writer.bufferedUpdatesStream);
   }
@@ -207,7 +207,7 @@ final class DocumentsWriter {
   synchronized void abort(IndexWriter writer) {
     assert !Thread.holdsLock(writer) : "IndexWriter lock should never be hold when aborting";
     boolean success = false;
-    final Set newFilesSet = new HashSet();
+    final Set newFilesSet = new HashSet<>();
     try {
       deleteQueue.clear();
       if (infoStream.isEnabled("DW")) {
@@ -243,7 +243,7 @@ final class DocumentsWriter {
     try {
       deleteQueue.clear();
       final int limit = perThreadPool.getMaxThreadStates();
-      final Set newFilesSet = new HashSet();
+      final Set newFilesSet = new HashSet<>();
       for (int i = 0; i < limit; i++) {
         final ThreadState perThread = perThreadPool.getThreadState(i);
         perThread.lock();
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java
index b36531fdaf7..f951cdff25d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterDeleteQueue.java
@@ -93,7 +93,7 @@ final class DocumentsWriterDeleteQueue {
    * we use a sentinel instance as our initial tail. No slice will ever try to
    * apply this tail since the head is always omitted.
    */
-    tail = new Node(null); // sentinel
+    tail = new Node<>(null); // sentinel
     globalSlice = new DeleteSlice(tail);
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
index bd03c4eae40..d6ec30d739c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
@@ -51,10 +51,10 @@ final class DocumentsWriterFlushControl {
   private int numDocsSinceStalled = 0; // only with assert
   final AtomicBoolean flushDeletes = new AtomicBoolean(false);
   private boolean fullFlush = false;
-  private final Queue flushQueue = new LinkedList();
+  private final Queue flushQueue = new LinkedList<>();
   // only for safety reasons if a DWPT is close to the RAM limit
-  private final Queue blockedFlushes = new LinkedList();
-  private final IdentityHashMap flushingWriters = new IdentityHashMap();
+  private final Queue blockedFlushes = new LinkedList<>();
+  private final IdentityHashMap flushingWriters = new IdentityHashMap<>();
   double maxConfiguredRamBuffer = 0;
@@ -531,7 +531,7 @@ final class DocumentsWriterFlushControl {
     return true;
   }
-  private final List fullFlushBuffer = new ArrayList();
+  private final List fullFlushBuffer = new ArrayList<>();
   void addFlushableState(ThreadState perThread) {
     if (infoStream.isEnabled("DWFC")) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java
index 110ff2ad20a..898f2cd8c63 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterFlushQueue.java
@@ -28,7 +28,7 @@ import org.apache.lucene.index.DocumentsWriterPerThread.FlushedSegment;
  * @lucene.internal
  */
 class DocumentsWriterFlushQueue {
-  private final Queue queue = new LinkedList();
+  private final Queue queue = new LinkedList<>();
   // we track tickets separately since count must be present even before the ticket is
   // constructed ie. queue.size would not reflect it.
   private final AtomicInteger ticketCount = new AtomicInteger();
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index a50bd1b8644..9a108436862 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -464,7 +464,7 @@ class DocumentsWriterPerThread {
     try {
       consumer.flush(flushState);
       pendingUpdates.terms.clear();
-      segmentInfo.setFiles(new HashSet(directory.getCreatedFiles()));
+      segmentInfo.setFiles(new HashSet<>(directory.getCreatedFiles()));
       final SegmentCommitInfo segmentInfoPerCommit = new SegmentCommitInfo(segmentInfo, 0, -1L, -1L);
       if (infoStream.isEnabled("DWPT")) {
@@ -510,7 +510,7 @@ class DocumentsWriterPerThread {
     }
   }
-  private final Set filesToDelete = new HashSet();
+  private final Set filesToDelete = new HashSet<>();
   public Set pendingFilesToDelete() {
     return filesToDelete;
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterStallControl.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterStallControl.java
index cfdb21867cb..9cfe6a533eb 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterStallControl.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterStallControl.java
@@ -42,7 +42,7 @@ final class DocumentsWriterStallControl {
   private volatile boolean stalled;
   private int numWaiting; // only with assert
   private boolean wasStalled; // only with assert
-  private final Map waiting = new IdentityHashMap(); // only with assert
+  private final Map waiting = new IdentityHashMap<>(); // only with assert
   /**
    * Update the stalled flag status.  This method will set the stalled flag to
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
index 8c156bc7053..00b5b8b5cf7 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfo.java
@@ -324,7 +324,7 @@ public final class FieldInfo {
    */
   public String putAttribute(String key, String value) {
     if (attributes == null) {
-      attributes = new HashMap();
+      attributes = new HashMap<>();
     }
     return attributes.put(key, value);
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
index bcee7f26f7b..1159710761e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
@@ -41,8 +41,8 @@ public class FieldInfos implements Iterable {
   private final boolean hasNorms;
   private final boolean hasDocValues;
-  private final SortedMap byNumber = new TreeMap();
-  private final HashMap byName = new HashMap();
+  private final SortedMap byNumber = new TreeMap<>();
+  private final HashMap byName = new HashMap<>();
   private final Collection values; // for an unmodifiable iterator
   /**
@@ -174,9 +174,9 @@ public class FieldInfos implements Iterable {
     private int lowestUnassignedFieldNumber = -1;
     FieldNumbers() {
-      this.nameToNumber = new HashMap();
-      this.numberToName = new HashMap();
-      this.docValuesType = new HashMap();
+      this.nameToNumber = new HashMap<>();
+      this.numberToName = new HashMap<>();
+      this.docValuesType = new HashMap<>();
     }
     /**
@@ -250,7 +250,7 @@ public class FieldInfos implements Iterable {
   }
   static final class Builder {
-    private final HashMap byName = new HashMap();
+    private final HashMap byName = new HashMap<>();
     final FieldNumbers globalFieldNumbers;
     Builder() {
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
index 605dbfd5316..84d8c2e02d9 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
@@ -35,7 +35,7 @@ import org.apache.lucene.util.BytesRef;
  * PostingsFormat.
*/ class FreqProxFields extends Fields { - final Map fields = new LinkedHashMap(); + final Map fields = new LinkedHashMap<>(); public FreqProxFields(List fieldList) { // NOTE: fields are already sorted by field name diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java index 1df0b10d17b..82d7dc88022 100644 --- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java +++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java @@ -36,7 +36,7 @@ final class FreqProxTermsWriter extends TermsHashConsumer { // flushed segment: if (state.segUpdates != null && state.segUpdates.terms.size() > 0) { Map segDeletes = state.segUpdates.terms; - List deleteTerms = new ArrayList(segDeletes.keySet()); + List deleteTerms = new ArrayList<>(segDeletes.keySet()); Collections.sort(deleteTerms); String lastField = null; TermsEnum termsEnum = null; @@ -87,7 +87,7 @@ final class FreqProxTermsWriter extends TermsHashConsumer { // Gather all FieldData's that have postings, across all // ThreadStates - List allFields = new ArrayList(); + List allFields = new ArrayList<>(); for (TermsHashConsumerPerField f : fieldsToFlush.values()) { final FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) f; diff --git a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java index c0d52b3e73e..d23139e4278 100644 --- a/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java +++ b/lucene/core/src/java/org/apache/lucene/index/FrozenBufferedUpdates.java @@ -83,7 +83,7 @@ class FrozenBufferedUpdates { // so that it maps to all fields it affects, sorted by their docUpto, and traverse // that Term only once, applying the update to all fields that still need to be // updated. - List allUpdates = new ArrayList(); + List allUpdates = new ArrayList<>(); int numericUpdatesSize = 0; for (LinkedHashMap fieldUpdates : deletes.numericUpdates.values()) { for (NumericUpdate update : fieldUpdates.values()) { diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java index c2f02eb53b1..db7d11f2469 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexFileDeleter.java @@ -81,21 +81,21 @@ final class IndexFileDeleter implements Closeable { /* Reference count for all files in the index. * Counts how many existing commits reference a file. **/ - private Map refCounts = new HashMap(); + private Map refCounts = new HashMap<>(); /* Holds all commits (segments_N) currently in the index. * This will have just 1 commit if you are using the * default delete policy (KeepOnlyLastCommitDeletionPolicy). 
* Other policies may leave commit points live for longer * in which case this list would be longer than 1: */ - private List commits = new ArrayList(); + private List commits = new ArrayList<>(); /* Holds files we had incref'd from the previous * non-commit checkpoint: */ - private final List lastFiles = new ArrayList(); + private final List lastFiles = new ArrayList<>(); /* Commits that the IndexDeletionPolicy have decided to delete: */ - private List commitsToDelete = new ArrayList(); + private List commitsToDelete = new ArrayList<>(); private final InfoStream infoStream; private Directory directory; @@ -597,7 +597,7 @@ final class IndexFileDeleter implements Closeable { infoStream.message("IFD", "unable to remove file \"" + fileName + "\": " + e.toString() + "; Will re-try later."); } if (deletable == null) { - deletable = new ArrayList(); + deletable = new ArrayList<>(); } deletable.add(fileName); // add to deletable } diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java index aee6ef689a8..4083a314989 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java @@ -235,7 +235,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ final IndexFileDeleter deleter; // used by forceMerge to note those needing merging - private Map segmentsToMerge = new HashMap(); + private Map segmentsToMerge = new HashMap<>(); private int mergeMaxNumSegments; private Lock writeLock; @@ -245,13 +245,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ // Holds all SegmentInfo instances currently involved in // merges - private HashSet mergingSegments = new HashSet(); + private HashSet mergingSegments = new HashSet<>(); private MergePolicy mergePolicy; private final MergeScheduler mergeScheduler; - private LinkedList pendingMerges = new LinkedList(); - private Set runningMerges = new HashSet(); - private List mergeExceptions = new ArrayList(); + private LinkedList pendingMerges = new LinkedList<>(); + private Set runningMerges = new HashSet<>(); + private List mergeExceptions = new ArrayList<>(); private long mergeGen; private boolean stopMerges; @@ -422,7 +422,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ class ReaderPool { - private final Map readerMap = new HashMap(); + private final Map readerMap = new HashMap<>(); // used only by asserts public synchronized boolean infoIsLive(SegmentCommitInfo info) { @@ -603,7 +603,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ // Make sure that every segment appears only once in the // pool: private boolean noDups() { - Set seen = new HashSet(); + Set seen = new HashSet<>(); for(SegmentCommitInfo info : readerMap.keySet()) { assert !seen.contains(info.info.name); seen.add(info.info.name); @@ -2346,12 +2346,12 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ } private synchronized void resetMergeExceptions() { - mergeExceptions = new ArrayList(); + mergeExceptions = new ArrayList<>(); mergeGen++; } private void noDupDirs(Directory... dirs) { - HashSet dups = new HashSet(); + HashSet dups = new HashSet<>(); for(int i=0;i acquireWriteLocks(Directory... 
dirs) throws IOException { - List locks = new ArrayList(); + List locks = new ArrayList<>(); for(int i=0;i infos = new ArrayList(); + List infos = new ArrayList<>(); boolean success = false; try { @@ -2567,7 +2567,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ flush(false, true); String mergedName = newSegmentName(); - final List mergeReaders = new ArrayList(); + final List mergeReaders = new ArrayList<>(); for (IndexReader indexReader : readers) { numDocs += indexReader.numDocs(); for (AtomicReaderContext ctx : indexReader.leaves()) { @@ -2606,7 +2606,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ SegmentCommitInfo infoPerCommit = new SegmentCommitInfo(info, 0, -1L, -1L); - info.setFiles(new HashSet(trackingDir.getCreatedFiles())); + info.setFiles(new HashSet<>(trackingDir.getCreatedFiles())); trackingDir.getCreatedFiles().clear(); setDiagnostics(info, SOURCE_ADDINDEXES_READERS); @@ -2684,7 +2684,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ info.info.getDiagnostics()); SegmentCommitInfo newInfoPerCommit = new SegmentCommitInfo(newInfo, info.getDelCount(), info.getDelGen(), info.getFieldInfosGen()); - Set segFiles = new HashSet(); + Set segFiles = new HashSet<>(); // Build up new segment's file names. Must do this // before writing SegmentInfo: @@ -2880,7 +2880,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ * contents after calling this method has no effect. */ public final synchronized void setCommitData(Map commitUserData) { - segmentInfos.setUserData(new HashMap(commitUserData)); + segmentInfos.setUserData(new HashMap<>(commitUserData)); ++changeCount; } @@ -3203,7 +3203,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ ReadersAndUpdates mergedDeletesAndUpdates = null; boolean initWritableLiveDocs = false; MergePolicy.DocMap docMap = null; - final Map mergedFieldUpdates = new HashMap(); + final Map mergedFieldUpdates = new HashMap<>(); for (int i = 0; i < sourceSegments.size(); i++) { SegmentCommitInfo info = sourceSegments.get(i); @@ -3857,7 +3857,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ // names. 
final String mergeSegmentName = newSegmentName(); SegmentInfo si = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergeSegmentName, -1, false, codec, null); - Map details = new HashMap(); + Map details = new HashMap<>(); details.put("mergeMaxNumSegments", "" + merge.maxNumSegments); details.put("mergeFactor", Integer.toString(merge.segments.size())); setDiagnostics(si, SOURCE_MERGE, details); @@ -3878,7 +3878,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ } private static void setDiagnostics(SegmentInfo info, String source, Map details) { - Map diagnostics = new HashMap(); + Map diagnostics = new HashMap<>(); diagnostics.put("source", source); diagnostics.put("lucene.version", Constants.LUCENE_VERSION); diagnostics.put("os", Constants.OS_NAME); @@ -3972,7 +3972,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ infoStream.message("IW", "merging " + segString(merge.segments)); } - merge.readers = new ArrayList(); + merge.readers = new ArrayList<>(); // This is try/finally to make sure merger's readers are // closed: @@ -4069,7 +4069,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ } } assert mergeState.segmentInfo == merge.info.info; - merge.info.info.setFiles(new HashSet(dirWrapper.getCreatedFiles())); + merge.info.info.setFiles(new HashSet<>(dirWrapper.getCreatedFiles())); // Record which codec was used to write the segment @@ -4316,7 +4316,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ // For infoStream output synchronized SegmentInfos toLiveInfos(SegmentInfos sis) { final SegmentInfos newSIS = new SegmentInfos(); - final Map liveSIS = new HashMap(); + final Map liveSIS = new HashMap<>(); for(SegmentCommitInfo info : segmentInfos) { liveSIS.put(info, info); } @@ -4608,7 +4608,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{ } // Replace all previous files with the CFS/CFE files: - Set siFiles = new HashSet(); + Set siFiles = new HashSet<>(); siFiles.add(fileName); siFiles.add(IndexFileNames.segmentFileName(info.name, "", IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION)); info.setFiles(siFiles); diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java index a3a305bebb8..e94e421e750 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java @@ -130,7 +130,7 @@ public final class IndexWriterConfig extends LiveIndexWriterConfig implements Cl // indicates whether this config instance is already attached to a writer. // not final so that it can be cloned properly. - private SetOnce writer = new SetOnce(); + private SetOnce writer = new SetOnce<>(); /** * Sets the {@link IndexWriter} this config is attached to. 
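Note for reviewers: every hunk in this patch is the same mechanical rewrite, dropping constructor type arguments that Java 7 can infer from the declared type (the diamond operator). It applies to Lucene's own generic classes, such as the SetOnce field in IndexWriterConfig above, exactly as it does to the JDK collections. A minimal self-contained sketch of the before/after (Holder is a hypothetical stand-in, not a Lucene class):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondSketch {
      // Tiny generic holder standing in for utility classes like SetOnce<T>.
      static final class Holder<T> {
        private T value;
        void set(T v) { value = v; }
        T get() { return value; }
      }

      public static void main(String[] args) {
        // Before: the type arguments are spelled out twice.
        Map<String, List<Integer>> before = new HashMap<String, List<Integer>>();
        // After: <> lets the compiler infer them from the declaration.
        Map<String, List<Integer>> after = new HashMap<>();
        // Works for any generic type, not only the JDK collections.
        Holder<String> h = new Holder<>();
        h.set("inferred as Holder<String>");
        System.out.println(h.get() + " " + before.equals(after));
      }
    }

Both declarations erase to the same bytecode, so the change is purely cosmetic and carries no behavior change.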
diff --git a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java index 047f210084b..c931f801411 100644 --- a/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java +++ b/lucene/core/src/java/org/apache/lucene/index/LogMergePolicy.java @@ -468,7 +468,7 @@ public abstract class LogMergePolicy extends MergePolicy { // Compute levels, which is just log (base mergeFactor) // of the size of each segment - final List levels = new ArrayList(); + final List levels = new ArrayList<>(); final float norm = (float) Math.log(mergeFactor); final Collection mergingSegments = writer.get().getMergingSegments(); @@ -570,7 +570,7 @@ public abstract class LogMergePolicy extends MergePolicy { } else if (!anyTooLarge) { if (spec == null) spec = new MergeSpecification(); - final List mergeInfos = new ArrayList(); + final List mergeInfos = new ArrayList<>(); for(int i=start;i(segments); + this.segments = new ArrayList<>(segments); int count = 0; for(SegmentCommitInfo info : segments) { count += info.info.getDocCount(); @@ -140,7 +140,7 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable { if (readers == null) { throw new IllegalStateException("IndexWriter has not initialized readers from the segment infos yet"); } - final List readers = new ArrayList(this.readers.size()); + final List readers = new ArrayList<>(this.readers.size()); for (AtomicReader reader : this.readers) { if (reader.numDocs() > 0) { readers.add(reader); @@ -295,7 +295,7 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable { * The subset of segments to be included in the primitive merge. */ - public final List merges = new ArrayList(); + public final List merges = new ArrayList<>(); /** Sole constructor. Use {@link * #add(MergePolicy.OneMerge)} to add merges. 
*/ @@ -393,7 +393,7 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable { // should not happen throw new RuntimeException(e); } - clone.writer = new SetOnce(); + clone.writer = new SetOnce<>(); return clone; } @@ -412,7 +412,7 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable { * defaults than the {@link MergePolicy} */ protected MergePolicy(double defaultNoCFSRatio, long defaultMaxCFSSegmentSize) { - writer = new SetOnce(); + writer = new SetOnce<>(); this.noCFSRatio = defaultNoCFSRatio; this.maxCFSSegmentSize = defaultMaxCFSSegmentSize; } diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java index b25d6556610..c16738ccf03 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java @@ -49,7 +49,7 @@ import org.apache.lucene.util.MergedIterator; public final class MultiFields extends Fields { private final Fields[] subs; private final ReaderSlice[] subSlices; - private final Map terms = new ConcurrentHashMap(); + private final Map terms = new ConcurrentHashMap<>(); /** Returns a single {@link Fields} instance for this * reader, merging fields/terms/docs/positions on the @@ -69,8 +69,8 @@ public final class MultiFields extends Fields { // already an atomic reader / reader with one leave return leaves.get(0).reader().fields(); default: - final List fields = new ArrayList(); - final List slices = new ArrayList(); + final List fields = new ArrayList<>(); + final List slices = new ArrayList<>(); for (final AtomicReaderContext ctx : leaves) { final AtomicReader r = ctx.reader(); final Fields f = r.fields(); @@ -203,7 +203,7 @@ public final class MultiFields extends Fields { for(int i=0;i(subIterators); + return new MergedIterator<>(subIterators); } @Override @@ -215,8 +215,8 @@ public final class MultiFields extends Fields { // Lazy init: first time this field is requested, we // create & add to terms: - final List subs2 = new ArrayList(); - final List slices2 = new ArrayList(); + final List subs2 = new ArrayList<>(); + final List slices2 = new ArrayList<>(); // Gather all sub-readers that share this field for(int i=0;i getIndexedFields(IndexReader reader) { - final Collection fields = new HashSet(); + final Collection fields = new HashSet<>(); for(final FieldInfo fieldInfo : getMergedFieldInfos(reader)) { if (fieldInfo.isIndexed()) { fields.add(fieldInfo.name); diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java index 34c5b92c625..9ad1a1a61b2 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java +++ b/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java @@ -70,7 +70,7 @@ public final class MultiTerms extends Terms { @Override public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throws IOException { - final List termsEnums = new ArrayList(); + final List termsEnums = new ArrayList<>(); for(int i=0;i termsEnums = new ArrayList(); + final List termsEnums = new ArrayList<>(); for(int i=0;i fieldToReader = new TreeMap(); - private final SortedMap tvFieldToReader = new TreeMap(); + private final SortedMap fieldToReader = new TreeMap<>(); + private final SortedMap tvFieldToReader = new TreeMap<>(); /** Create a ParallelAtomicReader based on the provided * readers; auto-closes the given readers on {@link #close()}. 
*/ @@ -151,7 +151,7 @@ public class ParallelAtomicReader extends AtomicReader { // Single instance of this, per ParallelReader instance private final class ParallelFields extends Fields { - final Map<String,Terms> fields = new TreeMap<String,Terms>(); + final Map<String,Terms> fields = new TreeMap<>(); ParallelFields() { } diff --git a/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java b/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java index 052302d469c..ed56fcef209 100644 --- a/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java +++ b/lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java @@ -241,13 +241,13 @@ public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy { private synchronized void loadPriorSnapshots() throws IOException { long genLoaded = -1; IOException ioe = null; - List<String> snapshotFiles = new ArrayList<String>(); + List<String> snapshotFiles = new ArrayList<>(); for(String file : dir.listAll()) { if (file.startsWith(SNAPSHOTS_PREFIX)) { long gen = Long.parseLong(file.substring(SNAPSHOTS_PREFIX.length())); if (genLoaded == -1 || gen > genLoaded) { snapshotFiles.add(file); - Map<String,String> m = new HashMap<String,String>(); + Map<String,String> m = new HashMap<>(); IndexInput in = dir.openInput(file, IOContext.DEFAULT); try { CodecUtil.checkHeader(in, CODEC_NAME, VERSION_START, VERSION_START); diff --git a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java index 0e683148502..23b849ea53d 100644 --- a/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java +++ b/lucene/core/src/java/org/apache/lucene/index/ReadersAndUpdates.java @@ -78,7 +78,7 @@ class ReadersAndUpdates { // updates on the merged segment too. private boolean isMerging = false; - private final Map<String,NumericFieldUpdates> mergingNumericUpdates = new HashMap<String,NumericFieldUpdates>(); + private final Map<String,NumericFieldUpdates> mergingNumericUpdates = new HashMap<>(); public ReadersAndUpdates(IndexWriter writer, SegmentCommitInfo info) { this.info = info; @@ -448,7 +448,7 @@ class ReadersAndUpdates { // create a new map, keeping only the gens that are in use Map<Long,Set<String>> genUpdatesFiles = info.getUpdatesFiles(); - Map<Long,Set<String>> newGenUpdatesFiles = new HashMap<Long,Set<String>>(); + Map<Long,Set<String>> newGenUpdatesFiles = new HashMap<>(); final long fieldInfosGen = info.getFieldInfosGen(); for (FieldInfo fi : fieldInfos) { long dvGen = fi.getDocValuesGen(); diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentCommitInfo.java b/lucene/core/src/java/org/apache/lucene/index/SegmentCommitInfo.java index e2cba48b148..9b437a2a69e 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentCommitInfo.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentCommitInfo.java @@ -56,7 +56,7 @@ public class SegmentCommitInfo { private long nextWriteFieldInfosGen; // Track the per-generation updates files - private final Map<Long,Set<String>> genUpdatesFiles = new HashMap<Long,Set<String>>(); + private final Map<Long,Set<String>> genUpdatesFiles = new HashMap<>(); private volatile long sizeInBytes = -1; @@ -147,7 +147,7 @@ public class SegmentCommitInfo { /** Returns all files in use by this segment. */ public Collection<String> files() throws IOException { // Start from the wrapped info's files: - Collection<String> files = new HashSet<String>(info.files()); + Collection<String> files = new HashSet<>(info.files()); // TODO we could rely on TrackingDir.getCreatedFiles() (like we do for // updates) and then maybe even be able to remove LiveDocsFormat.files(). 
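The rewrite pays off most on nested type arguments such as the per-generation update-file maps in ReadersAndUpdates and SegmentCommitInfo above, where the repeated right-hand side was noisiest; inference is also unaffected by constructor arguments, so sized and copying constructors convert the same way. A sketch of the pattern (the map contents are made up for illustration):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class NestedDiamond {
      public static void main(String[] args) {
        // Before: Map<Long, Set<String>> genFiles = new HashMap<Long, Set<String>>();
        // After: one diamond, however deeply the type arguments nest.
        Map<Long, Set<String>> genFiles = new HashMap<>();
        Set<String> files = new HashSet<>();
        files.add("_0_1.fnm"); // made-up file name
        genFiles.put(1L, files);
        // Constructor arguments do not change the inference: the sized and
        // copying constructors rewritten throughout this patch behave as before.
        Map<Long, Set<String>> copy = new HashMap<>(genFiles);
        List<Long> gens = new ArrayList<>(copy.keySet());
        System.out.println(gens + " " + copy);
      }
    }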
@@ -257,7 +257,7 @@ public class SegmentCommitInfo { // deep clone for (Entry<Long,Set<String>> e : genUpdatesFiles.entrySet()) { - other.genUpdatesFiles.put(e.getKey(), new HashSet<String>(e.getValue())); + other.genUpdatesFiles.put(e.getKey(), new HashSet<>(e.getValue())); } return other; diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java b/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java index 057c98dff51..d6a67df599b 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentCoreReaders.java @@ -79,7 +79,7 @@ final class SegmentCoreReaders { final CloseableThreadLocal<Map<String,NumericDocValues>> normsLocal = new CloseableThreadLocal<Map<String,NumericDocValues>>() { @Override protected Map<String,NumericDocValues> initialValue() { - return new HashMap<String,NumericDocValues>(); + return new HashMap<>(); } }; diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java b/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java index 7290c830c8f..4f09296af0f 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentDocValues.java @@ -35,7 +35,7 @@ import org.apache.lucene.util.RefCount; */ final class SegmentDocValues { - private final Map<Long,RefCount<DocValuesProducer>> genDVProducers = new HashMap<Long,RefCount<DocValuesProducer>>(); + private final Map<Long,RefCount<DocValuesProducer>> genDVProducers = new HashMap<>(); private RefCount<DocValuesProducer> newDocValuesProducer(SegmentCommitInfo si, IOContext context, Directory dir, DocValuesFormat dvFormat, final Long gen, List<FieldInfos> infos) throws IOException { diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java index c7bbbf5b740..c9931378c37 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentInfos.java @@ -139,9 +139,9 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo> { - public Map<String,String> userData = Collections.<String,String>emptyMap(); + public Map<String,String> userData = Collections.emptyMap(); - private List<SegmentCommitInfo> segments = new ArrayList<SegmentCommitInfo>(); + private List<SegmentCommitInfo> segments = new ArrayList<>(); /** * If non-null, information about loading segments_N files @@ -355,7 +355,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo> { - genUpdatesFiles = new HashMap<Long,Set<String>>(numGensUpdatesFiles); + genUpdatesFiles = new HashMap<>(numGensUpdatesFiles); for (int i = 0; i < numGensUpdatesFiles; i++) { genUpdatesFiles.put(input.readLong(), input.readStringSet()); } @@ -471,13 +471,13 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentCommitInfo> { - sis.segments = new ArrayList<SegmentCommitInfo>(size()); + sis.segments = new ArrayList<>(size()); for(final SegmentCommitInfo info : this) { assert info.info.getCodec() != null; // dont directly access segments, use add method!!! 
sis.add(info.clone()); } - sis.userData = new HashMap(userData); + sis.userData = new HashMap<>(userData); return sis; } catch (CloneNotSupportedException e) { throw new RuntimeException("should not happen", e); @@ -832,7 +832,7 @@ public final class SegmentInfos implements Cloneable, Iterable files(Directory dir, boolean includeSegmentsFile) throws IOException { - HashSet files = new HashSet(); + HashSet files = new HashSet<>(); if (includeSegmentsFile) { final String segmentFileName = getSegmentsFileName(); if (segmentFileName != null) { @@ -978,7 +978,7 @@ public final class SegmentInfos implements Cloneable, Iterable mergedAway = new HashSet(merge.segments); + final Set mergedAway = new HashSet<>(merge.segments); boolean inserted = false; int newSegIdx = 0; for (int segIdx = 0, cnt = segments.size(); segIdx < cnt; segIdx++) { @@ -1010,7 +1010,7 @@ public final class SegmentInfos implements Cloneable, Iterable createBackupSegmentInfos() { - final List list = new ArrayList(size()); + final List list = new ArrayList<>(size()); for(final SegmentCommitInfo info : this) { assert info.info.getCodec() != null; list.add(info.clone()); diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java index fec031ef5d8..9570c184d37 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java @@ -155,8 +155,8 @@ final class SegmentMerger { DocValuesType type = field.getDocValuesType(); if (type != null) { if (type == DocValuesType.NUMERIC) { - List toMerge = new ArrayList(); - List docsWithField = new ArrayList(); + List toMerge = new ArrayList<>(); + List docsWithField = new ArrayList<>(); for (AtomicReader reader : mergeState.readers) { NumericDocValues values = reader.getNumericDocValues(field.name); Bits bits = reader.getDocsWithField(field.name); @@ -169,8 +169,8 @@ final class SegmentMerger { } consumer.mergeNumericField(field, mergeState, toMerge, docsWithField); } else if (type == DocValuesType.BINARY) { - List toMerge = new ArrayList(); - List docsWithField = new ArrayList(); + List toMerge = new ArrayList<>(); + List docsWithField = new ArrayList<>(); for (AtomicReader reader : mergeState.readers) { BinaryDocValues values = reader.getBinaryDocValues(field.name); Bits bits = reader.getDocsWithField(field.name); @@ -183,7 +183,7 @@ final class SegmentMerger { } consumer.mergeBinaryField(field, mergeState, toMerge, docsWithField); } else if (type == DocValuesType.SORTED) { - List toMerge = new ArrayList(); + List toMerge = new ArrayList<>(); for (AtomicReader reader : mergeState.readers) { SortedDocValues values = reader.getSortedDocValues(field.name); if (values == null) { @@ -193,7 +193,7 @@ final class SegmentMerger { } consumer.mergeSortedField(field, mergeState, toMerge); } else if (type == DocValuesType.SORTED_SET) { - List toMerge = new ArrayList(); + List toMerge = new ArrayList<>(); for (AtomicReader reader : mergeState.readers) { SortedSetDocValues values = reader.getSortedSetDocValues(field.name); if (values == null) { @@ -223,8 +223,8 @@ final class SegmentMerger { try { for (FieldInfo field : mergeState.fieldInfos) { if (field.hasNorms()) { - List toMerge = new ArrayList(); - List docsWithField = new ArrayList(); + List toMerge = new ArrayList<>(); + List docsWithField = new ArrayList<>(); for (AtomicReader reader : mergeState.readers) { NumericDocValues norms = reader.getNormValues(field.name); if (norms == null) { 
@@ -358,8 +358,8 @@ final class SegmentMerger { private void mergeTerms(SegmentWriteState segmentWriteState) throws IOException { - final List<Fields> fields = new ArrayList<Fields>(); - final List<ReaderSlice> slices = new ArrayList<ReaderSlice>(); + final List<Fields> fields = new ArrayList<>(); + final List<ReaderSlice> slices = new ArrayList<>(); int docBase = 0; diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java index f148f7740c7..907516e71a6 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/SegmentReader.java @@ -60,22 +60,22 @@ public final class SegmentReader extends AtomicReader { final CloseableThreadLocal<Map<String,Object>> docValuesLocal = new CloseableThreadLocal<Map<String,Object>>() { @Override protected Map<String,Object> initialValue() { - return new HashMap<String,Object>(); + return new HashMap<>(); } }; final CloseableThreadLocal<Map<String,Bits>> docsWithFieldLocal = new CloseableThreadLocal<Map<String,Bits>>() { @Override protected Map<String,Bits> initialValue() { - return new HashMap<String,Bits>(); + return new HashMap<>(); } }; - final Map<String,DocValuesProducer> dvProducers = new HashMap<String,DocValuesProducer>(); + final Map<String,DocValuesProducer> dvProducers = new HashMap<>(); final FieldInfos fieldInfos; - private final List<Long> dvGens = new ArrayList<Long>(); + private final List<Long> dvGens = new ArrayList<>(); /** * Constructs a new SegmentReader with a new core. @@ -221,7 +221,7 @@ public final class SegmentReader extends AtomicReader { // returns a gen->List<FieldInfo> mapping. Fields without DV updates have gen=-1 private Map<Long,List<FieldInfo>> getGenInfos() { - final Map<Long,List<FieldInfo>> genInfos = new HashMap<Long,List<FieldInfo>>(); + final Map<Long,List<FieldInfo>> genInfos = new HashMap<>(); for (FieldInfo fi : fieldInfos) { if (fi.getDocValuesType() == null) { continue; @@ -229,7 +229,7 @@ public final class SegmentReader extends AtomicReader { long gen = fi.getDocValuesGen(); List<FieldInfo> infos = genInfos.get(gen); if (infos == null) { - infos = new ArrayList<FieldInfo>(); + infos = new ArrayList<>(); genInfos.put(gen, infos); } infos.add(fi); diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java index d81825e2dac..dd56512b827 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java +++ b/lucene/core/src/java/org/apache/lucene/index/SlowCompositeReaderWrapper.java @@ -180,7 +180,7 @@ public final class SlowCompositeReaderWrapper extends AtomicReader { // TODO: this could really be a weak map somewhere else on the coreCacheKey, // but do we really need to optimize slow-wrapper any more? - private final Map<String,OrdinalMap> cachedOrdMaps = new HashMap<String,OrdinalMap>(); + private final Map<String,OrdinalMap> cachedOrdMaps = new HashMap<>(); @Override public NumericDocValues getNormValues(String field) throws IOException { diff --git a/lucene/core/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java b/lucene/core/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java index 6cc7b8c3197..0f78e3d8a54 100644 --- a/lucene/core/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java +++ b/lucene/core/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java @@ -47,10 +47,10 @@ public class SnapshotDeletionPolicy extends IndexDeletionPolicy { /** Records how many snapshots are held against each * commit generation */ - protected Map<Long,Integer> refCounts = new HashMap<Long,Integer>(); + protected Map<Long,Integer> refCounts = new HashMap<>(); /** Used to map gen to IndexCommit. 
*/ - protected Map indexCommits = new HashMap(); + protected Map indexCommits = new HashMap<>(); /** Wrapped {@link IndexDeletionPolicy} */ private IndexDeletionPolicy primary; @@ -167,7 +167,7 @@ public class SnapshotDeletionPolicy extends IndexDeletionPolicy { /** Returns all IndexCommits held by at least one snapshot. */ public synchronized List getSnapshots() { - return new ArrayList(indexCommits.values()); + return new ArrayList<>(indexCommits.values()); } /** Returns the total number of snapshots currently held. */ @@ -192,15 +192,15 @@ public class SnapshotDeletionPolicy extends IndexDeletionPolicy { SnapshotDeletionPolicy other = (SnapshotDeletionPolicy) super.clone(); other.primary = this.primary.clone(); other.lastCommit = null; - other.refCounts = new HashMap(refCounts); - other.indexCommits = new HashMap(indexCommits); + other.refCounts = new HashMap<>(refCounts); + other.indexCommits = new HashMap<>(indexCommits); return other; } /** Wraps each {@link IndexCommit} as a {@link * SnapshotCommitPoint}. */ private List wrapCommits(List commits) { - List wrappedCommits = new ArrayList(commits.size()); + List wrappedCommits = new ArrayList<>(commits.size()); for (IndexCommit ic : commits) { wrappedCommits.add(new SnapshotCommitPoint(ic)); } diff --git a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java index 04d847bdb73..fbe4376ae60 100644 --- a/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java +++ b/lucene/core/src/java/org/apache/lucene/index/StandardDirectoryReader.java @@ -78,7 +78,7 @@ final class StandardDirectoryReader extends DirectoryReader { // no need to process segments in reverse order final int numSegments = infos.size(); - List readers = new ArrayList(); + List readers = new ArrayList<>(); final Directory dir = writer.getDirectory(); final SegmentInfos segmentInfos = infos.clone(); @@ -134,7 +134,7 @@ final class StandardDirectoryReader extends DirectoryReader { // we put the old SegmentReaders in a map, that allows us // to lookup a reader using its segment name - final Map segmentReaders = new HashMap(); + final Map segmentReaders = new HashMap<>(); if (oldReaders != null) { // create a Map SegmentName->SegmentReader diff --git a/lucene/core/src/java/org/apache/lucene/index/StoredDocument.java b/lucene/core/src/java/org/apache/lucene/index/StoredDocument.java index b1a9fbac920..b79c6b62ab9 100644 --- a/lucene/core/src/java/org/apache/lucene/index/StoredDocument.java +++ b/lucene/core/src/java/org/apache/lucene/index/StoredDocument.java @@ -34,7 +34,7 @@ import org.apache.lucene.util.BytesRef; // TODO: shouldn't this really be in the .document package? public class StoredDocument implements Iterable { - private final List fields = new ArrayList(); + private final List fields = new ArrayList<>(); /** Sole constructor. 
*/ public StoredDocument() { @@ -61,7 +61,7 @@ public class StoredDocument implements Iterable { * @return a StorableField[] array */ public StorableField[] getFields(String name) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (StorableField field : fields) { if (field.name().equals(name)) { result.add(field); @@ -112,7 +112,7 @@ public class StoredDocument implements Iterable { * @return a BytesRef[] of binary field values */ public final BytesRef[] getBinaryValues(String name) { - final List result = new ArrayList(); + final List result = new ArrayList<>(); for (StorableField field : fields) { if (field.name().equals(name)) { final BytesRef bytes = field.binaryValue(); @@ -158,7 +158,7 @@ public class StoredDocument implements Iterable { * @return a String[] of field values */ public final String[] getValues(String name) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (StorableField field : fields) { if (field.name().equals(name) && field.stringValue() != null) { result.add(field.stringValue()); diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsHash.java b/lucene/core/src/java/org/apache/lucene/index/TermsHash.java index bdceb25b10f..ce3887c930a 100644 --- a/lucene/core/src/java/org/apache/lucene/index/TermsHash.java +++ b/lucene/core/src/java/org/apache/lucene/index/TermsHash.java @@ -96,11 +96,11 @@ final class TermsHash extends InvertedDocConsumer { @Override void flush(Map fieldsToFlush, final SegmentWriteState state) throws IOException { - Map childFields = new HashMap(); + Map childFields = new HashMap<>(); Map nextChildFields; if (nextTermsHash != null) { - nextChildFields = new HashMap(); + nextChildFields = new HashMap<>(); } else { nextChildFields = null; } diff --git a/lucene/core/src/java/org/apache/lucene/index/ThreadAffinityDocumentsWriterThreadPool.java b/lucene/core/src/java/org/apache/lucene/index/ThreadAffinityDocumentsWriterThreadPool.java index 82a0477a6c2..a1fc1181c05 100644 --- a/lucene/core/src/java/org/apache/lucene/index/ThreadAffinityDocumentsWriterThreadPool.java +++ b/lucene/core/src/java/org/apache/lucene/index/ThreadAffinityDocumentsWriterThreadPool.java @@ -31,7 +31,7 @@ import org.apache.lucene.index.DocumentsWriterPerThreadPool.ThreadState; //javad * minimal contended {@link ThreadState}. */ class ThreadAffinityDocumentsWriterThreadPool extends DocumentsWriterPerThreadPool { - private Map threadBindings = new ConcurrentHashMap(); + private Map threadBindings = new ConcurrentHashMap<>(); /** * Creates a new {@link ThreadAffinityDocumentsWriterThreadPool} with a given maximum of {@link ThreadState}s. 
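Also worth noting is what the patch deliberately leaves untouched: the anonymous CloseableThreadLocal subclasses in SegmentCoreReaders and SegmentReader above keep their explicit type arguments, because Java 7 rejects the diamond on anonymous classes (Java 9 later relaxed this); only the instantiations inside their bodies are converted. A sketch of the same situation with a JDK type, plus inference in return position, the shape used by the NumericRangeQuery/NumericRangeFilter factory methods further below:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class AnonymousDiamond {
      // In a return statement the diamond infers from the declared return
      // type, as in the newLongRange/newIntRange-style factories below.
      static List<String> emptyNames() {
        return new ArrayList<>();
      }

      public static void main(String[] args) {
        // new ThreadLocal<>() { ... } is a compile error on Java 7, so the
        // explicit type arguments on the anonymous subclass must stay...
        ThreadLocal<Map<String, Integer>> local =
            new ThreadLocal<Map<String, Integer>>() {
              @Override
              protected Map<String, Integer> initialValue() {
                return new HashMap<>(); // ...while the diamond is fine in the body.
              }
            };
        System.out.println(local.get() + " " + emptyNames());
      }
    }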
@@ -78,7 +78,7 @@ class ThreadAffinityDocumentsWriterThreadPool extends DocumentsWriterPerThreadPo @Override public ThreadAffinityDocumentsWriterThreadPool clone() { ThreadAffinityDocumentsWriterThreadPool clone = (ThreadAffinityDocumentsWriterThreadPool) super.clone(); - clone.threadBindings = new ConcurrentHashMap(); + clone.threadBindings = new ConcurrentHashMap<>(); return clone; } } diff --git a/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java index 45b8d7e6a77..f1213cba5f7 100644 --- a/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java +++ b/lucene/core/src/java/org/apache/lucene/index/TieredMergePolicy.java @@ -280,9 +280,9 @@ public class TieredMergePolicy extends MergePolicy { return null; } final Collection merging = writer.get().getMergingSegments(); - final Collection toBeMerged = new HashSet(); + final Collection toBeMerged = new HashSet<>(); - final List infosSorted = new ArrayList(infos.asList()); + final List infosSorted = new ArrayList<>(infos.asList()); Collections.sort(infosSorted, new SegmentByteSizeDescending()); // Compute total index bytes & print details about the index @@ -341,7 +341,7 @@ public class TieredMergePolicy extends MergePolicy { // Gather eligible segments for merging, ie segments // not already being merged and not already picked (by // prior iteration of this loop) for merging: - final List eligible = new ArrayList(); + final List eligible = new ArrayList<>(); for(int idx = tooBigCount; idx candidate = new ArrayList(); + final List candidate = new ArrayList<>(); boolean hitTooLarge = false; for(int idx = startIdx;idx newInfos = new ArrayList(); + final List newInfos = new ArrayList<>(); for (final SegmentCommitInfo si : segmentInfos) { if (oldSegments.containsKey(si)) { newInfos.add(si); diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java index 23154f9289a..9c65f824eaf 100644 --- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java @@ -68,7 +68,7 @@ public class BooleanQuery extends Query implements Iterable { BooleanQuery.maxClauseCount = maxClauseCount; } - private ArrayList clauses = new ArrayList(); + private ArrayList clauses = new ArrayList<>(); private final boolean disableCoord; /** Constructs an empty boolean query. 
*/ @@ -179,7 +179,7 @@ public class BooleanQuery extends Query implements Iterable { throws IOException { this.similarity = searcher.getSimilarity(); this.disableCoord = disableCoord; - weights = new ArrayList(clauses.size()); + weights = new ArrayList<>(clauses.size()); for (int i = 0 ; i < clauses.size(); i++) { BooleanClause c = clauses.get(i); Weight w = c.getQuery().createWeight(searcher); @@ -343,9 +343,9 @@ public class BooleanQuery extends Query implements Iterable { @Override public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException { - List required = new ArrayList(); - List prohibited = new ArrayList(); - List optional = new ArrayList(); + List required = new ArrayList<>(); + List prohibited = new ArrayList<>(); + List optional = new ArrayList<>(); Iterator cIter = clauses.iterator(); for (Weight w : weights) { BooleanClause c = cIter.next(); diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java index ff2d870edbf..8e506355539 100644 --- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java +++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java @@ -238,7 +238,7 @@ class BooleanScorer2 extends Scorer { private Scorer makeCountingSumScorerSomeReq(boolean disableCoord) throws IOException { // At least one required scorer. if (optionalScorers.size() == minNrShouldMatch) { // all optional scorers also required. - ArrayList allReq = new ArrayList(requiredScorers); + ArrayList allReq = new ArrayList<>(requiredScorers); allReq.addAll(optionalScorers); return addProhibitedScorers(countingConjunctionSumScorer(disableCoord, allReq)); } else { // optionalScorers.size() > minNrShouldMatch, and at least one required scorer @@ -313,7 +313,7 @@ class BooleanScorer2 extends Scorer { @Override public Collection getChildren() { - ArrayList children = new ArrayList(); + ArrayList children = new ArrayList<>(); for (Scorer s : optionalScorers) { children.add(new ChildScorer(s, "SHOULD")); } diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java index 554da526fcc..23e159069ba 100644 --- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java +++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java @@ -107,7 +107,7 @@ public abstract class CachingCollector extends Collector { super(other, maxRAMMB, true); cachedScorer = new CachedScorer(); - cachedScores = new ArrayList(); + cachedScores = new ArrayList<>(); curScores = new float[INITIAL_ARRAY_SIZE]; cachedScores.add(curScores); } @@ -116,7 +116,7 @@ public abstract class CachingCollector extends Collector { super(other, maxDocsToCache); cachedScorer = new CachedScorer(); - cachedScores = new ArrayList(); + cachedScores = new ArrayList<>(); curScores = new float[INITIAL_ARRAY_SIZE]; cachedScores.add(curScores); } @@ -315,7 +315,7 @@ public abstract class CachingCollector extends Collector { protected final Collector other; protected final int maxDocsToCache; - protected final List cachedSegs = new ArrayList(); + protected final List cachedSegs = new ArrayList<>(); protected final List cachedDocs; private AtomicReaderContext lastReaderContext; @@ -393,7 +393,7 @@ public abstract class CachingCollector extends Collector { private CachingCollector(Collector other, double maxRAMMB, boolean cacheScores) { this.other = other; - cachedDocs = new ArrayList(); + cachedDocs = new 
ArrayList<>(); curDocs = new int[INITIAL_ARRAY_SIZE]; cachedDocs.add(curDocs); @@ -407,7 +407,7 @@ public abstract class CachingCollector extends Collector { private CachingCollector(Collector other, int maxDocsToCache) { this.other = other; - cachedDocs = new ArrayList(); + cachedDocs = new ArrayList<>(); curDocs = new int[INITIAL_ARRAY_SIZE]; cachedDocs.add(curDocs); this.maxDocsToCache = maxDocsToCache; diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingWrapperFilter.java b/lucene/core/src/java/org/apache/lucene/search/CachingWrapperFilter.java index f8b75660ede..0ad87d5bcf0 100644 --- a/lucene/core/src/java/org/apache/lucene/search/CachingWrapperFilter.java +++ b/lucene/core/src/java/org/apache/lucene/search/CachingWrapperFilter.java @@ -155,7 +155,7 @@ public class CachingWrapperFilter extends Filter { // Sync only to pull the current set of values: List docIdSets; synchronized(cache) { - docIdSets = new ArrayList(cache.values()); + docIdSets = new ArrayList<>(cache.values()); } long total = 0; diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java index 22476e777b6..3e811873618 100644 --- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java +++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java @@ -122,7 +122,7 @@ class ConjunctionScorer extends Scorer { @Override public Collection getChildren() { - ArrayList children = new ArrayList(docsAndFreqs.length); + ArrayList children = new ArrayList<>(docsAndFreqs.length); for (DocsAndFreqs docs : docsAndFreqs) { children.add(new ChildScorer(docs.scorer, "MUST")); } diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java index 2e49ca108c2..e81097866a9 100644 --- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java @@ -45,7 +45,7 @@ import org.apache.lucene.util.Bits; public class DisjunctionMaxQuery extends Query implements Iterable { /* The subqueries */ - private ArrayList disjuncts = new ArrayList(); + private ArrayList disjuncts = new ArrayList<>(); /* Multiple of the non-max disjunct scores added into our final score. Non-zero values support tie-breaking. */ private float tieBreakerMultiplier = 0.0f; @@ -115,7 +115,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable { protected class DisjunctionMaxWeight extends Weight { /** The Weights for our subqueries, in 1-1 correspondence with disjuncts */ - protected ArrayList weights = new ArrayList(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts + protected ArrayList weights = new ArrayList<>(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts /** Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. 
*/ public DisjunctionMaxWeight(IndexSearcher searcher) throws IOException { @@ -154,7 +154,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> { /** Create the scorer used to score our associated DisjunctionMaxQuery */ @Override public Scorer scorer(AtomicReaderContext context, Bits acceptDocs) throws IOException { - List<Scorer> scorers = new ArrayList<Scorer>(); + List<Scorer> scorers = new ArrayList<>(); for (Weight w : weights) { // we will advance() subscorers Scorer subScorer = w.scorer(context, acceptDocs); diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java index 05522dd5594..31c1d1090b2 100644 --- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java +++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java @@ -102,7 +102,7 @@ abstract class DisjunctionScorer extends Scorer { @Override public final Collection<ChildScorer> getChildren() { - ArrayList<ChildScorer> children = new ArrayList<ChildScorer>(numScorers); + ArrayList<ChildScorer> children = new ArrayList<>(numScorers); for (int i = 0; i < numScorers; i++) { children.add(new ChildScorer(subScorers[i], "SHOULD")); } diff --git a/lucene/core/src/java/org/apache/lucene/search/Explanation.java b/lucene/core/src/java/org/apache/lucene/search/Explanation.java index ce3060276be..5dad05ff601 100644 --- a/lucene/core/src/java/org/apache/lucene/search/Explanation.java +++ b/lucene/core/src/java/org/apache/lucene/search/Explanation.java @@ -76,7 +76,7 @@ public class Explanation { /** Adds a sub-node to this explanation node. */ public void addDetail(Explanation detail) { if (details == null) - details = new ArrayList<Explanation>(); + details = new ArrayList<>(); details.add(detail); } diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java b/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java index fcafbbe4119..9da1de627a2 100644 --- a/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java +++ b/lucene/core/src/java/org/apache/lucene/search/FieldCacheImpl.java @@ -61,7 +61,7 @@ class FieldCacheImpl implements FieldCache { } private synchronized void init() { - caches = new HashMap<Class<?>,Cache>(9); + caches = new HashMap<>(9); caches.put(Integer.TYPE, new IntCache(this)); caches.put(Float.TYPE, new FloatCache(this)); caches.put(Long.TYPE, new LongCache(this)); @@ -86,7 +86,7 @@ class FieldCacheImpl implements FieldCache { @Override public synchronized CacheEntry[] getCacheEntries() { - List<CacheEntry> result = new ArrayList<CacheEntry>(17); + List<CacheEntry> result = new ArrayList<>(17); for(final Map.Entry<Class<?>,Cache> cacheEntry: caches.entrySet()) { final Cache cache = cacheEntry.getValue(); final Class<?> cacheType = cacheEntry.getKey(); @@ -149,7 +149,7 @@ class FieldCacheImpl implements FieldCache { final FieldCacheImpl wrapper; - final Map<Object,Map<CacheKey,Object>> readerCache = new WeakHashMap<Object,Map<CacheKey,Object>>(); + final Map<Object,Map<CacheKey,Object>> readerCache = new WeakHashMap<>(); protected abstract Object createValue(AtomicReader reader, CacheKey key, boolean setDocsWithField) throws IOException; @@ -169,7 +169,7 @@ class FieldCacheImpl implements FieldCache { Map<CacheKey,Object> innerCache = readerCache.get(readerKey); if (innerCache == null) { // First time this reader is using FieldCache - innerCache = new HashMap<CacheKey,Object>(); + innerCache = new HashMap<>(); readerCache.put(readerKey, innerCache); wrapper.initReader(reader); } @@ -190,7 +190,7 @@ class FieldCacheImpl implements FieldCache { innerCache = readerCache.get(readerKey); if (innerCache == null) { // First time this reader is using FieldCache - innerCache = new HashMap<CacheKey,Object>(); + innerCache = 
new HashMap<>(); readerCache.put(readerKey, innerCache); wrapper.initReader(reader); value = null; @@ -438,7 +438,7 @@ class FieldCacheImpl implements FieldCache { return wrapper.getInts(reader, key.field, NUMERIC_UTILS_INT_PARSER, setDocsWithField); } - final HoldsOneThing valuesRef = new HoldsOneThing(); + final HoldsOneThing valuesRef = new HoldsOneThing<>(); Uninvert u = new Uninvert() { private int minValue; @@ -626,7 +626,7 @@ class FieldCacheImpl implements FieldCache { return wrapper.getFloats(reader, key.field, NUMERIC_UTILS_FLOAT_PARSER, setDocsWithField); } - final HoldsOneThing valuesRef = new HoldsOneThing(); + final HoldsOneThing valuesRef = new HoldsOneThing<>(); Uninvert u = new Uninvert() { private float currentValue; @@ -733,7 +733,7 @@ class FieldCacheImpl implements FieldCache { return wrapper.getLongs(reader, key.field, NUMERIC_UTILS_LONG_PARSER, setDocsWithField); } - final HoldsOneThing valuesRef = new HoldsOneThing(); + final HoldsOneThing valuesRef = new HoldsOneThing<>(); Uninvert u = new Uninvert() { private long minValue; @@ -851,7 +851,7 @@ class FieldCacheImpl implements FieldCache { return wrapper.getDoubles(reader, key.field, NUMERIC_UTILS_DOUBLE_PARSER, setDocsWithField); } - final HoldsOneThing valuesRef = new HoldsOneThing(); + final HoldsOneThing valuesRef = new HoldsOneThing<>(); Uninvert u = new Uninvert() { private double currentValue; diff --git a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java index a77c9f8422a..a4a73de4adc 100644 --- a/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java +++ b/lucene/core/src/java/org/apache/lucene/search/FieldValueHitQueue.java @@ -168,9 +168,9 @@ public abstract class FieldValueHitQueue ext } if (fields.length == 1) { - return new OneComparatorFieldValueHitQueue(fields, size); + return new OneComparatorFieldValueHitQueue<>(fields, size); } else { - return new MultiComparatorsFieldValueHitQueue(fields, size); + return new MultiComparatorsFieldValueHitQueue<>(fields, size); } } diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java index 8e2bf8bc4d1..6d732397d75 100644 --- a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java +++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java @@ -405,7 +405,7 @@ public class FuzzyTermsEnum extends TermsEnum { * Stores compiled automata as a list (indexed by edit distance) * @lucene.internal */ public static final class LevenshteinAutomataAttributeImpl extends AttributeImpl implements LevenshteinAutomataAttribute { - private final List automata = new ArrayList(); + private final List automata = new ArrayList<>(); @Override public List automata() { diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java index 8a9aec99a4e..8b33ae736e5 100644 --- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java +++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java @@ -441,7 +441,7 @@ public class IndexSearcher { } else { final HitQueue hq = new HitQueue(nDocs, false); final Lock lock = new ReentrantLock(); - final ExecutionHelper runner = new ExecutionHelper(executor); + final ExecutionHelper runner = new ExecutionHelper<>(executor); for (int i = 0; i < leafSlices.length; i++) { // search each sub runner.submit(new SearcherCallableNoSort(lock, 
this, leafSlices[i], weight, after, nDocs, hq)); @@ -532,7 +532,7 @@ public class IndexSearcher { false); final Lock lock = new ReentrantLock(); - final ExecutionHelper runner = new ExecutionHelper(executor); + final ExecutionHelper runner = new ExecutionHelper<>(executor); for (int i = 0; i < leafSlices.length; i++) { // search each leaf slice runner.submit( new SearcherCallableWithSort(lock, this, leafSlices[i], weight, after, nDocs, topCollector, sort, doDocScores, doMaxScore)); @@ -810,7 +810,7 @@ public class IndexSearcher { private int numTasks; ExecutionHelper(final Executor executor) { - this.service = new ExecutorCompletionService(executor); + this.service = new ExecutorCompletionService<>(executor); } @Override diff --git a/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java b/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java index 76ffae3be29..ced906659bc 100644 --- a/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java +++ b/lucene/core/src/java/org/apache/lucene/search/LiveFieldValues.java @@ -35,8 +35,8 @@ import java.util.concurrent.ConcurrentHashMap; public abstract class LiveFieldValues implements ReferenceManager.RefreshListener, Closeable { - private volatile Map current = new ConcurrentHashMap(); - private volatile Map old = new ConcurrentHashMap(); + private volatile Map current = new ConcurrentHashMap<>(); + private volatile Map old = new ConcurrentHashMap<>(); private final ReferenceManager mgr; private final T missingValue; @@ -58,7 +58,7 @@ public abstract class LiveFieldValues implements ReferenceManager.RefreshLi // map. While reopen is running, any lookup will first // try this new map, then fallback to old, then to the // current searcher: - current = new ConcurrentHashMap(); + current = new ConcurrentHashMap<>(); } @Override @@ -69,7 +69,7 @@ public abstract class LiveFieldValues implements ReferenceManager.RefreshLi // entries in it, which is fine: it means they were // actually already included in the previously opened // reader. 
So we can safely clear old here: - old = new ConcurrentHashMap(); + old = new ConcurrentHashMap<>(); } /** Call this after you've successfully added a document diff --git a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java index 061ebcc499b..a4a2429a809 100644 --- a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java +++ b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java @@ -119,7 +119,7 @@ class MinShouldMatchSumScorer extends Scorer { @Override public final Collection getChildren() { - ArrayList children = new ArrayList(numScorers); + ArrayList children = new ArrayList<>(numScorers); for (int i = 0; i < numScorers; i++) { children.add(new ChildScorer(subScorers[i], "SHOULD")); } diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java index 93fb1b64223..fe326b73ded 100644 --- a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java @@ -50,8 +50,8 @@ import org.apache.lucene.util.ToStringUtils; */ public class MultiPhraseQuery extends Query { private String field; - private ArrayList termArrays = new ArrayList(); - private ArrayList positions = new ArrayList(); + private ArrayList termArrays = new ArrayList<>(); + private ArrayList positions = new ArrayList<>(); private int slop = 0; @@ -141,7 +141,7 @@ public class MultiPhraseQuery extends Query { private class MultiPhraseWeight extends Weight { private final Similarity similarity; private final Similarity.SimWeight stats; - private final Map termContexts = new HashMap(); + private final Map termContexts = new HashMap<>(); public MultiPhraseWeight(IndexSearcher searcher) throws IOException { @@ -149,7 +149,7 @@ public class MultiPhraseQuery extends Query { final IndexReaderContext context = searcher.getTopReaderContext(); // compute idf - ArrayList allTermStats = new ArrayList(); + ArrayList allTermStats = new ArrayList<>(); for(final Term[] terms: termArrays) { for (Term term: terms) { TermContext termContext = termContexts.get(term); @@ -479,7 +479,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum { private long cost; public UnionDocsAndPositionsEnum(Bits liveDocs, AtomicReaderContext context, Term[] terms, Map termContexts, TermsEnum termsEnum) throws IOException { - List docsEnums = new LinkedList(); + List docsEnums = new LinkedList<>(); for (int i = 0; i < terms.length; i++) { final Term term = terms[i]; TermState termState = termContexts.get(term).get(context.ord); diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiTermQuery.java index a6ed25ad791..7fb8da6b40b 100644 --- a/lucene/core/src/java/org/apache/lucene/search/MultiTermQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/MultiTermQuery.java @@ -92,7 +92,7 @@ public abstract class MultiTermQuery extends Query { public static final RewriteMethod CONSTANT_SCORE_FILTER_REWRITE = new RewriteMethod() { @Override public Query rewrite(IndexReader reader, MultiTermQuery query) { - Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter(query)); + Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter<>(query)); result.setBoost(query.getBoost()); return result; } diff --git 
a/lucene/core/src/java/org/apache/lucene/search/NumericRangeFilter.java b/lucene/core/src/java/org/apache/lucene/search/NumericRangeFilter.java index 7fd669989d2..96fe8a4ede0 100644 --- a/lucene/core/src/java/org/apache/lucene/search/NumericRangeFilter.java +++ b/lucene/core/src/java/org/apache/lucene/search/NumericRangeFilter.java @@ -61,7 +61,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newLongRange(final String field, final int precisionStep, Long min, Long max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newLongRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } @@ -76,7 +76,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newLongRange(final String field, Long min, Long max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newLongRange(field, min, max, minInclusive, maxInclusive) ); } @@ -91,7 +91,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newIntRange(final String field, final int precisionStep, Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newIntRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } @@ -106,7 +106,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newIntRange(final String field, Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newIntRange(field, min, max, minInclusive, maxInclusive) ); } @@ -123,7 +123,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newDoubleRange(final String field, final int precisionStep, Double min, Double max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newDoubleRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } @@ -140,7 +140,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newDoubleRange(final String field, Double min, Double max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newDoubleRange(field, min, max, minInclusive, maxInclusive) ); } @@ -157,7 +157,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newFloatRange(final String field, final int precisionStep, Float min, Float max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newFloatRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } @@ -174,7 +174,7 @@ public final class NumericRangeFilter extends MultiTermQueryWr public static NumericRangeFilter newFloatRange(final String field, Float min, Float max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeFilter( + return new NumericRangeFilter<>( NumericRangeQuery.newFloatRange(field, min, max, minInclusive, maxInclusive) ); } diff --git 
a/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java b/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java index 1ba70306f7f..102523e0b5a 100644 --- a/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/NumericRangeQuery.java @@ -191,7 +191,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newLongRange(final String field, final int precisionStep, Long min, Long max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeQuery(field, precisionStep, NumericType.LONG, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, precisionStep, NumericType.LONG, min, max, minInclusive, maxInclusive); } /** @@ -204,7 +204,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newLongRange(final String field, Long min, Long max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.LONG, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.LONG, min, max, minInclusive, maxInclusive); } /** @@ -217,7 +217,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newIntRange(final String field, final int precisionStep, Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeQuery(field, precisionStep, NumericType.INT, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, precisionStep, NumericType.INT, min, max, minInclusive, maxInclusive); } /** @@ -230,7 +230,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newIntRange(final String field, Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.INT, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.INT, min, max, minInclusive, maxInclusive); } /** @@ -245,7 +245,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newDoubleRange(final String field, final int precisionStep, Double min, Double max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeQuery(field, precisionStep, NumericType.DOUBLE, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, precisionStep, NumericType.DOUBLE, min, max, minInclusive, maxInclusive); } /** @@ -260,7 +260,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newDoubleRange(final String field, Double min, Double max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.DOUBLE, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.DOUBLE, min, max, minInclusive, maxInclusive); } /** @@ -275,7 +275,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newFloatRange(final String field, final int precisionStep, Float min, Float max, final boolean minInclusive, final boolean maxInclusive ) { - return new 
NumericRangeQuery(field, precisionStep, NumericType.FLOAT, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, precisionStep, NumericType.FLOAT, min, max, minInclusive, maxInclusive); } /** @@ -290,7 +290,7 @@ public final class NumericRangeQuery extends MultiTermQuery { public static NumericRangeQuery newFloatRange(final String field, Float min, Float max, final boolean minInclusive, final boolean maxInclusive ) { - return new NumericRangeQuery(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.FLOAT, min, max, minInclusive, maxInclusive); + return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.FLOAT, min, max, minInclusive, maxInclusive); } @Override @SuppressWarnings("unchecked") @@ -390,7 +390,7 @@ public final class NumericRangeQuery extends MultiTermQuery { private BytesRef currentLowerBound, currentUpperBound; - private final LinkedList rangeBounds = new LinkedList(); + private final LinkedList rangeBounds = new LinkedList<>(); NumericRangeTermsEnum(final TermsEnum tenum) { super(tenum); diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java index cd40a11029f..f19ae223e4c 100644 --- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java @@ -46,8 +46,8 @@ import org.apache.lucene.util.ToStringUtils; */ public class PhraseQuery extends Query { private String field; - private ArrayList terms = new ArrayList(4); - private ArrayList positions = new ArrayList(4); + private ArrayList terms = new ArrayList<>(4); + private ArrayList positions = new ArrayList<>(4); private int maxPosition = 0; private int slop = 0; diff --git a/lucene/core/src/java/org/apache/lucene/search/ReferenceManager.java b/lucene/core/src/java/org/apache/lucene/search/ReferenceManager.java index 41383cbcf08..2abb23e38a0 100644 --- a/lucene/core/src/java/org/apache/lucene/search/ReferenceManager.java +++ b/lucene/core/src/java/org/apache/lucene/search/ReferenceManager.java @@ -47,7 +47,7 @@ public abstract class ReferenceManager implements Closeable { private final Lock refreshLock = new ReentrantLock(); - private final List refreshListeners = new CopyOnWriteArrayList(); + private final List refreshListeners = new CopyOnWriteArrayList<>(); private void ensureOpen() { if (current == null) { diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java index 4e1c40c56da..7378e0a81c8 100644 --- a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java +++ b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java @@ -94,7 +94,7 @@ class ReqOptSumScorer extends Scorer { @Override public Collection getChildren() { - ArrayList children = new ArrayList(2); + ArrayList children = new ArrayList<>(2); children.add(new ChildScorer(reqScorer, "MUST")); children.add(new ChildScorer(optScorer, "SHOULD")); return children; diff --git a/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java b/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java index 3d6cbb3a097..8a81156758f 100644 --- a/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java +++ b/lucene/core/src/java/org/apache/lucene/search/SearcherLifetimeManager.java @@ -132,7 +132,7 @@ public class SearcherLifetimeManager implements Closeable { // TODO: we could get by w/ just a "set"; 
need to have // Tracker hash by its version and have compareTo(Long) // compare to its version - private final ConcurrentHashMap searchers = new ConcurrentHashMap(); + private final ConcurrentHashMap searchers = new ConcurrentHashMap<>(); private void ensureOpen() { if (closed) { @@ -246,7 +246,7 @@ public class SearcherLifetimeManager implements Closeable { // (not thread-safe since the values can change while // ArrayList is init'ing itself); must instead iterate // ourselves: - final List trackers = new ArrayList(); + final List trackers = new ArrayList<>(); for(SearcherTracker tracker : searchers.values()) { trackers.add(tracker); } @@ -285,7 +285,7 @@ public class SearcherLifetimeManager implements Closeable { @Override public synchronized void close() throws IOException { closed = true; - final List toClose = new ArrayList(searchers.values()); + final List toClose = new ArrayList<>(searchers.values()); // Remove up front in case exc below, so we don't // over-decRef on double-close: diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java index ed7a271e65b..986ab066ccb 100644 --- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java +++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java @@ -379,7 +379,7 @@ final class SloppyPhraseScorer extends Scorer { /** Detect repetition groups. Done once - for first doc */ private ArrayList> gatherRptGroups(LinkedHashMap rptTerms) throws IOException { PhrasePositions[] rpp = repeatingPPs(rptTerms); - ArrayList> res = new ArrayList>(); + ArrayList> res = new ArrayList<>(); if (!hasMultiTermRpts) { // simpler - no multi-terms - can base on positions in first doc for (int i=0; i rl = new ArrayList(2); + ArrayList rl = new ArrayList<>(2); rl.add(pp); res.add(rl); } @@ -409,11 +409,11 @@ final class SloppyPhraseScorer extends Scorer { } } else { // more involved - has multi-terms - ArrayList> tmp = new ArrayList>(); + ArrayList> tmp = new ArrayList<>(); ArrayList bb = ppTermsBitSets(rpp, rptTerms); unionTermGroups(bb); HashMap tg = termGroups(rptTerms, bb); - HashSet distinctGroupIDs = new HashSet(tg.values()); + HashSet distinctGroupIDs = new HashSet<>(tg.values()); for (int i=0; i()); } @@ -428,7 +428,7 @@ final class SloppyPhraseScorer extends Scorer { } } for (HashSet hs : tmp) { - res.add(new ArrayList(hs)); + res.add(new ArrayList<>(hs)); } } return res; @@ -441,8 +441,8 @@ final class SloppyPhraseScorer extends Scorer { /** find repeating terms and assign them ordinal values */ private LinkedHashMap repeatingTerms() { - LinkedHashMap tord = new LinkedHashMap(); - HashMap tcnt = new HashMap(); + LinkedHashMap tord = new LinkedHashMap<>(); + HashMap tcnt = new HashMap<>(); for (PhrasePositions pp=min,prev=null; prev!=max; pp=(prev=pp).next) { // iterate cyclic list: done once handled max for (Term t : pp.terms) { Integer cnt0 = tcnt.get(t); @@ -458,7 +458,7 @@ final class SloppyPhraseScorer extends Scorer { /** find repeating pps, and for each, if has multi-terms, update this.hasMultiTermRpts */ private PhrasePositions[] repeatingPPs(HashMap rptTerms) { - ArrayList rp = new ArrayList(); + ArrayList rp = new ArrayList<>(); for (PhrasePositions pp=min,prev=null; prev!=max; pp=(prev=pp).next) { // iterate cyclic list: done once handled max for (Term t : pp.terms) { if (rptTerms.containsKey(t)) { @@ -473,7 +473,7 @@ final class SloppyPhraseScorer extends Scorer { /** bit-sets - for each repeating pp, for each of 
its repeating terms, the term ordinal values is set */ private ArrayList ppTermsBitSets(PhrasePositions[] rpp, HashMap tord) { - ArrayList bb = new ArrayList(rpp.length); + ArrayList bb = new ArrayList<>(rpp.length); for (PhrasePositions pp : rpp) { FixedBitSet b = new FixedBitSet(tord.size()); Integer ord; @@ -507,7 +507,7 @@ final class SloppyPhraseScorer extends Scorer { /** map each term to the single group that contains it */ private HashMap termGroups(LinkedHashMap tord, ArrayList bb) throws IOException { - HashMap tg = new HashMap(); + HashMap tg = new HashMap<>(); Term[] t = tord.keySet().toArray(new Term[0]); for (int i=0; i extends TermCollectingRew @Override public final Q rewrite(final IndexReader reader, final MultiTermQuery query) throws IOException { final int maxSize = Math.min(size, getMaxSize()); - final PriorityQueue stQueue = new PriorityQueue(); + final PriorityQueue stQueue = new PriorityQueue<>(); collectTerms(reader, query, new TermCollector() { private final MaxNonCompetitiveBoostAttribute maxBoostAtt = attributes.addAttribute(MaxNonCompetitiveBoostAttribute.class); - private final Map visitedTerms = new HashMap(); + private final Map visitedTerms = new HashMap<>(); private TermsEnum termsEnum; private BoostAttribute boostAtt; diff --git a/lucene/core/src/java/org/apache/lucene/search/WildcardQuery.java b/lucene/core/src/java/org/apache/lucene/search/WildcardQuery.java index a116ad6a2d3..12cd77001f0 100644 --- a/lucene/core/src/java/org/apache/lucene/search/WildcardQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/WildcardQuery.java @@ -63,7 +63,7 @@ public class WildcardQuery extends AutomatonQuery { */ @SuppressWarnings("fallthrough") public static Automaton toAutomaton(Term wildcardquery) { - List automata = new ArrayList(); + List automata = new ArrayList<>(); String wildcardText = wildcardquery.text(); diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java index a3075983b7d..e48f4f9c786 100644 --- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java +++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java @@ -74,7 +74,7 @@ public class PayloadSpanUtil { * @throws IOException if there is a low-level I/O error */ public Collection getPayloadsForQuery(Query query) throws IOException { - Collection payloads = new ArrayList(); + Collection payloads = new ArrayList<>(); queryToSpanQuery(query, payloads); return payloads; } @@ -143,7 +143,7 @@ public class PayloadSpanUtil { final Term[] termArray = termArrays.get(i); List disjuncts = disjunctLists[positions[i]]; if (disjuncts == null) { - disjuncts = (disjunctLists[positions[i]] = new ArrayList( + disjuncts = (disjunctLists[positions[i]] = new ArrayList<>( termArray.length)); ++distinctPositions; } @@ -178,8 +178,8 @@ public class PayloadSpanUtil { private void getPayloads(Collection payloads, SpanQuery query) throws IOException { - Map termContexts = new HashMap(); - TreeSet terms = new TreeSet(); + Map termContexts = new HashMap<>(); + TreeSet terms = new TreeSet<>(); query.extractTerms(terms); for (Term term : terms) { termContexts.put(term, TermContext.build(context, term)); diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java b/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java index 95451f4586b..aea50c6cadd 100644 --- 
a/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java +++ b/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java @@ -103,7 +103,7 @@ public class NearSpansOrdered extends Spans { allowedSlop = spanNearQuery.getSlop(); SpanQuery[] clauses = spanNearQuery.getClauses(); subSpans = new Spans[clauses.length]; - matchPayload = new LinkedList(); + matchPayload = new LinkedList<>(); subSpansByDoc = new Spans[clauses.length]; for (int i = 0; i < clauses.length; i++) { subSpans[i] = clauses[i].getSpans(context, acceptDocs, termContexts); @@ -282,7 +282,7 @@ public class NearSpansOrdered extends Spans { private boolean shrinkToAfterShortestMatch() throws IOException { matchStart = subSpans[subSpans.length - 1].start(); matchEnd = subSpans[subSpans.length - 1].end(); - Set possibleMatchPayloads = new HashSet(); + Set possibleMatchPayloads = new HashSet<>(); if (subSpans[subSpans.length - 1].isPayloadAvailable()) { possibleMatchPayloads.addAll(subSpans[subSpans.length - 1].getPayload()); } @@ -296,7 +296,7 @@ public class NearSpansOrdered extends Spans { Spans prevSpans = subSpans[i]; if (collectPayloads && prevSpans.isPayloadAvailable()) { Collection payload = prevSpans.getPayload(); - possiblePayload = new ArrayList(payload.size()); + possiblePayload = new ArrayList<>(payload.size()); possiblePayload.addAll(payload); } @@ -320,7 +320,7 @@ public class NearSpansOrdered extends Spans { prevEnd = ppEnd; if (collectPayloads && prevSpans.isPayloadAvailable()) { Collection payload = prevSpans.getPayload(); - possiblePayload = new ArrayList(payload.size()); + possiblePayload = new ArrayList<>(payload.size()); possiblePayload.addAll(payload); } } diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java b/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java index 8c3f989bed1..544d932383a 100644 --- a/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java +++ b/lucene/core/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java @@ -40,7 +40,7 @@ import java.util.HashSet; public class NearSpansUnordered extends Spans { private SpanNearQuery query; - private List ordered = new ArrayList(); // spans in query order + private List ordered = new ArrayList<>(); // spans in query order private Spans[] subSpans; private int slop; // from query @@ -121,7 +121,7 @@ public class NearSpansUnordered extends Spans { // TODO: Remove warning after API has been finalized @Override public Collection getPayload() throws IOException { - return new ArrayList(spans.getPayload()); + return new ArrayList<>(spans.getPayload()); } // TODO: Remove warning after API has been finalized @@ -250,7 +250,7 @@ public class NearSpansUnordered extends Spans { */ @Override public Collection getPayload() throws IOException { - Set matchPayload = new HashSet(); + Set matchPayload = new HashSet<>(); for (SpansCell cell = first; cell != null; cell = cell.next) { if (cell.isPayloadAvailable()) { matchPayload.addAll(cell.getPayload()); diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java index c412f133953..0774d357df1 100644 --- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java @@ -61,7 +61,7 @@ public class SpanNearQuery extends SpanQuery implements Cloneable { public SpanNearQuery(SpanQuery[] clauses, int slop, boolean 
inOrder, boolean collectPayloads) { // copy clauses array into an ArrayList - this.clauses = new ArrayList(clauses.length); + this.clauses = new ArrayList<>(clauses.length); for (int i = 0; i < clauses.length; i++) { SpanQuery clause = clauses[i]; if (field == null) { // check field diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java index 055ced6b14b..16ae1c6b7b5 100644 --- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNotQuery.java @@ -175,7 +175,7 @@ public class SpanNotQuery extends SpanQuery implements Cloneable { public Collection getPayload() throws IOException { ArrayList result = null; if (includeSpans.isPayloadAvailable()) { - result = new ArrayList(includeSpans.getPayload()); + result = new ArrayList<>(includeSpans.getPayload()); } return result; } diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java index d4ab76f4f18..5f8bcbc1d18 100644 --- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanOrQuery.java @@ -44,7 +44,7 @@ public class SpanOrQuery extends SpanQuery implements Cloneable { public SpanOrQuery(SpanQuery... clauses) { // copy clauses array into an ArrayList - this.clauses = new ArrayList(clauses.length); + this.clauses = new ArrayList<>(clauses.length); for (int i = 0; i < clauses.length; i++) { addClause(clauses[i]); } @@ -242,7 +242,7 @@ public class SpanOrQuery extends SpanQuery implements Cloneable { ArrayList result = null; Spans theTop = top(); if (theTop != null && theTop.isPayloadAvailable()) { - result = new ArrayList(theTop.getPayload()); + result = new ArrayList<>(theTop.getPayload()); } return result; } diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java index edb1e62276a..7c0994e6340 100644 --- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanPositionCheckQuery.java @@ -169,7 +169,7 @@ public abstract class SpanPositionCheckQuery extends SpanQuery implements Clonea public Collection getPayload() throws IOException { ArrayList result = null; if (spans.isPayloadAvailable()) { - result = new ArrayList(spans.getPayload()); + result = new ArrayList<>(spans.getPayload()); } return result;//TODO: any way to avoid the new construction? 
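// [editor's sketch, not part of the patch] The getPayload() methods in the
// hunks above share one idiom: copy the spans' payload into a fresh
// ArrayList<> so callers cannot mutate internal state. A minimal,
// hypothetical standalone rendering (class and field names invented):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

class PayloadHolder {
  private final List<byte[]> payload = new ArrayList<>();

  Collection<byte[]> getPayload() {
    // defensive copy; this allocation is the "new construction"
    // the TODO above asks about avoiding
    return new ArrayList<>(payload);
  }
}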
} diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java index 9acff49e508..0b20cdb4ac7 100644 --- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java +++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java @@ -45,8 +45,8 @@ public class SpanWeight extends Weight { this.similarity = searcher.getSimilarity(); this.query = query; - termContexts = new HashMap(); - TreeSet terms = new TreeSet(); + termContexts = new HashMap<>(); + TreeSet terms = new TreeSet<>(); query.extractTerms(terms); final IndexReaderContext context = searcher.getTopReaderContext(); final TermStatistics termStats[] = new TermStatistics[terms.size()]; diff --git a/lucene/core/src/java/org/apache/lucene/store/CompoundFileDirectory.java b/lucene/core/src/java/org/apache/lucene/store/CompoundFileDirectory.java index d95dca59414..3f7ea846dd9 100644 --- a/lucene/core/src/java/org/apache/lucene/store/CompoundFileDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/CompoundFileDirectory.java @@ -130,7 +130,7 @@ public final class CompoundFileDirectory extends BaseDirectory { entriesStream = dir.openInput(entriesFileName, IOContext.READONCE); CodecUtil.checkHeader(entriesStream, CompoundFileWriter.ENTRY_CODEC, CompoundFileWriter.VERSION_START, CompoundFileWriter.VERSION_START); final int numEntries = entriesStream.readVInt(); - final Map mapping = new HashMap(numEntries); + final Map mapping = new HashMap<>(numEntries); for (int i = 0; i < numEntries; i++) { final FileEntry fileEntry = new FileEntry(); final String id = entriesStream.readString(); diff --git a/lucene/core/src/java/org/apache/lucene/store/CompoundFileWriter.java b/lucene/core/src/java/org/apache/lucene/store/CompoundFileWriter.java index 694bc11f774..f6da132208d 100644 --- a/lucene/core/src/java/org/apache/lucene/store/CompoundFileWriter.java +++ b/lucene/core/src/java/org/apache/lucene/store/CompoundFileWriter.java @@ -60,10 +60,10 @@ final class CompoundFileWriter implements Closeable{ static final String ENTRY_CODEC = "CompoundFileWriterEntries"; private final Directory directory; - private final Map entries = new HashMap(); - private final Set seenIDs = new HashSet(); + private final Map entries = new HashMap<>(); + private final Set seenIDs = new HashSet<>(); // all entries that are written to a sep. file but not yet moved into CFS - private final Queue pendingEntries = new LinkedList(); + private final Queue pendingEntries = new LinkedList<>(); private boolean closed = false; private IndexOutput dataOut; private final AtomicBoolean outputTaken = new AtomicBoolean(false); diff --git a/lucene/core/src/java/org/apache/lucene/store/DataInput.java b/lucene/core/src/java/org/apache/lucene/store/DataInput.java index c5fe95aa923..6bcf18ee523 100644 --- a/lucene/core/src/java/org/apache/lucene/store/DataInput.java +++ b/lucene/core/src/java/org/apache/lucene/store/DataInput.java @@ -212,7 +212,7 @@ public abstract class DataInput implements Cloneable { /** Reads a Map<String,String> previously written * with {@link DataOutput#writeStringStringMap(Map)}. 
*/ public Map readStringStringMap() throws IOException { - final Map map = new HashMap(); + final Map map = new HashMap<>(); final int count = readInt(); for(int i=0;i readStringSet() throws IOException { - final Set set = new HashSet(); + final Set set = new HashSet<>(); final int count = readInt(); for(int i=0;i names) throws IOException { ensureOpen(); - Set toSync = new HashSet(names); + Set toSync = new HashSet<>(names); toSync.retainAll(staleFiles); for (String name : toSync) diff --git a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java index 981670378a6..bb7a257c5ad 100644 --- a/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/FileSwitchDirectory.java @@ -77,7 +77,7 @@ public class FileSwitchDirectory extends BaseDirectory { @Override public String[] listAll() throws IOException { - Set files = new HashSet(); + Set files = new HashSet<>(); // LUCENE-3380: either or both of our dirs could be FSDirs, // but if one underlying delegate is an FSDir and mkdirs() has not // yet been called, because so far everything is written to the other, @@ -154,8 +154,8 @@ public class FileSwitchDirectory extends BaseDirectory { @Override public void sync(Collection names) throws IOException { - List primaryNames = new ArrayList(); - List secondaryNames = new ArrayList(); + List primaryNames = new ArrayList<>(); + List secondaryNames = new ArrayList<>(); for (String name : names) if (primaryExtensions.contains(getExtension(name))) diff --git a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java index c4c092486ec..3823d33265c 100644 --- a/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/NRTCachingDirectory.java @@ -120,7 +120,7 @@ public class NRTCachingDirectory extends Directory { @Override public synchronized String[] listAll() throws IOException { - final Set files = new HashSet(); + final Set files = new HashSet<>(); for(String f : cache.listAll()) { files.add(f); } diff --git a/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java index 60c6bf58fb0..85c2a11904e 100644 --- a/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java +++ b/lucene/core/src/java/org/apache/lucene/store/NativeFSLockFactory.java @@ -146,7 +146,7 @@ class NativeFSLock extends Lock { * (same FileChannel instance or not), so we may want to * change this when Lucene moves to Java 1.6. */ - private static HashSet LOCK_HELD = new HashSet(); + private static HashSet LOCK_HELD = new HashSet<>(); public NativeFSLock(File lockDir, String lockFileName) { this.lockDir = lockDir; diff --git a/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java b/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java index 9ae0d3639df..cac6224a432 100644 --- a/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/RAMDirectory.java @@ -46,7 +46,7 @@ import java.util.concurrent.atomic.AtomicLong; * operating system, so copying data to Java heap space is not useful. 
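// [editor's sketch, not part of the patch] readStringStringMap() above is the
// read side of DataOutput#writeStringStringMap(Map): an int count followed by
// count key/value string pairs. A hedged round-trip example, assuming the
// Lucene 4.x store API; the scratch file name "map.bin" is invented:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class MapRoundTrip {
  public static void main(String[] args) throws IOException {
    RAMDirectory dir = new RAMDirectory();
    Map<String, String> diagnostics = new HashMap<>();
    diagnostics.put("os", "Linux");

    IndexOutput out = dir.createOutput("map.bin", IOContext.DEFAULT);
    out.writeStringStringMap(diagnostics); // count, then key/value pairs
    out.close();

    IndexInput in = dir.openInput("map.bin", IOContext.DEFAULT);
    Map<String, String> read = in.readStringStringMap(); // symmetric read
    in.close();
    System.out.println(read); // {os=Linux}
  }
}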
*/ public class RAMDirectory extends BaseDirectory { - protected final Map fileMap = new ConcurrentHashMap(); + protected final Map fileMap = new ConcurrentHashMap<>(); protected final AtomicLong sizeInBytes = new AtomicLong(); // ***** @@ -113,7 +113,7 @@ public class RAMDirectory extends BaseDirectory { // NOTE: fileMap.keySet().toArray(new String[0]) is broken in non Sun JDKs, // and the code below is resilient to map changes during the array population. Set fileNames = fileMap.keySet(); - List names = new ArrayList(fileNames.size()); + List names = new ArrayList<>(fileNames.size()); for (String name : fileNames) names.add(name); return names.toArray(new String[names.size()]); } diff --git a/lucene/core/src/java/org/apache/lucene/store/RAMFile.java b/lucene/core/src/java/org/apache/lucene/store/RAMFile.java index b89d308f41a..1840ac5491c 100644 --- a/lucene/core/src/java/org/apache/lucene/store/RAMFile.java +++ b/lucene/core/src/java/org/apache/lucene/store/RAMFile.java @@ -23,7 +23,7 @@ import java.util.ArrayList; * Represents a file in RAM as a list of byte[] buffers. * @lucene.internal */ public class RAMFile { - protected ArrayList buffers = new ArrayList(); + protected ArrayList buffers = new ArrayList<>(); long length; RAMDirectory directory; protected long sizeInBytes; diff --git a/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java b/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java index 29a8f19245f..4bc471ee4c7 100644 --- a/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java +++ b/lucene/core/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java @@ -33,7 +33,7 @@ import java.util.HashSet; public class SingleInstanceLockFactory extends LockFactory { - private HashSet locks = new HashSet(); + private HashSet locks = new HashSet<>(); @Override public Lock makeLock(String lockName) { diff --git a/lucene/core/src/java/org/apache/lucene/util/AttributeSource.java b/lucene/core/src/java/org/apache/lucene/util/AttributeSource.java index 30575cc1699..83d87fa4f89 100644 --- a/lucene/core/src/java/org/apache/lucene/util/AttributeSource.java +++ b/lucene/core/src/java/org/apache/lucene/util/AttributeSource.java @@ -145,8 +145,8 @@ public class AttributeSource { * An AttributeSource using the supplied {@link AttributeFactory} for creating new {@link Attribute} instances. */ public AttributeSource(AttributeFactory factory) { - this.attributes = new LinkedHashMap, AttributeImpl>(); - this.attributeImpls = new LinkedHashMap, AttributeImpl>(); + this.attributes = new LinkedHashMap<>(); + this.attributeImpls = new LinkedHashMap<>(); this.currentState = new State[1]; this.factory = factory; } @@ -207,7 +207,7 @@ public class AttributeSource { LinkedList>> foundInterfaces = knownImplClasses.get(clazz); if (foundInterfaces == null) { // we have the slight chance that another thread may do the same, but who cares? 
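// [editor's sketch, not part of the patch] The "who cares?" comment above
// describes a benign race in AttributeSource: two threads may both compute
// the interface list for a class, and whichever publishes first wins. A
// hypothetical standalone rendering of that idiom with a ConcurrentMap:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class BenignRaceCache {
  private final ConcurrentMap<Class<?>, List<Class<?>>> cache =
      new ConcurrentHashMap<>();

  List<Class<?>> interfacesOf(Class<?> clazz) {
    List<Class<?>> found = cache.get(clazz);
    if (found == null) {
      // racing threads may duplicate this work; that is harmless
      found = Arrays.asList(clazz.getInterfaces());
      List<Class<?>> prev = cache.putIfAbsent(clazz, found);
      if (prev != null) {
        found = prev; // another thread won the race; use its result
      }
    }
    return found;
  }
}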
- foundInterfaces = new LinkedList>>(); + foundInterfaces = new LinkedList<>(); // find all interfaces that this attribute instance implements // and that extend the Attribute interface Class actClazz = clazz; diff --git a/lucene/core/src/java/org/apache/lucene/util/CloseableThreadLocal.java b/lucene/core/src/java/org/apache/lucene/util/CloseableThreadLocal.java index e4436cefbbb..9681f42ac38 100644 --- a/lucene/core/src/java/org/apache/lucene/util/CloseableThreadLocal.java +++ b/lucene/core/src/java/org/apache/lucene/util/CloseableThreadLocal.java @@ -55,11 +55,11 @@ import java.util.concurrent.atomic.AtomicInteger; public class CloseableThreadLocal implements Closeable { - private ThreadLocal> t = new ThreadLocal>(); + private ThreadLocal> t = new ThreadLocal<>(); // Use a WeakHashMap so that if a Thread exits and is // GC'able, its entry may be removed: - private Map hardRefs = new WeakHashMap(); + private Map hardRefs = new WeakHashMap<>(); // Increase this to decrease frequency of purging in get: private static int PURGE_MULTIPLIER = 20; @@ -92,7 +92,7 @@ public class CloseableThreadLocal implements Closeable { public void set(T object) { - t.set(new WeakReference(object)); + t.set(new WeakReference<>(object)); synchronized(hardRefs) { hardRefs.put(Thread.currentThread(), object); diff --git a/lucene/core/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java b/lucene/core/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java index 14f2e44de48..00115509955 100644 --- a/lucene/core/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java +++ b/lucene/core/src/java/org/apache/lucene/util/DoubleBarrelLRUCache.java @@ -59,8 +59,8 @@ final public class DoubleBarrelLRUCache(); - cache2 = new ConcurrentHashMap(); + cache1 = new ConcurrentHashMap<>(); + cache2 = new ConcurrentHashMap<>(); } @SuppressWarnings("unchecked") diff --git a/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java b/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java index 5e27861b285..febc4e3b9a3 100644 --- a/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java +++ b/lucene/core/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java @@ -109,13 +109,13 @@ public final class FieldCacheSanityChecker { // // maps the (valId) identityhashCode of cache values to // sets of CacheEntry instances - final MapOfSets valIdToItems = new MapOfSets(new HashMap>(17)); + final MapOfSets valIdToItems = new MapOfSets<>(new HashMap>(17)); // maps ReaderField keys to Sets of ValueIds - final MapOfSets readerFieldToValIds = new MapOfSets(new HashMap>(17)); + final MapOfSets readerFieldToValIds = new MapOfSets<>(new HashMap>(17)); // // any keys that we know result in more then one valId - final Set valMismatchKeys = new HashSet(); + final Set valMismatchKeys = new HashSet<>(); // iterate over all the cacheEntries to get the mappings we'll need for (int i = 0; i < cacheEntries.length; i++) { @@ -144,7 +144,7 @@ public final class FieldCacheSanityChecker { } } - final List insanity = new ArrayList(valMismatchKeys.size() * 3); + final List insanity = new ArrayList<>(valMismatchKeys.size() * 3); insanity.addAll(checkValueMismatch(valIdToItems, readerFieldToValIds, @@ -166,7 +166,7 @@ public final class FieldCacheSanityChecker { MapOfSets readerFieldToValIds, Set valMismatchKeys) { - final List insanity = new ArrayList(valMismatchKeys.size() * 3); + final List insanity = new ArrayList<>(valMismatchKeys.size() * 3); if (! 
valMismatchKeys.isEmpty() ) { // we have multiple values for some ReaderFields @@ -174,7 +174,7 @@ public final class FieldCacheSanityChecker { final Map> rfMap = readerFieldToValIds.getMap(); final Map> valMap = valIdToItems.getMap(); for (final ReaderField rf : valMismatchKeys) { - final List badEntries = new ArrayList(valMismatchKeys.size() * 2); + final List badEntries = new ArrayList<>(valMismatchKeys.size() * 2); for(final Integer value: rfMap.get(rf)) { for (final CacheEntry cacheEntry : valMap.get(value)) { badEntries.add(cacheEntry); @@ -203,15 +203,15 @@ public final class FieldCacheSanityChecker { private Collection checkSubreaders( MapOfSets valIdToItems, MapOfSets readerFieldToValIds) { - final List insanity = new ArrayList(23); + final List insanity = new ArrayList<>(23); - Map> badChildren = new HashMap>(17); - MapOfSets badKids = new MapOfSets(badChildren); // wrapper + Map> badChildren = new HashMap<>(17); + MapOfSets badKids = new MapOfSets<>(badChildren); // wrapper Map> viToItemSets = valIdToItems.getMap(); Map> rfToValIdSets = readerFieldToValIds.getMap(); - Set seen = new HashSet(17); + Set seen = new HashSet<>(17); Set readerFields = rfToValIdSets.keySet(); for (final ReaderField rf : readerFields) { @@ -242,7 +242,7 @@ public final class FieldCacheSanityChecker { for (final ReaderField parent : badChildren.keySet()) { Set kids = badChildren.get(parent); - List badEntries = new ArrayList(kids.size() * 2); + List badEntries = new ArrayList<>(kids.size() * 2); // put parent entr(ies) in first { @@ -277,7 +277,7 @@ public final class FieldCacheSanityChecker { * returned by {@code seed.getCoreCacheKey()} */ private List getAllDescendantReaderKeys(Object seed) { - List all = new ArrayList(17); // will grow as we iter + List all = new ArrayList<>(17); // will grow as we iter all.add(seed); for (int i = 0; i < all.size(); i++) { final Object obj = all.get(i); diff --git a/lucene/core/src/java/org/apache/lucene/util/MapOfSets.java b/lucene/core/src/java/org/apache/lucene/util/MapOfSets.java index b08eb76a3f8..e81449540f5 100644 --- a/lucene/core/src/java/org/apache/lucene/util/MapOfSets.java +++ b/lucene/core/src/java/org/apache/lucene/util/MapOfSets.java @@ -55,7 +55,7 @@ public class MapOfSets { if (theMap.containsKey(key)) { theSet = theMap.get(key); } else { - theSet = new HashSet(23); + theSet = new HashSet<>(23); theMap.put(key, theSet); } theSet.add(val); @@ -72,7 +72,7 @@ public class MapOfSets { if (theMap.containsKey(key)) { theSet = theMap.get(key); } else { - theSet = new HashSet(23); + theSet = new HashSet<>(23); theMap.put(key, theSet); } theSet.addAll(vals); diff --git a/lucene/core/src/java/org/apache/lucene/util/MergedIterator.java b/lucene/core/src/java/org/apache/lucene/util/MergedIterator.java index 9a2770c0ba5..082a68d81e0 100644 --- a/lucene/core/src/java/org/apache/lucene/util/MergedIterator.java +++ b/lucene/core/src/java/org/apache/lucene/util/MergedIterator.java @@ -59,12 +59,12 @@ public final class MergedIterator> implements Iterator... 
iterators) { this.removeDuplicates = removeDuplicates; - queue = new TermMergeQueue(iterators.length); + queue = new TermMergeQueue<>(iterators.length); top = new SubIterator[iterators.length]; int index = 0; for (Iterator iterator : iterators) { if (iterator.hasNext()) { - SubIterator sub = new SubIterator(); + SubIterator sub = new SubIterator<>(); sub.current = iterator.next(); sub.iterator = iterator; sub.index = index++; diff --git a/lucene/core/src/java/org/apache/lucene/util/NamedSPILoader.java b/lucene/core/src/java/org/apache/lucene/util/NamedSPILoader.java index 40caaf95cc9..d41cfeb6597 100644 --- a/lucene/core/src/java/org/apache/lucene/util/NamedSPILoader.java +++ b/lucene/core/src/java/org/apache/lucene/util/NamedSPILoader.java @@ -59,7 +59,7 @@ public final class NamedSPILoader implements * of new service providers on the given classpath/classloader! */ public synchronized void reload(ClassLoader classloader) { - final LinkedHashMap services = new LinkedHashMap(this.services); + final LinkedHashMap services = new LinkedHashMap<>(this.services); final SPIClassIterator loader = SPIClassIterator.get(clazz, classloader); while (loader.hasNext()) { final Class c = loader.next(); diff --git a/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java b/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java index 76781f8b8c7..3d5119c0d60 100644 --- a/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java +++ b/lucene/core/src/java/org/apache/lucene/util/OfflineSorter.java @@ -225,7 +225,7 @@ public final class OfflineSorter { output.delete(); - ArrayList merges = new ArrayList(); + ArrayList merges = new ArrayList<>(); boolean success2 = false; try { ByteSequencesReader is = new ByteSequencesReader(input); diff --git a/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java b/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java index 99504e69958..5d5f1a1461a 100644 --- a/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java +++ b/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java @@ -35,9 +35,9 @@ import org.apache.lucene.store.IndexInput; // TODO: refactor this, byteblockpool, fst.bytestore, and any // other "shift/mask big arrays". there are too many of these classes! public final class PagedBytes { - private final List blocks = new ArrayList(); + private final List blocks = new ArrayList<>(); // TODO: these are unused? 
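// [editor's sketch, not part of the patch] The MapOfSets hunks above use the
// classic get-or-create idiom: look the key up and allocate a HashSet<>(23)
// only on a miss. A hypothetical standalone rendering (the initial capacity
// 23 is copied from the patch; the class name is invented):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class MultiMap<K, V> {
  private final Map<K, Set<V>> map = new HashMap<>();

  int put(K key, V val) {
    Set<V> theSet = map.get(key);
    if (theSet == null) {
      theSet = new HashSet<>(23); // created lazily, once per key
      map.put(key, theSet);
    }
    theSet.add(val);
    return theSet.size();
  }
}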
- private final List blockEnd = new ArrayList(); + private final List blockEnd = new ArrayList<>(); private final int blockSize; private final int blockBits; private final int blockMask; diff --git a/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java b/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java index aedf14aa7c2..9e6ae7d8049 100644 --- a/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java +++ b/lucene/core/src/java/org/apache/lucene/util/QueryBuilder.java @@ -305,7 +305,7 @@ public class QueryBuilder { // phrase query: MultiPhraseQuery mpq = newMultiPhraseQuery(); mpq.setSlop(phraseSlop); - List multiTerms = new ArrayList(); + List multiTerms = new ArrayList<>(); int position = -1; for (int i = 0; i < numTokens; i++) { int positionIncrement = 1; diff --git a/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java b/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java index 06cad08ec68..4176539f155 100644 --- a/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java +++ b/lucene/core/src/java/org/apache/lucene/util/RamUsageEstimator.java @@ -105,7 +105,7 @@ public final class RamUsageEstimator { */ private static final Map,Integer> primitiveSizes; static { - primitiveSizes = new IdentityHashMap,Integer>(); + primitiveSizes = new IdentityHashMap<>(); primitiveSizes.put(boolean.class, Integer.valueOf(NUM_BYTES_BOOLEAN)); primitiveSizes.put(byte.class, Integer.valueOf(NUM_BYTES_BYTE)); primitiveSizes.put(char.class, Integer.valueOf(NUM_BYTES_CHAR)); @@ -403,11 +403,11 @@ public final class RamUsageEstimator { */ private static long measureObjectSize(Object root) { // Objects seen so far. - final IdentityHashSet seen = new IdentityHashSet(); + final IdentityHashSet seen = new IdentityHashSet<>(); // Class cache with reference Field and precalculated shallow size. - final IdentityHashMap, ClassCache> classCache = new IdentityHashMap, ClassCache>(); + final IdentityHashMap, ClassCache> classCache = new IdentityHashMap<>(); // Stack of objects pending traversal. Recursion caused stack overflows. 
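// [editor's sketch, not part of the patch] The comment above explains why
// measureObjectSize() keeps an explicit stack plus an identity "seen" set:
// recursing over deep object graphs overflowed the call stack. A minimal,
// hypothetical illustration of that iterative traversal pattern:

import java.util.ArrayList;
import java.util.IdentityHashMap;
import java.util.List;

class GraphWalker {
  interface Node {
    List<Node> children();
  }

  static int countReachable(Node root) {
    IdentityHashMap<Node, Boolean> seen = new IdentityHashMap<>();
    ArrayList<Node> stack = new ArrayList<>(); // explicit stack, no recursion
    stack.add(root);
    int count = 0;
    while (!stack.isEmpty()) {
      Node n = stack.remove(stack.size() - 1); // pop
      if (n == null || seen.put(n, Boolean.TRUE) != null) {
        continue; // null entry or already visited
      }
      count++;
      stack.addAll(n.children());
    }
    return count;
  }
}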
- final ArrayList stack = new ArrayList(); + final ArrayList stack = new ArrayList<>(); stack.add(root); long totalSize = 0; @@ -486,7 +486,7 @@ public final class RamUsageEstimator { private static ClassCache createCacheEntry(final Class clazz) { ClassCache cachedInfo; long shallowInstanceSize = NUM_BYTES_OBJECT_HEADER; - final ArrayList referenceFields = new ArrayList(32); + final ArrayList referenceFields = new ArrayList<>(32); for (Class c = clazz; c != null; c = c.getSuperclass()) { final Field[] fields = c.getDeclaredFields(); for (final Field f : fields) { diff --git a/lucene/core/src/java/org/apache/lucene/util/SPIClassIterator.java b/lucene/core/src/java/org/apache/lucene/util/SPIClassIterator.java index 0cfa85194ea..764713c88a6 100644 --- a/lucene/core/src/java/org/apache/lucene/util/SPIClassIterator.java +++ b/lucene/core/src/java/org/apache/lucene/util/SPIClassIterator.java @@ -47,11 +47,11 @@ public final class SPIClassIterator implements Iterator> { private Iterator linesIterator; public static SPIClassIterator get(Class clazz) { - return new SPIClassIterator(clazz, Thread.currentThread().getContextClassLoader()); + return new SPIClassIterator<>(clazz, Thread.currentThread().getContextClassLoader()); } public static SPIClassIterator get(Class clazz, ClassLoader loader) { - return new SPIClassIterator(clazz, loader); + return new SPIClassIterator<>(clazz, loader); } /** Utility method to check if some class loader is a (grand-)parent of or the same as another one. @@ -84,7 +84,7 @@ public final class SPIClassIterator implements Iterator> { if (lines != null) { lines.clear(); } else { - lines = new ArrayList(); + lines = new ArrayList<>(); } final URL url = profilesEnum.nextElement(); try { diff --git a/lucene/core/src/java/org/apache/lucene/util/SetOnce.java b/lucene/core/src/java/org/apache/lucene/util/SetOnce.java index 74c2fa8ccb3..5b3cfd0f43b 100644 --- a/lucene/core/src/java/org/apache/lucene/util/SetOnce.java +++ b/lucene/core/src/java/org/apache/lucene/util/SetOnce.java @@ -77,7 +77,7 @@ public final class SetOnce implements Cloneable { @Override public SetOnce clone() { - return obj == null ? new SetOnce() : new SetOnce(obj); + return obj == null ? new SetOnce() : new SetOnce<>(obj); } } diff --git a/lucene/core/src/java/org/apache/lucene/util/WeakIdentityMap.java b/lucene/core/src/java/org/apache/lucene/util/WeakIdentityMap.java index d1fbaaf329a..3ac42973248 100644 --- a/lucene/core/src/java/org/apache/lucene/util/WeakIdentityMap.java +++ b/lucene/core/src/java/org/apache/lucene/util/WeakIdentityMap.java @@ -62,7 +62,7 @@ import java.util.concurrent.ConcurrentHashMap; * @lucene.internal */ public final class WeakIdentityMap { - private final ReferenceQueue queue = new ReferenceQueue(); + private final ReferenceQueue queue = new ReferenceQueue<>(); private final Map backingStore; private final boolean reapOnRead; @@ -80,7 +80,7 @@ public final class WeakIdentityMap { * @param reapOnRead controls if the map cleans up the reference queue on every read operation. */ public static WeakIdentityMap newHashMap(boolean reapOnRead) { - return new WeakIdentityMap(new HashMap(), reapOnRead); + return new WeakIdentityMap<>(new HashMap(), reapOnRead); } /** @@ -96,7 +96,7 @@ public final class WeakIdentityMap { * @param reapOnRead controls if the map cleans up the reference queue on every read operation. 
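// [editor's sketch, not part of the patch] A hedged usage example for the
// WeakIdentityMap factory above: keys are compared by identity rather than
// equals(), and entries disappear once a key becomes GC'able; reapOnRead
// controls whether the reference queue is also drained on reads:

import org.apache.lucene.util.WeakIdentityMap;

public class WeakIdentityDemo {
  public static void main(String[] args) {
    WeakIdentityMap<Object, String> map =
        WeakIdentityMap.newHashMap(true); // reapOnRead = true
    Object key = new Object();
    map.put(key, "payload");
    System.out.println(map.get(key));          // payload
    System.out.println(map.get(new Object())); // null: identity, not equals()
  }
}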
*/ public static WeakIdentityMap newConcurrentHashMap(boolean reapOnRead) { - return new WeakIdentityMap(new ConcurrentHashMap(), reapOnRead); + return new WeakIdentityMap<>(new ConcurrentHashMap(), reapOnRead); } /** Private only constructor, to create use the static factory methods. */ diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java b/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java index f9434891738..8ad8bc0a125 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/Automaton.java @@ -269,8 +269,8 @@ public class Automaton implements Cloneable { public State[] getNumberedStates() { if (numberedStates == null) { expandSingleton(); - final Set visited = new HashSet(); - final LinkedList worklist = new LinkedList(); + final Set visited = new HashSet<>(); + final LinkedList worklist = new LinkedList<>(); numberedStates = new State[4]; int upto = 0; worklist.add(initial); @@ -333,9 +333,9 @@ public class Automaton implements Cloneable { */ public Set getAcceptStates() { expandSingleton(); - HashSet accepts = new HashSet(); - HashSet visited = new HashSet(); - LinkedList worklist = new LinkedList(); + HashSet accepts = new HashSet<>(); + HashSet visited = new HashSet<>(); + LinkedList worklist = new LinkedList<>(); worklist.add(initial); visited.add(initial); while (worklist.size() > 0) { @@ -399,7 +399,7 @@ public class Automaton implements Cloneable { */ int[] getStartPoints() { final State[] states = getNumberedStates(); - Set pointset = new HashSet(); + Set pointset = new HashSet<>(); pointset.add(Character.MIN_CODE_POINT); for (State s : states) { for (Transition t : s.getTransitions()) { @@ -423,7 +423,7 @@ public class Automaton implements Cloneable { */ private State[] getLiveStates() { final State[] states = getNumberedStates(); - Set live = new HashSet(); + Set live = new HashSet<>(); for (State q : states) { if (q.isAccept()) { live.add(q); @@ -432,13 +432,13 @@ public class Automaton implements Cloneable { // map> @SuppressWarnings({"rawtypes","unchecked"}) Set map[] = new Set[states.length]; for (int i = 0; i < map.length; i++) - map[i] = new HashSet(); + map[i] = new HashSet<>(); for (State s : states) { for(int i=0;i worklist = new LinkedList(live); + LinkedList worklist = new LinkedList<>(live); while (worklist.size() > 0) { State s = worklist.removeFirst(); for (State p : map[s.number]) @@ -639,7 +639,7 @@ public class Automaton implements Cloneable { try { Automaton a = (Automaton) super.clone(); if (!isSingleton()) { - HashMap m = new HashMap(); + HashMap m = new HashMap<>(); State[] states = getNumberedStates(); for (State s : states) m.put(s, new State()); diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/BasicAutomata.java b/lucene/core/src/java/org/apache/lucene/util/automaton/BasicAutomata.java index 0a793a638cc..7f51a37db95 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/BasicAutomata.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/BasicAutomata.java @@ -216,10 +216,10 @@ final public class BasicAutomata { by.append('0'); by.append(y); y = by.toString(); - Collection initials = new ArrayList(); + Collection initials = new ArrayList<>(); a.initial = between(x, y, 0, initials, digits <= 0); if (digits <= 0) { - ArrayList pairs = new ArrayList(); + ArrayList pairs = new ArrayList<>(); for (State p : initials) if (a.initial != p) pairs.add(new StatePair(a.initial, p)); 
BasicOperations.addEpsilons(a, pairs); diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/BasicOperations.java b/lucene/core/src/java/org/apache/lucene/util/automaton/BasicOperations.java index 781a4e262e3..1d953585a7b 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/BasicOperations.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/BasicOperations.java @@ -106,7 +106,7 @@ final public class BasicOperations { } else { for (Automaton a : l) if (BasicOperations.isEmpty(a)) return BasicAutomata.makeEmpty(); - Set ids = new HashSet(); + Set ids = new HashSet<>(); for (Automaton a : l) ids.add(System.identityHashCode(a)); boolean has_aliases = ids.size() != l.size(); @@ -187,7 +187,7 @@ final public class BasicOperations { */ static public Automaton repeat(Automaton a, int min) { if (min == 0) return repeat(a); - List as = new ArrayList(); + List as = new ArrayList<>(); while (min-- > 0) as.add(a); as.add(repeat(a)); @@ -210,7 +210,7 @@ final public class BasicOperations { if (min == 0) b = BasicAutomata.makeEmptyString(); else if (min == 1) b = a.clone(); else { - List as = new ArrayList(); + List as = new ArrayList<>(); while (min-- > 0) as.add(a); b = concatenate(as); @@ -287,8 +287,8 @@ final public class BasicOperations { Transition[][] transitions1 = a1.getSortedTransitions(); Transition[][] transitions2 = a2.getSortedTransitions(); Automaton c = new Automaton(); - LinkedList worklist = new LinkedList(); - HashMap newstates = new HashMap(); + LinkedList worklist = new LinkedList<>(); + HashMap newstates = new HashMap<>(); StatePair p = new StatePair(c.initial, a1.initial, a2.initial); worklist.add(p); newstates.put(p, p); @@ -356,8 +356,8 @@ final public class BasicOperations { a2.determinize(); Transition[][] transitions1 = a1.getSortedTransitions(); Transition[][] transitions2 = a2.getSortedTransitions(); - LinkedList worklist = new LinkedList(); - HashSet visited = new HashSet(); + LinkedList worklist = new LinkedList<>(); + HashSet visited = new HashSet<>(); StatePair p = new StatePair(a1.initial, a2.initial); worklist.add(p); visited.add(p); @@ -431,7 +431,7 @@ final public class BasicOperations { * Complexity: linear in number of states. 
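// [editor's sketch, not part of the patch] getNumberedStates(),
// getAcceptStates() and subsetOf() in the hunks above all share one traversal
// idiom: a LinkedList worklist plus a HashSet of visited states. A generic,
// hypothetical rendering of that breadth-first pattern:

import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

class Worklist {
  interface State {
    List<State> next();
  }

  static Set<State> reachable(State start) {
    Set<State> visited = new HashSet<>();
    LinkedList<State> worklist = new LinkedList<>();
    worklist.add(start);
    visited.add(start);
    while (!worklist.isEmpty()) {
      State s = worklist.removeFirst();
      for (State t : s.next()) {
        if (visited.add(t)) { // true only the first time we see t
          worklist.add(t);
        }
      }
    }
    return visited;
  }
}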
*/ public static Automaton union(Collection l) { - Set ids = new HashSet(); + Set ids = new HashSet<>(); for (Automaton a : l) ids.add(System.identityHashCode(a)); boolean has_aliases = ids.size() != l.size(); @@ -500,7 +500,7 @@ final public class BasicOperations { PointTransitions[] points = new PointTransitions[5]; private final static int HASHMAP_CUTOVER = 30; - private final HashMap map = new HashMap(); + private final HashMap map = new HashMap<>(); private boolean useHash = false; private PointTransitions next(int point) { @@ -597,8 +597,8 @@ final public class BasicOperations { a.initial = new State(); SortedIntSet.FrozenIntSet initialset = new SortedIntSet.FrozenIntSet(initNumber, a.initial); - LinkedList worklist = new LinkedList(); - Map newstate = new HashMap(); + LinkedList worklist = new LinkedList<>(); + Map newstate = new HashMap<>(); worklist.add(initialset); @@ -713,25 +713,25 @@ final public class BasicOperations { */ public static void addEpsilons(Automaton a, Collection pairs) { a.expandSingleton(); - HashMap> forward = new HashMap>(); - HashMap> back = new HashMap>(); + HashMap> forward = new HashMap<>(); + HashMap> back = new HashMap<>(); for (StatePair p : pairs) { HashSet to = forward.get(p.s1); if (to == null) { - to = new HashSet(); + to = new HashSet<>(); forward.put(p.s1, to); } to.add(p.s2); HashSet from = back.get(p.s2); if (from == null) { - from = new HashSet(); + from = new HashSet<>(); back.put(p.s2, from); } from.add(p.s1); } // calculate epsilon closure - LinkedList worklist = new LinkedList(pairs); - HashSet workset = new HashSet(pairs); + LinkedList worklist = new LinkedList<>(pairs); + HashSet workset = new HashSet<>(pairs); while (!worklist.isEmpty()) { StatePair p = worklist.removeFirst(); workset.remove(p); @@ -817,12 +817,12 @@ final public class BasicOperations { return p.accept; } else { State[] states = a.getNumberedStates(); - LinkedList pp = new LinkedList(); - LinkedList pp_other = new LinkedList(); + LinkedList pp = new LinkedList<>(); + LinkedList pp_other = new LinkedList<>(); BitSet bb = new BitSet(states.length); BitSet bb_other = new BitSet(states.length); pp.add(a.initial); - ArrayList dest = new ArrayList(); + ArrayList dest = new ArrayList<>(); boolean accept = a.initial.accept; for (int i = 0, c = 0; i < s.length(); i += Character.charCount(c)) { c = s.codePointAt(i); diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java b/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java index cfc46b17607..dcafe709a38 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java @@ -272,7 +272,7 @@ public class CompiledAutomaton { } } - final List stack = new ArrayList(); + final List stack = new ArrayList<>(); int idx = 0; while (true) { diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java b/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java index c6be628bdb7..68ce1e9a051 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/DaciukMihovAutomatonBuilder.java @@ -187,7 +187,7 @@ final class DaciukMihovAutomatonBuilder { /** * A "registry" for state interning. */ - private HashMap stateRegistry = new HashMap(); + private HashMap stateRegistry = new HashMap<>(); /** * Root automaton state. 
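// [editor's sketch, not part of the patch] The concatenate() and union()
// hunks above detect whether the same Automaton instance occurs twice in the
// input by collecting identity hash codes. A standalone rendering of that
// check (note it is a heuristic: an identityHashCode collision could report
// a false alias):

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

class AliasCheck {
  static <T> boolean hasAliases(Collection<T> items) {
    Set<Integer> ids = new HashSet<>();
    for (T item : items) {
      ids.add(System.identityHashCode(item));
    }
    // fewer distinct identities than elements => some object repeats
    return ids.size() != items.size();
  }
}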
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java b/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java index 92384c450f1..869dc3ad551 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/LevenshteinAutomata.java @@ -63,7 +63,7 @@ public class LevenshteinAutomata { this.alphaMax = alphaMax; // calculate the alphabet - SortedSet set = new TreeSet(); + SortedSet set = new TreeSet<>(); for (int i = 0; i < word.length; i++) { int v = word[i]; if (v > alphaMax) { diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/MinimizationOperations.java b/lucene/core/src/java/org/apache/lucene/util/automaton/MinimizationOperations.java index b5fd0cad33b..85f8d58762d 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/MinimizationOperations.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/MinimizationOperations.java @@ -83,13 +83,13 @@ final public class MinimizationOperations { final int[] block = new int[statesLen]; final StateList[][] active = new StateList[statesLen][sigmaLen]; final StateListNode[][] active2 = new StateListNode[statesLen][sigmaLen]; - final LinkedList pending = new LinkedList(); + final LinkedList pending = new LinkedList<>(); final BitSet pending2 = new BitSet(sigmaLen*statesLen); final BitSet split = new BitSet(statesLen), refine = new BitSet(statesLen), refine2 = new BitSet(statesLen); for (int q = 0; q < statesLen; q++) { - splitblock[q] = new ArrayList(); - partition[q] = new HashSet(); + splitblock[q] = new ArrayList<>(); + partition[q] = new HashSet<>(); for (int x = 0; x < sigmaLen; x++) { active[q][x] = new StateList(); } @@ -104,7 +104,7 @@ final public class MinimizationOperations { final ArrayList[] r = reverse[qq.step(sigma[x]).number]; if (r[x] == null) - r[x] = new ArrayList(); + r[x] = new ArrayList<>(); r[x].add(qq); } } diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java b/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java index dec19cb2877..bf5b4be4206 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/RegExp.java @@ -486,14 +486,14 @@ public class RegExp { Automaton a = null; switch (kind) { case REGEXP_UNION: - list = new ArrayList(); + list = new ArrayList<>(); findLeaves(exp1, Kind.REGEXP_UNION, list, automata, automaton_provider); findLeaves(exp2, Kind.REGEXP_UNION, list, automata, automaton_provider); a = BasicOperations.union(list); MinimizationOperations.minimize(a); break; case REGEXP_CONCATENATION: - list = new ArrayList(); + list = new ArrayList<>(); findLeaves(exp1, Kind.REGEXP_CONCATENATION, list, automata, automaton_provider); findLeaves(exp2, Kind.REGEXP_CONCATENATION, list, automata, @@ -664,7 +664,7 @@ public class RegExp { * Returns set of automaton identifiers that occur in this regular expression. 
*/ public Set getIdentifiers() { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); getIdentifiers(set); return set; } diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/SortedIntSet.java b/lucene/core/src/java/org/apache/lucene/util/automaton/SortedIntSet.java index 0cfeae696ba..546c3075998 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/SortedIntSet.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/SortedIntSet.java @@ -35,7 +35,7 @@ final class SortedIntSet { // O(N^2) linear ops to O(N log(N)) TreeMap private final static int TREE_MAP_CUTOVER = 30; - private final Map map = new TreeMap(); + private final Map map = new TreeMap<>(); private boolean useTreeMap; diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java b/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java index d479cd78923..aff2bea591e 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/SpecialOperations.java @@ -95,7 +95,7 @@ final public class SpecialOperations { public static String getCommonPrefix(Automaton a) { if (a.isSingleton()) return a.singleton; StringBuilder b = new StringBuilder(); - HashSet visited = new HashSet(); + HashSet visited = new HashSet<>(); State s = a.initial; boolean done; do { @@ -119,7 +119,7 @@ final public class SpecialOperations { public static BytesRef getCommonPrefixBytesRef(Automaton a) { if (a.isSingleton()) return new BytesRef(a.singleton); BytesRef ref = new BytesRef(10); - HashSet visited = new HashSet(); + HashSet visited = new HashSet<>(); State s = a.initial; boolean done; do { @@ -185,9 +185,9 @@ final public class SpecialOperations { public static Set reverse(Automaton a) { a.expandSingleton(); // reverse all edges - HashMap> m = new HashMap>(); + HashMap> m = new HashMap<>(); State[] states = a.getNumberedStates(); - Set accept = new HashSet(); + Set accept = new HashSet<>(); for (State s : states) if (s.isAccept()) accept.add(s); @@ -223,7 +223,7 @@ final public class SpecialOperations { * the limit is infinite. 
*/ public static Set getFiniteStrings(Automaton a, int limit) { - HashSet strings = new HashSet(); + HashSet strings = new HashSet<>(); if (a.isSingleton()) { if (limit > 0) { strings.add(Util.toUTF32(a.singleton, new IntsRef())); diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java b/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java index 6f52a7a231a..17be0ec152b 100644 --- a/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java +++ b/lucene/core/src/java/org/apache/lucene/util/automaton/UTF32ToUTF8.java @@ -261,7 +261,7 @@ public final class UTF32ToUTF8 { } State[] map = new State[utf32.getNumberedStates().length]; - List pending = new ArrayList(); + List pending = new ArrayList<>(); State utf32State = utf32.getInitialState(); pending.add(utf32State); Automaton utf8 = new Automaton(); diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/Builder.java b/lucene/core/src/java/org/apache/lucene/util/fst/Builder.java index 7a2ee75fa82..5502bba6e08 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/Builder.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/Builder.java @@ -160,9 +160,9 @@ public class Builder { this.shareMaxTailLength = shareMaxTailLength; this.doPackFST = doPackFST; this.acceptableOverheadRatio = acceptableOverheadRatio; - fst = new FST(inputType, outputs, doPackFST, acceptableOverheadRatio, allowArrayArcs, bytesPageBits); + fst = new FST<>(inputType, outputs, doPackFST, acceptableOverheadRatio, allowArrayArcs, bytesPageBits); if (doShareSuffix) { - dedupHash = new NodeHash(fst, fst.bytes.getReverseReader(false)); + dedupHash = new NodeHash<>(fst, fst.bytes.getReverseReader(false)); } else { dedupHash = null; } @@ -172,7 +172,7 @@ public class Builder { (UnCompiledNode[]) new UnCompiledNode[10]; frontier = f; for(int idx=0;idx(this, idx); + frontier[idx] = new UnCompiledNode<>(this, idx); } } @@ -301,7 +301,7 @@ public class Builder { // undecided on whether to prune it. 
later, it // will be either compiled or pruned, so we must // allocate a new node: - frontier[idx] = new UnCompiledNode(this, idx); + frontier[idx] = new UnCompiledNode<>(this, idx); } } } @@ -384,7 +384,7 @@ public class Builder { new UnCompiledNode[ArrayUtil.oversize(input.length+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; System.arraycopy(frontier, 0, next, 0, frontier.length); for(int idx=frontier.length;idx(this, idx); + next[idx] = new UnCompiledNode<>(this, idx); } frontier = next; } @@ -553,7 +553,7 @@ public class Builder { public UnCompiledNode(Builder owner, int depth) { this.owner = owner; arcs = (Arc[]) new Arc[1]; - arcs[0] = new Arc(); + arcs[0] = new Arc<>(); output = owner.NO_OUTPUT; this.depth = depth; } @@ -587,7 +587,7 @@ public class Builder { new Arc[ArrayUtil.oversize(numArcs+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)]; System.arraycopy(arcs, 0, newArcs, 0, arcs.length); for(int arcIdx=numArcs;arcIdx(); + newArcs[arcIdx] = new Arc<>(); } arcs = newArcs; } diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/BytesRefFSTEnum.java b/lucene/core/src/java/org/apache/lucene/util/fst/BytesRefFSTEnum.java index baff0b07175..53f35855388 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/BytesRefFSTEnum.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/BytesRefFSTEnum.java @@ -30,7 +30,7 @@ import org.apache.lucene.util.BytesRef; public final class BytesRefFSTEnum extends FSTEnum { private final BytesRef current = new BytesRef(10); - private final InputOutput result = new InputOutput(); + private final InputOutput result = new InputOutput<>(); private BytesRef target; /** Holds a single input (BytesRef) + output pair. */ diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/BytesStore.java b/lucene/core/src/java/org/apache/lucene/util/fst/BytesStore.java index 4b20947e3a7..c7a0c898781 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/BytesStore.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/BytesStore.java @@ -29,7 +29,7 @@ import org.apache.lucene.store.DataOutput; class BytesStore extends DataOutput { - private final List blocks = new ArrayList(); + private final List blocks = new ArrayList<>(); private final int blockSize; private final int blockBits; diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/FST.java b/lucene/core/src/java/org/apache/lucene/util/fst/FST.java index 74e5a0f4a78..f5977e00db6 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/FST.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/FST.java @@ -437,7 +437,7 @@ public final class FST { } public void readRootArcs(Arc[] arcs) throws IOException { - final Arc arc = new Arc(); + final Arc arc = new Arc<>(); getFirstArc(arc); final BytesReader in = getBytesReader(); if (targetHasArcs(arc)) { @@ -592,7 +592,7 @@ public final class FST { InputStream is = new BufferedInputStream(new FileInputStream(file)); boolean success = false; try { - FST fst = new FST(new InputStreamDataInput(is), outputs); + FST fst = new FST<>(new InputStreamDataInput(is), outputs); success = true; return fst; } finally { @@ -1349,7 +1349,7 @@ public final class FST { // TODO: must assert this FST was built with // "willRewrite" - final List> queue = new ArrayList>(); + final List> queue = new ArrayList<>(); // TODO: use bitset to not revisit nodes already // visited @@ -1358,7 +1358,7 @@ public final class FST { int saved = 0; queue.add(new ArcAndState(getFirstArc(new Arc()), new IntsRef())); - Arc scratchArc = new Arc(); + Arc scratchArc = new Arc<>(); 
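Note what the Builder hunks above do not touch: the raw array creations with unchecked casts, such as (Arc[]) new Arc[1]. The diamond applies only to constructor invocations, never to array creation, so a parameterized-type array still needs the raw-array-plus-cast idiom, while each element assignment can use <>. A self-contained sketch (newBuckets is a hypothetical helper, not a Lucene method):

    import java.util.ArrayList;
    import java.util.List;

    class GenericArrayDiamond {
      @SuppressWarnings({"unchecked", "rawtypes"})
      static <T> List<T>[] newBuckets(int n) {
        // "new List<T>[n]" and "new ArrayList<>[n]" do not compile: arrays of
        // parameterized types cannot be created directly, so the patch leaves
        // these casts alone and uses <> only on the per-element constructors.
        List<T>[] buckets = (List<T>[]) new ArrayList[n];
        for (int i = 0; i < n; i++) {
          buckets[i] = new ArrayList<>();
        }
        return buckets;
      }

      public static void main(String[] args) {
        List<String>[] buckets = newBuckets(3);
        buckets[0].add("a");
        System.out.println(buckets[0]);
      }
    }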
while(queue.size() > 0) { //System.out.println("cycle size=" + queue.size()); //for(ArcAndState ent : queue) { @@ -1499,7 +1499,7 @@ public final class FST { throw new IllegalArgumentException("this FST was not built with willPackFST=true"); } - Arc arc = new Arc(); + Arc arc = new Arc<>(); final BytesReader r = getBytesReader(); @@ -1526,7 +1526,7 @@ public final class FST { // Free up RAM: inCounts = null; - final Map topNodeMap = new HashMap(); + final Map topNodeMap = new HashMap<>(); for(int downTo=q.size()-1;downTo>=0;downTo--) { NodeAndInCount n = q.pop(); topNodeMap.put(n.node, downTo); @@ -1558,7 +1558,7 @@ public final class FST { // for assert: boolean negDelta = false; - fst = new FST(inputType, outputs, bytes.getBlockBits()); + fst = new FST<>(inputType, outputs, bytes.getBlockBits()); final BytesStore writer = fst.bytes; diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/FSTEnum.java b/lucene/core/src/java/org/apache/lucene/util/fst/FSTEnum.java index ea0c68d8f85..60ab642898e 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/FSTEnum.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/FSTEnum.java @@ -36,7 +36,7 @@ abstract class FSTEnum { protected final T NO_OUTPUT; protected final FST.BytesReader fstReader; - protected final FST.Arc scratchArc = new FST.Arc(); + protected final FST.Arc scratchArc = new FST.Arc<>(); protected int upto; protected int targetLength; @@ -522,7 +522,7 @@ abstract class FSTEnum { private FST.Arc getArc(int idx) { if (arcs[idx] == null) { - arcs[idx] = new FST.Arc(); + arcs[idx] = new FST.Arc<>(); } return arcs[idx]; } diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/IntsRefFSTEnum.java b/lucene/core/src/java/org/apache/lucene/util/fst/IntsRefFSTEnum.java index ff4b80a8102..d45f5a6c3bc 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/IntsRefFSTEnum.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/IntsRefFSTEnum.java @@ -30,7 +30,7 @@ import java.io.IOException; public final class IntsRefFSTEnum extends FSTEnum { private final IntsRef current = new IntsRef(10); - private final InputOutput result = new InputOutput(); + private final InputOutput result = new InputOutput<>(); private IntsRef target; /** Holds a single input (IntsRef) + output pair. 
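In the PairOutputs hunk that follows, new Pair(a, b) becomes new Pair<>(a, b): here the compiler infers the pair's type arguments from the assignment target and checks them against the constructor arguments. A self-contained stand-in for Lucene's PairOutputs.Pair (field names are illustrative):

    class Pair<A, B> {
      final A a;
      final B b;

      Pair(A a, B b) {
        this.a = a;
        this.b = b;
      }

      public static void main(String[] args) {
        // The diamond infers <String, Integer>; before Java 7 this read
        // new Pair<String, Integer>("count", 42).
        Pair<String, Integer> p = new Pair<>("count", 42);
        System.out.println(p.a + "=" + p.b);
      }
    }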
*/ diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java b/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java index 32123303758..04a01c1b23b 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/NodeHash.java @@ -29,7 +29,7 @@ final class NodeHash { private long count; private long mask; private final FST fst; - private final FST.Arc scratchArc = new FST.Arc(); + private final FST.Arc scratchArc = new FST.Arc<>(); private final FST.BytesReader in; public NodeHash(FST fst, FST.BytesReader in) { diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/PairOutputs.java b/lucene/core/src/java/org/apache/lucene/util/fst/PairOutputs.java index e625ca06342..b9d5da6e093 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/PairOutputs.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/PairOutputs.java @@ -66,7 +66,7 @@ public class PairOutputs extends Outputs> { public PairOutputs(Outputs outputs1, Outputs outputs2) { this.outputs1 = outputs1; this.outputs2 = outputs2; - NO_OUTPUT = new Pair(outputs1.getNoOutput(), outputs2.getNoOutput()); + NO_OUTPUT = new Pair<>(outputs1.getNoOutput(), outputs2.getNoOutput()); } /** Create a new Pair */ @@ -81,7 +81,7 @@ public class PairOutputs extends Outputs> { if (a == outputs1.getNoOutput() && b == outputs2.getNoOutput()) { return NO_OUTPUT; } else { - final Pair p = new Pair(a, b); + final Pair p = new Pair<>(a, b); assert valid(p); return p; } diff --git a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java index 0e20e712271..21ea3dab3cc 100644 --- a/lucene/core/src/java/org/apache/lucene/util/fst/Util.java +++ b/lucene/core/src/java/org/apache/lucene/util/fst/Util.java @@ -104,7 +104,7 @@ public final class Util { // TODO: would be nice not to alloc this on every lookup FST.Arc arc = fst.getFirstArc(new FST.Arc()); - FST.Arc scratchArc = new FST.Arc(); + FST.Arc scratchArc = new FST.Arc<>(); final IntsRef result = new IntsRef(); @@ -288,7 +288,7 @@ public final class Util { private final int topN; private final int maxQueueDepth; - private final FST.Arc scratchArc = new FST.Arc(); + private final FST.Arc scratchArc = new FST.Arc<>(); final Comparator comparator; @@ -301,7 +301,7 @@ public final class Util { this.maxQueueDepth = maxQueueDepth; this.comparator = comparator; - queue = new TreeSet>(new TieBreakByInputComparator(comparator)); + queue = new TreeSet<>(new TieBreakByInputComparator<>(comparator)); } // If back plus this arc is competitive then add to queue: @@ -344,7 +344,7 @@ public final class Util { System.arraycopy(path.input.ints, 0, newInput.ints, 0, path.input.length); newInput.ints[path.input.length] = path.arc.label; newInput.length = path.input.length+1; - final FSTPath newPath = new FSTPath(cost, path.arc, newInput); + final FSTPath newPath = new FSTPath<>(cost, path.arc, newInput); queue.add(newPath); @@ -362,7 +362,7 @@ public final class Util { startOutput = fst.outputs.getNoOutput(); } - FSTPath path = new FSTPath(startOutput, node, input); + FSTPath path = new FSTPath<>(startOutput, node, input); fst.readFirstTargetArc(node, path.arc, bytesReader); //System.out.println("add start paths"); @@ -381,7 +381,7 @@ public final class Util { public MinResult[] search() throws IOException { - final List> results = new ArrayList>(); + final List> results = new ArrayList<>(); //System.out.println("search topN=" + topN); @@ -422,7 +422,7 @@ public final 
class Util { //System.out.println(" empty string! cost=" + path.cost); // Empty string! path.input.length--; - results.add(new MinResult(path.input, path.cost)); + results.add(new MinResult<>(path.input, path.cost)); continue; } @@ -486,7 +486,7 @@ public final class Util { T finalOutput = fst.outputs.add(path.cost, path.arc.output); if (acceptResult(path.input, finalOutput)) { //System.out.println(" add result: " + path); - results.add(new MinResult(path.input, finalOutput)); + results.add(new MinResult<>(path.input, finalOutput)); } else { rejectCount++; assert rejectCount + topN <= maxQueueDepth: "maxQueueDepth (" + maxQueueDepth + ") is too small for topN (" + topN + "): rejected " + rejectCount + " paths"; @@ -529,7 +529,7 @@ public final class Util { // All paths are kept, so we can pass topN for // maxQueueDepth and the pruning is admissible: - TopNSearcher searcher = new TopNSearcher(fst, topN, topN, comparator); + TopNSearcher searcher = new TopNSearcher<>(fst, topN, topN, comparator); // since this search is initialized with a single start node // it is okay to start with an empty input path here @@ -578,15 +578,15 @@ public final class Util { final FST.Arc startArc = fst.getFirstArc(new FST.Arc()); // A queue of transitions to consider for the next level. - final List> thisLevelQueue = new ArrayList>(); + final List> thisLevelQueue = new ArrayList<>(); // A queue of transitions to consider when processing the next level. - final List> nextLevelQueue = new ArrayList>(); + final List> nextLevelQueue = new ArrayList<>(); nextLevelQueue.add(startArc); //System.out.println("toDot: startArc: " + startArc); // A list of states on the same level (for ranking). - final List sameLevelStates = new ArrayList(); + final List sameLevelStates = new ArrayList<>(); // A bitset of already seen states (target offset). final BitSet seen = new BitSet(); @@ -609,7 +609,7 @@ public final class Util { final T NO_OUTPUT = fst.outputs.getNoOutput(); final BytesReader r = fst.getBytesReader(); - // final FST.Arc scratchArc = new FST.Arc(); + // final FST.Arc scratchArc = new FST.Arc<>(); { final String stateColor; diff --git a/lucene/core/src/test/org/apache/lucene/TestSearch.java b/lucene/core/src/test/org/apache/lucene/TestSearch.java index 1eaeb1d6eb8..cb8920151d5 100644 --- a/lucene/core/src/test/org/apache/lucene/TestSearch.java +++ b/lucene/core/src/test/org/apache/lucene/TestSearch.java @@ -159,7 +159,7 @@ public class TestSearch extends LuceneTestCase { } private List buildQueries() { - List queries = new ArrayList(); + List queries = new ArrayList<>(); BooleanQuery booleanAB = new BooleanQuery(); booleanAB.add(new TermQuery(new Term("contents", "a")), BooleanClause.Occur.SHOULD); diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java index d215f8e799f..c49a3ee2f01 100644 --- a/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java +++ b/lucene/core/src/test/org/apache/lucene/analysis/TestGraphTokenizers.java @@ -118,7 +118,7 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase { final String[] parts = sb.toString().split(" "); - tokens = new ArrayList(); + tokens = new ArrayList<>(); int pos = 0; int maxPos = -1; int offset = 0; @@ -454,7 +454,7 @@ public class TestGraphTokenizers extends BaseTokenStreamTestCase { private static final Automaton HOLE_A = BasicAutomata.makeChar(TokenStreamToAutomaton.HOLE); private Automaton join(String ... 
strings) { - List as = new ArrayList(); + List as = new ArrayList<>(); for(String s : strings) { as.add(BasicAutomata.makeString(s)); as.add(SEP_A); diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java index 4c395b12c97..cf50927291b 100644 --- a/lucene/core/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java +++ b/lucene/core/src/test/org/apache/lucene/analysis/TrivialLookaheadFilter.java @@ -78,7 +78,7 @@ final public class TrivialLookaheadFilter extends LookaheadTokenFilter facts = new ArrayList(); + List facts = new ArrayList<>(); boolean haveSentence = false; do { if (peekToken()) { diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java index a18d6cdba10..a149e3bd5c7 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestLucene40PostingsReader.java @@ -112,7 +112,7 @@ public class TestLucene40PostingsReader extends LuceneTestCase { } String fieldValue(int maxTF) { - ArrayList shuffled = new ArrayList(); + ArrayList shuffled = new ArrayList<>(); StringBuilder sb = new StringBuilder(); int i = random().nextInt(terms.length); while (i < terms.length) { diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java index 59bcd81a732..6712134daf5 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene40/TestReuseDocsEnum.java @@ -62,7 +62,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { AtomicReader indexReader = ctx.reader(); Terms terms = indexReader.terms("body"); TermsEnum iterator = terms.iterator(null); - IdentityHashMap enums = new IdentityHashMap(); + IdentityHashMap enums = new IdentityHashMap<>(); MatchNoBits bits = new Bits.MatchNoBits(indexReader.maxDoc()); while ((iterator.next()) != null) { DocsEnum docs = iterator.docs(random().nextBoolean() ? bits : new Bits.MatchNoBits(indexReader.maxDoc()), null, random().nextBoolean() ? 
DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE); @@ -88,7 +88,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { for (AtomicReaderContext ctx : open.leaves()) { Terms terms = ctx.reader().terms("body"); TermsEnum iterator = terms.iterator(null); - IdentityHashMap enums = new IdentityHashMap(); + IdentityHashMap enums = new IdentityHashMap<>(); MatchNoBits bits = new Bits.MatchNoBits(open.maxDoc()); DocsEnum docs = null; while ((iterator.next()) != null) { @@ -139,7 +139,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { for (AtomicReaderContext ctx : leaves) { Terms terms = ctx.reader().terms("body"); TermsEnum iterator = terms.iterator(null); - IdentityHashMap enums = new IdentityHashMap(); + IdentityHashMap enums = new IdentityHashMap<>(); MatchNoBits bits = new Bits.MatchNoBits(firstReader.maxDoc()); iterator = terms.iterator(null); DocsEnum docs = null; diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java index c4592e9b3a4..ee294d22820 100644 --- a/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java +++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene41/TestBlockPostingsFormat3.java @@ -201,7 +201,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase { Random random = random(); // collect this number of terms from the left side - HashSet tests = new HashSet(); + HashSet tests = new HashSet<>(); int numPasses = 0; while (numPasses < 10 && tests.size() < numTests) { leftEnum = leftTerms.iterator(leftEnum); @@ -228,7 +228,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase { numPasses++; } - ArrayList shuffledTests = new ArrayList(tests); + ArrayList shuffledTests = new ArrayList<>(tests); Collections.shuffle(shuffledTests, random); for (BytesRef b : shuffledTests) { diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java index b4249d0dc29..8616615a2dd 100644 --- a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java +++ b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java @@ -52,7 +52,7 @@ public class Test2BTerms extends LuceneTestCase { private final int tokensPerDoc; private int tokenCount; - public final List savedTerms = new ArrayList(); + public final List savedTerms = new ArrayList<>(); private int nextSave; private final Random random; @@ -204,7 +204,7 @@ public class Test2BTerms extends LuceneTestCase { savedTerms = findTerms(r); } final int numSavedTerms = savedTerms.size(); - final List bigOrdTerms = new ArrayList(savedTerms.subList(numSavedTerms-10, numSavedTerms)); + final List bigOrdTerms = new ArrayList<>(savedTerms.subList(numSavedTerms-10, numSavedTerms)); System.out.println("TEST: test big ord terms..."); testSavedTerms(r, bigOrdTerms); System.out.println("TEST: test all saved terms..."); @@ -223,7 +223,7 @@ public class Test2BTerms extends LuceneTestCase { private List findTerms(IndexReader r) throws IOException { System.out.println("TEST: findTerms"); final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null); - final List savedTerms = new ArrayList(); + final List savedTerms = new ArrayList<>(); int nextSave = TestUtil.nextInt(random(), 500000, 1000000); BytesRef term; while((term = termsEnum.next()) != null) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java 
b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java index 31aeac89ae2..27fd97ef298 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java @@ -653,7 +653,7 @@ public class TestAddIndexes extends LuceneTestCase { Directory dir, dir2; final static int NUM_INIT_DOCS = 17; IndexWriter writer2; - final List failures = new ArrayList(); + final List failures = new ArrayList<>(); volatile boolean didClose; final IndexReader[] readers; final int NUM_COPY; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java index ca8817e5a0f..efd759f5229 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java @@ -222,10 +222,10 @@ public class TestBackwardsCompatibility extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { assertFalse("test infra is broken!", LuceneTestCase.OLD_FORMAT_IMPERSONATION_IS_ACTIVE); - List names = new ArrayList(oldNames.length + oldSingleSegmentNames.length); + List names = new ArrayList<>(oldNames.length + oldSingleSegmentNames.length); names.addAll(Arrays.asList(oldNames)); names.addAll(Arrays.asList(oldSingleSegmentNames)); - oldIndexDirs = new HashMap(); + oldIndexDirs = new HashMap<>(); for (String name : names) { File dir = TestUtil.getTempDir(name); File dataFile = new File(TestBackwardsCompatibility.class.getResource("index." + name + ".zip").toURI()); @@ -935,7 +935,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { } public void testUpgradeOldIndex() throws Exception { - List names = new ArrayList(oldNames.length + oldSingleSegmentNames.length); + List names = new ArrayList<>(oldNames.length + oldSingleSegmentNames.length); names.addAll(Arrays.asList(oldNames)); names.addAll(Arrays.asList(oldSingleSegmentNames)); for(String name : names) { @@ -961,7 +961,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase { String path = dir.getAbsolutePath(); - List args = new ArrayList(); + List args = new ArrayList<>(); if (random().nextBoolean()) { args.add("-verbose"); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java index 8b3c93be473..61b41136cf0 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java @@ -43,7 +43,7 @@ import org.apache.lucene.util.TestUtil; @SuppressCodecs({"Direct", "Memory"}) // at night this makes like 200k/300k docs and will make Direct's heart beat! 
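Constructor arguments do not get in the way of the inference: the TestBackwardsCompatibility hunk above passes an initial capacity to new ArrayList<>(...), and the Test2BTerms hunk earlier passes a source collection to the copy constructor. A sketch under those two shapes (array contents here are made up for illustration):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class DiamondWithCtorArgs {
      public static void main(String[] args) {
        String[] oldNames = {"30.cfs", "31.cfs"};
        String[] oldSingleSegmentNames = {"31.optimized.cfs"};
        // A capacity argument is just an int; inference still comes from the target type:
        List<String> names = new ArrayList<>(oldNames.length + oldSingleSegmentNames.length);
        names.addAll(Arrays.asList(oldNames));
        names.addAll(Arrays.asList(oldSingleSegmentNames));
        // The copy constructor works the same way, inferring from the target
        // while type-checking the source collection:
        List<String> copy = new ArrayList<>(names.subList(0, 2));
        System.out.println(names + " / " + copy);
      }
    }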
public class TestBagOfPositions extends LuceneTestCase { public void test() throws Exception { - List postingsList = new ArrayList(); + List postingsList = new ArrayList<>(); int numTerms = atLeast(300); final int maxTermsPerDoc = TestUtil.nextInt(random(), 10, 20); boolean isSimpleText = "SimpleText".equals(TestUtil.getPostingsFormat("field")); @@ -66,7 +66,7 @@ public class TestBagOfPositions extends LuceneTestCase { } Collections.shuffle(postingsList, random()); - final ConcurrentLinkedQueue postings = new ConcurrentLinkedQueue(postingsList); + final ConcurrentLinkedQueue postings = new ConcurrentLinkedQueue<>(postingsList); Directory dir = newFSDirectory(TestUtil.getTempDir("bagofpositions")); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java index 572069961bf..a3067e01cde 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java @@ -42,7 +42,7 @@ import org.apache.lucene.util.TestUtil; @SuppressCodecs({"Direct", "Memory"}) // at night this makes like 200k/300k docs and will make Direct's heart beat! public class TestBagOfPostings extends LuceneTestCase { public void test() throws Exception { - List postingsList = new ArrayList(); + List postingsList = new ArrayList<>(); int numTerms = atLeast(300); final int maxTermsPerDoc = TestUtil.nextInt(random(), 10, 20); @@ -68,7 +68,7 @@ public class TestBagOfPostings extends LuceneTestCase { } Collections.shuffle(postingsList, random()); - final ConcurrentLinkedQueue postings = new ConcurrentLinkedQueue(postingsList); + final ConcurrentLinkedQueue postings = new ConcurrentLinkedQueue<>(postingsList); Directory dir = newFSDirectory(TestUtil.getTempDir("bagofpostings")); final RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc); @@ -93,7 +93,7 @@ public class TestBagOfPostings extends LuceneTestCase { startingGun.await(); while (!postings.isEmpty()) { StringBuilder text = new StringBuilder(); - Set visited = new HashSet(); + Set visited = new HashSet<>(); for (int i = 0; i < maxTermsPerDoc; i++) { String token = postings.poll(); if (token == null) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java b/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java index 66542ab0b49..ba2102eb94a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCheckIndex.java @@ -90,7 +90,7 @@ public class TestCheckIndex extends LuceneTestCase { assertEquals(18, seg.termVectorStatus.totVectors); assertTrue(seg.diagnostics.size() > 0); - final List onlySegments = new ArrayList(); + final List onlySegments = new ArrayList<>(); onlySegments.add("_0"); assertTrue(checker.checkIndex(onlySegments).clean == true); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java index 30a6bd942e0..5b33629d3b7 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java @@ -180,7 +180,7 @@ public class TestCodecs extends LuceneTestCase { //final int numTerms = 2; final TermData[] terms = new TermData[numTerms]; - final HashSet termsSeen = new HashSet(); + final HashSet termsSeen = new HashSet<>(); for(int i=0;i seen = new HashSet(); + Set seen = new HashSet<>(); public KeepLastNDeletionPolicy(int 
numToKeep) { this.numToKeep = numToKeep; @@ -228,7 +228,7 @@ public class TestDeletionPolicy extends LuceneTestCase { mp.setNoCFSRatio(1.0); IndexWriter writer = new IndexWriter(dir, conf); ExpirationTimeDeletionPolicy policy = (ExpirationTimeDeletionPolicy) writer.getConfig().getIndexDeletionPolicy(); - Map commitData = new HashMap(); + Map commitData = new HashMap<>(); commitData.put("commitTime", String.valueOf(System.currentTimeMillis())); writer.setCommitData(commitData); writer.commit(); @@ -250,7 +250,7 @@ public class TestDeletionPolicy extends LuceneTestCase { for(int j=0;j<17;j++) { addDoc(writer); } - commitData = new HashMap(); + commitData = new HashMap<>(); commitData.put("commitTime", String.valueOf(System.currentTimeMillis())); writer.setCommitData(commitData); writer.commit(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java index bc7358fbe74..89fd37733b5 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReader.java @@ -250,10 +250,10 @@ public class TestDirectoryReader extends LuceneTestCase { reader = DirectoryReader.open(d); fieldInfos = MultiFields.getMergedFieldInfos(reader); - Collection allFieldNames = new HashSet(); - Collection indexedFieldNames = new HashSet(); - Collection notIndexedFieldNames = new HashSet(); - Collection tvFieldNames = new HashSet(); + Collection allFieldNames = new HashSet<>(); + Collection indexedFieldNames = new HashSet<>(); + Collection notIndexedFieldNames = new HashSet<>(); + Collection tvFieldNames = new HashSet<>(); for(FieldInfo fieldInfo : fieldInfos) { final String name = fieldInfo.name; @@ -743,7 +743,7 @@ public void testFilesOpenClose() throws IOException { Collection commits = DirectoryReader.listCommits(dir); for (final IndexCommit commit : commits) { Collection files = commit.getFileNames(); - HashSet seen = new HashSet(); + HashSet seen = new HashSet<>(); for (final String fileName : files) { assertTrue("file " + fileName + " was duplicated", !seen.contains(fileName)); seen.add(fileName); @@ -1079,7 +1079,7 @@ public void testFilesOpenClose() throws IOException { writer.addDocument(doc); DirectoryReader r = writer.getReader(); writer.close(); - Set fieldsToLoad = new HashSet(); + Set fieldsToLoad = new HashSet<>(); assertEquals(0, r.document(0, fieldsToLoad).getFields().size()); fieldsToLoad.add("field1"); StoredDocument doc2 = r.document(0, fieldsToLoad); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java index 89389530217..a3bb7335a8a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java @@ -552,14 +552,14 @@ public class TestDirectoryReaderReopen extends LuceneTestCase { Document doc = new Document(); doc.add(newStringField("id", ""+i, Field.Store.NO)); writer.addDocument(doc); - Map data = new HashMap(); + Map data = new HashMap<>(); data.put("index", i+""); writer.setCommitData(data); writer.commit(); } for(int i=0;i<4;i++) { writer.deleteDocuments(new Term("id", ""+i)); - Map data = new HashMap(); + Map data = new HashMap<>(); data.put("index", (4+i)+""); writer.setCommitData(data); writer.commit(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java 
b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java index 53ee2cc1736..1ef079e6da2 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java @@ -71,7 +71,7 @@ public class TestDoc extends LuceneTestCase { Directory directory = newFSDirectory(indexDir); directory.close(); - files = new LinkedList(); + files = new LinkedList<>(); files.add(createOutput("test.txt", "This is the first test file" )); @@ -229,7 +229,7 @@ public class TestDoc extends LuceneTestCase { final SegmentInfo info = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, si1.info.getDocCount() + si2.info.getDocCount(), false, codec, null); - info.setFiles(new HashSet(trackingDir.getCreatedFiles())); + info.setFiles(new HashSet<>(trackingDir.getCreatedFiles())); if (useCompoundFile) { Collection filesToDelete = IndexWriter.createCompoundFile(InfoStream.getDefault(), dir, MergeState.CheckAbort.NONE, info, newIOContext(random())); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java b/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java index d557859cf0d..de4534fd83e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java @@ -95,7 +95,7 @@ public class TestDocTermOrds extends LuceneTestCase { Directory dir = newDirectory(); final int NUM_TERMS = atLeast(20); - final Set terms = new HashSet(); + final Set terms = new HashSet<>(); while(terms.size() < NUM_TERMS) { final String s = TestUtil.randomRealisticUnicodeString(random()); //final String s = _TestUtil.randomSimpleString(random); @@ -120,7 +120,7 @@ public class TestDocTermOrds extends LuceneTestCase { final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf); final int[][] idToOrds = new int[NUM_DOCS][]; - final Set ordsForDocSet = new HashSet(); + final Set ordsForDocSet = new HashSet<>(); for(int id=0;id prefixes = new HashSet(); + final Set prefixes = new HashSet<>(); final int numPrefix = TestUtil.nextInt(random(), 2, 7); if (VERBOSE) { System.out.println("TEST: use " + numPrefix + " prefixes"); @@ -193,7 +193,7 @@ public class TestDocTermOrds extends LuceneTestCase { final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]); final int NUM_TERMS = atLeast(20); - final Set terms = new HashSet(); + final Set terms = new HashSet<>(); while(terms.size() < NUM_TERMS) { final String s = prefixesArray[random().nextInt(prefixesArray.length)] + TestUtil.randomRealisticUnicodeString(random()); //final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomSimpleString(random); @@ -217,7 +217,7 @@ public class TestDocTermOrds extends LuceneTestCase { final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf); final int[][] idToOrds = new int[NUM_DOCS][]; - final Set ordsForDocSet = new HashSet(); + final Set ordsForDocSet = new HashSet<>(); for(int id=0;id newOrds = new ArrayList(); + final List newOrds = new ArrayList<>(); for(int ord : idToOrds[id]) { if (StringHelper.startsWith(termsArray[ord], prefixRef)) { newOrds.add(ord); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java index 0ba48979694..b2188bf00e7 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java +++ 
b/lucene/core/src/test/org/apache/lucene/index/TestDocValuesWithThreads.java @@ -43,9 +43,9 @@ public class TestDocValuesWithThreads extends LuceneTestCase { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); - final List numbers = new ArrayList(); - final List binary = new ArrayList(); - final List sorted = new ArrayList(); + final List numbers = new ArrayList<>(); + final List binary = new ArrayList<>(); + final List sorted = new ArrayList<>(); final int numDocs = atLeast(100); for(int i=0;i threads = new ArrayList(); + List threads = new ArrayList<>(); final CountDownLatch startingGun = new CountDownLatch(1); for(int t=0;t seen = new HashSet(); + final Set seen = new HashSet<>(); if (VERBOSE) { System.out.println("TEST: NUM_DOCS=" + NUM_DOCS + " allowDups=" + allowDups); } int numDocs = 0; - final List docValues = new ArrayList(); + final List docValues = new ArrayList<>(); // TODO: deletions while (numDocs < NUM_DOCS) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java index 3bd8797d12c..52eee95112f 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java @@ -120,7 +120,7 @@ public class TestDocsAndPositions extends LuceneTestCase { customType.setOmitNorms(true); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - ArrayList positions = new ArrayList(); + ArrayList positions = new ArrayList<>(); StringBuilder builder = new StringBuilder(); int num = atLeast(131); for (int j = 0; j < num; j++) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java index 714638cc5e0..dcc6119deee 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java @@ -47,7 +47,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { BufferedUpdates bd2 = new BufferedUpdates(); int last1 = 0; int last2 = 0; - Set uniqueValues = new HashSet(); + Set uniqueValues = new HashSet<>(); for (int j = 0; j < ids.length; j++) { Integer i = ids[j]; // create an array here since we compare identity below against tailItem @@ -72,7 +72,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { } assertEquals(uniqueValues, bd1.terms.keySet()); assertEquals(uniqueValues, bd2.terms.keySet()); - HashSet frozenSet = new HashSet(); + HashSet frozenSet = new HashSet<>(); for (Term t : queue.freezeGlobalBuffer(null).termsIterable()) { BytesRef bytesRef = new BytesRef(); bytesRef.copyBytes(t.bytes); @@ -173,7 +173,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { public void testStressDeleteQueue() throws InterruptedException { DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue(); - Set uniqueValues = new HashSet(); + Set uniqueValues = new HashSet<>(); final int size = 10000 + random().nextInt(500) * RANDOM_MULTIPLIER; Integer[] ids = new Integer[size]; for (int i = 0; i < ids.length; i++) { @@ -201,7 +201,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { assertEquals(uniqueValues, deletes.terms.keySet()); } queue.tryApplyGlobalSlice(); - Set frozenSet = 
new HashSet(); + Set frozenSet = new HashSet<>(); for (Term t : queue.freezeGlobalBuffer(null).termsIterable()) { BytesRef bytesRef = new BytesRef(); bytesRef.copyBytes(t.bytes); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java index 7960b0ce650..0f8d05f782d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java @@ -342,8 +342,8 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { @Override public void onDelete(DocumentsWriterFlushControl control, ThreadState state) { - final ArrayList pending = new ArrayList(); - final ArrayList notPending = new ArrayList(); + final ArrayList pending = new ArrayList<>(); + final ArrayList notPending = new ArrayList<>(); findPending(control, pending, notPending); final boolean flushCurrent = state.flushPending; final ThreadState toFlush; @@ -374,8 +374,8 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { @Override public void onInsert(DocumentsWriterFlushControl control, ThreadState state) { - final ArrayList pending = new ArrayList(); - final ArrayList notPending = new ArrayList(); + final ArrayList pending = new ArrayList<>(); + final ArrayList notPending = new ArrayList<>(); findPending(control, pending, notPending); final boolean flushCurrent = state.flushPending; long activeBytes = control.activeBytes(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java index a34d2f895ac..81c3461c380 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexFileDeleter.java @@ -141,9 +141,9 @@ public class TestIndexFileDeleter extends LuceneTestCase { } private static Set difFiles(String[] files1, String[] files2) { - Set set1 = new HashSet(); - Set set2 = new HashSet(); - Set extra = new HashSet(); + Set set1 = new HashSet<>(); + Set set2 = new HashSet<>(); + Set extra = new HashSet<>(); for (int x=0; x < files1.length; x++) { set1.add(files1[x]); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java index 3284dde2782..da4a0c7ccb7 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -1361,7 +1361,7 @@ public class TestIndexWriter extends LuceneTestCase { r = DirectoryReader.open(dir); } - List files = new ArrayList(Arrays.asList(dir.listAll())); + List files = new ArrayList<>(Arrays.asList(dir.listAll())); // RAMDir won't have a write.lock, but fs dirs will: files.remove("write.lock"); @@ -1820,7 +1820,7 @@ public class TestIndexWriter extends LuceneTestCase { IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))); - final List docs = new ArrayList(); + final List docs = new ArrayList<>(); docs.add(new Document()); w.updateDocuments(new Term("foo", "bar"), docs); @@ -2135,9 +2135,9 @@ public class TestIndexWriter extends LuceneTestCase { int iters = atLeast(100); int docCount = 0; int docId = 0; - Set liveIds = new HashSet(); + Set liveIds = new HashSet<>(); for (int i = 0; i < iters; i++) { - List docs = new ArrayList(); + List docs = new ArrayList<>(); 
FieldType ft = new FieldType(TextField.TYPE_NOT_STORED); FieldType idFt = new FieldType(TextField.TYPE_STORED); @@ -2316,7 +2316,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testMergeAllDeleted() throws IOException { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())); - final SetOnce iwRef = new SetOnce(); + final SetOnce iwRef = new SetOnce<>(); iwc.setInfoStream(new RandomIndexWriter.TestPointInfoStream(iwc.getInfoStream(), new RandomIndexWriter.TestPoint() { @Override public void apply(String message) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java index a36cbc32352..9bb2c5c61c3 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterCommit.java @@ -425,7 +425,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { w.addDocument(doc); // commit to "first" - Map commitData = new HashMap(); + Map commitData = new HashMap<>(); commitData.put("tag", "first"); w.setCommitData(commitData); w.commit(); @@ -633,7 +633,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMaxBufferedDocs(2)); for(int j=0;j<17;j++) TestIndexWriter.addDoc(w); - Map data = new HashMap(); + Map data = new HashMap<>(); data.put("label", "test1"); w.setCommitData(data); w.close(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java index bdd83a7b17f..8df58c96e1a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java @@ -82,7 +82,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { assertEquals(InfoStream.getDefault(), conf.getInfoStream()); assertEquals(IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM, conf.getUseCompoundFile()); // Sanity check - validate that all getters are covered. - Set getters = new HashSet(); + Set getters = new HashSet<>(); getters.add("getAnalyzer"); getters.add("getIndexCommit"); getters.add("getIndexDeletionPolicy"); @@ -117,8 +117,8 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testSettersChaining() throws Exception { // Ensures that every setter returns IndexWriterConfig to allow chaining. - HashSet liveSetters = new HashSet(); - HashSet allSetters = new HashSet(); + HashSet liveSetters = new HashSet<>(); + HashSet allSetters = new HashSet<>(); for (Method m : IndexWriterConfig.class.getDeclaredMethods()) { if (m.getName().startsWith("set") && !Modifier.isStatic(m.getModifiers())) { allSetters.add(m.getName()); @@ -175,7 +175,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { // Test that IndexWriterConfig overrides all getters, so that javadocs // contain all methods for the users. Also, ensures that IndexWriterConfig // doesn't declare getters that are not declared on LiveIWC. 
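The hunks around here apply the same change to field initializers (SetOnce above; ThreadLocal and AtomicReference in the hunks below): a field's declared type drives the inference exactly as a local variable's does, and once the site uses <>, a later change to the declared type argument needs no edit at the constructor call. A sketch with JDK types only (the field names are mine, not from the tests):

    import java.util.concurrent.atomic.AtomicReference;

    class DiamondFieldTargets {
      // Inference from the field's declared type, as in the ThreadLocal and
      // AtomicReference fields of the test classes below; the declared
      // arguments are stripped in this rendering of the patch.
      private final ThreadLocal<Thread> owner = new ThreadLocal<>();
      private final AtomicReference<String> writerRef = new AtomicReference<>();

      public static void main(String[] args) {
        DiamondFieldTargets t = new DiamondFieldTargets();
        t.owner.set(Thread.currentThread());
        t.writerRef.set("writer");
        System.out.println(t.owner.get().getName() + " " + t.writerRef.get());
      }
    }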
- HashSet liveGetters = new HashSet(); + HashSet liveGetters = new HashSet<>(); for (Method m : LiveIndexWriterConfig.class.getDeclaredMethods()) { if (m.getName().startsWith("get") && !Modifier.isStatic(m.getModifiers())) { liveGetters.add(m.getName()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 36af529c6ee..445c4af4431 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -939,7 +939,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { final Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); final int NUM_DOCS = atLeast(1000); - final List ids = new ArrayList(NUM_DOCS); + final List ids = new ArrayList<>(NUM_DOCS); for(int id=0;id doFail = new ThreadLocal(); + ThreadLocal doFail = new ThreadLocal<>(); private class TestPoint1 implements RandomIndexWriter.TestPoint { Random r = new Random(random().nextLong()); @@ -1355,7 +1355,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { w.addDocument(doc); } - final List docs = new ArrayList(); + final List docs = new ArrayList<>(); for(int docCount=0;docCount<7;docCount++) { Document doc = new Document(); docs.add(doc); @@ -1415,7 +1415,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { } // Use addDocs (no exception) to get docs in the index: - final List docs = new ArrayList(); + final List docs = new ArrayList<>(); final int numDocs2 = random().nextInt(25); for(int docCount=0;docCount list = new ArrayList(); - List storedList = new ArrayList(); + List list = new ArrayList<>(); + List storedList = new ArrayList<>(); @Override public Iterable indexableFields() { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java index 63c88752cc2..9a015fa1569 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java @@ -398,7 +398,7 @@ public class TestIndexWriterMerging extends LuceneTestCase ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(2); final IndexWriter finalWriter = writer; - final ArrayList failure = new ArrayList(); + final ArrayList failure = new ArrayList<>(); Thread t1 = new Thread() { @Override public void run() { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java index 3c6dd3e437d..9620ed795e0 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOnJRECrash.java @@ -88,7 +88,7 @@ public class TestIndexWriterOnJRECrash extends TestNRTThreads { /** fork ourselves in a new jvm. 
sets -Dtests.crashmode=true */ public void forkTest() throws Exception { - List cmd = new ArrayList(); + List cmd = new ArrayList<>(); cmd.add(System.getProperty("java.home") + System.getProperty("file.separator") + "bin" diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java index 9e2c45896c5..d5cd7941e8a 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterOutOfFileDescriptors.java @@ -132,7 +132,7 @@ public class TestIndexWriterOutOfFileDescriptors extends LuceneTestCase { dir.setRandomIOExceptionRateOnOpen(0.0); r = DirectoryReader.open(dir); dirCopy = newMockFSDirectory(TestUtil.getTempDir("TestIndexWriterOutOfFileDescriptors.copy")); - Set files = new HashSet(); + Set files = new HashSet<>(); for (String file : dir.listAll()) { dir.copy(dirCopy, file, file, IOContext.DEFAULT); files.add(file); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java index 5fedf2bd74d..80b77a59c23 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -402,7 +402,7 @@ public class TestIndexWriterReader extends LuceneTestCase { int numDirs; final Thread[] threads = new Thread[numThreads]; IndexWriter mainWriter; - final List failures = new ArrayList(); + final List failures = new ArrayList<>(); IndexReader[] readers; boolean didClose = false; AtomicInteger count = new AtomicInteger(0); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java index b0f44f0612b..be3170d8660 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java @@ -137,7 +137,7 @@ public class TestIndexWriterUnicode extends LuceneTestCase { BytesRef last = new BytesRef(); - Set seenTerms = new HashSet(); + Set seenTerms = new HashSet<>(); while(true) { final BytesRef term = terms.next(); @@ -279,7 +279,7 @@ public class TestIndexWriterUnicode extends LuceneTestCase { Field f = newStringField("f", "", Field.Store.NO); d.add(f); char[] chars = new char[2]; - final Set allTerms = new HashSet(); + final Set allTerms = new HashSet<>(); int num = atLeast(200); for (int i = 0; i < num; i++) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java index 00ebb551db5..f905695f2f6 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java @@ -547,7 +547,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase { final int threadCount = TestUtil.nextInt(random(), 2, 6); - final AtomicReference writerRef = new AtomicReference(); + final AtomicReference writerRef = new AtomicReference<>(); MockAnalyzer analyzer = new MockAnalyzer(random()); analyzer.setMaxTokenLength(TestUtil.nextInt(random(), 1, IndexWriter.MAX_TERM_LENGTH)); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIntBlockPool.java 
b/lucene/core/src/test/org/apache/lucene/index/TestIntBlockPool.java index 8fb87b2bb47..199c3a7f423 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIntBlockPool.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIntBlockPool.java @@ -63,7 +63,7 @@ public class TestIntBlockPool extends LuceneTestCase { Counter bytesUsed = Counter.newCounter(); IntBlockPool pool = new IntBlockPool(new ByteTrackingAllocator(bytesUsed)); for (int j = 0; j < 2; j++) { - List holders = new ArrayList(); + List holders = new ArrayList<>(); int num = atLeast(4); for (int i = 0; i < num; i++) { holders.add(new StartEndAndValues(random().nextInt(1000))); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java index 84411850ae0..078d7c96a9e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java @@ -39,7 +39,7 @@ public class TestMaxTermFrequency extends LuceneTestCase { Directory dir; IndexReader reader; /* expected maxTermFrequency values for our documents */ - ArrayList expected = new ArrayList(); + ArrayList expected = new ArrayList<>(); @Override public void setUp() throws Exception { @@ -81,7 +81,7 @@ public class TestMaxTermFrequency extends LuceneTestCase { * puts the max-frequency term into expected, to be checked against the norm. */ private String addValue() { - List terms = new ArrayList(); + List terms = new ArrayList<>(); int maxCeiling = TestUtil.nextInt(random(), 0, 255); int max = 0; for (char ch = 'a'; ch <= 'z'; ch++) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java index 9cb54b176ab..708c160514d 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java @@ -70,7 +70,7 @@ public class TestMixedCodecs extends LuceneTestCase { } // Random delete half the docs: - final Set deleted = new HashSet(); + final Set deleted = new HashSet<>(); while(deleted.size() < NUM_DOCS/2) { final Integer toDelete = random().nextInt(NUM_DOCS); if (!deleted.contains(toDelete)) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java index 72d546f1afd..679458817f5 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiDocValues.java @@ -241,7 +241,7 @@ public class TestMultiDocValues extends LuceneTestCase { // check ord list for (int i = 0; i < numDocs; i++) { single.setDocument(i); - ArrayList expectedList = new ArrayList(); + ArrayList expectedList = new ArrayList<>(); long ord; while ((ord = single.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) { expectedList.add(ord); @@ -306,7 +306,7 @@ public class TestMultiDocValues extends LuceneTestCase { // check ord list for (int i = 0; i < numDocs; i++) { single.setDocument(i); - ArrayList expectedList = new ArrayList(); + ArrayList expectedList = new ArrayList<>(); long ord; while ((ord = single.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) { expectedList.add(ord); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java index 6c162fe2374..ec45a2d172b 100644 --- 
a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java @@ -40,9 +40,9 @@ public class TestMultiFields extends LuceneTestCase { // we can do this because we use NoMergePolicy (and dont merge to "nothing") w.setKeepFullyDeletedSegments(true); - Map> docs = new HashMap>(); - Set deleted = new HashSet(); - List terms = new ArrayList(); + Map> docs = new HashMap<>(); + Set deleted = new HashSet<>(); + List terms = new ArrayList<>(); int numDocs = TestUtil.nextInt(random(), 1, 100 * RANDOM_MULTIPLIER); Document doc = new Document(); @@ -55,7 +55,7 @@ public class TestMultiFields extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: onlyUniqueTerms=" + onlyUniqueTerms + " numDocs=" + numDocs); } - Set uniqueTerms = new HashSet(); + Set uniqueTerms = new HashSet<>(); for(int i=0;i 0) { @@ -90,7 +90,7 @@ public class TestMultiFields extends LuceneTestCase { } if (VERBOSE) { - List termsList = new ArrayList(uniqueTerms); + List termsList = new ArrayList<>(uniqueTerms); Collections.sort(termsList, BytesRef.getUTF8SortedAsUTF16Comparator()); System.out.println("TEST: terms in UTF16 order:"); for(BytesRef b : termsList) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java index 9f45e21afb6..fc0df11d6df 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java @@ -80,7 +80,7 @@ public class TestNeverDelete extends LuceneTestCase { indexThreads[x].start(); } - final Set allFiles = new HashSet(); + final Set allFiles = new HashSet<>(); DirectoryReader r = DirectoryReader.open(d); while(System.currentTimeMillis() < stopTime) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java index 4d81ba5afb4..1491d37ec6c 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestNumericDocValuesUpdates.java @@ -1202,7 +1202,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase { final int numDocs = atLeast(50); final int numTerms = TestUtil.nextInt(random(), 1, numDocs / 5); - Set randomTerms = new HashSet(); + Set randomTerms = new HashSet<>(); while (randomTerms.size() < numTerms) { randomTerms.add(TestUtil.randomSimpleString(random())); } @@ -1300,7 +1300,7 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase { final int numDocs = atLeast(20000); final int numNumericFields = atLeast(5); final int numTerms = TestUtil.nextInt(random, 10, 100); // terms should affect many docs - Set updateTerms = new HashSet(); + Set updateTerms = new HashSet<>(); while (updateTerms.size() < numTerms) { updateTerms.add(TestUtil.randomSimpleString(random)); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java index dda5ab3f5fd..9e21e7cbce9 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java @@ -359,7 +359,7 @@ public class TestPayloads extends LuceneTestCase { * This Analyzer uses an WhitespaceTokenizer and PayloadFilter. 
*/ private static class PayloadAnalyzer extends Analyzer { - Map fieldToData = new HashMap(); + Map fieldToData = new HashMap<>(); public PayloadAnalyzer() { super(PER_FIELD_REUSE_STRATEGY); @@ -539,7 +539,7 @@ public class TestPayloads extends LuceneTestCase { private List pool; ByteArrayPool(int capacity, int size) { - pool = new ArrayList(); + pool = new ArrayList<>(); for (int i = 0; i < capacity; i++) { pool.add(new byte[size]); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java index c5045428c32..0fded2247aa 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java @@ -234,7 +234,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase { } public static int[] toArray(DocsEnum docsEnum) throws IOException { - List docs = new ArrayList(); + List docs = new ArrayList<>(); while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { int docID = docsEnum.docID(); docs.add(docID); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java index c460af4f579..f7238a5adde 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java @@ -220,7 +220,7 @@ public class TestPostingsOffsets extends LuceneTestCase { public void testRandom() throws Exception { // token -> docID -> tokens - final Map>> actualTokens = new HashMap>>(); + final Map>> actualTokens = new HashMap<>(); Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc); @@ -242,7 +242,7 @@ public class TestPostingsOffsets extends LuceneTestCase { for(int docCount=0;docCount tokens = new ArrayList(); + List tokens = new ArrayList<>(); final int numTokens = atLeast(100); //final int numTokens = atLeast(20); int pos = -1; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java b/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java index 11d2c2313b6..4676904ef6e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestPrefixCodedTerms.java @@ -46,7 +46,7 @@ public class TestPrefixCodedTerms extends LuceneTestCase { } public void testRandom() { - Set terms = new TreeSet(); + Set terms = new TreeSet<>(); int nterms = atLeast(10000); for (int i = 0; i < nterms; i++) { Term term = new Term(TestUtil.randomUnicodeString(random(), 2), TestUtil.randomUnicodeString(random())); @@ -79,7 +79,7 @@ public class TestPrefixCodedTerms extends LuceneTestCase { b2.add(t2); PrefixCodedTerms pb2 = b2.finish(); - Iterator merged = new MergedIterator(pb1.iterator(), pb2.iterator()); + Iterator merged = new MergedIterator<>(pb1.iterator(), pb2.iterator()); assertTrue(merged.hasNext()); assertEquals(t1, merged.next()); assertTrue(merged.hasNext()); @@ -89,10 +89,10 @@ public class TestPrefixCodedTerms extends LuceneTestCase { @SuppressWarnings({"unchecked","rawtypes"}) public void testMergeRandom() { PrefixCodedTerms pb[] = new PrefixCodedTerms[TestUtil.nextInt(random(), 2, 10)]; - Set superSet = new TreeSet(); + Set superSet = new TreeSet<>(); for (int i = 0; i < pb.length; i++) { - Set terms = new TreeSet(); + Set terms = new TreeSet<>(); int nterms = TestUtil.nextInt(random(), 0, 10000); 
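
// The surrounding testRandom/testMergeRandom hunks replace forms like an
// explicitly parameterized "new MergedIterator" with the JDK 7 diamond:
// the compiler infers a generic class's type arguments from the declared
// type and the constructor arguments, so the right-hand side can stay
// empty (<>). A minimal, self-contained sketch using only JDK types;
// class and variable names here are illustrative, not from the patch:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

class DiamondBasics {
  public static void main(String[] args) {
    // Pre-diamond style: the type argument is spelled out on both sides.
    List<Iterator<String>> verbose = new ArrayList<Iterator<String>>();
    // Diamond style: identical semantics, inferred from the declaration.
    List<Iterator<String>> concise = new ArrayList<>();
    concise.add(Arrays.asList("a", "b").iterator());
    System.out.println(concise.get(0).next()); // prints: a
    System.out.println(verbose.isEmpty());     // prints: true
  }
}

// Note: arrays of a generic type still have to be created raw, as in
// subs.toArray(new Iterator[0]) just below, which is why testMergeRandom
// keeps its @SuppressWarnings({"unchecked","rawtypes"}) annotation.
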
for (int j = 0; j < nterms; j++) { Term term = new Term(TestUtil.randomUnicodeString(random(), 2), TestUtil.randomUnicodeString(random(), 4)); @@ -107,13 +107,13 @@ public class TestPrefixCodedTerms extends LuceneTestCase { pb[i] = b.finish(); } - List> subs = new ArrayList>(); + List> subs = new ArrayList<>(); for (int i = 0; i < pb.length; i++) { subs.add(pb[i].iterator()); } Iterator expected = superSet.iterator(); - Iterator actual = new MergedIterator(subs.toArray(new Iterator[0])); + Iterator actual = new MergedIterator<>(subs.toArray(new Iterator[0])); while (actual.hasNext()) { assertTrue(expected.hasNext()); assertEquals(expected.next(), actual.next()); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java index 11e1ab10c45..f9f1f49c500 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java @@ -75,11 +75,11 @@ public class TestSegmentReader extends LuceneTestCase { } public void testGetFieldNameVariations() { - Collection allFieldNames = new HashSet(); - Collection indexedFieldNames = new HashSet(); - Collection notIndexedFieldNames = new HashSet(); - Collection tvFieldNames = new HashSet(); - Collection noTVFieldNames = new HashSet(); + Collection allFieldNames = new HashSet<>(); + Collection indexedFieldNames = new HashSet<>(); + Collection notIndexedFieldNames = new HashSet<>(); + Collection tvFieldNames = new HashSet<>(); + Collection noTVFieldNames = new HashSet<>(); for(FieldInfo fieldInfo : reader.getFieldInfos()) { final String name = fieldInfo.name; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java b/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java index 84fd32e7550..599a64de83f 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java @@ -63,7 +63,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase { } } - protected List snapshots = new ArrayList(); + protected List snapshots = new ArrayList<>(); protected void prepareIndexAndSnapshots(SnapshotDeletionPolicy sdp, IndexWriter writer, int numSnapshots) diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java index c24b171871c..bd35b503a28 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java @@ -35,7 +35,7 @@ public class TestStressAdvance extends LuceneTestCase { } Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); - final Set aDocs = new HashSet(); + final Set aDocs = new HashSet<>(); final Document doc = new Document(); final Field f = newStringField("field", "", Field.Store.NO); doc.add(f); @@ -61,8 +61,8 @@ public class TestStressAdvance extends LuceneTestCase { w.forceMerge(1); - final List aDocIDs = new ArrayList(); - final List bDocIDs = new ArrayList(); + final List aDocIDs = new ArrayList<>(); + final List bDocIDs = new ArrayList<>(); final DirectoryReader r = w.getReader(); final int[] idToDocID = new int[r.maxDoc()]; diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java index 
9c154cb5c0a..199a484e5db 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java @@ -145,7 +145,7 @@ public class TestStressIndexing2 extends LuceneTestCase { } public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { - Map docs = new HashMap(); + Map docs = new HashMap<>(); IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB( 0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy()), new YieldTestPoint()); @@ -196,7 +196,7 @@ public class TestStressIndexing2 extends LuceneTestCase { public Map indexRandom(int nThreads, int iterations, int range, Directory dir, int maxThreadStates, boolean doReaderPooling) throws IOException, InterruptedException { - Map docs = new HashMap(); + Map docs = new HashMap<>(); IndexWriter w = RandomIndexWriter.mockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE) .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(maxThreadStates)) @@ -246,7 +246,7 @@ public class TestStressIndexing2 extends LuceneTestCase { Iterator iter = docs.values().iterator(); while (iter.hasNext()) { Document d = iter.next(); - ArrayList fields = new ArrayList(); + ArrayList fields = new ArrayList<>(); fields.addAll(d.getFields()); // put fields in same order each time Collections.sort(fields, fieldNameComparator); @@ -690,7 +690,7 @@ public class TestStressIndexing2 extends LuceneTestCase { int base; int range; int iterations; - Map docs = new HashMap(); + Map docs = new HashMap<>(); Random r; public int nextInt(int lim) { @@ -768,7 +768,7 @@ public class TestStressIndexing2 extends LuceneTestCase { customType1.setTokenized(false); customType1.setOmitNorms(true); - ArrayList fields = new ArrayList(); + ArrayList fields = new ArrayList<>(); String idString = getIdString(); Field idField = newField("id", idString, customType1); fields.add(idField); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java index fa3b909eb14..c891c03a97e 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java @@ -42,8 +42,8 @@ import org.apache.lucene.util.TestUtil; public class TestStressNRT extends LuceneTestCase { volatile DirectoryReader reader; - final ConcurrentHashMap model = new ConcurrentHashMap(); - Map committedModel = new HashMap(); + final ConcurrentHashMap model = new ConcurrentHashMap<>(); + Map committedModel = new HashMap<>(); long snapshotCount; long committedModelClock; volatile int lastId; @@ -102,7 +102,7 @@ public class TestStressNRT extends LuceneTestCase { final AtomicInteger numCommitting = new AtomicInteger(); - List threads = new ArrayList(); + List threads = new ArrayList<>(); Directory dir = newDirectory(); @@ -128,7 +128,7 @@ public class TestStressNRT extends LuceneTestCase { DirectoryReader oldReader; synchronized(TestStressNRT.this) { - newCommittedModel = new HashMap(model); // take a snapshot + newCommittedModel = new HashMap<>(model); // take a snapshot version = snapshotCount++; oldReader = reader; oldReader.incRef(); // increment the 
reference since we will use this for reopening diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java index 1385f3e825b..9e0716753eb 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java @@ -54,7 +54,7 @@ public class TestTermsEnum extends LuceneTestCase { final IndexReader r = w.getReader(); w.close(); - final List terms = new ArrayList(); + final List terms = new ArrayList<>(); final TermsEnum termsEnum = MultiFields.getTerms(r, "body").iterator(null); BytesRef term; while((term = termsEnum.next()) != null) { @@ -188,9 +188,9 @@ public class TestTermsEnum extends LuceneTestCase { final int numTerms = atLeast(300); //final int numTerms = 50; - final Set terms = new HashSet(); - final Collection pendingTerms = new ArrayList(); - final Map termToID = new HashMap(); + final Set terms = new HashSet<>(); + final Collection pendingTerms = new ArrayList<>(); + final Map termToID = new HashMap<>(); int id = 0; while(terms.size() != numTerms) { final String s = getRandomString(); @@ -205,7 +205,7 @@ public class TestTermsEnum extends LuceneTestCase { addDoc(w, pendingTerms, termToID, id++); final BytesRef[] termsArray = new BytesRef[terms.size()]; - final Set termsSet = new HashSet(); + final Set termsSet = new HashSet<>(); { int upto = 0; for(String s : terms) { @@ -235,8 +235,8 @@ public class TestTermsEnum extends LuceneTestCase { // From the random terms, pick some ratio and compile an // automaton: - final Set acceptTerms = new HashSet(); - final TreeSet sortedAcceptTerms = new TreeSet(); + final Set acceptTerms = new HashSet<>(); + final TreeSet sortedAcceptTerms = new TreeSet<>(); final double keepPct = random().nextDouble(); Automaton a; if (iter == 0) { @@ -271,7 +271,7 @@ public class TestTermsEnum extends LuceneTestCase { final CompiledAutomaton c = new CompiledAutomaton(a, true, false); final BytesRef[] acceptTermsArray = new BytesRef[acceptTerms.size()]; - final Set acceptTermsSet = new HashSet(); + final Set acceptTermsSet = new HashSet<>(); int upto = 0; for(String s : acceptTerms) { final BytesRef b = new BytesRef(s); @@ -531,7 +531,7 @@ public class TestTermsEnum extends LuceneTestCase { public void testRandomTerms() throws Exception { final String[] terms = new String[TestUtil.nextInt(random(), 1, atLeast(1000))]; - final Set seen = new HashSet(); + final Set seen = new HashSet<>(); final boolean allowEmptyString = random().nextBoolean(); @@ -622,7 +622,7 @@ public class TestTermsEnum extends LuceneTestCase { final int END_LOC = -validTerms.length-1; - final List termStates = new ArrayList(); + final List termStates = new ArrayList<>(); for(int iter=0;iter<100*RANDOM_MULTIPLIER;iter++) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java index fb1356e6d3f..af87fb3bca7 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java @@ -58,7 +58,7 @@ public class TestTermsEnum2 extends LuceneTestCase { Document doc = new Document(); Field field = newStringField("field", "", Field.Store.YES); doc.add(field); - terms = new TreeSet(); + terms = new TreeSet<>(); int num = atLeast(200); for (int i = 0; i < num; i++) { @@ -87,7 +87,7 @@ public class TestTermsEnum2 extends LuceneTestCase { for (int i = 0; i < numIterations; i++) { 
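
// The hunks around this point (terms = new TreeSet<>(), and just below,
// new ArrayList<>(terms)) show that the diamond also works when the
// constructor takes arguments: inference is driven by the declared type
// on the left and by the argument types, so a defensive copy needs no
// repeated type parameter. A small self-contained sketch; the names are
// illustrative, not from the patch:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.TreeSet;

class DiamondCopyConstructor {
  public static void main(String[] args) {
    TreeSet<String> terms = new TreeSet<>(Arrays.asList("pie", "pizza"));
    // Copy constructor: <> infers <String> from the declaration target.
    List<String> unsortedTerms = new ArrayList<>(terms);
    Collections.shuffle(unsortedTerms);
    System.out.println(unsortedTerms.size()); // prints: 2
  }
}
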
String reg = AutomatonTestUtil.randomRegexp(random()); Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton(); - final List matchedTerms = new ArrayList(); + final List matchedTerms = new ArrayList<>(); for(BytesRef t : terms) { if (BasicOperations.run(automaton, t.utf8ToString())) { matchedTerms.add(t); @@ -110,7 +110,7 @@ public class TestTermsEnum2 extends LuceneTestCase { String reg = AutomatonTestUtil.randomRegexp(random()); Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton(); TermsEnum te = MultiFields.getTerms(reader, "field").iterator(null); - ArrayList unsortedTerms = new ArrayList(terms); + ArrayList unsortedTerms = new ArrayList<>(terms); Collections.shuffle(unsortedTerms, random()); for (BytesRef term : unsortedTerms) { @@ -156,7 +156,7 @@ public class TestTermsEnum2 extends LuceneTestCase { CompiledAutomaton ca = new CompiledAutomaton(automaton, SpecialOperations.isFinite(automaton), false); TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null); Automaton expected = BasicOperations.intersection(termsAutomaton, automaton); - TreeSet found = new TreeSet(); + TreeSet found = new TreeSet<>(); while (te.next() != null) { found.add(BytesRef.deepCopyOf(te.term())); } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java index ef926dbf577..2c2b8693241 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTransactionRollback.java @@ -69,7 +69,7 @@ public class TestTransactionRollback extends LuceneTestCase { IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random())).setIndexDeletionPolicy( new RollbackDeletionPolicy(id)).setIndexCommit(last)); - Map data = new HashMap(); + Map data = new HashMap<>(); data.put("index", "Rolled back to 1-"+id); w.setCommitData(data); w.close(); @@ -139,7 +139,7 @@ public class TestTransactionRollback extends LuceneTestCase { w.addDocument(doc); if (currentRecordId%10 == 0) { - Map data = new HashMap(); + Map data = new HashMap<>(); data.put("index", "records 1-"+currentRecordId); w.setCommitData(data); w.commit(); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java index 5d87e9f0c46..bf3f4d500b4 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java @@ -39,7 +39,7 @@ public class TestUniqueTermCount extends LuceneTestCase { Directory dir; IndexReader reader; /* expected uniqueTermCount values for our documents */ - ArrayList expected = new ArrayList(); + ArrayList expected = new ArrayList<>(); @Override public void setUp() throws Exception { @@ -82,7 +82,7 @@ public class TestUniqueTermCount extends LuceneTestCase { */ private String addValue() { StringBuilder sb = new StringBuilder(); - HashSet terms = new HashSet(); + HashSet terms = new HashSet<>(); int num = TestUtil.nextInt(random(), 0, 255); for (int i = 0; i < num; i++) { sb.append(' '); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java index 4442db3d2cc..dcefa2b4753 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java +++ 
b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java @@ -218,7 +218,7 @@ public class TestBooleanQuery extends LuceneTestCase { if (VERBOSE) { System.out.println("iter=" + iter); } - final List terms = new ArrayList(Arrays.asList("a", "b", "c", "d", "e", "f")); + final List terms = new ArrayList<>(Arrays.asList("a", "b", "c", "d", "e", "f")); final int numTerms = TestUtil.nextInt(random(), 1, terms.size()); while(terms.size() > numTerms) { terms.remove(random().nextInt(terms.size())); @@ -238,7 +238,7 @@ public class TestBooleanQuery extends LuceneTestCase { Scorer scorer = weight.scorer(s.leafContexts.get(0), null); // First pass: just use .nextDoc() to gather all hits - final List hits = new ArrayList(); + final List hits = new ArrayList<>(); while(scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { hits.add(new ScoreDoc(scorer.docID(), scorer.score())); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java index 744ab68fb47..175061a4281 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java @@ -126,8 +126,8 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase { private TopDocsCollector collector; private int docBase; - public final Map docCounts = new HashMap(); - private final Set tqsSet = new HashSet(); + public final Map docCounts = new HashMap<>(); + private final Set tqsSet = new HashSet<>(); MyCollector() { collector = TopScoreDocCollector.create(10, true); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java index 41e5f60b3ab..68227c6df11 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java @@ -98,7 +98,7 @@ public class TestBooleanScorer extends LuceneTestCase { BooleanScorer bs = new BooleanScorer(weight, false, 1, Arrays.asList(scorers), Collections.emptyList(), scorers.length); - final List hits = new ArrayList(); + final List hits = new ArrayList<>(); bs.score(new Collector() { int docBase; @Override diff --git a/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java b/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java index 0959d51d88d..28877c681ec 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestControlledRealTimeReopenThread.java @@ -65,7 +65,7 @@ public class TestControlledRealTimeReopenThread extends ThreadedIndexingAndSearc private ControlledRealTimeReopenThread nrtDeletesThread; private ControlledRealTimeReopenThread nrtNoDeletesThread; - private final ThreadLocal lastGens = new ThreadLocal(); + private final ThreadLocal lastGens = new ThreadLocal<>(); private boolean warmCalled; public void testControlledRealTimeReopenThread() throws Exception { @@ -232,13 +232,13 @@ public class TestControlledRealTimeReopenThread extends ThreadedIndexingAndSearc nrtNoDeletes = new SearcherManager(writer, false, sf); nrtDeletes = new SearcherManager(writer, true, sf); - nrtDeletesThread = new ControlledRealTimeReopenThread(genWriter, nrtDeletes, maxReopenSec, minReopenSec); + nrtDeletesThread = new 
ControlledRealTimeReopenThread<>(genWriter, nrtDeletes, maxReopenSec, minReopenSec); nrtDeletesThread.setName("NRTDeletes Reopen Thread"); nrtDeletesThread.setPriority(Math.min(Thread.currentThread().getPriority()+2, Thread.MAX_PRIORITY)); nrtDeletesThread.setDaemon(true); nrtDeletesThread.start(); - nrtNoDeletesThread = new ControlledRealTimeReopenThread(genWriter, nrtNoDeletes, maxReopenSec, minReopenSec); + nrtNoDeletesThread = new ControlledRealTimeReopenThread<>(genWriter, nrtNoDeletes, maxReopenSec, minReopenSec); nrtNoDeletesThread.setName("NRTNoDeletes Reopen Thread"); nrtNoDeletesThread.setPriority(Math.min(Thread.currentThread().getPriority()+2, Thread.MAX_PRIORITY)); nrtNoDeletesThread.setDaemon(true); @@ -343,7 +343,7 @@ public class TestControlledRealTimeReopenThread extends ThreadedIndexingAndSearc } finally { manager.release(searcher); } - final ControlledRealTimeReopenThread thread = new ControlledRealTimeReopenThread(writer, manager, 0.01, 0.01); + final ControlledRealTimeReopenThread thread = new ControlledRealTimeReopenThread<>(writer, manager, 0.01, 0.01); thread.start(); // start reopening if (VERBOSE) { System.out.println("waiting now for generation " + lastGen); @@ -482,12 +482,12 @@ public class TestControlledRealTimeReopenThread extends ThreadedIndexingAndSearc SearcherManager sm = new SearcherManager(iw, true, new SearcherFactory()); final TrackingIndexWriter tiw = new TrackingIndexWriter(iw); ControlledRealTimeReopenThread controlledRealTimeReopenThread = - new ControlledRealTimeReopenThread(tiw, sm, maxStaleSecs, 0); + new ControlledRealTimeReopenThread<>(tiw, sm, maxStaleSecs, 0); controlledRealTimeReopenThread.setDaemon(true); controlledRealTimeReopenThread.start(); - List commitThreads = new ArrayList(); + List commitThreads = new ArrayList<>(); for (int i = 0; i < 500; i++) { if (i > 0 && i % 50 == 0) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java index 3c872a5981e..a42c8f8287c 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java @@ -112,7 +112,7 @@ public class TestCustomSearcherSort extends LuceneTestCase { // make a query without sorting first ScoreDoc[] hitsByRank = searcher.search(query, null, Integer.MAX_VALUE).scoreDocs; checkHits(hitsByRank, "Sort by rank: "); // check for duplicates - Map resultMap = new TreeMap(); + Map resultMap = new TreeMap<>(); // store hits in TreeMap - TreeMap does not allow duplicates; existing // entries are silently overwritten for (int hitid = 0; hitid < hitsByRank.length; ++hitid) { @@ -155,7 +155,7 @@ public class TestCustomSearcherSort extends LuceneTestCase { */ private void checkHits(ScoreDoc[] hits, String prefix) { if (hits != null) { - Map idMap = new TreeMap(); + Map idMap = new TreeMap<>(); for (int docnum = 0; docnum < hits.length; ++docnum) { Integer luceneId = null; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java b/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java index c0fc91829c4..4f23f886004 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java @@ -77,7 +77,7 @@ public class TestDocIdSet extends LuceneTestCase { }; DocIdSetIterator iter = filteredSet.iterator(); - ArrayList list = new ArrayList(); + ArrayList list = new ArrayList<>(); int doc = 
iter.advance(3); if (doc != DocIdSetIterator.NO_MORE_DOCS) { list.add(Integer.valueOf(doc)); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java index fc11cb06ce3..8fe3858af04 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRangeFilter.java @@ -54,7 +54,7 @@ public class TestDocTermOrdsRangeFilter extends LuceneTestCase { RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); - List terms = new ArrayList(); + List terms = new ArrayList<>(); int num = atLeast(200); for (int i = 0; i < num; i++) { Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java index 64c5382f3a7..31d973b49f8 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestDocTermOrdsRewriteMethod.java @@ -56,7 +56,7 @@ public class TestDocTermOrdsRewriteMethod extends LuceneTestCase { RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.KEYWORD, false)) .setMaxBufferedDocs(TestUtil.nextInt(random(), 50, 1000))); - List terms = new ArrayList(); + List terms = new ArrayList<>(); int num = atLeast(200); for (int i = 0; i < num; i++) { Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java index 082884a0775..816e58288d9 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java @@ -32,7 +32,7 @@ import java.util.Map; public class TestElevationComparator extends LuceneTestCase { - private final Map priority = new HashMap(); + private final Map priority = new HashMap<>(); //@Test public void testSorting() throws Throwable { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java index 8c2148a5012..f78f6f84bf9 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java @@ -278,7 +278,7 @@ public class TestFieldCache extends LuceneTestCase { for (int i = 0; i < NUM_DOCS; i++) { termOrds.setDocument(i); // This will remove identical terms. A DocTermOrds doesn't return duplicate ords for a docId - List values = new ArrayList(new LinkedHashSet(Arrays.asList(multiValued[i]))); + List values = new ArrayList<>(new LinkedHashSet<>(Arrays.asList(multiValued[i]))); for (BytesRef v : values) { if (v == null) { // why does this test use null values... 
instead of an empty list: confusing diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java index bd6a78e8a67..8e022b64900 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java @@ -52,17 +52,17 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { ScoreDoc[] results; MatchAllDocsQuery q = new MatchAllDocsQuery(); - List terms = new ArrayList(); + List terms = new ArrayList<>(); terms.add("5"); results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs; assertEquals("Must match nothing", 0, results.length); - terms = new ArrayList(); + terms = new ArrayList<>(); terms.add("10"); results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs; assertEquals("Must match 1", 1, results.length); - terms = new ArrayList(); + terms = new ArrayList<>(); terms.add("10"); terms.add("20"); results = searcher.search(q, new FieldCacheTermsFilter(fieldName, terms.toArray(new String[0])), numDocs).scoreDocs; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java index 2da514f7c4e..6a4f29509c6 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestLiveFieldValues.java @@ -79,7 +79,7 @@ public class TestLiveFieldValues extends LuceneTestCase { } final CountDownLatch startingGun = new CountDownLatch(1); - List threads = new ArrayList(); + List threads = new ArrayList<>(); final int iters = atLeast(1000); final int idCount = TestUtil.nextInt(random(), 100, 10000); @@ -96,7 +96,7 @@ public class TestLiveFieldValues extends LuceneTestCase { @Override public void run() { try { - Map values = new HashMap(); + Map values = new HashMap<>(); List allIDs = Collections.synchronizedList(new ArrayList()); startingGun.await(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java index 694484c5a82..16aae8884ca 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java @@ -194,7 +194,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { /** test next with giant bq of all terms with varying minShouldMatch */ public void testNextAllTerms() throws Exception { - List termsList = new ArrayList(); + List termsList = new ArrayList<>(); termsList.addAll(Arrays.asList(commonTerms)); termsList.addAll(Arrays.asList(mediumTerms)); termsList.addAll(Arrays.asList(rareTerms)); @@ -209,7 +209,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { /** test advance with giant bq of all terms with varying minShouldMatch */ public void testAdvanceAllTerms() throws Exception { - List termsList = new ArrayList(); + List termsList = new ArrayList<>(); termsList.addAll(Arrays.asList(commonTerms)); termsList.addAll(Arrays.asList(mediumTerms)); termsList.addAll(Arrays.asList(rareTerms)); @@ -226,7 +226,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { /** test next with varying numbers of terms with varying minShouldMatch */ public void testNextVaryingNumberOfTerms() throws Exception { 
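
// This method and its three siblings all build their term list the same
// way: an empty diamond-inferred ArrayList followed by addAll calls.
// The rewrite is purely syntactic -- <> is resolved at compile time and
// erases to exactly the same bytecode as the explicit form. A compact
// sketch with stand-in data (the term arrays here are illustrative only):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class DiamondAccumulate {
  public static void main(String[] args) {
    String[] commonTerms = {"the", "a"};
    String[] rareTerms = {"zebra"};
    List<String> termsList = new ArrayList<>(); // was: new ArrayList<String>()
    termsList.addAll(Arrays.asList(commonTerms));
    termsList.addAll(Arrays.asList(rareTerms));
    System.out.println(termsList); // prints: [the, a, zebra]
  }
}
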
- List termsList = new ArrayList(); + List termsList = new ArrayList<>(); termsList.addAll(Arrays.asList(commonTerms)); termsList.addAll(Arrays.asList(mediumTerms)); termsList.addAll(Arrays.asList(rareTerms)); @@ -243,7 +243,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { /** test advance with varying numbers of terms with varying minShouldMatch */ public void testAdvanceVaryingNumberOfTerms() throws Exception { - List termsList = new ArrayList(); + List termsList = new ArrayList<>(); termsList.addAll(Arrays.asList(commonTerms)); termsList.addAll(Arrays.asList(mediumTerms)); termsList.addAll(Arrays.asList(rareTerms)); @@ -273,7 +273,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase { final SortedSetDocValues dv; final int maxDoc; - final Set ords = new HashSet(); + final Set ords = new HashSet<>(); final SimScorer[] sims; final int minNrShouldMatch; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java index 392dc6b6c4e..1b352a129e5 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java @@ -70,7 +70,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { query1.add(new Term("body", "blueberry")); query2.add(new Term("body", "strawberry")); - LinkedList termsWithPrefix = new LinkedList(); + LinkedList termsWithPrefix = new LinkedList<>(); // this TermEnum gives "piccadilly", "pie" and "pizza". String prefix = "pi"; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java index 8fe492e75f2..26cf76aa128 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java @@ -69,7 +69,7 @@ public class TestPhrasePrefixQuery extends LuceneTestCase { query1.add(new Term("body", "blueberry")); query2.add(new Term("body", "strawberry")); - LinkedList termsWithPrefix = new LinkedList(); + LinkedList termsWithPrefix = new LinkedList<>(); // this TermEnum gives "piccadilly", "pie" and "pizza". 
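
// Nested generics, as in the TestPhraseQuery hunk further below (a docs
// list whose elements are themselves lists), are where the diamond pays
// off most: only the declaration spells out the full type. A sketch with
// an illustrative element type (the real test's element type is not
// shown in this excerpt):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class DiamondNested {
  public static void main(String[] args) {
    // Before: new ArrayList<List<String>>(); after: just new ArrayList<>().
    List<List<String>> docs = new ArrayList<>();
    docs.add(Arrays.asList("quick", "brown", "fox"));
    System.out.println(docs.get(0).get(1)); // prints: brown
  }
}
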
String prefix = "pi"; diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java index d96dad86f7b..ca0a33f38fd 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -590,7 +590,7 @@ public class TestPhraseQuery extends LuceneTestCase { Analyzer analyzer = new MockAnalyzer(random()); RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy())); - List> docs = new ArrayList>(); + List> docs = new ArrayList<>(); Document d = new Document(); Field f = newTextField("f", "", Field.Store.NO); d.add(f); @@ -602,7 +602,7 @@ public class TestPhraseQuery extends LuceneTestCase { // must be > 4096 so it spans multiple chunks int termCount = TestUtil.nextInt(random(), 4097, 8200); - List doc = new ArrayList(); + List doc = new ArrayList<>(); StringBuilder sb = new StringBuilder(); while(doc.size() < termCount) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java index dd6bef58054..d727aec2d32 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java @@ -88,7 +88,7 @@ public class TestQueryWrapperFilter extends LuceneTestCase { final RandomIndexWriter w = new RandomIndexWriter(random(), d); w.w.getConfig().setMaxBufferedDocs(17); final int numDocs = atLeast(100); - final Set aDocs = new HashSet(); + final Set aDocs = new HashSet<>(); for(int i=0;i terms = new ArrayList(); + List terms = new ArrayList<>(); int num = atLeast(200); for (int i = 0; i < num; i++) { String s = TestUtil.randomUnicodeString(random()); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java index ba8059f9b49..3abd501414e 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java @@ -72,7 +72,7 @@ public class TestSameScoresWithThreads extends LuceneTestCase { // Target ~10 terms to search: double chance = 10.0 / termCount; termsEnum = terms.iterator(termsEnum); - final Map answers = new HashMap(); + final Map answers = new HashMap<>(); while(termsEnum.next() != null) { if (random().nextDouble() <= chance) { BytesRef term = BytesRef.deepCopyOf(termsEnum.term()); @@ -92,7 +92,7 @@ public class TestSameScoresWithThreads extends LuceneTestCase { try { startingGun.await(); for(int i=0;i<20;i++) { - List> shuffled = new ArrayList>(answers.entrySet()); + List> shuffled = new ArrayList<>(answers.entrySet()); Collections.shuffle(shuffled); for(Map.Entry ent : shuffled) { TopDocs actual = s.search(new TermQuery(new Term("body", ent.getKey())), 100); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java index 38e89140817..efb96e02e3b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchAfter.java @@ -56,7 +56,7 @@ public class TestSearchAfter extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); - allSortFields = new 
ArrayList(Arrays.asList(new SortField[] { + allSortFields = new ArrayList<>(Arrays.asList(new SortField[] { new SortField("int", SortField.Type.INT, false), new SortField("long", SortField.Type.LONG, false), new SortField("float", SortField.Type.FLOAT, false), @@ -123,7 +123,7 @@ public class TestSearchAfter extends LuceneTestCase { RandomIndexWriter iw = new RandomIndexWriter(random(), dir); int numDocs = atLeast(200); for (int i = 0; i < numDocs; i++) { - List fields = new ArrayList(); + List fields = new ArrayList<>(); fields.add(newTextField("english", English.intToEnglish(i), Field.Store.NO)); fields.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO)); fields.add(newStringField("byte", "" + ((byte) random().nextInt()), Field.Store.NO)); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java index 963ec79343b..cfe72c44f10 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSearcherManager.java @@ -67,7 +67,7 @@ public class TestSearcherManager extends ThreadedIndexingAndSearchingTestCase { private SearcherManager mgr; private SearcherLifetimeManager lifetimeMGR; - private final List pastSearchers = new ArrayList(); + private final List pastSearchers = new ArrayList<>(); private boolean isNRT; @Override diff --git a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java index 7d2c585a66a..ea54c267b8f 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java @@ -82,7 +82,7 @@ public class TestShardSearching extends ShardSearchingTestBase { maxSearcherAgeSeconds ); - final List priorSearches = new ArrayList(); + final List priorSearches = new ArrayList<>(); List terms = null; while (System.nanoTime() < endTimeNanos) { @@ -175,7 +175,7 @@ public class TestShardSearching extends ShardSearchingTestBase { // TODO: try to "focus" on high freq terms sometimes too // TODO: maybe also periodically reset the terms...? 
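
// One limitation worth noting while reading these hunks: under Java 7
// the diamond cannot be combined with an anonymous inner class, so only
// plain instantiations such as new ThreadLocal<>() (seen earlier in
// TestControlledRealTimeReopenThread) qualify; an anonymous subclass must
// keep its explicit type argument until Java 9 relaxed the rule. A
// self-contained sketch -- the field names are not from the patch:

import java.util.concurrent.atomic.AtomicLong;

class DiamondAnonymousLimit {
  // Fine in Java 7: a plain generic instantiation.
  static final ThreadLocal<AtomicLong> plain = new ThreadLocal<>();

  // Must stay explicit in Java 7 (diamond here only compiles from
  // Java 9 on): an anonymous subclass overriding initialValue().
  static final ThreadLocal<AtomicLong> counter = new ThreadLocal<AtomicLong>() {
    @Override protected AtomicLong initialValue() { return new AtomicLong(); }
  };

  public static void main(String[] args) {
    System.out.println(counter.get().incrementAndGet()); // prints: 1
  }
}
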
final TermsEnum termsEnum = MultiFields.getTerms(mockReader, "body").iterator(null); - terms = new ArrayList(); + terms = new ArrayList<>(); while(termsEnum.next() != null) { terms.add(BytesRef.deepCopyOf(termsEnum.term())); } diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java index bfa3dd72fba..faa65603f5c 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSortRandom.java @@ -53,14 +53,14 @@ public class TestSortRandom extends LuceneTestCase { final Directory dir = newDirectory(); final RandomIndexWriter writer = new RandomIndexWriter(random, dir); final boolean allowDups = random.nextBoolean(); - final Set seen = new HashSet(); + final Set seen = new HashSet<>(); final int maxLength = TestUtil.nextInt(random, 5, 100); if (VERBOSE) { System.out.println("TEST: NUM_DOCS=" + NUM_DOCS + " maxLength=" + maxLength + " allowDups=" + allowDups); } int numDocs = 0; - final List docValues = new ArrayList(); + final List docValues = new ArrayList<>(); // TODO: deletions while (numDocs < NUM_DOCS) { final Document doc = new Document(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java index f82caa8038a..063d26b5c40 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java @@ -69,13 +69,13 @@ public class TestSubScorerFreqs extends LuceneTestCase { private final Collector other; private int docBase; - public final Map> docCounts = new HashMap>(); + public final Map> docCounts = new HashMap<>(); - private final Map subScorers = new HashMap(); + private final Map subScorers = new HashMap<>(); private final Set relationships; public CountingCollector(Collector other) { - this(other, new HashSet(Arrays.asList("MUST", "SHOULD", "MUST_NOT"))); + this(other, new HashSet<>(Arrays.asList("MUST", "SHOULD", "MUST_NOT"))); } public CountingCollector(Collector other, Set relationships) { @@ -101,7 +101,7 @@ public class TestSubScorerFreqs extends LuceneTestCase { @Override public void collect(int doc) throws IOException { - final Map freqs = new HashMap(); + final Map freqs = new HashMap<>(); for (Map.Entry ent : subScorers.entrySet()) { Scorer value = ent.getValue(); int matchId = value.docID(); @@ -165,7 +165,7 @@ public class TestSubScorerFreqs extends LuceneTestCase { // see http://docs.oracle.com/javase/7/docs/api/java/lang/SafeVarargs.html @SuppressWarnings("unchecked") final Iterable> occurList = Arrays.asList( Collections.singleton("MUST"), - new HashSet(Arrays.asList("MUST", "SHOULD")) + new HashSet<>(Arrays.asList("MUST", "SHOULD")) ); for (final Set occur : occurList) { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java index b0ff3460bc3..6b4f474f8d4 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java @@ -81,7 +81,7 @@ public class TestTermScorer extends LuceneTestCase { BulkScorer ts = weight.bulkScorer(context, true, context.reader().getLiveDocs()); // we have 2 documents with the term all in them, one document for all the // other values - final List docs = new ArrayList(); + final List docs = new ArrayList<>(); // must call next 
first ts.score(new Collector() { diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java index 1dcd43d09bb..ef89211f182 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestTopDocsMerge.java @@ -145,7 +145,7 @@ public class TestTopDocsMerge extends LuceneTestCase { } } - final List sortFields = new ArrayList(); + final List sortFields = new ArrayList<>(); sortFields.add(new SortField("string", SortField.Type.STRING, true)); sortFields.add(new SortField("string", SortField.Type.STRING, false)); sortFields.add(new SortField("int", SortField.Type.INT, true)); diff --git a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java index 730877a25b2..abe5a6d6fd7 100644 --- a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java +++ b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarity2.java @@ -48,7 +48,7 @@ public class TestSimilarity2 extends LuceneTestCase { @Override public void setUp() throws Exception { super.setUp(); - sims = new ArrayList(); + sims = new ArrayList<>(); sims.add(new DefaultSimilarity()); sims.add(new BM25Similarity()); // TODO: not great that we dup this all with TestSimilarityBase diff --git a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java index f526c6b6399..91861f9165b 100644 --- a/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java +++ b/lucene/core/src/test/org/apache/lucene/search/similarities/TestSimilarityBase.java @@ -121,7 +121,7 @@ public class TestSimilarityBase extends LuceneTestCase { searcher = newSearcher(reader); writer.close(); - sims = new ArrayList(); + sims = new ArrayList<>(); for (BasicModel basicModel : BASIC_MODELS) { for (AfterEffect afterEffect : AFTER_EFFECTS) { for (Normalization normalization : NORMALIZATIONS) { diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java b/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java index fcab7ae0fa8..a07db83a054 100644 --- a/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java +++ b/lucene/core/src/test/org/apache/lucene/search/spans/MultiSpansWrapper.java @@ -56,8 +56,8 @@ public class MultiSpansWrapper extends Spans { // can't be package private due t } public static Spans wrap(IndexReaderContext topLevelReaderContext, SpanQuery query) throws IOException { - Map termContexts = new HashMap(); - TreeSet terms = new TreeSet(); + Map termContexts = new HashMap<>(); + TreeSet terms = new TreeSet<>(); query.extractTerms(terms); for (Term term : terms) { termContexts.put(term, TermContext.build(topLevelReaderContext, term)); diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java index 2dfeb1c93db..53314e84b42 100644 --- a/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java +++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestBasics.java @@ -499,7 +499,7 @@ public class TestBasics extends LuceneTestCase { snq = new SpanNearQuery(clauses, 0, true); pay = new BytesRef(("pos: " + 0).getBytes("UTF-8")); pay2 = new BytesRef(("pos: " + 
1).getBytes("UTF-8")); - list = new ArrayList(); + list = new ArrayList<>(); list.add(pay.bytes); list.add(pay2.bytes); query = new SpanNearPayloadCheckQuery(snq, list); @@ -513,7 +513,7 @@ public class TestBasics extends LuceneTestCase { pay = new BytesRef(("pos: " + 0).getBytes("UTF-8")); pay2 = new BytesRef(("pos: " + 1).getBytes("UTF-8")); BytesRef pay3 = new BytesRef(("pos: " + 2).getBytes("UTF-8")); - list = new ArrayList(); + list = new ArrayList<>(); list.add(pay.bytes); list.add(pay2.bytes); list.add(pay3.bytes); @@ -540,7 +540,7 @@ public class TestBasics extends LuceneTestCase { query = new SpanPositionRangeQuery(oneThousHunThree, 0, 6); checkHits(query, new int[]{1103, 1203,1303,1403,1503,1603,1703,1803,1903}); - Collection payloads = new ArrayList(); + Collection payloads = new ArrayList<>(); BytesRef pay = new BytesRef(("pos: " + 0).getBytes("UTF-8")); BytesRef pay2 = new BytesRef(("pos: " + 1).getBytes("UTF-8")); BytesRef pay3 = new BytesRef(("pos: " + 3).getBytes("UTF-8")); diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java index 0971b209ab6..d5b55c63218 100644 --- a/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java +++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java @@ -138,7 +138,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase { QueryUtils.checkEqual(q, qr); - Set terms = new HashSet(); + Set terms = new HashSet<>(); qr.extractTerms(terms); assertEquals(1, terms.size()); } @@ -158,7 +158,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase { QueryUtils.checkUnequal(q, qr); - Set terms = new HashSet(); + Set terms = new HashSet<>(); qr.extractTerms(terms); assertEquals(2, terms.size()); } @@ -172,7 +172,7 @@ public class TestFieldMaskingSpanQuery extends LuceneTestCase { QueryUtils.checkEqual(q, qr); - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); qr.extractTerms(set); assertEquals(2, set.size()); } diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java index 9f5481c42c1..485430b5ed7 100644 --- a/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java +++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java @@ -270,7 +270,7 @@ public class TestPayloadSpans extends LuceneTestCase { Spans spans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq); TopDocs topDocs = is.search(snq, 1); - Set payloadSet = new HashSet(); + Set payloadSet = new HashSet<>(); for (int i = 0; i < topDocs.scoreDocs.length; i++) { while (spans.next()) { Collection payloads = spans.getPayload(); @@ -306,7 +306,7 @@ public class TestPayloadSpans extends LuceneTestCase { Spans spans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq); TopDocs topDocs = is.search(snq, 1); - Set payloadSet = new HashSet(); + Set payloadSet = new HashSet<>(); for (int i = 0; i < topDocs.scoreDocs.length; i++) { while (spans.next()) { Collection payloads = spans.getPayload(); @@ -341,7 +341,7 @@ public class TestPayloadSpans extends LuceneTestCase { Spans spans = MultiSpansWrapper.wrap(is.getTopReaderContext(), snq); TopDocs topDocs = is.search(snq, 1); - Set payloadSet = new HashSet(); + Set payloadSet = new HashSet<>(); for (int i = 0; i < topDocs.scoreDocs.length; i++) { while (spans.next()) { Collection payloads = 
spans.getPayload(); @@ -479,8 +479,8 @@ public class TestPayloadSpans extends LuceneTestCase { } final class PayloadFilter extends TokenFilter { - Set entities = new HashSet(); - Set nopayload = new HashSet(); + Set entities = new HashSet<>(); + Set nopayload = new HashSet<>(); int pos; PayloadAttribute payloadAtt; CharTermAttribute termAtt; diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java index 4dad29342dc..c5023c7f8ff 100644 --- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java +++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java @@ -67,7 +67,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testWildcard() throws Exception { WildcardQuery wq = new WildcardQuery(new Term("field", "bro?n")); - SpanQuery swq = new SpanMultiTermQueryWrapper(wq); + SpanQuery swq = new SpanMultiTermQueryWrapper<>(wq); // will only match quick brown fox SpanFirstQuery sfq = new SpanFirstQuery(swq, 2); assertEquals(1, searcher.search(sfq, 10).totalHits); @@ -75,7 +75,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testPrefix() throws Exception { WildcardQuery wq = new WildcardQuery(new Term("field", "extrem*")); - SpanQuery swq = new SpanMultiTermQueryWrapper(wq); + SpanQuery swq = new SpanMultiTermQueryWrapper<>(wq); // will only match "jumps over extremely very lazy broxn dog" SpanFirstQuery sfq = new SpanFirstQuery(swq, 3); assertEquals(1, searcher.search(sfq, 10).totalHits); @@ -83,7 +83,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testFuzzy() throws Exception { FuzzyQuery fq = new FuzzyQuery(new Term("field", "broan")); - SpanQuery sfq = new SpanMultiTermQueryWrapper(fq); + SpanQuery sfq = new SpanMultiTermQueryWrapper<>(fq); // will not match quick brown fox SpanPositionRangeQuery sprq = new SpanPositionRangeQuery(sfq, 3, 6); assertEquals(2, searcher.search(sprq, 10).totalHits); @@ -92,7 +92,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testFuzzy2() throws Exception { // maximum of 1 term expansion FuzzyQuery fq = new FuzzyQuery(new Term("field", "broan"), 1, 0, 1, false); - SpanQuery sfq = new SpanMultiTermQueryWrapper(fq); + SpanQuery sfq = new SpanMultiTermQueryWrapper<>(fq); // will only match jumps over lazy broun dog SpanPositionRangeQuery sprq = new SpanPositionRangeQuery(sfq, 0, 100); assertEquals(1, searcher.search(sprq, 10).totalHits); @@ -100,7 +100,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testNoSuchMultiTermsInNear() throws Exception { //test to make sure non existent multiterms aren't throwing null pointer exceptions FuzzyQuery fuzzyNoSuch = new FuzzyQuery(new Term("field", "noSuch"), 1, 0, 1, false); - SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper(fuzzyNoSuch); + SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper<>(fuzzyNoSuch); SpanQuery term = new SpanTermQuery(new Term("field", "brown")); SpanQuery near = new SpanNearQuery(new SpanQuery[]{term, spanNoSuch}, 1, true); assertEquals(0, searcher.search(near, 10).totalHits); @@ -109,17 +109,17 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { assertEquals(0, searcher.search(near, 10).totalHits); WildcardQuery wcNoSuch = new WildcardQuery(new Term("field", "noSuch*")); - SpanQuery spanWCNoSuch = new 
SpanMultiTermQueryWrapper(wcNoSuch); + SpanQuery spanWCNoSuch = new SpanMultiTermQueryWrapper<>(wcNoSuch); near = new SpanNearQuery(new SpanQuery[]{term, spanWCNoSuch}, 1, true); assertEquals(0, searcher.search(near, 10).totalHits); RegexpQuery rgxNoSuch = new RegexpQuery(new Term("field", "noSuch")); - SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper(rgxNoSuch); + SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper<>(rgxNoSuch); near = new SpanNearQuery(new SpanQuery[]{term, spanRgxNoSuch}, 1, true); assertEquals(0, searcher.search(near, 10).totalHits); PrefixQuery prfxNoSuch = new PrefixQuery(new Term("field", "noSuch")); - SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper(prfxNoSuch); + SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper<>(prfxNoSuch); near = new SpanNearQuery(new SpanQuery[]{term, spanPrfxNoSuch}, 1, true); assertEquals(0, searcher.search(near, 10).totalHits); @@ -136,7 +136,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testNoSuchMultiTermsInNotNear() throws Exception { //test to make sure non existent multiterms aren't throwing non-matching field exceptions FuzzyQuery fuzzyNoSuch = new FuzzyQuery(new Term("field", "noSuch"), 1, 0, 1, false); - SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper(fuzzyNoSuch); + SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper<>(fuzzyNoSuch); SpanQuery term = new SpanTermQuery(new Term("field", "brown")); SpanNotQuery notNear = new SpanNotQuery(term, spanNoSuch, 0,0); assertEquals(1, searcher.search(notNear, 10).totalHits); @@ -150,17 +150,17 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { assertEquals(0, searcher.search(notNear, 10).totalHits); WildcardQuery wcNoSuch = new WildcardQuery(new Term("field", "noSuch*")); - SpanQuery spanWCNoSuch = new SpanMultiTermQueryWrapper(wcNoSuch); + SpanQuery spanWCNoSuch = new SpanMultiTermQueryWrapper<>(wcNoSuch); notNear = new SpanNotQuery(term, spanWCNoSuch, 0,0); assertEquals(1, searcher.search(notNear, 10).totalHits); RegexpQuery rgxNoSuch = new RegexpQuery(new Term("field", "noSuch")); - SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper(rgxNoSuch); + SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper<>(rgxNoSuch); notNear = new SpanNotQuery(term, spanRgxNoSuch, 1, 1); assertEquals(1, searcher.search(notNear, 10).totalHits); PrefixQuery prfxNoSuch = new PrefixQuery(new Term("field", "noSuch")); - SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper(prfxNoSuch); + SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper<>(prfxNoSuch); notNear = new SpanNotQuery(term, spanPrfxNoSuch, 1, 1); assertEquals(1, searcher.search(notNear, 10).totalHits); @@ -169,7 +169,7 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testNoSuchMultiTermsInOr() throws Exception { //test to make sure non existent multiterms aren't throwing null pointer exceptions FuzzyQuery fuzzyNoSuch = new FuzzyQuery(new Term("field", "noSuch"), 1, 0, 1, false); - SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper(fuzzyNoSuch); + SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper<>(fuzzyNoSuch); SpanQuery term = new SpanTermQuery(new Term("field", "brown")); SpanOrQuery near = new SpanOrQuery(new SpanQuery[]{term, spanNoSuch}); assertEquals(1, searcher.search(near, 10).totalHits); @@ -180,17 +180,17 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { WildcardQuery wcNoSuch = new WildcardQuery(new Term("field", "noSuch*")); - SpanQuery spanWCNoSuch = new 
SpanMultiTermQueryWrapper(wcNoSuch); + SpanQuery spanWCNoSuch = new SpanMultiTermQueryWrapper<>(wcNoSuch); near = new SpanOrQuery(new SpanQuery[]{term, spanWCNoSuch}); assertEquals(1, searcher.search(near, 10).totalHits); RegexpQuery rgxNoSuch = new RegexpQuery(new Term("field", "noSuch")); - SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper(rgxNoSuch); + SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper<>(rgxNoSuch); near = new SpanOrQuery(new SpanQuery[]{term, spanRgxNoSuch}); assertEquals(1, searcher.search(near, 10).totalHits); PrefixQuery prfxNoSuch = new PrefixQuery(new Term("field", "noSuch")); - SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper(prfxNoSuch); + SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper<>(prfxNoSuch); near = new SpanOrQuery(new SpanQuery[]{term, spanPrfxNoSuch}); assertEquals(1, searcher.search(near, 10).totalHits); @@ -206,23 +206,23 @@ public class TestSpanMultiTermQueryWrapper extends LuceneTestCase { public void testNoSuchMultiTermsInSpanFirst() throws Exception { //this hasn't been a problem FuzzyQuery fuzzyNoSuch = new FuzzyQuery(new Term("field", "noSuch"), 1, 0, 1, false); - SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper(fuzzyNoSuch); + SpanQuery spanNoSuch = new SpanMultiTermQueryWrapper<>(fuzzyNoSuch); SpanQuery spanFirst = new SpanFirstQuery(spanNoSuch, 10); assertEquals(0, searcher.search(spanFirst, 10).totalHits); WildcardQuery wcNoSuch = new WildcardQuery(new Term("field", "noSuch*")); - SpanQuery spanWCNoSuch = new SpanMultiTermQueryWrapper(wcNoSuch); + SpanQuery spanWCNoSuch = new SpanMultiTermQueryWrapper<>(wcNoSuch); spanFirst = new SpanFirstQuery(spanWCNoSuch, 10); assertEquals(0, searcher.search(spanFirst, 10).totalHits); RegexpQuery rgxNoSuch = new RegexpQuery(new Term("field", "noSuch")); - SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper(rgxNoSuch); + SpanQuery spanRgxNoSuch = new SpanMultiTermQueryWrapper<>(rgxNoSuch); spanFirst = new SpanFirstQuery(spanRgxNoSuch, 10); assertEquals(0, searcher.search(spanFirst, 10).totalHits); PrefixQuery prfxNoSuch = new PrefixQuery(new Term("field", "noSuch")); - SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper(prfxNoSuch); + SpanQuery spanPrfxNoSuch = new SpanMultiTermQueryWrapper<>(prfxNoSuch); spanFirst = new SpanFirstQuery(spanPrfxNoSuch, 10); assertEquals(0, searcher.search(spanFirst, 10).totalHits); } diff --git a/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java b/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java index 349e8b95383..5c99fa2b56a 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestBufferedIndexInput.java @@ -281,7 +281,7 @@ public class TestBufferedIndexInput extends LuceneTestCase { private static class MockFSDirectory extends BaseDirectory { - List allIndexInputs = new ArrayList(); + List allIndexInputs = new ArrayList<>(); Random rand; diff --git a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java index bebc675ccbd..0d91b58d1e6 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java @@ -40,7 +40,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { * Test if writing doc stores to disk and everything else to ram works. 
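
The SpanMultiTermQueryWrapper hunks above show that the diamond also works when the target type carries no type arguments of its own: in SpanQuery swq = new SpanMultiTermQueryWrapper<>(wq), the wrapper's parameter is inferred from the constructor argument (WildcardQuery), not from the left-hand side. A minimal sketch of that inference, using the Lucene 4.x query classes named in this patch (only query construction is shown, so no index is needed):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.WildcardQuery;
    import org.apache.lucene.search.spans.SpanFirstQuery;
    import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
    import org.apache.lucene.search.spans.SpanQuery;

    public class DiamondInference {
      public static void main(String[] args) {
        WildcardQuery wq = new WildcardQuery(new Term("field", "bro?n"));
        // The target type (SpanQuery) supplies no type arguments, so the
        // compiler infers SpanMultiTermQueryWrapper<WildcardQuery> from the
        // constructor argument.
        SpanQuery swq = new SpanMultiTermQueryWrapper<>(wq);
        SpanQuery sfq = new SpanFirstQuery(swq, 2); // match only the first two positions
        System.out.println(sfq);
      }
    }
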
*/ public void testBasic() throws IOException { - Set fileExtensions = new HashSet(); + Set fileExtensions = new HashSet<>(); fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_EXTENSION); fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION); diff --git a/lucene/core/src/test/org/apache/lucene/store/TestFilterDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestFilterDirectory.java index 3a90c9a3c95..577315be3df 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestFilterDirectory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestFilterDirectory.java @@ -30,7 +30,7 @@ public class TestFilterDirectory extends LuceneTestCase { public void testOverrides() throws Exception { // verify that all methods of Directory are overridden by FilterDirectory, // except those under the 'exclude' list - Set exclude = new HashSet(); + Set exclude = new HashSet<>(); exclude.add("copy"); exclude.add("createSlicer"); for (Method m : FilterDirectory.class.getMethods()) { diff --git a/lucene/core/src/test/org/apache/lucene/store/TestHugeRamFile.java b/lucene/core/src/test/org/apache/lucene/store/TestHugeRamFile.java index 6840f51dc24..01c7ed16cf1 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestHugeRamFile.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestHugeRamFile.java @@ -31,7 +31,7 @@ public class TestHugeRamFile extends LuceneTestCase { * buffers under maxint. */ private static class DenseRAMFile extends RAMFile { private long capacity = 0; - private HashMap singleBuffers = new HashMap(); + private HashMap singleBuffers = new HashMap<>(); @Override protected byte[] newBuffer(int size) { capacity += size; diff --git a/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java index f5b3e023b39..5102c7686bf 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java @@ -55,7 +55,7 @@ public class TestNRTCachingDirectory extends LuceneTestCase { System.out.println("TEST: numDocs=" + numDocs); } - final List ids = new ArrayList(); + final List ids = new ArrayList<>(); DirectoryReader r = null; for(int docCount=0;docCount garbageCollectorMXBeans = ManagementFactory.getGarbageCollectorMXBeans(); - List ccounts = new ArrayList(); + List ccounts = new ArrayList<>(); for (GarbageCollectorMXBean g : garbageCollectorMXBeans) { ccounts.add(g.getCollectionCount()); } - List ccounts2 = new ArrayList(); + List ccounts2 = new ArrayList<>(); do { System.gc(); ccounts.clear(); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java b/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java index 8b439f883f0..3b845bc66cc 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java @@ -29,7 +29,7 @@ public class TestByteBlockPool extends LuceneTestCase { boolean reuseFirst = random().nextBoolean(); for (int j = 0; j < 2; j++) { - List list = new ArrayList(); + List list = new ArrayList<>(); int maxLength = atLeast(500); final int numValues = atLeast(100); BytesRef ref = new BytesRef(); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestBytesRefArray.java b/lucene/core/src/test/org/apache/lucene/util/TestBytesRefArray.java index 9fcd6a1b6df..fa691fb5d4c 100644 --- 
a/lucene/core/src/test/org/apache/lucene/util/TestBytesRefArray.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestBytesRefArray.java @@ -31,7 +31,7 @@ public class TestBytesRefArray extends LuceneTestCase { public void testAppend() throws IOException { Random random = random(); BytesRefArray list = new BytesRefArray(Counter.newCounter()); - List stringList = new ArrayList(); + List stringList = new ArrayList<>(); for (int j = 0; j < 2; j++) { if (j > 0 && random.nextBoolean()) { list.clear(); @@ -73,7 +73,7 @@ public class TestBytesRefArray extends LuceneTestCase { public void testSort() throws IOException { Random random = random(); BytesRefArray list = new BytesRefArray(Counter.newCounter()); - List stringList = new ArrayList(); + List stringList = new ArrayList<>(); for (int j = 0; j < 2; j++) { if (j > 0 && random.nextBoolean()) { diff --git a/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java b/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java index f71f3c7aa99..52fd56891f0 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java @@ -95,7 +95,7 @@ public class TestBytesRefHash extends LuceneTestCase { BytesRef scratch = new BytesRef(); int num = atLeast(2); for (int j = 0; j < num; j++) { - Map strings = new HashMap(); + Map strings = new HashMap<>(); int uniqueCount = 0; for (int i = 0; i < 797; i++) { String str; @@ -175,7 +175,7 @@ public class TestBytesRefHash extends LuceneTestCase { BytesRef ref = new BytesRef(); int num = atLeast(2); for (int j = 0; j < num; j++) { - SortedSet strings = new TreeSet(); + SortedSet strings = new TreeSet<>(); for (int i = 0; i < 797; i++) { String str; do { @@ -213,7 +213,7 @@ public class TestBytesRefHash extends LuceneTestCase { BytesRef scratch = new BytesRef(); int num = atLeast(2); for (int j = 0; j < num; j++) { - Set strings = new HashSet(); + Set strings = new HashSet<>(); int uniqueCount = 0; for (int i = 0; i < 797; i++) { String str; @@ -250,7 +250,7 @@ public class TestBytesRefHash extends LuceneTestCase { BytesRef scratch = new BytesRef(); int num = atLeast(2); for (int j = 0; j < num; j++) { - Set strings = new HashSet(); + Set strings = new HashSet<>(); int uniqueCount = 0; for (int i = 0; i < 797; i++) { String str; @@ -313,7 +313,7 @@ public class TestBytesRefHash extends LuceneTestCase { BytesRefHash offsetHash = newHash(pool); int num = atLeast(2); for (int j = 0; j < num; j++) { - Set strings = new HashSet(); + Set strings = new HashSet<>(); int uniqueCount = 0; for (int i = 0; i < 797; i++) { String str; diff --git a/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java b/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java index c1931ceda19..c093722bd86 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestCloseableThreadLocal.java @@ -29,7 +29,7 @@ public class TestCloseableThreadLocal extends LuceneTestCase { public void testNullValue() throws Exception { // Tests that null can be set as a valid value (LUCENE-1805). This // previously failed in get(). 
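
Most hunks in these test files are the simple two-sided case: the declaration names the type arguments, so the constructor call can shrink to <>. The declarations as shown here appear with their own type parameters elided (e.g. Map strings in TestBytesRefHash), so the sketch below assumes plausible parameters such as Map<String, Integer> purely for illustration:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class DiamondBasics {
      public static void main(String[] args) {
        // Pre-Java 7 spelling: type arguments written on both sides.
        Map<String, Integer> countsOld = new HashMap<String, Integer>();
        // Java 7 diamond: <String, Integer> is inferred from the declaration.
        // (The element types are assumed here; they are elided in the patch text.)
        Map<String, Integer> counts = new HashMap<>();
        counts.put("foo", 1);
        countsOld.putAll(counts);

        // The same applies to any generic type, e.g. the sorted set of unique
        // strings that TestBytesRefHash builds before hashing them.
        SortedSet<String> strings = new TreeSet<>();
        strings.add("bar");
        System.out.println(counts + " " + strings);
      }
    }
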
- CloseableThreadLocal ctl = new CloseableThreadLocal(); + CloseableThreadLocal ctl = new CloseableThreadLocal<>(); ctl.set(null); assertNull(ctl.get()); } @@ -37,7 +37,7 @@ public class TestCloseableThreadLocal extends LuceneTestCase { public void testDefaultValueWithoutSetting() throws Exception { // LUCENE-1805: make sure default get returns null, // twice in a row - CloseableThreadLocal ctl = new CloseableThreadLocal(); + CloseableThreadLocal ctl = new CloseableThreadLocal<>(); assertNull(ctl.get()); } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java b/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java index 36558562e46..013e8542bfc 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java @@ -37,13 +37,13 @@ public class TestCollectionUtil extends LuceneTestCase { public void testIntroSort() { for (int i = 0, c = atLeast(500); i < c; i++) { - List list1 = createRandomList(2000), list2 = new ArrayList(list1); + List list1 = createRandomList(2000), list2 = new ArrayList<>(list1); CollectionUtil.introSort(list1); Collections.sort(list2); assertEquals(list2, list1); list1 = createRandomList(2000); - list2 = new ArrayList(list1); + list2 = new ArrayList<>(list1); CollectionUtil.introSort(list1, Collections.reverseOrder()); Collections.sort(list2, Collections.reverseOrder()); assertEquals(list2, list1); @@ -56,13 +56,13 @@ public class TestCollectionUtil extends LuceneTestCase { public void testTimSort() { for (int i = 0, c = atLeast(500); i < c; i++) { - List list1 = createRandomList(2000), list2 = new ArrayList(list1); + List list1 = createRandomList(2000), list2 = new ArrayList<>(list1); CollectionUtil.timSort(list1); Collections.sort(list2); assertEquals(list2, list1); list1 = createRandomList(2000); - list2 = new ArrayList(list1); + list2 = new ArrayList<>(list1); CollectionUtil.timSort(list1, Collections.reverseOrder()); Collections.sort(list2, Collections.reverseOrder()); assertEquals(list2, list1); @@ -82,7 +82,7 @@ public class TestCollectionUtil extends LuceneTestCase { CollectionUtil.timSort(list, Collections.reverseOrder()); // check that empty non-random access lists pass sorting without ex (as sorting is not needed) - list = new LinkedList(); + list = new LinkedList<>(); CollectionUtil.introSort(list); CollectionUtil.timSort(list); CollectionUtil.introSort(list, Collections.reverseOrder()); @@ -91,7 +91,7 @@ public class TestCollectionUtil extends LuceneTestCase { public void testOneElementListSort() { // check that one-element non-random access lists pass sorting without ex (as sorting is not needed) - List list = new LinkedList(); + List list = new LinkedList<>(); list.add(1); CollectionUtil.introSort(list); CollectionUtil.timSort(list); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java b/lucene/core/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java index b84ecf55719..ffc65dcf44d 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestDoubleBarrelLRUCache.java @@ -118,7 +118,7 @@ public class TestDoubleBarrelLRUCache extends LuceneTestCase { final int CACHE_SIZE = 512; final int OBJ_COUNT = 3*CACHE_SIZE; - DoubleBarrelLRUCache c = new DoubleBarrelLRUCache(1024); + DoubleBarrelLRUCache c = new DoubleBarrelLRUCache<>(1024); CloneableObject[] objs = new CloneableObject[OBJ_COUNT]; for(int i=0;i set = 
new TreeSet(Arrays.asList("a", "b", "c")); + private static final Set set = new TreeSet<>(Arrays.asList("a", "b", "c")); private static void assertNoMore(Iterator it) { assertFalse(it.hasNext()); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestIdentityHashSet.java b/lucene/core/src/test/org/apache/lucene/util/TestIdentityHashSet.java index 49e6b84b9f7..434978d182b 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestIdentityHashSet.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestIdentityHashSet.java @@ -28,7 +28,7 @@ public class TestIdentityHashSet extends LuceneTestCase { Random rnd = random(); Set jdk = Collections.newSetFromMap( new IdentityHashMap()); - RamUsageEstimator.IdentityHashSet us = new RamUsageEstimator.IdentityHashSet(); + RamUsageEstimator.IdentityHashSet us = new RamUsageEstimator.IdentityHashSet<>(); int max = 100000; int threshold = 256; diff --git a/lucene/core/src/test/org/apache/lucene/util/TestInPlaceMergeSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestInPlaceMergeSorter.java index 6c1fe5762b1..d6e5f464624 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestInPlaceMergeSorter.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestInPlaceMergeSorter.java @@ -30,7 +30,7 @@ public class TestInPlaceMergeSorter extends BaseSortTestCase { @Override public Sorter newSorter(Entry[] arr) { - return new ArrayInPlaceMergeSorter(arr, ArrayUtil.naturalComparator()); + return new ArrayInPlaceMergeSorter<>(arr, ArrayUtil.naturalComparator()); } } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestIntroSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestIntroSorter.java index 63e9f9548e8..eb4316e2267 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestIntroSorter.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestIntroSorter.java @@ -26,7 +26,7 @@ public class TestIntroSorter extends BaseSortTestCase { @Override public Sorter newSorter(Entry[] arr) { - return new ArrayIntroSorter(arr, ArrayUtil.naturalComparator()); + return new ArrayIntroSorter<>(arr, ArrayUtil.naturalComparator()); } } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestMergedIterator.java b/lucene/core/src/test/org/apache/lucene/util/TestMergedIterator.java index e9e37a732fc..98670987e75 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestMergedIterator.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestMergedIterator.java @@ -30,17 +30,17 @@ public class TestMergedIterator extends LuceneTestCase { @SuppressWarnings({"rawtypes", "unchecked"}) public void testMergeEmpty() { - Iterator merged = new MergedIterator(); + Iterator merged = new MergedIterator<>(); assertFalse(merged.hasNext()); - merged = new MergedIterator(new ArrayList().iterator()); + merged = new MergedIterator<>(new ArrayList().iterator()); assertFalse(merged.hasNext()); Iterator[] itrs = new Iterator[random().nextInt(100)]; for (int i = 0; i < itrs.length; i++) { itrs[i] = new ArrayList().iterator(); } - merged = new MergedIterator( itrs ); + merged = new MergedIterator<>( itrs ); assertFalse(merged.hasNext()); } @@ -106,13 +106,13 @@ public class TestMergedIterator extends LuceneTestCase { private void testCase(int itrsWithVal, int specifiedValsOnItr, boolean removeDups) { // Build a random number of lists - List expected = new ArrayList(); + List expected = new ArrayList<>(); Random random = new Random(random().nextLong()); int numLists = itrsWithVal + random.nextInt(1000 - itrsWithVal); @SuppressWarnings({"rawtypes", 
"unchecked"}) List[] lists = new List[numLists]; for (int i = 0; i < numLists; i++) { - lists[i] = new ArrayList(); + lists[i] = new ArrayList<>(); } int start = random.nextInt(1000000); int end = start + VALS_TO_MERGE / itrsWithVal / Math.abs(specifiedValsOnItr); @@ -143,7 +143,7 @@ public class TestMergedIterator extends LuceneTestCase { itrs[i] = lists[i].iterator(); } - MergedIterator mergedItr = new MergedIterator(removeDups, itrs); + MergedIterator mergedItr = new MergedIterator<>(removeDups, itrs); Iterator expectedItr = expected.iterator(); while (expectedItr.hasNext()) { assertTrue(mergedItr.hasNext()); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java index b7f14d02ffb..273f59f446b 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestOfflineSorter.java @@ -86,7 +86,7 @@ public class TestOfflineSorter extends LuceneTestCase { } private byte[][] generateRandom(int howMuchData) { - ArrayList data = new ArrayList(); + ArrayList data = new ArrayList<>(); while (howMuchData > 0) { byte [] current = new byte [random().nextInt(256)]; random().nextBytes(current); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java index 8d81f6562c3..db1044f1965 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java @@ -45,7 +45,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { @Test public void testAllocate() { RecyclingByteBlockAllocator allocator = newAllocator(); - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); byte[] block = allocator.getByteBlock(); set.add(block); assertNotNull(block); @@ -65,7 +65,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { @Test public void testAllocateAndRecycle() { RecyclingByteBlockAllocator allocator = newAllocator(); - HashSet allocated = new HashSet(); + HashSet allocated = new HashSet<>(); byte[] block = allocator.getByteBlock(); allocated.add(block); @@ -86,7 +86,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { byte[][] array = allocated.toArray(new byte[0][]); int begin = random().nextInt(array.length); int end = begin + random().nextInt(array.length - begin); - List selected = new ArrayList(); + List selected = new ArrayList<>(); for (int j = begin; j < end; j++) { selected.add(array[j]); } @@ -102,7 +102,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { @Test public void testAllocateAndFree() { RecyclingByteBlockAllocator allocator = newAllocator(); - HashSet allocated = new HashSet(); + HashSet allocated = new HashSet<>(); int freeButAllocated = 0; byte[] block = allocator.getByteBlock(); allocated.add(block); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingIntBlockAllocator.java b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingIntBlockAllocator.java index 46ad8cd524f..1ea670200c2 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingIntBlockAllocator.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingIntBlockAllocator.java @@ -45,7 +45,7 @@ public class TestRecyclingIntBlockAllocator extends LuceneTestCase { @Test public void testAllocate() { 
RecyclingIntBlockAllocator allocator = newAllocator(); - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); int[] block = allocator.getIntBlock(); set.add(block); assertNotNull(block); @@ -65,7 +65,7 @@ public class TestRecyclingIntBlockAllocator extends LuceneTestCase { @Test public void testAllocateAndRecycle() { RecyclingIntBlockAllocator allocator = newAllocator(); - HashSet allocated = new HashSet(); + HashSet allocated = new HashSet<>(); int[] block = allocator.getIntBlock(); allocated.add(block); @@ -86,7 +86,7 @@ public class TestRecyclingIntBlockAllocator extends LuceneTestCase { int[][] array = allocated.toArray(new int[0][]); int begin = random().nextInt(array.length); int end = begin + random().nextInt(array.length - begin); - List selected = new ArrayList(); + List selected = new ArrayList<>(); for (int j = begin; j < end; j++) { selected.add(array[j]); } @@ -102,7 +102,7 @@ public class TestRecyclingIntBlockAllocator extends LuceneTestCase { @Test public void testAllocateAndFree() { RecyclingIntBlockAllocator allocator = newAllocator(); - HashSet allocated = new HashSet(); + HashSet allocated = new HashSet<>(); int freeButAllocated = 0; int[] block = allocator.getIntBlock(); allocated.add(block); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java b/lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java index 796c723c69d..00a5bb8259d 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java @@ -56,7 +56,7 @@ public class TestSentinelIntSet extends LuceneTestCase { int num = random().nextInt(30); int maxVal = (random().nextBoolean() ? random().nextInt(50) : random().nextInt(Integer.MAX_VALUE)) + 1; - HashSet a = new HashSet(initSz); + HashSet a = new HashSet<>(initSz); SentinelIntSet b = new SentinelIntSet(initSz, -1); for (int j=0; j set = new SetOnce(); + SetOnce set = new SetOnce<>(); assertNull(set.get()); } @Test(expected=AlreadySetException.class) public void testSettingCtor() throws Exception { - SetOnce set = new SetOnce(new Integer(5)); + SetOnce set = new SetOnce<>(new Integer(5)); assertEquals(5, set.get().intValue()); set.set(new Integer(7)); } @Test(expected=AlreadySetException.class) public void testSetOnce() throws Exception { - SetOnce set = new SetOnce(); + SetOnce set = new SetOnce<>(); set.set(new Integer(5)); assertEquals(5, set.get().intValue()); set.set(new Integer(7)); @@ -72,7 +72,7 @@ public class TestSetOnce extends LuceneTestCase { @Test public void testSetMultiThreaded() throws Exception { - final SetOnce set = new SetOnce(); + final SetOnce set = new SetOnce<>(); SetOnceThread[] threads = new SetOnceThread[10]; for (int i = 0; i < threads.length; i++) { threads[i] = new SetOnceThread(random()); diff --git a/lucene/core/src/test/org/apache/lucene/util/TestTimSorter.java b/lucene/core/src/test/org/apache/lucene/util/TestTimSorter.java index 8037e3b92b1..064f5d7ea13 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestTimSorter.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestTimSorter.java @@ -25,7 +25,7 @@ public class TestTimSorter extends BaseSortTestCase { @Override public Sorter newSorter(Entry[] arr) { - return new ArrayTimSorter(arr, ArrayUtil.naturalComparator(), TestUtil.nextInt(random(), 0, arr.length)); + return new ArrayTimSorter<>(arr, ArrayUtil.naturalComparator(), TestUtil.nextInt(random(), 0, arr.length)); } } diff --git 
a/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java b/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java index fffa0e8cbf5..1f365f2bb5b 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestVirtualMethod.java @@ -20,9 +20,9 @@ package org.apache.lucene.util; public class TestVirtualMethod extends LuceneTestCase { private static final VirtualMethod publicTestMethod = - new VirtualMethod(TestVirtualMethod.class, "publicTest", String.class); + new VirtualMethod<>(TestVirtualMethod.class, "publicTest", String.class); private static final VirtualMethod protectedTestMethod = - new VirtualMethod(TestVirtualMethod.class, "protectedTest", int.class); + new VirtualMethod<>(TestVirtualMethod.class, "protectedTest", int.class); public void publicTest(String test) {} protected void protectedTest(int test) {} @@ -80,14 +80,14 @@ public class TestVirtualMethod extends LuceneTestCase { } try { - new VirtualMethod(TestVirtualMethod.class, "bogus"); + new VirtualMethod<>(TestVirtualMethod.class, "bogus"); fail("Method bogus() does not exist, so IAE should be thrown"); } catch (IllegalArgumentException arg) { // pass } try { - new VirtualMethod(TestClass2.class, "publicTest", String.class); + new VirtualMethod<>(TestClass2.class, "publicTest", String.class); fail("Method publicTest(String) is not declared in TestClass2, so IAE should be thrown"); } catch (IllegalArgumentException arg) { // pass @@ -95,7 +95,7 @@ public class TestVirtualMethod extends LuceneTestCase { try { // try to create a second instance of the same baseClass / method combination - new VirtualMethod(TestVirtualMethod.class, "publicTest", String.class); + new VirtualMethod<>(TestVirtualMethod.class, "publicTest", String.class); fail("Violating singleton status succeeded"); } catch (UnsupportedOperationException arg) { // pass diff --git a/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java b/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java index de135db5954..1689d020b98 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestWAH8DocIdSet.java @@ -44,11 +44,11 @@ public class TestWAH8DocIdSet extends BaseDocIdSetTestCase { public void testUnion() throws IOException { final int numBits = TestUtil.nextInt(random(), 100, 1 << 20); final int numDocIdSets = TestUtil.nextInt(random(), 0, 4); - final List fixedSets = new ArrayList(numDocIdSets); + final List fixedSets = new ArrayList<>(numDocIdSets); for (int i = 0; i < numDocIdSets; ++i) { fixedSets.add(randomSet(numBits, random().nextFloat() / 16)); } - final List compressedSets = new ArrayList(numDocIdSets); + final List compressedSets = new ArrayList<>(numDocIdSets); for (BitSet set : fixedSets) { compressedSets.add(copyOf(set, numBits)); } @@ -66,11 +66,11 @@ public class TestWAH8DocIdSet extends BaseDocIdSetTestCase { public void testIntersection() throws IOException { final int numBits = TestUtil.nextInt(random(), 100, 1 << 20); final int numDocIdSets = TestUtil.nextInt(random(), 1, 4); - final List fixedSets = new ArrayList(numDocIdSets); + final List fixedSets = new ArrayList<>(numDocIdSets); for (int i = 0; i < numDocIdSets; ++i) { fixedSets.add(randomSet(numBits, random().nextFloat())); } - final List compressedSets = new ArrayList(numDocIdSets); + final List compressedSets = new ArrayList<>(numDocIdSets); for (BitSet set : fixedSets) { compressedSets.add(copyOf(set, 
numBits)); } diff --git a/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java b/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java index db8c632a337..0173e0abe0b 100644 --- a/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java +++ b/lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java @@ -167,7 +167,7 @@ public class TestWeakIdentityMap extends LuceneTestCase { WeakIdentityMap.newConcurrentHashMap(random().nextBoolean()); // we keep strong references to the keys, // so WeakIdentityMap will not forget about them: - final AtomicReferenceArray keys = new AtomicReferenceArray(keyCount); + final AtomicReferenceArray keys = new AtomicReferenceArray<>(keyCount); for (int j = 0; j < keyCount; j++) { keys.set(j, new Object()); } diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java index 622aa9d835b..1b2e62df3c9 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java @@ -26,7 +26,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts; public class TestBasicOperations extends LuceneTestCase { /** Test string union. */ public void testStringUnion() { - List strings = new ArrayList(); + List strings = new ArrayList<>(); for (int i = RandomInts.randomIntBetween(random(), 0, 1000); --i >= 0;) { strings.add(new BytesRef(TestUtil.randomUnicodeString(random()))); } diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java index b45ee8f7d93..7370f3476ee 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestCompiledAutomaton.java @@ -31,7 +31,7 @@ import org.apache.lucene.util.TestUtil; public class TestCompiledAutomaton extends LuceneTestCase { private CompiledAutomaton build(String... strings) { - final List terms = new ArrayList(); + final List terms = new ArrayList<>(); for(String s : strings) { terms.add(new BytesRef(s)); } @@ -95,7 +95,7 @@ public class TestCompiledAutomaton extends LuceneTestCase { public void testRandom() throws Exception { final int numTerms = atLeast(400); - final Set terms = new HashSet(); + final Set terms = new HashSet<>(); while(terms.size() != numTerms) { terms.add(randomString()); } diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java index 1bf01e7dce9..e435fe6f6ae 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestDeterminizeLexicon.java @@ -29,8 +29,8 @@ import org.apache.lucene.util.TestUtil; * somewhat randomly, by determinizing a huge random lexicon. 
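
TestLevenshteinAutomata, in the hunks that follow, builds a List of automata for the strings within one edit of an input and unions them; the patch applies the diamond to each of those list constructions. As a plain-string sketch of the same one-insertion neighborhood (the automaton machinery itself is left aside):

    import java.util.ArrayList;
    import java.util.List;

    public class Insertions {
      // All strings obtained from s by inserting the character c at one
      // position; insertionsOf() in the test encodes this neighborhood
      // (over every possible character) as a union of automata.
      static List<String> insertionsOf(String s, char c) {
        List<String> list = new ArrayList<>(); // diamond, as in the patched test
        for (int i = 0; i <= s.length(); i++) {
          list.add(s.substring(0, i) + c + s.substring(i));
        }
        return list;
      }

      public static void main(String[] args) {
        System.out.println(insertionsOf("ab", 'x')); // [xab, axb, abx]
      }
    }
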
*/ public class TestDeterminizeLexicon extends LuceneTestCase { - private List automata = new ArrayList(); - private List terms = new ArrayList(); + private List automata = new ArrayList<>(); + private List terms = new ArrayList<>(); public void testLexicon() throws Exception { int num = atLeast(1); diff --git a/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java b/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java index d0c01b0c316..c3014936f3f 100644 --- a/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java +++ b/lucene/core/src/test/org/apache/lucene/util/automaton/TestLevenshteinAutomata.java @@ -141,7 +141,7 @@ public class TestLevenshteinAutomata extends LuceneTestCase { * one character) */ private Automaton insertionsOf(String s) { - List list = new ArrayList(); + List list = new ArrayList<>(); for (int i = 0; i <= s.length(); i++) { Automaton a = BasicAutomata.makeString(s.substring(0, i)); @@ -161,7 +161,7 @@ public class TestLevenshteinAutomata extends LuceneTestCase { * one character). */ private Automaton deletionsOf(String s) { - List list = new ArrayList(); + List list = new ArrayList<>(); for (int i = 0; i < s.length(); i++) { Automaton a = BasicAutomata.makeString(s.substring(0, i)); @@ -181,7 +181,7 @@ public class TestLevenshteinAutomata extends LuceneTestCase { * (replacing one character) */ private Automaton substitutionsOf(String s) { - List list = new ArrayList(); + List list = new ArrayList<>(); for (int i = 0; i < s.length(); i++) { Automaton a = BasicAutomata.makeString(s.substring(0, i)); @@ -203,7 +203,7 @@ public class TestLevenshteinAutomata extends LuceneTestCase { private Automaton transpositionsOf(String s) { if (s.length() < 2) return BasicAutomata.makeEmpty(); - List list = new ArrayList(); + List list = new ArrayList<>(); for (int i = 0; i < s.length()-1; i++) { StringBuilder sb = new StringBuilder(); sb.append(s.substring(0, i)); diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java b/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java index 21feb91bfc3..96288751631 100644 --- a/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java +++ b/lucene/core/src/test/org/apache/lucene/util/fst/Test2BFST.java @@ -55,7 +55,7 @@ public class Test2BFST extends LuceneTestCase { System.out.println("\nTEST: 3B nodes; doPack=false output=NO_OUTPUTS"); Outputs outputs = NoOutputs.getSingleton(); Object NO_OUTPUT = outputs.getNoOutput(); - final Builder b = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, + final Builder b = new Builder<>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doPack, PackedInts.COMPACT, true, 15); int count = 0; @@ -98,7 +98,7 @@ public class Test2BFST extends LuceneTestCase { } System.out.println("\nTEST: enum all input/outputs"); - IntsRefFSTEnum fstEnum = new IntsRefFSTEnum(fst); + IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); Arrays.fill(ints2, 0); r = new Random(seed); @@ -124,7 +124,7 @@ public class Test2BFST extends LuceneTestCase { fst.save(out); out.close(); IndexInput in = dir.openInput("fst", IOContext.DEFAULT); - fst = new FST(in, outputs); + fst = new FST<>(in, outputs); in.close(); } else { dir.deleteFile("fst"); @@ -137,7 +137,7 @@ public class Test2BFST extends LuceneTestCase { { System.out.println("\nTEST: 3 GB size; doPack=" + doPack + " outputs=bytes"); Outputs outputs = ByteSequenceOutputs.getSingleton(); - final Builder b = new 
Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, + final Builder b = new Builder<>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doPack, PackedInts.COMPACT, true, 15); byte[] outputBytes = new byte[20]; @@ -177,7 +177,7 @@ public class Test2BFST extends LuceneTestCase { } System.out.println("\nTEST: enum all input/outputs"); - IntsRefFSTEnum fstEnum = new IntsRefFSTEnum(fst); + IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); Arrays.fill(ints, 0); r = new Random(seed); @@ -201,7 +201,7 @@ public class Test2BFST extends LuceneTestCase { fst.save(out); out.close(); IndexInput in = dir.openInput("fst", IOContext.DEFAULT); - fst = new FST(in, outputs); + fst = new FST<>(in, outputs); in.close(); } else { dir.deleteFile("fst"); @@ -214,7 +214,7 @@ public class Test2BFST extends LuceneTestCase { { System.out.println("\nTEST: 3 GB size; doPack=" + doPack + " outputs=long"); Outputs outputs = PositiveIntOutputs.getSingleton(); - final Builder b = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, + final Builder b = new Builder<>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doPack, PackedInts.COMPACT, true, 15); long output = 1; @@ -260,7 +260,7 @@ public class Test2BFST extends LuceneTestCase { } System.out.println("\nTEST: enum all input/outputs"); - IntsRefFSTEnum fstEnum = new IntsRefFSTEnum(fst); + IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); Arrays.fill(ints, 0); r = new Random(seed); @@ -285,7 +285,7 @@ public class Test2BFST extends LuceneTestCase { fst.save(out); out.close(); IndexInput in = dir.openInput("fst", IOContext.DEFAULT); - fst = new FST(in, outputs); + fst = new FST<>(in, outputs); in.close(); } else { dir.deleteFile("fst"); diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java index e720d791a97..1d24cf9943c 100644 --- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java +++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java @@ -114,11 +114,11 @@ public class TestFSTs extends LuceneTestCase { { final Outputs outputs = NoOutputs.getSingleton(); final Object NO_OUTPUT = outputs.getNoOutput(); - final List> pairs = new ArrayList>(terms2.length); + final List> pairs = new ArrayList<>(terms2.length); for(IntsRef term : terms2) { - pairs.add(new FSTTester.InputOutput(term, NO_OUTPUT)); + pairs.add(new FSTTester.InputOutput<>(term, NO_OUTPUT)); } - FST fst = new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); + FST fst = new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); assertNotNull(fst); assertEquals(22, fst.getNodeCount()); assertEquals(27, fst.getArcCount()); @@ -127,11 +127,11 @@ public class TestFSTs extends LuceneTestCase { // FST ord pos int { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - final List> pairs = new ArrayList>(terms2.length); + final List> pairs = new ArrayList<>(terms2.length); for(int idx=0;idx(terms2[idx], (long) idx)); + pairs.add(new FSTTester.InputOutput<>(terms2[idx], (long) idx)); } - final FST fst = new FSTTester(random(), dir, inputMode, pairs, outputs, true).doTest(0, 0, false); + final FST fst = new FSTTester<>(random(), dir, inputMode, pairs, outputs, true).doTest(0, 0, false); assertNotNull(fst); assertEquals(22, fst.getNodeCount()); assertEquals(27, fst.getArcCount()); @@ -141,12 +141,12 @@ public class TestFSTs extends 
LuceneTestCase { { final ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton(); final BytesRef NO_OUTPUT = outputs.getNoOutput(); - final List> pairs = new ArrayList>(terms2.length); + final List> pairs = new ArrayList<>(terms2.length); for(int idx=0;idx(terms2[idx], output)); + pairs.add(new FSTTester.InputOutput<>(terms2[idx], output)); } - final FST fst = new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); + final FST fst = new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); assertNotNull(fst); assertEquals(24, fst.getNodeCount()); assertEquals(30, fst.getArcCount()); @@ -162,78 +162,78 @@ public class TestFSTs extends LuceneTestCase { { final Outputs outputs = NoOutputs.getSingleton(); final Object NO_OUTPUT = outputs.getNoOutput(); - final List> pairs = new ArrayList>(terms.length); + final List> pairs = new ArrayList<>(terms.length); for(IntsRef term : terms) { - pairs.add(new FSTTester.InputOutput(term, NO_OUTPUT)); + pairs.add(new FSTTester.InputOutput<>(term, NO_OUTPUT)); } - new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(true); + new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(true); } // PositiveIntOutput (ord) { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - final List> pairs = new ArrayList>(terms.length); + final List> pairs = new ArrayList<>(terms.length); for(int idx=0;idx(terms[idx], (long) idx)); + pairs.add(new FSTTester.InputOutput<>(terms[idx], (long) idx)); } - new FSTTester(random(), dir, inputMode, pairs, outputs, true).doTest(true); + new FSTTester<>(random(), dir, inputMode, pairs, outputs, true).doTest(true); } // PositiveIntOutput (random monotonically increasing positive number) { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - final List> pairs = new ArrayList>(terms.length); + final List> pairs = new ArrayList<>(terms.length); long lastOutput = 0; for(int idx=0;idx(terms[idx], value)); + pairs.add(new FSTTester.InputOutput<>(terms[idx], value)); } - new FSTTester(random(), dir, inputMode, pairs, outputs, true).doTest(true); + new FSTTester<>(random(), dir, inputMode, pairs, outputs, true).doTest(true); } // PositiveIntOutput (random positive number) { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - final List> pairs = new ArrayList>(terms.length); + final List> pairs = new ArrayList<>(terms.length); for(int idx=0;idx(terms[idx], TestUtil.nextLong(random(), 0, Long.MAX_VALUE))); + pairs.add(new FSTTester.InputOutput<>(terms[idx], TestUtil.nextLong(random(), 0, Long.MAX_VALUE))); } - new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(true); + new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(true); } // Pair { final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(); final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(); - final PairOutputs outputs = new PairOutputs(o1, o2); - final List>> pairs = new ArrayList>>(terms.length); + final PairOutputs outputs = new PairOutputs<>(o1, o2); + final List>> pairs = new ArrayList<>(terms.length); long lastOutput = 0; for(int idx=0;idx>(terms[idx], + pairs.add(new FSTTester.InputOutput<>(terms[idx], outputs.newPair((long) idx, value))); } - new FSTTester>(random(), dir, inputMode, pairs, outputs, false).doTest(true); + new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(true); } // Sequence-of-bytes { final ByteSequenceOutputs outputs = 
ByteSequenceOutputs.getSingleton(); final BytesRef NO_OUTPUT = outputs.getNoOutput(); - final List> pairs = new ArrayList>(terms.length); + final List> pairs = new ArrayList<>(terms.length); for(int idx=0;idx(terms[idx], output)); + pairs.add(new FSTTester.InputOutput<>(terms[idx], output)); } - new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(true); + new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(true); } // Sequence-of-ints { final IntSequenceOutputs outputs = IntSequenceOutputs.getSingleton(); - final List> pairs = new ArrayList>(terms.length); + final List> pairs = new ArrayList<>(terms.length); for(int idx=0;idx(terms[idx], output)); + pairs.add(new FSTTester.InputOutput<>(terms[idx], output)); } - new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(true); + new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(true); } } @@ -270,7 +270,7 @@ public class TestFSTs extends LuceneTestCase { } for(int inputMode=0;inputMode<2;inputMode++) { final int numWords = random.nextInt(maxNumWords+1); - Set termsSet = new HashSet(); + Set termsSet = new HashSet<>(); IntsRef[] terms = new IntsRef[numWords]; while(termsSet.size() < numWords) { final String term = getRandomString(random); @@ -312,7 +312,7 @@ public class TestFSTs extends LuceneTestCase { final boolean doRewrite = random().nextBoolean(); - Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doRewrite, PackedInts.DEFAULT, true, 15); + Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doRewrite, PackedInts.DEFAULT, true, 15); boolean storeOrd = random().nextBoolean(); if (VERBOSE) { @@ -373,7 +373,7 @@ public class TestFSTs extends LuceneTestCase { final Random random = new Random(random().nextLong()); // Now confirm BytesRefFSTEnum and TermsEnum act the // same: - final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst); + final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(fst); int num = atLeast(1000); for(int iter=0;iter(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, 0, prune, prune == 0, true, Integer.MAX_VALUE, outputs, null, doPack, PackedInts.DEFAULT, !noArcArrays, 15); + builder = new Builder<>(inputMode == 0 ? 
FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, 0, prune, prune == 0, true, Integer.MAX_VALUE, outputs, null, doPack, PackedInts.DEFAULT, !noArcArrays, 15); } protected abstract T getOutput(IntsRef input, int ord) throws IOException; @@ -657,7 +657,7 @@ public class TestFSTs extends LuceneTestCase { // Store both ord & docFreq: final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(); final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(); - final PairOutputs outputs = new PairOutputs(o1, o2); + final PairOutputs outputs = new PairOutputs<>(o1, o2); new VisitTerms>(dirOut, wordsFileIn, inputMode, prune, outputs, doPack, noArcArrays) { Random rand; @Override @@ -706,9 +706,9 @@ public class TestFSTs extends LuceneTestCase { public void testSingleString() throws Exception { final Outputs outputs = NoOutputs.getSingleton(); - final Builder b = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + final Builder b = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); b.add(Util.toIntsRef(new BytesRef("foobar"), new IntsRef()), outputs.getNoOutput()); - final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(b.finish()); + final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(b.finish()); assertNull(fstEnum.seekFloor(new BytesRef("foo"))); assertNull(fstEnum.seekCeil(new BytesRef("foobaz"))); } @@ -717,7 +717,7 @@ public class TestFSTs extends LuceneTestCase { public void testDuplicateFSAString() throws Exception { String str = "foobar"; final Outputs outputs = NoOutputs.getSingleton(); - final Builder b = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + final Builder b = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); IntsRef ints = new IntsRef(); for(int i=0; i<10; i++) { b.add(Util.toIntsRef(new BytesRef(str), ints), outputs.getNoOutput()); @@ -726,7 +726,7 @@ public class TestFSTs extends LuceneTestCase { // count the input paths int count = 0; - final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst); + final BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(fst); while(fstEnum.next()!=null) { count++; } @@ -786,7 +786,7 @@ public class TestFSTs extends LuceneTestCase { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); // Build an FST mapping BytesRef -> Long - final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + final Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); final BytesRef a = new BytesRef("a"); final BytesRef b = new BytesRef("b"); @@ -802,7 +802,7 @@ public class TestFSTs extends LuceneTestCase { assertEquals(42, (long) Util.get(fst, b)); assertEquals(17, (long) Util.get(fst, a)); - BytesRefFSTEnum fstEnum = new BytesRefFSTEnum(fst); + BytesRefFSTEnum fstEnum = new BytesRefFSTEnum<>(fst); BytesRefFSTEnum.InputOutput seekResult; seekResult = fstEnum.seekFloor(a); assertNotNull(seekResult); @@ -846,7 +846,7 @@ public class TestFSTs extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: NUM_IDS=" + NUM_IDS); } - final Set allIDs = new HashSet(); + final Set allIDs = new HashSet<>(); for(int id=0;id allIDsList = new ArrayList(allIDs); - final List sortedAllIDsList = new ArrayList(allIDsList); + final List allIDsList = new ArrayList<>(allIDs); + final List sortedAllIDsList = new ArrayList<>(allIDsList); Collections.sort(sortedAllIDsList); // Sprinkle in some non-existent PKs: - Set outOfBounds = new HashSet(); + Set outOfBounds = new HashSet<>(); for(int idx=0;idx fst = builder.finish(); @@ -1114,7 +1114,7 @@ public class TestFSTs extends LuceneTestCase { public void testInternalFinalState() throws Exception { final 
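
The a/b hunk above exercises the canonical FST recipe that most of the diamond changes in TestFSTs touch: a Builder with PositiveIntOutputs, inputs added in sorted order, then lookups through Util.get. A minimal end-to-end sketch, assuming the Lucene 4.x FST API exactly as it is named in this patch:

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.IntsRef;
    import org.apache.lucene.util.fst.Builder;
    import org.apache.lucene.util.fst.FST;
    import org.apache.lucene.util.fst.PositiveIntOutputs;
    import org.apache.lucene.util.fst.Util;

    public class FstSketch {
      public static void main(String[] args) throws Exception {
        PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
        Builder<Long> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
        IntsRef scratch = new IntsRef();
        // Builder requires its inputs in sorted order.
        builder.add(Util.toIntsRef(new BytesRef("a"), scratch), 17L);
        builder.add(Util.toIntsRef(new BytesRef("b"), scratch), 42L);
        FST<Long> fst = builder.finish();
        System.out.println(Util.get(fst, new BytesRef("a"))); // 17
        System.out.println(Util.get(fst, new BytesRef("b"))); // 42
      }
    }
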
PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); final boolean willRewrite = random().nextBoolean(); - final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, willRewrite, PackedInts.DEFAULT, true, 15); + final Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, willRewrite, PackedInts.DEFAULT, true, 15); builder.add(Util.toIntsRef(new BytesRef("stat"), new IntsRef()), outputs.getNoOutput()); builder.add(Util.toIntsRef(new BytesRef("station"), new IntsRef()), outputs.getNoOutput()); final FST fst = builder.finish(); @@ -1135,15 +1135,15 @@ public class TestFSTs extends LuceneTestCase { public void testNonFinalStopNode() throws Exception { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); final Long nothing = outputs.getNoOutput(); - final Builder b = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + final Builder b = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); - final FST fst = new FST(FST.INPUT_TYPE.BYTE1, outputs, false, PackedInts.COMPACT, true, 15); + final FST fst = new FST<>(FST.INPUT_TYPE.BYTE1, outputs, false, PackedInts.COMPACT, true, 15); - final Builder.UnCompiledNode rootNode = new Builder.UnCompiledNode(b, 0); + final Builder.UnCompiledNode rootNode = new Builder.UnCompiledNode<>(b, 0); // Add final stop node { - final Builder.UnCompiledNode node = new Builder.UnCompiledNode(b, 0); + final Builder.UnCompiledNode node = new Builder.UnCompiledNode<>(b, 0); node.isFinal = true; rootNode.addArc('a', node); final Builder.CompiledNode frozen = new Builder.CompiledNode(); @@ -1156,7 +1156,7 @@ public class TestFSTs extends LuceneTestCase { // Add non-final stop node { - final Builder.UnCompiledNode node = new Builder.UnCompiledNode(b, 0); + final Builder.UnCompiledNode node = new Builder.UnCompiledNode<>(b, 0); rootNode.addArc('b', node); final Builder.CompiledNode frozen = new Builder.CompiledNode(); frozen.node = fst.addNode(node); @@ -1181,7 +1181,7 @@ public class TestFSTs extends LuceneTestCase { out.close(); IndexInput in = dir.openInput("fst", IOContext.DEFAULT); - final FST fst2 = new FST(in, outputs); + final FST fst2 = new FST<>(in, outputs); checkStopNodes(fst2, outputs); in.close(); dir.close(); @@ -1214,7 +1214,7 @@ public class TestFSTs extends LuceneTestCase { public void testShortestPaths() throws Exception { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + final Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); final IntsRef scratch = new IntsRef(); builder.add(Util.toIntsRef(new BytesRef("aab"), scratch), 22L); @@ -1254,12 +1254,12 @@ public class TestFSTs extends LuceneTestCase { /** like testShortestPaths, but uses pairoutputs so we have both a weight and an output */ public void testShortestPathsWFST() throws Exception { - PairOutputs outputs = new PairOutputs( + PairOutputs outputs = new PairOutputs<>( PositiveIntOutputs.getSingleton(), // weight PositiveIntOutputs.getSingleton() // output ); - final Builder> builder = new Builder>(FST.INPUT_TYPE.BYTE1, outputs); + final Builder> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); final IntsRef scratch = new IntsRef(); builder.add(Util.toIntsRef(new BytesRef("aab"), scratch), outputs.newPair(22L, 57L)); @@ -1295,11 +1295,11 @@ public class TestFSTs extends LuceneTestCase { final Random random = random(); int numWords = atLeast(1000); - final TreeMap slowCompletor = 
new TreeMap(); - final TreeSet allPrefixes = new TreeSet(); + final TreeMap slowCompletor = new TreeMap<>(); + final TreeSet allPrefixes = new TreeSet<>(); final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + final Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); final IntsRef scratch = new IntsRef(); for (int i = 0; i < numWords; i++) { @@ -1350,19 +1350,19 @@ public class TestFSTs extends LuceneTestCase { Util.MinResult[] r = Util.shortestPaths(fst, arc, fst.outputs.getNoOutput(), minLongComparator, topN, true); // 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion - final List> matches = new ArrayList>(); + final List> matches = new ArrayList<>(); // TODO: could be faster... but its slowCompletor for a reason for (Map.Entry e : slowCompletor.entrySet()) { if (e.getKey().startsWith(prefix)) { //System.out.println(" consider " + e.getKey()); - matches.add(new Util.MinResult(Util.toIntsRef(new BytesRef(e.getKey().substring(prefix.length())), new IntsRef()), + matches.add(new Util.MinResult<>(Util.toIntsRef(new BytesRef(e.getKey().substring(prefix.length())), new IntsRef()), e.getValue() - prefixOutput)); } } assertTrue(matches.size() > 0); - Collections.sort(matches, new TieBreakByInputComparator(minLongComparator)); + Collections.sort(matches, new TieBreakByInputComparator<>(minLongComparator)); if (matches.size() > topN) { matches.subList(topN, matches.size()).clear(); } @@ -1409,14 +1409,14 @@ public class TestFSTs extends LuceneTestCase { public void testShortestPathsWFSTRandom() throws Exception { int numWords = atLeast(1000); - final TreeMap slowCompletor = new TreeMap(); - final TreeSet allPrefixes = new TreeSet(); + final TreeMap slowCompletor = new TreeMap<>(); + final TreeSet allPrefixes = new TreeSet<>(); - PairOutputs outputs = new PairOutputs( + PairOutputs outputs = new PairOutputs<>( PositiveIntOutputs.getSingleton(), // weight PositiveIntOutputs.getSingleton() // output ); - final Builder> builder = new Builder>(FST.INPUT_TYPE.BYTE1, outputs); + final Builder> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); final IntsRef scratch = new IntsRef(); Random random = random(); @@ -1471,19 +1471,19 @@ public class TestFSTs extends LuceneTestCase { Util.MinResult>[] r = Util.shortestPaths(fst, arc, fst.outputs.getNoOutput(), minPairWeightComparator, topN, true); // 2. go thru whole treemap (slowCompletor) and check its actually the best suggestion - final List>> matches = new ArrayList>>(); + final List>> matches = new ArrayList<>(); // TODO: could be faster... 
but its slowCompletor for a reason for (Map.Entry e : slowCompletor.entrySet()) { if (e.getKey().startsWith(prefix)) { //System.out.println(" consider " + e.getKey()); - matches.add(new Util.MinResult>(Util.toIntsRef(new BytesRef(e.getKey().substring(prefix.length())), new IntsRef()), + matches.add(new Util.MinResult<>(Util.toIntsRef(new BytesRef(e.getKey().substring(prefix.length())), new IntsRef()), outputs.newPair(e.getValue().a - prefixOutput.output1, e.getValue().b - prefixOutput.output2))); } } assertTrue(matches.size() > 0); - Collections.sort(matches, new TieBreakByInputComparator>(minPairWeightComparator)); + Collections.sort(matches, new TieBreakByInputComparator<>(minPairWeightComparator)); if (matches.size() > topN) { matches.subList(topN, matches.size()).clear(); } @@ -1500,7 +1500,7 @@ public class TestFSTs extends LuceneTestCase { public void testLargeOutputsOnArrayArcs() throws Exception { final ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton(); - final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + final Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); final byte[] bytes = new byte[300]; final IntsRef input = new IntsRef(); diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestExceptionInBeforeClassHooks.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestExceptionInBeforeClassHooks.java index 4457b041809..18af7afe901 100644 --- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestExceptionInBeforeClassHooks.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestExceptionInBeforeClassHooks.java @@ -117,7 +117,7 @@ public class TestExceptionInBeforeClassHooks extends WithNestedTests { Assert.assertEquals(3, runClasses.getFailureCount()); Assert.assertEquals(3, runClasses.getRunCount()); - ArrayList foobars = new ArrayList(); + ArrayList foobars = new ArrayList<>(); for (Failure f : runClasses.getFailures()) { Matcher m = Pattern.compile("foobar[0-9]+").matcher(f.getTrace()); while (m.find()) { diff --git a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestJUnitRuleOrder.java b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestJUnitRuleOrder.java index 875879f3fac..9d29c9fa787 100644 --- a/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestJUnitRuleOrder.java +++ b/lucene/core/src/test/org/apache/lucene/util/junitcompat/TestJUnitRuleOrder.java @@ -75,7 +75,7 @@ public class TestJUnitRuleOrder extends WithNestedTests { @BeforeClass public static void beforeClassCleanup() { - stack = new Stack(); + stack = new Stack<>(); } @AfterClass diff --git a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java index d997f38f913..db9e91772e7 100644 --- a/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java +++ b/lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java @@ -355,7 +355,7 @@ public class TestPackedInts extends LuceneTestCase { private static List createPackedInts( int valueCount, int bitsPerValue) { - List packedInts = new ArrayList(); + List packedInts = new ArrayList<>(); if (bitsPerValue <= 8) { packedInts.add(new Direct8(valueCount)); } diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java index 8b17e3c4970..7d492d7f6f2 100644 --- 
a/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java +++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/AssociationsFacetsExample.java @@ -106,7 +106,7 @@ public class AssociationsFacetsExample { Facets genre = new TaxonomyFacetSumFloatAssociations("$genre", taxoReader, config, fc); // Retrieve results - List results = new ArrayList(); + List results = new ArrayList<>(); results.add(tags.getTopChildren(10, "tags")); results.add(genre.getTopChildren(10, "genre")); diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java index a638312f38c..0bc2bbbd5d9 100644 --- a/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java +++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/MultiCategoryListsFacetsExample.java @@ -105,7 +105,7 @@ public class MultiCategoryListsFacetsExample { FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc); // Retrieve results - List results = new ArrayList(); + List results = new ArrayList<>(); // Count both "Publish Date" and "Author" dimensions Facets author = new FastTaxonomyFacetCounts("author", taxoReader, config, fc); diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java index eec15d7a5e5..0e1c4a6cc3b 100644 --- a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java +++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleFacetsExample.java @@ -104,7 +104,7 @@ public class SimpleFacetsExample { FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc); // Retrieve results - List results = new ArrayList(); + List results = new ArrayList<>(); // Count both "Publish Date" and "Author" dimensions Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc); @@ -131,7 +131,7 @@ public class SimpleFacetsExample { searcher.search(new MatchAllDocsQuery(), null /*Filter */, fc); // Retrieve results - List results = new ArrayList(); + List results = new ArrayList<>(); // Count both "Publish Date" and "Author" dimensions Facets facets = new FastTaxonomyFacetCounts(taxoReader, config, fc); diff --git a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java index 7fc1a203e0f..46054f6f106 100644 --- a/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java +++ b/lucene/demo/src/java/org/apache/lucene/demo/facet/SimpleSortedSetFacetsExample.java @@ -102,7 +102,7 @@ public class SimpleSortedSetFacetsExample { // Retrieve results Facets facets = new SortedSetDocValuesFacetCounts(state, fc); - List results = new ArrayList(); + List results = new ArrayList<>(); results.add(facets.getTopChildren(10, "Author")); results.add(facets.getTopChildren(10, "Publish Year")); indexReader.close(); diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java index 60e2d7b497f..47d3eaa90c4 100644 --- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java +++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionComparator.java @@ -48,7 +48,7 @@ class ExpressionComparator extends FieldComparator { // TODO: might be cleaner to 
lazy-init 'source' and set scorer after? assert readerContext != null; try { - Map context = new HashMap(); + Map context = new HashMap<>(); assert scorer != null; context.put("scorer", scorer); scores = source.getValues(context, readerContext); diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java index 02f50df0cc6..3cc0fc94340 100644 --- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java +++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionValueSource.java @@ -61,7 +61,7 @@ final class ExpressionValueSource extends ValueSource { public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { Map valuesCache = (Map)context.get("valuesCache"); if (valuesCache == null) { - valuesCache = new HashMap(); + valuesCache = new HashMap<>(); context = new HashMap(context); context.put("valuesCache", valuesCache); } diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java index 26d58870962..e58a6b6fcee 100644 --- a/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java +++ b/lucene/expressions/src/java/org/apache/lucene/expressions/SimpleBindings.java @@ -52,7 +52,7 @@ import org.apache.lucene.search.SortField; * @lucene.experimental */ public final class SimpleBindings extends Bindings { - final Map map = new HashMap(); + final Map map = new HashMap<>(); /** Creates a new empty Bindings */ public SimpleBindings() {} diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java index 1b1bef0989a..0e3f730bb13 100644 --- a/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java +++ b/lucene/expressions/src/java/org/apache/lucene/expressions/js/JavascriptCompiler.java @@ -108,7 +108,7 @@ public class JavascriptCompiler { private static final int MAX_SOURCE_LENGTH = 16384; private final String sourceText; - private final Map externalsMap = new LinkedHashMap(); + private final Map externalsMap = new LinkedHashMap<>(); private final ClassWriter classWriter = new ClassWriter(ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS); private GeneratorAdapter gen; @@ -499,7 +499,7 @@ public class JavascriptCompiler { */ public static final Map DEFAULT_FUNCTIONS; static { - Map map = new HashMap(); + Map map = new HashMap<>(); try { final Properties props = new Properties(); try (Reader in = IOUtils.getDecodingReader(JavascriptCompiler.class, diff --git a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java index bff35774be6..2288a7cc0a0 100644 --- a/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java +++ b/lucene/expressions/src/test/org/apache/lucene/expressions/js/TestCustomFunctions.java @@ -57,7 +57,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** tests a method with no arguments */ public void testNoArgMethod() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getMethod("zeroArgMethod")); Expression expr = JavascriptCompiler.compile("foo()", functions, 
getClass().getClassLoader()); assertEquals(5, expr.evaluate(0, null), DELTA); @@ -67,7 +67,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** tests a method with one arguments */ public void testOneArgMethod() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getMethod("oneArgMethod", double.class)); Expression expr = JavascriptCompiler.compile("foo(3)", functions, getClass().getClassLoader()); assertEquals(6, expr.evaluate(0, null), DELTA); @@ -77,7 +77,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** tests a method with three arguments */ public void testThreeArgMethod() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getMethod("threeArgMethod", double.class, double.class, double.class)); Expression expr = JavascriptCompiler.compile("foo(3, 4, 5)", functions, getClass().getClassLoader()); assertEquals(12, expr.evaluate(0, null), DELTA); @@ -85,7 +85,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** tests a map with 2 functions */ public void testTwoMethods() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getMethod("zeroArgMethod")); functions.put("bar", getClass().getMethod("oneArgMethod", double.class)); Expression expr = JavascriptCompiler.compile("foo() + bar(3)", functions, getClass().getClassLoader()); @@ -96,7 +96,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** wrong return type: must be double */ public void testWrongReturnType() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getMethod("bogusReturnType")); try { JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader()); @@ -110,7 +110,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** wrong param type: must be doubles */ public void testWrongParameterType() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getMethod("bogusParameterType", String.class)); try { JavascriptCompiler.compile("foo(2)", functions, getClass().getClassLoader()); @@ -124,7 +124,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** wrong modifiers: must be static */ public void testWrongNotStatic() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getMethod("nonStaticMethod")); try { JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader()); @@ -138,7 +138,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** wrong modifiers: must be public */ public void testWrongNotPublic() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", getClass().getDeclaredMethod("nonPublicMethod")); try { JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader()); @@ -154,7 +154,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** wrong class modifiers: class containing method is not public */ public void testWrongNestedNotPublic() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", NestedNotPublic.class.getMethod("method")); try { JavascriptCompiler.compile("foo()", functions, getClass().getClassLoader()); @@ -244,7 +244,7 @@ public class TestCustomFunctions extends LuceneTestCase { 
/** the method throws an exception. We should check the stack trace that it contains the source code of the expression as file name. */ public void testThrowingException() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo", StaticThrowingException.class.getMethod("method")); String source = "3 * foo() / 5"; Expression expr = JavascriptCompiler.compile(source, functions, getClass().getClassLoader()); @@ -263,7 +263,7 @@ public class TestCustomFunctions extends LuceneTestCase { /** test that namespaces work with custom expressions. */ public void testNamespaces() throws Exception { - Map functions = new HashMap(); + Map functions = new HashMap<>(); functions.put("foo.bar", getClass().getMethod("zeroArgMethod")); String source = "foo.bar()"; Expression expr = JavascriptCompiler.compile(source, functions, getClass().getClassLoader()); diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java index 8a390347bc1..29b4068f8e8 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillDownQuery.java @@ -56,7 +56,7 @@ public final class DrillDownQuery extends Query { private final FacetsConfig config; private final BooleanQuery query; - private final Map drillDownDims = new LinkedHashMap(); + private final Map drillDownDims = new LinkedHashMap<>(); /** Used by clone() */ DrillDownQuery(FacetsConfig config, BooleanQuery query, Map drillDownDims) { @@ -233,8 +233,8 @@ public final class DrillDownQuery extends Query { return new MatchAllDocsQuery(); } - List filters = new ArrayList(); - List queries = new ArrayList(); + List filters = new ArrayList<>(); + List queries = new ArrayList<>(); List clauses = query.clauses(); Query baseQuery; int startIndex; diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java index e8741eb60be..01b3b6c5b53 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSideways.java @@ -101,7 +101,7 @@ public class DrillSideways { protected Facets buildFacetsResult(FacetsCollector drillDowns, FacetsCollector[] drillSideways, String[] drillSidewaysDims) throws IOException { Facets drillDownFacets; - Map drillSidewaysFacets = new HashMap(); + Map drillSidewaysFacets = new HashMap<>(); if (taxoReader != null) { drillDownFacets = new FastTaxonomyFacetCounts(taxoReader, config, drillDowns); diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java index bf02d6246e4..912725d4c65 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/FacetsCollector.java @@ -54,7 +54,7 @@ public class FacetsCollector extends Collector { private int totalHits; private float[] scores; private final boolean keepScores; - private final List matchingDocs = new ArrayList(); + private final List matchingDocs = new ArrayList<>(); private Docs docs; /** diff --git a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java index 12351a10247..e72f9c4b7bf 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java +++ 
b/lucene/facet/src/java/org/apache/lucene/facet/FacetsConfig.java @@ -62,11 +62,11 @@ public class FacetsConfig { * doc values). */ public static final String DEFAULT_INDEX_FIELD_NAME = "$facets"; - private final Map fieldTypes = new ConcurrentHashMap(); + private final Map fieldTypes = new ConcurrentHashMap<>(); // Used only for best-effort detection of app mixing // int/float/bytes in a single indexed field: - private final Map assocDimTypes = new ConcurrentHashMap(); + private final Map assocDimTypes = new ConcurrentHashMap<>(); /** Holds the configuration for one dimension * @@ -198,15 +198,15 @@ public class FacetsConfig { */ public Document build(TaxonomyWriter taxoWriter, Document doc) throws IOException { // Find all FacetFields, collated by the actual field: - Map> byField = new HashMap>(); + Map> byField = new HashMap<>(); // ... and also all SortedSetDocValuesFacetFields: - Map> dvByField = new HashMap>(); + Map> dvByField = new HashMap<>(); // ... and also all AssociationFacetFields - Map> assocByField = new HashMap>(); + Map> assocByField = new HashMap<>(); - Set seenDims = new HashSet(); + Set seenDims = new HashSet<>(); for (IndexableField field : doc.indexableFields()) { if (field.fieldType() == FacetField.TYPE) { @@ -218,7 +218,7 @@ public class FacetsConfig { String indexFieldName = dimConfig.indexFieldName; List fields = byField.get(indexFieldName); if (fields == null) { - fields = new ArrayList(); + fields = new ArrayList<>(); byField.put(indexFieldName, fields); } fields.add(facetField); @@ -233,7 +233,7 @@ public class FacetsConfig { String indexFieldName = dimConfig.indexFieldName; List fields = dvByField.get(indexFieldName); if (fields == null) { - fields = new ArrayList(); + fields = new ArrayList<>(); dvByField.put(indexFieldName, fields); } fields.add(facetField); @@ -255,7 +255,7 @@ public class FacetsConfig { String indexFieldName = dimConfig.indexFieldName; List fields = assocByField.get(indexFieldName); if (fields == null) { - fields = new ArrayList(); + fields = new ArrayList<>(); assocByField.put(indexFieldName, fields); } fields.add(facetField); @@ -514,7 +514,7 @@ public class FacetsConfig { * #pathToString}) back into the original {@code * String[]}. 
*/ public static String[] stringToPath(String s) { - List parts = new ArrayList(); + List parts = new ArrayList<>(); int length = s.length(); if (length == 0) { return new String[0]; diff --git a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeCounter.java b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeCounter.java index 3e9db730a2d..0f047b2d9a1 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeCounter.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/range/LongRangeCounter.java @@ -44,7 +44,7 @@ final class LongRangeCounter { // track the start vs end case separately because if a // given point is both, then it must be its own // elementary interval: - Map endsMap = new HashMap(); + Map endsMap = new HashMap<>(); endsMap.put(Long.MIN_VALUE, 1); endsMap.put(Long.MAX_VALUE, 2); @@ -64,11 +64,11 @@ final class LongRangeCounter { } } - List endsList = new ArrayList(endsMap.keySet()); + List endsList = new ArrayList<>(endsMap.keySet()); Collections.sort(endsList); // Build elementaryIntervals (a 1D Venn diagram): - List elementaryIntervals = new ArrayList(); + List elementaryIntervals = new ArrayList<>(); int upto0 = 1; long v = endsList.get(0); long prev; @@ -281,7 +281,7 @@ final class LongRangeCounter { // Our range is fully included in the incoming // range; add to our output list: if (outputs == null) { - outputs = new ArrayList(); + outputs = new ArrayList<>(); } outputs.add(index); } else if (left != null) { diff --git a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java index 9665b28a1d7..8f386193af1 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/DefaultSortedSetDocValuesReaderState.java @@ -42,7 +42,7 @@ public class DefaultSortedSetDocValuesReaderState extends SortedSetDocValuesRead /** {@link IndexReader} passed to the constructor. */ public final IndexReader origReader; - private final Map prefixToOrdRange = new HashMap(); + private final Map prefixToOrdRange = new HashMap<>(); /** Creates this, pulling doc values from the default {@link * FacetsConfig#DEFAULT_INDEX_FIELD_NAME}. 
*/ diff --git a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetCounts.java b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetCounts.java index 76c7205b33f..92107481e80 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetCounts.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/sortedset/SortedSetDocValuesFacetCounts.java @@ -266,7 +266,7 @@ public class SortedSetDocValuesFacetCounts extends Facets { @Override public List getAllDims(int topN) throws IOException { - List results = new ArrayList(); + List results = new ArrayList<>(); for(Map.Entry ent : state.getPrefixToOrdRange().entrySet()) { FacetResult fr = getDim(ent.getKey(), ent.getValue(), topN); if (fr != null) { diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java index e6ada5f1809..bbd3250a694 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/CachedOrdinalsReader.java @@ -57,7 +57,7 @@ public class CachedOrdinalsReader extends OrdinalsReader { private final OrdinalsReader source; - private final Map ordsCache = new WeakHashMap(); + private final Map ordsCache = new WeakHashMap<>(); /** Sole constructor. */ public CachedOrdinalsReader(OrdinalsReader source) { diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java index 1342c187866..6514a3c1dcb 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java @@ -86,7 +86,7 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets { private final void sumValues(List matchingDocs, boolean keepScores, ValueSource valueSource) throws IOException { final FakeScorer scorer = new FakeScorer(); - Map context = new HashMap(); + Map context = new HashMap<>(); if (keepScores) { context.put("scorer", scorer); } diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacets.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacets.java index 0c4d5865609..d1d9e11e059 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacets.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacets.java @@ -85,7 +85,7 @@ public abstract class TaxonomyFacets extends Facets { @Override public List getAllDims(int topN) throws IOException { int ord = children[TaxonomyReader.ROOT_ORDINAL]; - List results = new ArrayList(); + List results = new ArrayList<>(); while (ord != TaxonomyReader.INVALID_ORDINAL) { String dim = taxoReader.getPath(ord).components[0]; FacetsConfig.DimConfig dimConfig = config.getDimConfig(dim); diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java index 0d4d6ce2b9e..a084278e8a3 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java @@ -102,8 +102,8 @@ public class DirectoryTaxonomyReader extends TaxonomyReader { 
// These are the default cache sizes; they can be configured after // construction with the cache's setMaxSize() method - ordinalCache = new LRUHashMap(DEFAULT_CACHE_VALUE); - categoryCache = new LRUHashMap(DEFAULT_CACHE_VALUE); + ordinalCache = new LRUHashMap<>(DEFAULT_CACHE_VALUE); + categoryCache = new LRUHashMap<>(DEFAULT_CACHE_VALUE); } /** @@ -121,8 +121,8 @@ public class DirectoryTaxonomyReader extends TaxonomyReader { // These are the default cache sizes; they can be configured after // construction with the cache's setMaxSize() method - ordinalCache = new LRUHashMap(DEFAULT_CACHE_VALUE); - categoryCache = new LRUHashMap(DEFAULT_CACHE_VALUE); + ordinalCache = new LRUHashMap<>(DEFAULT_CACHE_VALUE); + categoryCache = new LRUHashMap<>(DEFAULT_CACHE_VALUE); } private synchronized void initTaxoArrays() throws IOException { diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java index 9d5b24d6d97..55d8384b64c 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java @@ -620,7 +620,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter { /** Combine original user data with the taxonomy epoch. */ private Map combinedCommitData(Map commitData) { - Map m = new HashMap(); + Map m = new HashMap<>(); if (commitData != null) { m.putAll(commitData); } diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CharBlockArray.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CharBlockArray.java index 5c7d46fd6af..d182f8b6f1c 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CharBlockArray.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/CharBlockArray.java @@ -60,7 +60,7 @@ class CharBlockArray implements Appendable, Serializable, CharSequence { } CharBlockArray(int blockSize) { - this.blocks = new ArrayList(); + this.blocks = new ArrayList<>(); this.blockSize = blockSize; addBlock(); } diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/NameIntCacheLRU.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/NameIntCacheLRU.java index 1626ada3e5b..b42549650b8 100644 --- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/NameIntCacheLRU.java +++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/writercache/NameIntCacheLRU.java @@ -54,9 +54,9 @@ class NameIntCacheLRU { private void createCache (int maxSize) { if (maxSize<Integer.MAX_VALUE) { - cache = new LinkedHashMap(1000,(float)0.7,true); //for LRU + cache = new LinkedHashMap<>(1000,(float)0.7,true); //for LRU } else { - cache = new HashMap(1000,(float)0.7); //no need for LRU + cache = new HashMap<>(1000,(float)0.7); //no need for LRU } } diff --git a/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java b/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java index 1caad4072d5..644b3adc8e0 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/AssertingSubDocsAtOnceCollector.java @@ -36,7 +36,7 @@ class AssertingSubDocsAtOnceCollector extends Collector { @Override public void setScorer(Scorer s) { // Gathers all scorers, including s and "under": - allScorers = new
ArrayList(); + allScorers = new ArrayList<>(); allScorers.add(s); int upto = 0; while(upto < allScorers.size()) { diff --git a/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java b/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java index 80849a94d89..1622e3d2f08 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/FacetTestCase.java @@ -177,14 +177,14 @@ public abstract class FacetTestCase extends LuceneTestCase { protected void assertFloatValuesEquals(List a, List b) { assertEquals(a.size(), b.size()); float lastValue = Float.POSITIVE_INFINITY; - Map aByDim = new HashMap(); + Map aByDim = new HashMap<>(); for(int i=0;i bByDim = new HashMap(); + Map bByDim = new HashMap<>(); for(int i=0;i values = new HashSet(); + Set values = new HashSet<>(); while (values.size() < valueCount) { String s = TestUtil.randomRealisticUnicodeString(random()); //String s = _TestUtil.randomString(random()); @@ -434,7 +434,7 @@ public class TestDrillSideways extends FacetTestCase { valueCount *= 2; } - List docs = new ArrayList(); + List docs = new ArrayList<>(); for(int i=0;i drillSidewaysFacets = new HashMap(); + Map drillSidewaysFacets = new HashMap<>(); Facets drillDownFacets = getTaxonomyFacetCounts(taxoReader, config, drillDowns); if (drillSideways != null) { for(int i=0;i scores = new HashMap(); + Map scores = new HashMap<>(); for(ScoreDoc sd : hits.scoreDocs) { scores.put(s.doc(sd.doc).get("id"), sd.score); } @@ -847,7 +847,7 @@ public class TestDrillSideways extends FacetTestCase { String[][] dimValues, Filter onlyEven) throws Exception { int numDims = dimValues.length; - List hits = new ArrayList(); + List hits = new ArrayList<>(); Counters drillDownCounts = new Counters(dimValues); Counters[] drillSidewaysCounts = new Counters[dimValues.length]; for(int dim=0;dim idToDocID = new HashMap(); + Map idToDocID = new HashMap<>(); for(int i=0;i actualValues = new HashMap(); + Map actualValues = new HashMap<>(); if (fr != null) { for(LabelAndValue labelValue : fr.labelValues) { diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java b/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java index db25349c7ab..d9888e5cb3c 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/TestMultipleIndexFields.java @@ -120,7 +120,7 @@ public class TestMultipleIndexFields extends FacetTestCase { FacetsCollector sfc = performSearch(tr, ir, searcher); - Map facetsMap = new HashMap(); + Map facetsMap = new HashMap<>(); facetsMap.put("Author", getTaxonomyFacetCounts(tr, config, sfc, "$author")); Facets facets = new MultiFacets(facetsMap, getTaxonomyFacetCounts(tr, config, sfc)); @@ -160,7 +160,7 @@ public class TestMultipleIndexFields extends FacetTestCase { FacetsCollector sfc = performSearch(tr, ir, searcher); - Map facetsMap = new HashMap(); + Map facetsMap = new HashMap<>(); Facets facets2 = getTaxonomyFacetCounts(tr, config, sfc, "$music"); facetsMap.put("Band", facets2); facetsMap.put("Composer", facets2); @@ -213,7 +213,7 @@ public class TestMultipleIndexFields extends FacetTestCase { FacetsCollector sfc = performSearch(tr, ir, searcher); - Map facetsMap = new HashMap(); + Map facetsMap = new HashMap<>(); facetsMap.put("Band", getTaxonomyFacetCounts(tr, config, sfc, "$bands")); facetsMap.put("Composer", getTaxonomyFacetCounts(tr, config, sfc, "$composers")); Facets facets = new 
MultiFacets(facetsMap, getTaxonomyFacetCounts(tr, config, sfc)); @@ -255,7 +255,7 @@ public class TestMultipleIndexFields extends FacetTestCase { FacetsCollector sfc = performSearch(tr, ir, searcher); - Map facetsMap = new HashMap(); + Map facetsMap = new HashMap<>(); Facets facets2 = getTaxonomyFacetCounts(tr, config, sfc, "$music"); facetsMap.put("Band", facets2); facetsMap.put("Composer", facets2); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java index 0dc324be568..238af9838c6 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/range/TestRangeFacetCounts.java @@ -260,7 +260,7 @@ public class TestRangeFacetCounts extends FacetTestCase { } } - Map byDim = new HashMap(); + Map byDim = new HashMap<>(); byDim.put("field", new LongRangeFacetCounts("field", fieldFC, new LongRange("less than 10", 0L, true, 10L, false), diff --git a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java index 4210ba7253c..6d478cda85f 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java @@ -310,7 +310,7 @@ public class TestSortedSetDocValuesFacets extends FacetTestCase { // Slow, yet hopefully bug-free, faceting: @SuppressWarnings({"rawtypes","unchecked"}) Map[] expectedCounts = new HashMap[numDims]; for(int i=0;i<numDims;i++) { - expectedCounts[i] = new HashMap(); + expectedCounts[i] = new HashMap<>(); } for(TestDoc doc : testDocs) { @@ -328,9 +328,9 @@ } } - List expected = new ArrayList(); + List expected = new ArrayList<>(); for(int i=0;i<numDims;i++) { - List labelValues = new ArrayList(); + List labelValues = new ArrayList<>(); int totCount = 0; for(Map.Entry ent : expectedCounts[i].entrySet()) { labelValues.add(new LabelAndValue(ent.getKey(), ent.getValue())); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestLRUHashMap.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestLRUHashMap.java index 2607a1e19b0..f86a7a79907 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestLRUHashMap.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestLRUHashMap.java @@ -27,7 +27,7 @@ public class TestLRUHashMap extends FacetTestCase { // recently used @Test public void testLRU() throws Exception { - LRUHashMap lru = new LRUHashMap(3); + LRUHashMap lru = new LRUHashMap<>(3); assertEquals(0, lru.size()); lru.put("one", "Hello world"); assertEquals(1, lru.size()); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java index 9482176ea4e..daf96721c68 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestSearcherTaxonomyManager.java @@ -67,8 +67,8 @@ public class TestSearcherTaxonomyManager extends FacetTestCase { @Override public void run() { try { - Set seen = new HashSet(); - List paths = new ArrayList(); + Set seen = new HashSet<>(); + List paths = new ArrayList<>(); while (true) { Document doc = new Document(); int numPaths =
TestUtil.nextInt(random(), 1, 5); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java index a230d4fd797..60aa0fe189a 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyCombined.java @@ -534,7 +534,7 @@ public class TestTaxonomyCombined extends FacetTestCase { for (int i=0; i<expectedCategories.length; i++) { - ArrayList expectedChildren = new ArrayList(); + ArrayList expectedChildren = new ArrayList<>(); for (int j=expectedCategories.length-1; j>=0; j--) { if (expectedCategories[j].length != expectedCategories[i].length+1) { continue; // not longer by 1, so can't be a child diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java index ec4abc3775c..a63a27a6259 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts.java @@ -450,7 +450,7 @@ public class TestTaxonomyFacetCounts extends FacetTestCase { FacetResult result = facets.getTopChildren(Integer.MAX_VALUE, "dim"); assertEquals(numLabels, result.labelValues.length); - Set allLabels = new HashSet(); + Set allLabels = new HashSet<>(); for (LabelAndValue labelValue : result.labelValues) { allLabels.add(labelValue.label); assertEquals(1, labelValue.value.intValue()); @@ -710,7 +710,7 @@ public class TestTaxonomyFacetCounts extends FacetTestCase { // Slow, yet hopefully bug-free, faceting: @SuppressWarnings({"rawtypes","unchecked"}) Map[] expectedCounts = new HashMap[numDims]; for(int i=0;i<numDims;i++) { - expectedCounts[i] = new HashMap(); + expectedCounts[i] = new HashMap<>(); } for(TestDoc doc : testDocs) { @@ -728,9 +728,9 @@ } } - List expected = new ArrayList(); + List expected = new ArrayList<>(); for(int i=0;i<numDims;i++) { - List labelValues = new ArrayList(); + List labelValues = new ArrayList<>(); int totCount = 0; for(Map.Entry ent : expectedCounts[i].entrySet()) { labelValues.add(new LabelAndValue(ent.getKey(), ent.getValue())); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java index f60dfadc961..d7f5940fa59 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetCounts2.java @@ -101,14 +101,14 @@ public class TestTaxonomyFacetCounts2 extends FacetTestCase { // category is not added twice.
int numFacetsA = random.nextInt(3) + 1; // 1-3 int numFacetsB = random.nextInt(2) + 1; // 1-2 - ArrayList categories_a = new ArrayList(); + ArrayList categories_a = new ArrayList<>(); categories_a.addAll(Arrays.asList(CATEGORIES_A)); - ArrayList categories_b = new ArrayList(); + ArrayList categories_b = new ArrayList<>(); categories_b.addAll(Arrays.asList(CATEGORIES_B)); Collections.shuffle(categories_a, random); Collections.shuffle(categories_b, random); - ArrayList categories = new ArrayList(); + ArrayList categories = new ArrayList<>(); categories.addAll(categories_a.subList(0, numFacetsA)); categories.addAll(categories_b.subList(0, numFacetsB)); @@ -210,7 +210,7 @@ public class TestTaxonomyFacetCounts2 extends FacetTestCase { // initialize expectedCounts w/ 0 for all categories private static Map newCounts() { - Map counts = new HashMap(); + Map counts = new HashMap<>(); counts.put(CP_A, 0); counts.put(CP_B, 0); counts.put(CP_C, 0); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java index 6735905eadf..316e5f632f6 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/TestTaxonomyFacetSumValueSource.java @@ -464,7 +464,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase { // Slow, yet hopefully bug-free, faceting: @SuppressWarnings({"rawtypes","unchecked"}) Map[] expectedValues = new HashMap[numDims]; for(int i=0;i<numDims;i++) { - expectedValues[i] = new HashMap(); + expectedValues[i] = new HashMap<>(); } for(TestDoc doc : testDocs) { @@ -482,9 +482,9 @@ } } - List expected = new ArrayList(); + List expected = new ArrayList<>(); for(int i=0;i<numDims;i++) { - List labelValues = new ArrayList(); + List labelValues = new ArrayList<>(); double totValue = 0; for(Map.Entry ent : expectedValues[i].entrySet()) { labelValues.add(new LabelAndValue(ent.getKey(), ent.getValue())); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java index f61e27338c9..fdf492b01a4 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java @@ -229,7 +229,7 @@ public class TestAddTaxonomy extends FacetTestCase { DirectoryTaxonomyReader dtr = new DirectoryTaxonomyReader(dest); // +2 to account for the root category + "a" assertEquals(numCategories + 2, dtr.getSize()); - HashSet categories = new HashSet(); + HashSet categories = new HashSet<>(); for (int i = 1; i < dtr.getSize(); i++) { FacetLabel cat = dtr.getPath(i); assertTrue("category " + cat + " already existed", categories.add(cat)); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java index d28e7b7eb19..30168b4f715 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestConcurrentFacetedIndexing.java @@ -82,7 +82,7 @@ public class TestConcurrentFacetedIndexing extends FacetTestCase { final AtomicInteger numDocs = new
AtomicInteger(atLeast(10000)); final Directory indexDir = newDirectory(); final Directory taxoDir = newDirectory(); - final ConcurrentHashMap values = new ConcurrentHashMap(); + final ConcurrentHashMap values = new ConcurrentHashMap<>(); final IndexWriter iw = new IndexWriter(indexDir, newIndexWriterConfig(TEST_VERSION_CURRENT, null)); final DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir, OpenMode.CREATE, newTaxoWriterCache(numDocs.get())); final Thread[] indexThreads = new Thread[atLeast(4)]; diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java index fe5b09cb413..623af3634ed 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyReader.java @@ -504,7 +504,7 @@ public class TestDirectoryTaxonomyReader extends FacetTestCase { assertEquals(TaxonomyReader.INVALID_ORDINAL, it.next()); // root's children - Set roots = new HashSet(Arrays.asList("a", "b", "c")); + Set roots = new HashSet<>(Arrays.asList("a", "b", "c")); it = taxoReader.getChildren(TaxonomyReader.ROOT_ORDINAL); while (!roots.isEmpty()) { FacetLabel root = taxoReader.getPath(it.next()); diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java index a676b280112..c79292a6470 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestDirectoryTaxonomyWriter.java @@ -93,7 +93,7 @@ public class TestDirectoryTaxonomyWriter extends FacetTestCase { DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(dir, OpenMode.CREATE_OR_APPEND, NO_OP_CACHE); taxoWriter.addCategory(new FacetLabel("a")); taxoWriter.addCategory(new FacetLabel("b")); - Map userCommitData = new HashMap(); + Map userCommitData = new HashMap<>(); userCommitData.put("testing", "1 2 3"); taxoWriter.setCommitData(userCommitData); taxoWriter.close(); @@ -243,7 +243,7 @@ public class TestDirectoryTaxonomyWriter extends FacetTestCase { final int range = ncats * 3; // affects the categories selection final AtomicInteger numCats = new AtomicInteger(ncats); final Directory dir = newDirectory(); - final ConcurrentHashMap values = new ConcurrentHashMap(); + final ConcurrentHashMap values = new ConcurrentHashMap<>(); final double d = random().nextDouble(); final TaxonomyWriterCache cache; if (d < 0.7) { diff --git a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java index 715a80e7c74..eddcb1b492b 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/taxonomy/writercache/TestCompactLabelToOrdinal.java @@ -118,7 +118,7 @@ public class TestCompactLabelToOrdinal extends FacetTestCase { } private static class LabelToOrdinalMap extends LabelToOrdinal { - private Map map = new HashMap(); + private Map map = new HashMap<>(); LabelToOrdinalMap() { } diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java 
b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java index 8aab5807fa5..032abbb5dfd 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java @@ -101,7 +101,7 @@ public class SearchGroup { public final T groupValue; public Object[] topValues; - public final List> shards = new ArrayList>(); + public final List> shards = new ArrayList<>(); public int minShardIndex; public boolean processed; public boolean inQueue; @@ -214,7 +214,7 @@ public class SearchGroup { if (isNew) { // Start a new group: //System.out.println(" new"); - mergedGroup = new MergedGroup(group.groupValue); + mergedGroup = new MergedGroup<>(group.groupValue); mergedGroup.minShardIndex = shard.shardIndex; assert group.sortValues != null; mergedGroup.topValues = group.sortValues; @@ -281,12 +281,12 @@ public class SearchGroup { final Collection> shard = shards.get(shardIDX); if (!shard.isEmpty()) { //System.out.println(" insert shard=" + shardIDX); - updateNextGroup(maxQueueSize, new ShardIter(shard, shardIDX)); + updateNextGroup(maxQueueSize, new ShardIter<>(shard, shardIDX)); } } // Pull merged topN groups: - final List> newTopGroups = new ArrayList>(); + final List> newTopGroups = new ArrayList<>(); int count = 0; @@ -295,7 +295,7 @@ public class SearchGroup { group.processed = true; //System.out.println(" pop: shards=" + group.shards + " group=" + (group.groupValue == null ? "null" : (((BytesRef) group.groupValue).utf8ToString())) + " sortValues=" + Arrays.toString(group.topValues)); if (count++ >= offset) { - final SearchGroup newGroup = new SearchGroup(); + final SearchGroup newGroup = new SearchGroup<>(); newGroup.groupValue = group.groupValue; newGroup.sortValues = group.topValues; newTopGroups.add(newGroup); diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java index 72b82466237..c778162e0ca 100644 --- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java +++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/function/FunctionAllGroupsCollector.java @@ -45,7 +45,7 @@ public class FunctionAllGroupsCollector extends AbstractAllGroupsCollector vsContext; private final ValueSource groupBy; - private final SortedSet groups = new TreeSet(); + private final SortedSet groups = new TreeSet<>(); private FunctionValues.ValueFiller filler; private MutableValue mval; diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index e740e678318..71bee9a1d78 100644 --- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -320,7 +320,7 @@ public class TestGrouping extends LuceneTestCase { BytesRef groupValue = mvalGd.groupValue.exists() ? 
((MutableValueStr) mvalGd.groupValue).value : null; groups.add(new GroupDocs<>(Float.NaN, mvalGd.maxScore, mvalGd.totalHits, mvalGd.scoreDocs, groupValue, mvalGd.groupSortValues)); } - return new TopGroups(mvalTopGroups.groupSort, mvalTopGroups.withinGroupSort, mvalTopGroups.totalHitCount, mvalTopGroups.totalGroupedHitCount, groups.toArray(new GroupDocs[groups.size()]), Float.NaN); + return new TopGroups<>(mvalTopGroups.groupSort, mvalTopGroups.withinGroupSort, mvalTopGroups.totalHitCount, mvalTopGroups.totalGroupedHitCount, groups.toArray(new GroupDocs[groups.size()]), Float.NaN); } fail(); return null; diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java index e59d43080af..758e35fc090 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java @@ -156,7 +156,7 @@ public class Highlighter TextFragment[] frag =getBestTextFragments(tokenStream,text, true,maxNumFragments); //Get text - ArrayList fragTexts = new ArrayList(); + ArrayList fragTexts = new ArrayList<>(); for (int i = 0; i < frag.length; i++) { if ((frag[i] != null) && (frag[i].getScore() > 0)) @@ -182,7 +182,7 @@ public class Highlighter int maxNumFragments) throws IOException, InvalidTokenOffsetsException { - ArrayList docFrags = new ArrayList(); + ArrayList docFrags = new ArrayList<>(); StringBuilder newText=new StringBuilder(); CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class); @@ -327,7 +327,7 @@ public class Highlighter if(mergeContiguousFragments) { mergeContiguousFragments(frag); - ArrayList fragTexts = new ArrayList(); + ArrayList fragTexts = new ArrayList<>(); for (int i = 0; i < frag.length; i++) { if ((frag[i] != null) && (frag[i].getScore() > 0)) diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java index 09806fcd339..38dd0e5fc13 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java @@ -102,7 +102,7 @@ public class QueryScorer implements Scorer { * @param weightedTerms an array of pre-created {@link WeightedSpanTerm}s */ public QueryScorer(WeightedSpanTerm[] weightedTerms) { - this.fieldWeightedSpanTerms = new HashMap(weightedTerms.length); + this.fieldWeightedSpanTerms = new HashMap<>(weightedTerms.length); for (int i = 0; i < weightedTerms.length; i++) { WeightedSpanTerm existingTerm = fieldWeightedSpanTerms.get(weightedTerms[i].term); @@ -239,7 +239,7 @@ public class QueryScorer implements Scorer { */ @Override public void startFragment(TextFragment newFragment) { - foundTerms = new HashSet(); + foundTerms = new HashSet<>(); totalScore = 0; } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java index 2fe6aa59ae4..59ceb9c830b 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java @@ -88,7 +88,7 @@ public final class QueryTermExtractor */ public static final WeightedTerm[] getTerms(Query query, boolean prohibited, String fieldName) { - HashSet 
terms=new HashSet(); + HashSet terms=new HashSet<>(); getTerms(query,terms,prohibited,fieldName); return terms.toArray(new WeightedTerm[0]); } @@ -112,7 +112,7 @@ public final class QueryTermExtractor else if (query instanceof FilteredQuery) getTermsFromFilteredQuery((FilteredQuery) query, terms, prohibited, fieldName); else { - HashSet nonWeightedTerms = new HashSet(); + HashSet nonWeightedTerms = new HashSet<>(); query.extractTerms(nonWeightedTerms); for (Iterator iter = nonWeightedTerms.iterator(); iter.hasNext(); ) { Term term = iter.next(); diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java index 17ba516742d..69f93ec4c45 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java @@ -77,7 +77,7 @@ public class QueryTermScorer implements Scorer { } public QueryTermScorer(WeightedTerm[] weightedTerms) { - termsToFind = new HashMap(); + termsToFind = new HashMap<>(); for (int i = 0; i < weightedTerms.length; i++) { WeightedTerm existingTerm = termsToFind .get(weightedTerms[i].term); @@ -109,7 +109,7 @@ public class QueryTermScorer implements Scorer { */ @Override public void startFragment(TextFragment newFragment) { - uniqueTermsInFragment = new HashSet(); + uniqueTermsInFragment = new HashSet<>(); currentTextFragment = newFragment; totalScore = 0; diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java index 14a8f7f5239..68433a9425f 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java @@ -257,7 +257,7 @@ public class TokenSources { // tokens NOT stored with positions or not guaranteed contiguous - must // add to list and sort later if (unsortedTokens == null) { - unsortedTokens = new ArrayList(); + unsortedTokens = new ArrayList<>(); } unsortedTokens.add(token); } diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java index 42db712998e..bd87206736a 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java @@ -39,7 +39,7 @@ import org.apache.lucene.util.CollectionUtil; */ public final class TokenStreamFromTermPositionVector extends TokenStream { - private final List positionedTokens = new ArrayList(); + private final List positionedTokens = new ArrayList<>(); private Iterator tokensAtCurrentPosition; diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTerm.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTerm.java index ff395468383..6a066836918 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTerm.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTerm.java @@ -28,11 +28,11 @@ import java.util.List; */ public class WeightedSpanTerm extends WeightedTerm{ boolean positionSensitive; - private List positionSpans = new ArrayList(); 
+ private List positionSpans = new ArrayList<>(); public WeightedSpanTerm(float weight, String term) { super(weight, term); - this.positionSpans = new ArrayList(); + this.positionSpans = new ArrayList<>(); } public WeightedSpanTerm(float weight, String term, boolean positionSensitive) { diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java index a2b1bd45b2d..f38f74d765d 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java @@ -175,7 +175,7 @@ public class WeightedSpanTermExtractor { final Term[] termArray = termArrays.get(i); List disjuncts = disjunctLists[positions[i]]; if (disjuncts == null) { - disjuncts = (disjunctLists[positions[i]] = new ArrayList(termArray.length)); + disjuncts = (disjunctLists[positions[i]] = new ArrayList<>(termArray.length)); ++distinctPositions; } for (int j = 0; j < termArray.length; ++j) { @@ -243,10 +243,10 @@ public class WeightedSpanTermExtractor { Set fieldNames; if (fieldName == null) { - fieldNames = new HashSet(); + fieldNames = new HashSet<>(); collectSpanQueryFields(spanQuery, fieldNames); } else { - fieldNames = new HashSet(1); + fieldNames = new HashSet<>(1); fieldNames.add(fieldName); } // To support the use of the default field name @@ -254,9 +254,9 @@ public class WeightedSpanTermExtractor { fieldNames.add(defaultField); } - Map queries = new HashMap(); + Map queries = new HashMap<>(); - Set nonWeightedTerms = new HashSet(); + Set nonWeightedTerms = new HashSet<>(); final boolean mustRewriteQuery = mustRewriteQuery(spanQuery); if (mustRewriteQuery) { for (final String field : fieldNames) { @@ -268,7 +268,7 @@ public class WeightedSpanTermExtractor { spanQuery.extractTerms(nonWeightedTerms); } - List spanPositions = new ArrayList(); + List spanPositions = new ArrayList<>(); for (final String field : fieldNames) { final SpanQuery q; @@ -278,8 +278,8 @@ public class WeightedSpanTermExtractor { q = spanQuery; } AtomicReaderContext context = getLeafContext(); - Map termContexts = new HashMap(); - TreeSet extractedTerms = new TreeSet(); + Map termContexts = new HashMap<>(); + TreeSet extractedTerms = new TreeSet<>(); q.extractTerms(extractedTerms); for (Term term : extractedTerms) { termContexts.put(term, TermContext.build(context, term)); @@ -328,7 +328,7 @@ public class WeightedSpanTermExtractor { * @throws IOException If there is a low-level I/O error */ protected void extractWeightedTerms(Map terms, Query query) throws IOException { - Set nonWeightedTerms = new HashSet(); + Set nonWeightedTerms = new HashSet<>(); query.extractTerms(nonWeightedTerms); for (final Term queryTerm : nonWeightedTerms) { @@ -468,7 +468,7 @@ public class WeightedSpanTermExtractor { this.fieldName = null; } - Map terms = new PositionCheckingMap(); + Map terms = new PositionCheckingMap<>(); this.tokenStream = tokenStream; try { extract(query, terms); @@ -505,7 +505,7 @@ public class WeightedSpanTermExtractor { } this.tokenStream = tokenStream; - Map terms = new PositionCheckingMap(); + Map terms = new PositionCheckingMap<>(); extract(query, terms); int totalNumDocs = reader.maxDoc(); diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java 
b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java index ccfe124103b..e55ae3651eb 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java @@ -299,7 +299,7 @@ public class PostingsHighlighter { * {@link IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS} */ public Map highlightFields(String fieldsIn[], Query query, IndexSearcher searcher, int[] docidsIn, int maxPassagesIn[]) throws IOException { - Map snippets = new HashMap(); + Map snippets = new HashMap<>(); for(Map.Entry ent : highlightFieldsAsObjects(fieldsIn, query, searcher, docidsIn, maxPassagesIn).entrySet()) { Object[] snippetObjects = ent.getValue(); String[] snippetStrings = new String[snippetObjects.length]; @@ -346,7 +346,7 @@ public class PostingsHighlighter { } final IndexReader reader = searcher.getIndexReader(); Query rewritten = rewrite(query); - SortedSet queryTerms = new TreeSet(); + SortedSet queryTerms = new TreeSet<>(); rewritten.extractTerms(queryTerms); IndexReaderContext readerContext = reader.getContext(); @@ -384,7 +384,7 @@ public class PostingsHighlighter { // pull stored data: String[][] contents = loadFieldValues(searcher, fields, docids, maxLength); - Map highlights = new HashMap(); + Map highlights = new HashMap<>(); for (int i = 0; i < fields.length; i++) { String field = fields[i]; int numPassages = maxPassages[i]; @@ -454,7 +454,7 @@ public class PostingsHighlighter { } private Map highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List leaves, int maxPassages, Query query) throws IOException { - Map highlights = new HashMap(); + Map highlights = new HashMap<>(); PassageFormatter fieldFormatter = getFormatter(field); if (fieldFormatter == null) { @@ -539,7 +539,7 @@ public class PostingsHighlighter { if (scorer == null) { throw new NullPointerException("PassageScorer cannot be null"); } - PriorityQueue pq = new PriorityQueue(); + PriorityQueue pq = new PriorityQueue<>(); float weights[] = new float[terms.length]; // initialize postings for (int i = 0; i < terms.length; i++) { @@ -574,7 +574,7 @@ public class PostingsHighlighter { pq.add(new OffsetsEnum(EMPTY, Integer.MAX_VALUE)); // a sentinel for termination - PriorityQueue passageQueue = new PriorityQueue(n, new Comparator() { + PriorityQueue passageQueue = new PriorityQueue<>(n, new Comparator() { @Override public int compare(Passage left, Passage right) { if (left.score < right.score) { @@ -678,7 +678,7 @@ public class PostingsHighlighter { * to customize. 
*/ protected Passage[] getEmptyHighlight(String fieldName, BreakIterator bi, int maxPassages) { // BreakIterator should be un-next'd: - List passages = new ArrayList(); + List passages = new ArrayList<>(); int pos = bi.current(); assert pos == 0; while (passages.size() < maxPassages) { diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragListBuilder.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragListBuilder.java index 6699f6e9713..058b5789855 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragListBuilder.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragListBuilder.java @@ -50,8 +50,8 @@ public abstract class BaseFragListBuilder implements FragListBuilder { if( fragCharSize < minFragCharSize ) throw new IllegalArgumentException( "fragCharSize(" + fragCharSize + ") is too small. It must be " + minFragCharSize + " or higher." ); - List wpil = new ArrayList(); - IteratorQueue queue = new IteratorQueue(fieldPhraseList.getPhraseList().iterator()); + List wpil = new ArrayList<>(); + IteratorQueue queue = new IteratorQueue<>(fieldPhraseList.getPhraseList().iterator()); WeightedPhraseInfo phraseInfo = null; int startOffset = 0; while((phraseInfo = queue.top()) != null){ diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java index 8ee06dc9471..f0ec6fdd1bc 100644 --- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java @@ -135,7 +135,7 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder { fragInfos = getWeightedFragInfoList(fragInfos); int limitFragments = maxNumFragments < fragInfos.size() ? maxNumFragments : fragInfos.size(); - List fragments = new ArrayList( limitFragments ); + List fragments = new ArrayList<>( limitFragments ); StringBuilder buffer = new StringBuilder(); int[] nextValueIndex = { 0 }; @@ -148,7 +148,7 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder { protected Field[] getFields( IndexReader reader, int docId, final String fieldName) throws IOException { // according to javadoc, doc.getFields(fieldName) cannot be used with lazy loaded field??? 
- final List fields = new ArrayList();
+ final List fields = new ArrayList<>();
reader.document(docId, new StoredFieldVisitor() {
@Override
@@ -215,7 +215,7 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder {
}
protected List discreteMultiValueHighlighting(List fragInfos, Field[] fields) {
- Map> fieldNameToFragInfos = new HashMap>();
+ Map> fieldNameToFragInfos = new HashMap<>();
for (Field field : fields) {
fieldNameToFragInfos.put(field.name(), new ArrayList());
}
@@ -257,12 +257,12 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder {
}
- List subInfos = new ArrayList();
+ List subInfos = new ArrayList<>();
Iterator subInfoIterator = fragInfo.getSubInfos().iterator();
float boost = 0.0f; // The boost of the new info will be the sum of the boosts of its SubInfos
while (subInfoIterator.hasNext()) {
SubInfo subInfo = subInfoIterator.next();
- List toffsList = new ArrayList();
+ List toffsList = new ArrayList<>();
Iterator toffsIterator = subInfo.getTermsOffsets().iterator();
while (toffsIterator.hasNext()) {
Toffs toffs = toffsIterator.next();
@@ -286,7 +286,7 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder {
}
}
- List result = new ArrayList();
+ List result = new ArrayList<>();
for (List weightedFragInfos : fieldNameToFragInfos.values()) {
result.addAll(weightedFragInfos);
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldFragList.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldFragList.java
index 81afd4e3023..119ff43048f 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldFragList.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldFragList.java
@@ -29,7 +29,7 @@ import java.util.List;
*/
public abstract class FieldFragList {
- private List fragInfos = new ArrayList();
+ private List fragInfos = new ArrayList<>();
/**
* a constructor.
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldPhraseList.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldPhraseList.java
index d46b4d2ffcb..1696702ba1d 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldPhraseList.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldPhraseList.java
@@ -34,7 +34,7 @@ public class FieldPhraseList {
/**
* List of non-overlapping WeightedPhraseInfo objects.
*/
- LinkedList phraseList = new LinkedList();
+ LinkedList phraseList = new LinkedList<>();
/**
* create a FieldPhraseList that has no limit on the number of phrases to analyze
@@ -65,7 +65,7 @@ public class FieldPhraseList {
public FieldPhraseList( FieldTermStack fieldTermStack, FieldQuery fieldQuery, int phraseLimit ){
final String field = fieldTermStack.getFieldName();
- LinkedList phraseCandidate = new LinkedList();
+ LinkedList phraseCandidate = new LinkedList<>();
QueryPhraseMap currMap = null;
QueryPhraseMap nextMap = null;
while( !fieldTermStack.isEmpty() && (phraseList.size() < phraseLimit) )
@@ -125,13 +125,13 @@ public class FieldPhraseList {
for ( FieldPhraseList fplToMerge : toMerge ) {
allInfos[ index++ ] = fplToMerge.phraseList.iterator();
}
- MergedIterator< WeightedPhraseInfo > itr = new MergedIterator< WeightedPhraseInfo >( false, allInfos );
+ MergedIterator< WeightedPhraseInfo > itr = new MergedIterator<>( false, allInfos );
// Step 2. Walk the sorted list merging infos that overlap
- phraseList = new LinkedList< WeightedPhraseInfo >();
+ phraseList = new LinkedList<>();
if ( !itr.hasNext() ) {
return;
}
- List< WeightedPhraseInfo > work = new ArrayList< WeightedPhraseInfo >();
+ List< WeightedPhraseInfo > work = new ArrayList<>();
WeightedPhraseInfo first = itr.next();
work.add( first );
int workEndOffset = first.getEndOffset();
@@ -225,9 +225,9 @@
this.seqnum = seqnum;
// We keep TermInfos for further operations
- termsInfos = new ArrayList( terms );
+ termsInfos = new ArrayList<>( terms );
- termsOffsets = new ArrayList( terms.size() );
+ termsOffsets = new ArrayList<>( terms.size() );
TermInfo ti = terms.get( 0 );
termsOffsets.add( new Toffs( ti.getStartOffset(), ti.getEndOffset() ) );
if( terms.size() == 1 ){
@@ -261,7 +261,7 @@
WeightedPhraseInfo first = toMergeItr.next();
@SuppressWarnings( { "rawtypes", "unchecked" } )
Iterator< Toffs >[] allToffs = new Iterator[ toMerge.size() ];
- termsInfos = new ArrayList< TermInfo >();
+ termsInfos = new ArrayList<>();
seqnum = first.seqnum;
boost = first.boost;
allToffs[ 0 ] = first.termsOffsets.iterator();
@@ -273,8 +273,8 @@
allToffs[ index++ ] = info.termsOffsets.iterator();
}
// Step 2. Walk the sorted list merging overlaps
- MergedIterator< Toffs > itr = new MergedIterator< Toffs >( false, allToffs );
+ MergedIterator< Toffs > itr = new MergedIterator<>( false, allToffs );
- termsOffsets = new ArrayList< Toffs >();
+ termsOffsets = new ArrayList<>();
if ( !itr.hasNext() ) {
return;
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java
index 9d125c6c7c0..dbf72a13456 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldQuery.java
@@ -50,11 +50,11 @@ public class FieldQuery {
// fieldMatch==true, Map
// fieldMatch==false, Map
- Map rootMaps = new HashMap();
+ Map rootMaps = new HashMap<>();
// fieldMatch==true, Map
// fieldMatch==false, Map
- Map> termSetMap = new HashMap>();
+ Map> termSetMap = new HashMap<>();
int termOrPhraseNumber; // used for colored tag support
@@ -63,7 +63,7 @@ public class FieldQuery {
FieldQuery( Query query, IndexReader reader, boolean phraseHighlight, boolean fieldMatch ) throws IOException {
this.fieldMatch = fieldMatch;
- Set flatQueries = new LinkedHashSet();
+ Set flatQueries = new LinkedHashSet<>();
flatten( query, reader, flatQueries );
saveTerms( flatQueries, reader );
Collection expandQueries = expand( flatQueries );
@@ -169,7 +169,7 @@
* => expandQueries={a,"b c","c d","b c d"}
*/
Collection expand( Collection flatQueries ){
- Set expandQueries = new LinkedHashSet();
+ Set expandQueries = new LinkedHashSet<>();
for( Iterator i = flatQueries.iterator(); i.hasNext(); ){
Query query = i.next();
i.remove();
@@ -316,7 +316,7 @@
String key = getKey( query );
Set set = termSetMap.get( key );
if( set == null ){
- set = new HashSet();
+ set = new HashSet<>();
termSetMap.put( key, set );
}
return set;
@@ -364,7 +364,7 @@
float boost; // valid if terminal == true
int termOrPhraseNumber; // valid if terminal == true
FieldQuery fieldQuery;
- Map subMap = new HashMap();
+ Map subMap = new HashMap<>();
public QueryPhraseMap( FieldQuery fieldQuery ){
this.fieldQuery = fieldQuery;
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index 7c4534e0363..db5ecc6eb75 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -38,7 +38,7 @@ import org.apache.lucene.util.UnicodeUtil;
public class FieldTermStack {
private final String fieldName;
- LinkedList termList = new LinkedList();
+ LinkedList termList = new LinkedList<>();
//public static void main( String[] args ) throws Exception {
// Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java
index bca20c42d40..2d6b468e0ef 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleBoundaryScanner.java
@@ -47,7 +47,7 @@ public class SimpleBoundaryScanner implements BoundaryScanner {
public SimpleBoundaryScanner( int maxScan, Character[] boundaryChars ){
this.maxScan = maxScan;
- this.boundaryChars = new HashSet();
+ this.boundaryChars = new HashSet<>();
this.boundaryChars.addAll(Arrays.asList(boundaryChars));
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java
index 93d1140cd60..09d29e6a9b6 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java
@@ -43,7 +43,7 @@ public class SimpleFieldFragList extends FieldFragList {
@Override
public void add( int startOffset, int endOffset, List phraseInfoList ) {
float totalBoost = 0;
- List subInfos = new ArrayList();
+ List subInfos = new ArrayList<>();
for( WeightedPhraseInfo phraseInfo : phraseInfoList ){
subInfos.add( new SubInfo( phraseInfo.getText(), phraseInfo.getTermsOffsets(), phraseInfo.getSeqnum(), phraseInfo.getBoost() ) );
totalBoost += phraseInfo.getBoost();
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java
index 2d2d10cfe8a..205186026cb 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/SingleFragListBuilder.java
@@ -41,7 +41,7 @@ public class SingleFragListBuilder implements FragListBuilder {
FieldFragList ffl = new SimpleFieldFragList( fragCharSize );
- List wpil = new ArrayList();
+ List wpil = new ArrayList<>();
Iterator ite = fieldPhraseList.phraseList.iterator();
WeightedPhraseInfo phraseInfo = null;
while( true ){
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java
index e542f6d2b3b..9af3ca668b0 100644
---
a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java +++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java @@ -44,9 +44,9 @@ public class WeightedFieldFragList extends FieldFragList { */ @Override public void add( int startOffset, int endOffset, List phraseInfoList ) { - List tempSubInfos = new ArrayList(); - List realSubInfos = new ArrayList(); - HashSet distinctTerms = new HashSet(); + List tempSubInfos = new ArrayList<>(); + List realSubInfos = new ArrayList<>(); + HashSet distinctTerms = new HashSet<>(); int length = 0; for( WeightedPhraseInfo phraseInfo : phraseInfoList ){ diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java index 25692549f16..706fcc63161 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java @@ -419,7 +419,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte } public void testSpanRegexQuery() throws Exception { - query = new SpanOrQuery(new SpanMultiTermQueryWrapper(new RegexpQuery(new Term(FIELD_NAME, "ken.*")))); + query = new SpanOrQuery(new SpanMultiTermQueryWrapper<>(new RegexpQuery(new Term(FIELD_NAME, "ken.*")))); searcher = newSearcher(reader); hits = searcher.search(query, 100); int maxNumFragmentsRequired = 2; @@ -1173,12 +1173,12 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte WeightedSpanTerm[] wTerms = new WeightedSpanTerm[2]; wTerms[0] = new WeightedSpanTerm(10f, "hello"); - List positionSpans = new ArrayList(); + List positionSpans = new ArrayList<>(); positionSpans.add(new PositionSpan(0, 0)); wTerms[0].addPositionSpans(positionSpans); wTerms[1] = new WeightedSpanTerm(1f, "kennedy"); - positionSpans = new ArrayList(); + positionSpans = new ArrayList<>(); positionSpans.add(new PositionSpan(14, 14)); wTerms[1].addPositionSpans(positionSpans); @@ -1216,7 +1216,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte @Override public void run() throws Exception { - HashMap synonyms = new HashMap(); + HashMap synonyms = new HashMap<>(); synonyms.put("football", "soccer,footie"); Analyzer analyzer = new SynonymAnalyzer(synonyms); @@ -1578,7 +1578,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); { - lst = new ArrayList(); + lst = new ArrayList<>(); Token t; t = createToken("hi", 0, 2); t.setPositionIncrement(1); @@ -1629,7 +1629,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class); private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); { - lst = new ArrayList(); + lst = new ArrayList<>(); Token t; t = createToken("hispeed", 0, 8); t.setPositionIncrement(1); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java index 4b1ce8768ea..29210a0223c 100644 --- 
a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestMultiTermHighlighting.java @@ -511,7 +511,7 @@ public class TestMultiTermHighlighting extends LuceneTestCase { return analyzer; } }; - Query query = new SpanMultiTermQueryWrapper(new WildcardQuery(new Term("body", "te*"))); + Query query = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))); TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits); String snippets[] = highlighter.highlight("body", query, searcher, topDocs); @@ -552,7 +552,7 @@ public class TestMultiTermHighlighting extends LuceneTestCase { return analyzer; } }; - SpanQuery childQuery = new SpanMultiTermQueryWrapper(new WildcardQuery(new Term("body", "te*"))); + SpanQuery childQuery = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))); Query query = new SpanOrQuery(new SpanQuery[] { childQuery }); TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits); @@ -594,7 +594,7 @@ public class TestMultiTermHighlighting extends LuceneTestCase { return analyzer; } }; - SpanQuery childQuery = new SpanMultiTermQueryWrapper(new WildcardQuery(new Term("body", "te*"))); + SpanQuery childQuery = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))); Query query = new SpanNearQuery(new SpanQuery[] { childQuery }, 0, true); TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits); @@ -636,7 +636,7 @@ public class TestMultiTermHighlighting extends LuceneTestCase { return analyzer; } }; - SpanQuery include = new SpanMultiTermQueryWrapper(new WildcardQuery(new Term("body", "te*"))); + SpanQuery include = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))); SpanQuery exclude = new SpanTermQuery(new Term("body", "bogus")); Query query = new SpanNotQuery(include, exclude); TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); @@ -679,7 +679,7 @@ public class TestMultiTermHighlighting extends LuceneTestCase { return analyzer; } }; - SpanQuery childQuery = new SpanMultiTermQueryWrapper(new WildcardQuery(new Term("body", "te*"))); + SpanQuery childQuery = new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term("body", "te*"))); Query query = new SpanFirstQuery(childQuery, 1000000); TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); assertEquals(2, topDocs.totalHits); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java index 284c2ce4c6f..3af9eb1cc38 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java @@ -163,7 +163,7 @@ public class TestPostingsHighlighterRanking extends LuceneTestCase { * instead it just collects them for asserts! 
*/ static class FakePassageFormatter extends PassageFormatter { - HashSet seen = new HashSet(); + HashSet seen = new HashSet<>(); @Override public String format(Passage passages[], String content) { diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java index fcd1a38bbf7..b5f33e53741 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java @@ -168,7 +168,7 @@ public abstract class AbstractTestCase extends LuceneTestCase { } protected List analyze(String text, String field, Analyzer analyzer) throws IOException { - List bytesRefs = new ArrayList(); + List bytesRefs = new ArrayList<>(); try (TokenStream tokenStream = analyzer.tokenStream(field, text)) { TermToBytesRefAttribute termAttribute = tokenStream.getAttribute(TermToBytesRefAttribute.class); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java index de236344570..cbff7512f63 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java @@ -533,7 +533,7 @@ public class FastVectorHighlighterTest extends LuceneTestCase { token( "red", 0, 0, 3 ) ), matched ) ); - final Map fieldAnalyzers = new TreeMap(); + final Map fieldAnalyzers = new TreeMap<>(); fieldAnalyzers.put( "field", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET ) ); fieldAnalyzers.put( "field_exact", new MockAnalyzer( random() ) ); fieldAnalyzers.put( "field_super_exact", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, false ) ); @@ -566,7 +566,7 @@ public class FastVectorHighlighterTest extends LuceneTestCase { FieldQuery fieldQuery = new FieldQuery( query, reader, true, fieldMatch ); String[] bestFragments; if ( useMatchedFields ) { - Set< String > matchedFields = new HashSet< String >(); + Set< String > matchedFields = new HashSet<>(); matchedFields.add( "field" ); matchedFields.add( "field_exact" ); matchedFields.add( "field_super_exact" ); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java index e0f5ad10598..a0bfc392329 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldPhraseListTest.java @@ -269,7 +269,7 @@ public class FieldPhraseListTest extends AbstractTestCase { } private WeightedPhraseInfo newInfo( int startOffset, int endOffset, float boost ) { - LinkedList< TermInfo > infos = new LinkedList< TermInfo >(); + LinkedList< TermInfo > infos = new LinkedList<>(); infos.add( new TermInfo( TestUtil.randomUnicodeString(random()), startOffset, endOffset, 0, 0 ) ); return new WeightedPhraseInfo( infos, boost ); } diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java index ac67674f790..a0a92336316 100644 --- 
a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/FieldQueryTest.java
@@ -67,7 +67,7 @@ public class FieldQueryTest extends AbstractTestCase {
booleanQuery.add(innerQuery, Occur.MUST_NOT);
FieldQuery fq = new FieldQuery(booleanQuery, true, true );
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
fq.flatten(booleanQuery, reader, flatQueries);
assertCollectionQueries( flatQueries, tq( boost, "A" ), tq( boost, "B" ), tq( boost, "C" ) );
}
@@ -77,7 +77,7 @@ public class FieldQueryTest extends AbstractTestCase {
Query query = dmq( tq( "A" ), tq( "B" ), pqF( "C", "D" ) );
query.setBoost( boost );
FieldQuery fq = new FieldQuery( query, true, true );
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
fq.flatten( query, reader, flatQueries );
assertCollectionQueries( flatQueries, tq( boost, "A" ), tq( boost, "B" ), pqF( boost, "C", "D" ) );
}
@@ -90,7 +90,7 @@
booleanQuery.add(pqF("B", "C"), Occur.MUST);
FieldQuery fq = new FieldQuery(booleanQuery, true, true );
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
fq.flatten(booleanQuery, reader, flatQueries);
assertCollectionQueries( flatQueries, tq( boost, "A" ), pqF( boost, "B", "C" ) );
}
@@ -102,7 +102,7 @@
query.add(toPhraseQuery(analyze("EFGH", F, analyzerB), F), Occur.SHOULD);
FieldQuery fq = new FieldQuery( query, true, true );
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
fq.flatten( query, reader, flatQueries );
assertCollectionQueries( flatQueries, tq( "AA" ), pqF( "BC", "CD" ), pqF( "EF", "FG", "GH" ) );
}
@@ -110,7 +110,7 @@
public void testFlatten1TermPhrase() throws Exception {
Query query = pqF( "A" );
FieldQuery fq = new FieldQuery( query, true, true );
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
fq.flatten( query, reader, flatQueries );
assertCollectionQueries( flatQueries, tq( "A" ) );
}
@@ -120,56 +120,56 @@
FieldQuery fq = new FieldQuery( dummy, true, true );
// "a b","b c" => "a b","b c","a b c"
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b" ) );
flatQueries.add( pqF( "b", "c" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b" ), pqF( "b", "c" ), pqF( "a", "b", "c" ) );
// "a b","b c d" => "a b","b c d","a b c d"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b" ) );
flatQueries.add( pqF( "b", "c", "d" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b" ), pqF( "b", "c", "d" ), pqF( "a", "b", "c", "d" ) );
// "a b c","b c d" => "a b c","b c d","a b c d"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b", "c" ) );
flatQueries.add( pqF( "b", "c", "d" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b", "c" ), pqF( "b", "c", "d" ), pqF( "a", "b", "c", "d" ) );
// "a b c","c d e" => "a b c","c d e","a b c d e"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b", "c" ) );
flatQueries.add( pqF( "c", "d", "e" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b", "c" ), pqF( "c", "d", "e" ), pqF( "a", "b", "c", "d", "e" ) );
// "a b c d","b c" => "a b c d","b c"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b", "c", "d" ) );
flatQueries.add( pqF( "b", "c" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b", "c", "d" ), pqF( "b", "c" ) );
// "a b b","b c" => "a b b","b c","a b b c"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b", "b" ) );
flatQueries.add( pqF( "b", "c" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b", "b" ), pqF( "b", "c" ), pqF( "a", "b", "b", "c" ) );
// "a b","b a" => "a b","b a","a b a", "b a b"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b" ) );
flatQueries.add( pqF( "b", "a" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b" ), pqF( "b", "a" ), pqF( "a", "b", "a" ), pqF( "b", "a", "b" ) );
// "a b","a b c" => "a b","a b c"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b" ) );
flatQueries.add( pqF( "a", "b", "c" ) );
assertCollectionQueries( fq.expand( flatQueries ),
@@ -181,42 +181,42 @@ public class FieldQueryTest extends AbstractTestCase {
FieldQuery fq = new FieldQuery( dummy, true, true );
// "a b","c d" => "a b","c d"
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b" ) );
flatQueries.add( pqF( "c", "d" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b" ), pqF( "c", "d" ) );
// "a","a b" => "a", "a b"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( tq( "a" ) );
flatQueries.add( pqF( "a", "b" ) );
assertCollectionQueries( fq.expand( flatQueries ), tq( "a" ), pqF( "a", "b" ) );
// "a b","b" => "a b", "b"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b" ) );
flatQueries.add( tq( "b" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b" ), tq( "b" ) );
// "a b c","b c" => "a b c","b c"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b", "c" ) );
flatQueries.add( pqF( "b", "c" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b", "c" ), pqF( "b", "c" ) );
// "a b","a b c" => "a b","a b c"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b" ) );
flatQueries.add( pqF( "a", "b", "c" ) );
assertCollectionQueries( fq.expand( flatQueries ), pqF( "a", "b" ), pqF( "a", "b", "c" ) );
// "a b c","b d e" => "a b c","b d e"
- flatQueries = new HashSet();
+ flatQueries = new HashSet<>();
flatQueries.add( pqF( "a", "b", "c" ) );
flatQueries.add( pqF( "b", "d", "e" ) );
assertCollectionQueries( fq.expand( flatQueries ),
@@ -228,7 +228,7 @@ public class FieldQueryTest extends AbstractTestCase {
FieldQuery fq = new FieldQuery( dummy, true, false );
// f1:"a b",f2:"b c" => f1:"a b",f2:"b c",f1:"a b c"
- Set flatQueries = new HashSet();
+ Set flatQueries = new HashSet<>();
flatQueries.add( pq( F1, "a", "b" ) );
flatQueries.add( pq( F2, "b", "c" ) );
assertCollectionQueries( fq.expand( flatQueries ),
@@ -826,7 +826,7 @@ public class FieldQueryTest extends AbstractTestCase {
FieldQuery fq = new FieldQuery( query, true, true );
// "a"
- List phraseCandidate = new ArrayList();
+ List phraseCandidate = new ArrayList<>();
phraseCandidate.add( new TermInfo( "a", 0, 1, 0, 1 ) );
assertNull( fq.searchPhrase( F, phraseCandidate ) );
// "a b"
@@ -868,7 +868,7 @@ public class FieldQueryTest extends AbstractTestCase {
FieldQuery fq = new FieldQuery( query, true, true ); // "a b c" w/ position-gap = 2 - List phraseCandidate = new ArrayList(); + List phraseCandidate = new ArrayList<>(); phraseCandidate.add( new TermInfo( "a", 0, 1, 0, 1 ) ); phraseCandidate.add( new TermInfo( "b", 2, 3, 2, 1 ) ); phraseCandidate.add( new TermInfo( "c", 4, 5, 4, 1 ) ); @@ -917,7 +917,7 @@ public class FieldQueryTest extends AbstractTestCase { QueryPhraseMap qpm = fq.getFieldTermMap(F, "defg"); assertNotNull (qpm); assertNull (fq.getFieldTermMap(F, "dog")); - List phraseCandidate = new ArrayList(); + List phraseCandidate = new ArrayList<>(); phraseCandidate.add( new TermInfo( "defg", 0, 12, 0, 1 ) ); assertNotNull (fq.searchPhrase(F, phraseCandidate)); } @@ -947,7 +947,7 @@ public class FieldQueryTest extends AbstractTestCase { }); query.setBoost(boost); FieldQuery fq = new FieldQuery( query, true, true ); - Set flatQueries = new HashSet(); + Set flatQueries = new HashSet<>(); fq.flatten( query, reader, flatQueries ); assertCollectionQueries( flatQueries, tq( boost, "A" ) ); } @@ -957,7 +957,7 @@ public class FieldQueryTest extends AbstractTestCase { Query query = new ConstantScoreQuery(pqF( "A" )); query.setBoost(boost); FieldQuery fq = new FieldQuery( query, true, true ); - Set flatQueries = new HashSet(); + Set flatQueries = new HashSet<>(); fq.flatten( query, reader, flatQueries ); assertCollectionQueries( flatQueries, tq( boost, "A" ) ); } diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java index b09b0dc11ec..1c495c6d43d 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java @@ -48,7 +48,7 @@ public class IndexTimeSynonymTest extends AbstractTestCase { FieldQuery fq = new FieldQuery( bq, true, true ); FieldTermStack stack = new FieldTermStack( reader, 0, F, fq ); assertEquals( 2, stack.termList.size() ); - Set expectedSet = new HashSet(); + Set expectedSet = new HashSet<>(); expectedSet.add( "Mac(11,20,3)" ); expectedSet.add( "MacBook(11,20,3)" ); assertTrue( expectedSet.contains( stack.pop().toString() ) ); @@ -92,7 +92,7 @@ public class IndexTimeSynonymTest extends AbstractTestCase { FieldQuery fq = new FieldQuery( bq, true, true ); FieldTermStack stack = new FieldTermStack( reader, 0, F, fq ); assertEquals( 3, stack.termList.size() ); - Set expectedSet = new HashSet(); + Set expectedSet = new HashSet<>(); expectedSet.add( "pc(3,5,1)" ); expectedSet.add( "personal(3,5,1)" ); assertTrue( expectedSet.contains( stack.pop().toString() ) ); @@ -137,7 +137,7 @@ public class IndexTimeSynonymTest extends AbstractTestCase { FieldQuery fq = new FieldQuery( bq, true, true ); FieldTermStack stack = new FieldTermStack( reader, 0, F, fq ); assertEquals( 3, stack.termList.size() ); - Set expectedSet = new HashSet(); + Set expectedSet = new HashSet<>(); expectedSet.add( "pc(3,20,1)" ); expectedSet.add( "personal(3,20,1)" ); assertTrue( expectedSet.contains( stack.pop().toString() ) ); diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java index e15b32a377e..ee4cf6557b7 100644 --- 
a/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
@@ -248,9 +248,9 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase {
int numDocs = randomValues.length * 5;
int numFields = 2 + random().nextInt(5);
int numTerms = 2 + random().nextInt(3);
- List docs = new ArrayList(numDocs);
- List documents = new ArrayList(numDocs);
- Map> valueToDocId = new HashMap>();
+ List docs = new ArrayList<>(numDocs);
+ List documents = new ArrayList<>(numDocs);
+ Map> valueToDocId = new HashMap<>();
for (int i = 0; i < numDocs; i++) {
Document document = new Document();
String[][] fields = new String[numFields][numTerms];
@@ -277,7 +277,7 @@ public class SimpleFragmentsBuilderTest extends AbstractTestCase {
for (int highlightIter = 0; highlightIter < highlightIters; highlightIter++) {
String queryTerm = randomValues[random().nextInt(randomValues.length)];
int randomHit = valueToDocId.get(queryTerm).iterator().next();
- List builders = new ArrayList();
+ List builders = new ArrayList<>();
for (String[] fieldValues : docs.get(randomHit).fieldValues) {
StringBuilder builder = new StringBuilder();
boolean hit = false;
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
index 9dbb2a9e83c..2e3785d310d 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
@@ -309,7 +309,7 @@ public class ToParentBlockJoinCollector extends Collector {
}
Arrays.fill(joinScorers, null);
- Queue queue = new LinkedList();
+ Queue queue = new LinkedList<>();
//System.out.println("\nqueue: add top scorer=" + scorer);
queue.add(scorer);
while ((scorer = queue.poll()) != null) {
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 573e3ef6855..147dcb773e0 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -69,7 +69,7 @@ public class TestBlockJoin extends LuceneTestCase {
// we don't want to merge - since we rely on certain segment setup
final IndexWriter w = new IndexWriter(dir, config);
- final List docs = new ArrayList();
+ final List docs = new ArrayList<>();
docs.add(makeJob("java", 2007));
docs.add(makeJob("python", 2010));
@@ -127,7 +127,7 @@ public class TestBlockJoin extends LuceneTestCase {
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
- final List docs = new ArrayList();
+ final List docs = new ArrayList<>();
docs.add(makeJob("java", 2007));
docs.add(makeJob("python", 2010));
@@ -217,7 +217,7 @@ public class TestBlockJoin extends LuceneTestCase {
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
- final List docs = new ArrayList();
+ final List docs = new ArrayList<>();
for (int i=0;i<10;i++) {
docs.clear();
@@ -1264,7 +1264,7 @@ public class TestBlockJoin extends LuceneTestCase {
parent.add(newTextField("parentText", "text", Field.Store.NO));
parent.add(newStringField("isParent", "yes", Field.Store.NO));
- List docs = new ArrayList();
+ List docs = new ArrayList<>();
Document child = new Document();
docs.add(child);
@@ -1328,7 +1328,7 @@ public class TestBlockJoin extends LuceneTestCase {
parent.add(newTextField("parentText", "text", Field.Store.NO));
parent.add(newStringField("isParent", "yes", Field.Store.NO));
- List docs = new ArrayList();
+ List docs = new ArrayList<>();
Document child = new Document();
docs.add(child);
@@ -1393,7 +1393,7 @@ public class TestBlockJoin extends LuceneTestCase {
parent.add(newTextField("parentText", "text", Field.Store.NO));
parent.add(newStringField("isParent", "yes", Field.Store.NO));
- List docs = new ArrayList();
+ List docs = new ArrayList<>();
Document child = new Document();
docs.add(child);
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index f6b30bb15c7..87f33dc809f 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -195,7 +195,7 @@ import org.apache.lucene.util.RecyclingIntBlockAllocator;
public class MemoryIndex {
/** info for each field: Map */
- private final HashMap fields = new HashMap();
+ private final HashMap fields = new HashMap<>();
/** fields sorted ascending by fieldName; lazily computed on demand */
private transient Map.Entry[] sortedFields;
@@ -209,7 +209,7 @@ public class MemoryIndex {
// private final IntBlockPool.SliceReader postingsReader;
private final IntBlockPool.SliceWriter postingsWriter;
- private HashMap fieldInfos = new HashMap();
+ private HashMap fieldInfos = new HashMap<>();
private Counter bytesUsed;
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java b/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
index 2f093478823..093c0c36dc6 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
@@ -83,7 +83,7 @@ import static org.hamcrest.CoreMatchers.equalTo;
* returning the same results for queries on some randomish indexes.
*/ public class MemoryIndexTest extends BaseTokenStreamTestCase { - private Set queries = new HashSet(); + private Set queries = new HashSet<>(); public static final int ITERATIONS = 100 * RANDOM_MULTIPLIER; @@ -98,7 +98,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase { * read a set of queries from a resource file */ private Set readQueries(String resource) throws IOException { - Set queries = new HashSet(); + Set queries = new HashSet<>(); InputStream stream = getClass().getResourceAsStream(resource); BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8")); String line = null; @@ -376,7 +376,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase { // LUCENE-3831 public void testNullPointerException() throws IOException { RegexpQuery regex = new RegexpQuery(new Term("field", "worl.")); - SpanQuery wrappedquery = new SpanMultiTermQueryWrapper(regex); + SpanQuery wrappedquery = new SpanMultiTermQueryWrapper<>(regex); MemoryIndex mindex = new MemoryIndex(random().nextBoolean(), random().nextInt(50) * 1024 * 1024); mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", "hello there")); @@ -388,7 +388,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase { // LUCENE-3831 public void testPassesIfWrapped() throws IOException { RegexpQuery regex = new RegexpQuery(new Term("field", "worl.")); - SpanQuery wrappedquery = new SpanOrQuery(new SpanMultiTermQueryWrapper(regex)); + SpanQuery wrappedquery = new SpanOrQuery(new SpanMultiTermQueryWrapper<>(regex)); MemoryIndex mindex = new MemoryIndex(random().nextBoolean(), random().nextInt(50) * 1024 * 1024); mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", "hello there")); diff --git a/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java b/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java index fc34460ce28..784db2cfcdf 100644 --- a/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java +++ b/lucene/misc/src/java/org/apache/lucene/document/LazyDocument.java @@ -45,8 +45,8 @@ public class LazyDocument { // null until first field is loaded private StoredDocument doc; - private Map> fields = new HashMap>(); - private Set fieldNames = new HashSet(); + private Map> fields = new HashMap<>(); + private Set fieldNames = new HashSet<>(); public LazyDocument(IndexReader reader, int docID) { this.reader = reader; @@ -73,7 +73,7 @@ public class LazyDocument { fieldNames.add(fieldInfo.name); List values = fields.get(fieldInfo.number); if (null == values) { - values = new ArrayList(); + values = new ArrayList<>(); fields.put(fieldInfo.number, values); } diff --git a/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java b/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java index 4d8eec4c734..5a3508dd1f2 100644 --- a/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java +++ b/lucene/misc/src/java/org/apache/lucene/index/IndexSplitter.java @@ -73,14 +73,14 @@ public class IndexSplitter { if (args[1].equals("-l")) { is.listSegments(); } else if (args[1].equals("-d")) { - List segs = new ArrayList(); + List segs = new ArrayList<>(); for (int x = 2; x < args.length; x++) { segs.add(args[x]); } is.remove(segs.toArray(new String[0])); } else { File targetDir = new File(args[1]); - List segs = new ArrayList(); + List segs = new ArrayList<>(); for (int x = 2; x < args.length; x++) { segs.add(args[x]); } diff --git a/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java 
b/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java index 3b9ee549a87..65a57442cd3 100644 --- a/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java +++ b/lucene/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java @@ -118,7 +118,7 @@ public class MultiPassIndexSplitter { System.err.println("\t-seq\tsequential docid-range split (default is round-robin)"); System.exit(-1); } - ArrayList indexes = new ArrayList(); + ArrayList indexes = new ArrayList<>(); String outDir = null; int numParts = -1; boolean seq = false; diff --git a/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java b/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java index 99b2f3f2a98..b4f41c30912 100644 --- a/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java +++ b/lucene/misc/src/java/org/apache/lucene/util/fst/ListOfOutputs.java @@ -90,7 +90,7 @@ public final class ListOfOutputs extends Outputs { return outputs.add((T) prefix, (T) output); } else { List outputList = (List) output; - List addedList = new ArrayList(outputList.size()); + List addedList = new ArrayList<>(outputList.size()); for(T _output : outputList) { addedList.add(outputs.add((T) prefix, _output)); } @@ -129,7 +129,7 @@ public final class ListOfOutputs extends Outputs { if (count == 1) { return outputs.read(in); } else { - List outputList = new ArrayList(count); + List outputList = new ArrayList<>(count); for(int i=0;i extends Outputs { @Override public Object merge(Object first, Object second) { - List outputList = new ArrayList(); + List outputList = new ArrayList<>(); if (!(first instanceof List)) { outputList.add((T) first); } else { @@ -188,7 +188,7 @@ public final class ListOfOutputs extends Outputs { public List asList(Object output) { if (!(output instanceof List)) { - List result = new ArrayList(1); + List result = new ArrayList<>(1); result.add((T) output); return result; } else { diff --git a/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java b/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java index e25326f64dc..0427b8de0ec 100644 --- a/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java +++ b/lucene/misc/src/test/org/apache/lucene/document/TestLazyDocument.java @@ -94,7 +94,7 @@ public class TestLazyDocument extends LuceneTestCase { StoredDocument d = visitor.doc; int numFieldValues = 0; - Map fieldValueCounts = new HashMap(); + Map fieldValueCounts = new HashMap<>(); // at this point, all FIELDS should be Lazy and unrealized for (StorableField f : d) { @@ -195,7 +195,7 @@ public class TestLazyDocument extends LuceneTestCase { LazyTestingStoredFieldVisitor(LazyDocument l, String... fields) { lazyDoc = l; - lazyFieldNames = new HashSet(Arrays.asList(fields)); + lazyFieldNames = new HashSet<>(Arrays.asList(fields)); } @Override diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/IndexSortingTest.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/IndexSortingTest.java index 16d4adf617b..3838b589a5f 100644 --- a/lucene/misc/src/test/org/apache/lucene/index/sorter/IndexSortingTest.java +++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/IndexSortingTest.java @@ -43,7 +43,7 @@ public class IndexSortingTest extends SorterTestBase { // only read the values of the undeleted documents, since after addIndexes, // the deleted ones will be dropped from the index. 
Bits liveDocs = reader.getLiveDocs();
- List values = new ArrayList();
+ List values = new ArrayList<>();
for (int i = 0; i < reader.maxDoc(); i++) {
if (liveDocs == null || liveDocs.get(i)) {
values.add(Integer.valueOf(reader.document(i).get(ID_FIELD)));
diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
index 372a1199b52..49c036d5832 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/SorterTestBase.java
@@ -194,7 +194,7 @@ public abstract class SorterTestBase extends LuceneTestCase {
/** Creates an index for sorting. */
public static void createIndex(Directory dir, int numDocs, Random random) throws IOException {
- List ids = new ArrayList();
+ List ids = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
ids.add(Integer.valueOf(i * 10));
}
diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestBlockJoinSorter.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestBlockJoinSorter.java
index aa8e77e26db..ea732b20e89 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestBlockJoinSorter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestBlockJoinSorter.java
@@ -73,7 +73,7 @@ public class TestBlockJoinSorter extends LuceneTestCase {
final StringField parent = new StringField("parent", "true", Store.YES);
parentDoc.add(parent);
for (int i = 0; i < numParents; ++i) {
- List documents = new ArrayList();
+ List documents = new ArrayList<>();
final int numChildren = random().nextInt(10);
for (int j = 0; j < numChildren; ++j) {
final Document childDoc = new Document();
diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java
index 9601dc9c0e1..f64f56de711 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestEarlyTermination.java
@@ -73,11 +73,11 @@ public class TestEarlyTermination extends LuceneTestCase {
dir = newDirectory();
numDocs = atLeast(150);
final int numTerms = TestUtil.nextInt(random(), 1, numDocs / 5);
- Set randomTerms = new HashSet();
+ Set randomTerms = new HashSet<>();
while (randomTerms.size() < numTerms) {
randomTerms.add(TestUtil.randomSimpleString(random()));
}
- terms = new ArrayList(randomTerms);
+ terms = new ArrayList<>(randomTerms);
final long seed = random().nextLong();
final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed)));
iwc.setMergePolicy(TestSortingMergePolicy.newSortingMergePolicy(sort));
diff --git a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java
index 5095aeca299..3d4edbb20fd 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java
@@ -93,11 +93,11 @@ public class TestSortingMergePolicy extends LuceneTestCase {
dir2 = newDirectory();
final int numDocs = atLeast(150);
final int numTerms = TestUtil.nextInt(random(), 1, numDocs / 5);
- Set randomTerms = new HashSet();
+ Set randomTerms = new HashSet<>();
while (randomTerms.size() < numTerms) {
randomTerms.add(TestUtil.randomSimpleString(random()));
}
- terms = new ArrayList(randomTerms);
+ terms = new ArrayList<>(randomTerms);
final long seed = random().nextLong();
final IndexWriterConfig iwc1 = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed)));
final IndexWriterConfig iwc2 = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(new Random(seed)));
diff --git a/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java b/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java
index caa5b5f9e6d..c0341a9a534 100644
--- a/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java
+++ b/lucene/misc/src/test/org/apache/lucene/util/fst/TestFSTsMisc.java
@@ -67,7 +67,7 @@ public class TestFSTsMisc extends LuceneTestCase {
}
for(int inputMode=0;inputMode<2;inputMode++) {
final int numWords = random.nextInt(maxNumWords+1);
- Set termsSet = new HashSet();
+ Set termsSet = new HashSet<>();
IntsRef[] terms = new IntsRef[numWords];
while(termsSet.size() < numWords) {
final String term = getRandomString(random);
@@ -88,7 +88,7 @@ public class TestFSTsMisc extends LuceneTestCase {
System.out.println("TEST: now test UpToTwoPositiveIntOutputs");
}
final UpToTwoPositiveIntOutputs outputs = UpToTwoPositiveIntOutputs.getSingleton(true);
- final List> pairs = new ArrayList>(terms.length);
+ final List> pairs = new ArrayList<>(terms.length);
long lastOutput = 0;
for(int idx=0;idx values = new ArrayList();
+ List values = new ArrayList<>();
values.add(value);
values.add(value2);
output = values;
} else {
output = outputs.get(value);
}
- pairs.add(new FSTTester.InputOutput(terms[idx], output));
+ pairs.add(new FSTTester.InputOutput<>(terms[idx], output));
}
new FSTTester(random(), dir, inputMode, pairs, outputs, false) {
@Override
@@ -133,13 +133,13 @@
System.out.println("TEST: now test OneOrMoreOutputs");
}
final PositiveIntOutputs _outputs = PositiveIntOutputs.getSingleton();
- final ListOfOutputs outputs = new ListOfOutputs(_outputs);
- final List> pairs = new ArrayList>(terms.length);
+ final ListOfOutputs outputs = new ListOfOutputs<>(_outputs);
+ final List> pairs = new ArrayList<>(terms.length);
long lastOutput = 0;
for(int idx=0;idx values = new ArrayList();
+ List values = new ArrayList<>();
for(int i=0;i(terms[idx], output));
+ pairs.add(new FSTTester.InputOutput<>(terms[idx], output));
}
- new FSTTester(random(), dir, inputMode, pairs, outputs, false).doTest(false);
+ new FSTTester<>(random(), dir, inputMode, pairs, outputs, false).doTest(false);
}
}
public void testListOfOutputs() throws Exception {
PositiveIntOutputs _outputs = PositiveIntOutputs.getSingleton();
- ListOfOutputs outputs = new ListOfOutputs(_outputs);
- final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs);
+ ListOfOutputs outputs = new ListOfOutputs<>(_outputs);
+ final Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
final IntsRef scratch = new IntsRef();
// Add the same input more than once and the outputs
@@ -194,8 +194,8 @@ public class TestFSTsMisc extends LuceneTestCase {
public void testListOfOutputsEmptyString() throws Exception {
PositiveIntOutputs _outputs = PositiveIntOutputs.getSingleton();
- ListOfOutputs outputs = new ListOfOutputs(_outputs);
- final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs);
+ ListOfOutputs outputs = new ListOfOutputs<>(_outputs);
+ final Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
final IntsRef scratch = new IntsRef();
builder.add(scratch, 0L);
diff --git
a/lucene/queries/src/java/org/apache/lucene/queries/BooleanFilter.java b/lucene/queries/src/java/org/apache/lucene/queries/BooleanFilter.java index d050e0f05a0..ce2497ebc60 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/BooleanFilter.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/BooleanFilter.java @@ -43,7 +43,7 @@ import org.apache.lucene.util.FixedBitSet; */ public class BooleanFilter extends Filter implements Iterable { - private final List clauses = new ArrayList(); + private final List clauses = new ArrayList<>(); /** * Returns the a DocIdSetIterator representing the Boolean composition diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java index 2e04ca4ba24..a78cef32bea 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java @@ -67,7 +67,7 @@ public class CommonTermsQuery extends Query { * rewrite to dismax rather than boolean. Yet, this can already be subclassed * to do so. */ - protected final List terms = new ArrayList(); + protected final List terms = new ArrayList<>(); protected final boolean disableCoord; protected final float maxTermFrequency; protected final Occur lowFreqOccur; diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java b/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java index 9c83dbce3ae..652addc3df6 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java @@ -129,7 +129,7 @@ public final class TermsFilter extends Filter { this.offsets = new int[length+1]; int lastEndOffset = 0; int index = 0; - ArrayList termsAndFields = new ArrayList(); + ArrayList termsAndFields = new ArrayList<>(); TermsAndField lastTermsAndField = null; BytesRef previousTerm = null; String previousField = null; diff --git a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java index 9e2ceee0af6..94eecce19e8 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java @@ -706,7 +706,7 @@ public final class MoreLikeThis { * @param docNum the id of the lucene document from which to find terms */ public PriorityQueue retrieveTerms(int docNum) throws IOException { - Map termFreqMap = new HashMap(); + Map termFreqMap = new HashMap<>(); for (String fieldName : fieldNames) { final Fields vectors = ir.getTermVectors(docNum); final Terms vector; @@ -846,7 +846,7 @@ public final class MoreLikeThis { * @see #retrieveInterestingTerms */ public PriorityQueue retrieveTerms(Reader r, String fieldName) throws IOException { - Map words = new HashMap(); + Map words = new HashMap<>(); addTermFrequencies(r, words, fieldName); return createQueue(words); } @@ -855,7 +855,7 @@ public final class MoreLikeThis { * @see #retrieveInterestingTerms(java.io.Reader, String) */ public String[] retrieveInterestingTerms(int docNum) throws IOException { - ArrayList al = new ArrayList(maxQueryTerms); + ArrayList al = new ArrayList<>(maxQueryTerms); PriorityQueue pq = retrieveTerms(docNum); Object cur; int lim = maxQueryTerms; // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller... 
@@ -879,7 +879,7 @@ public final class MoreLikeThis {
    * @see #setMaxQueryTerms
    */
   public String[] retrieveInterestingTerms(Reader r, String fieldName) throws IOException {
-    ArrayList<Object> al = new ArrayList<Object>(maxQueryTerms);
+    ArrayList<Object> al = new ArrayList<>(maxQueryTerms);
     PriorityQueue<Object[]> pq = retrieveTerms(r, fieldName);
     Object cur;
     int lim = maxQueryTerms; // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java b/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
index 0d22c61faf8..728ac595cb3 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
@@ -477,7 +477,7 @@ public class CommonTermsQueryTest extends LuceneTestCase {
       TopDocs verifySearch = searcher.search(verifyQuery, reader.maxDoc());
       assertEquals(verifySearch.totalHits, cqSearch.totalHits);

-      Set<Integer> hits = new HashSet<Integer>();
+      Set<Integer> hits = new HashSet<>();
       for (ScoreDoc doc : verifySearch.scoreDocs) {
         hits.add(doc.doc);
       }
@@ -508,7 +508,7 @@ public class CommonTermsQueryTest extends LuceneTestCase {
   }

   private static List<TermAndFreq> queueToList(PriorityQueue<TermAndFreq> queue) {
-    List<TermAndFreq> terms = new ArrayList<TermAndFreq>();
+    List<TermAndFreq> terms = new ArrayList<>();
     while (queue.size() > 0) {
       terms.add(queue.pop());
     }
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java b/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java
index e46dd70f7e9..41e30dbc207 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TermFilterTest.java
@@ -45,7 +45,7 @@ public class TermFilterTest extends LuceneTestCase {

   public void testCachability() throws Exception {
     TermFilter a = termFilter("field1", "a");
-    HashSet<Filter> cachedFilters = new HashSet<Filter>();
+    HashSet<Filter> cachedFilters = new HashSet<>();
     cachedFilters.add(a);
     assertTrue("Must be cached", cachedFilters.contains(termFilter("field1", "a")));
     assertFalse("Must not be cached", cachedFilters.contains(termFilter("field1", "b")));
@@ -84,7 +84,7 @@ public class TermFilterTest extends LuceneTestCase {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     int num = atLeast(100);
-    List<Term> terms = new ArrayList<Term>();
+    List<Term> terms = new ArrayList<>();
     for (int i = 0; i < num; i++) {
       String field = "field" + i;
       String string = TestUtil.randomRealisticUnicodeString(random());
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java b/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
index c25e5b2cd82..36ebadd1204 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java
@@ -55,7 +55,7 @@ public class TermsFilterTest extends LuceneTestCase {
   public void testCachability() throws Exception {
     TermsFilter a = termsFilter(random().nextBoolean(), new Term("field1", "a"), new Term("field1", "b"));
-    HashSet<Filter> cachedFilters = new HashSet<Filter>();
+    HashSet<Filter> cachedFilters = new HashSet<>();
     cachedFilters.add(a);
     TermsFilter b = termsFilter(random().nextBoolean(), new Term("field1", "b"), new Term("field1", "a"));
     assertTrue("Must be cached", cachedFilters.contains(b));
@@ -79,7 +79,7 @@ public class TermsFilterTest extends LuceneTestCase {
     AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
     w.close();

-    List<Term> terms = new ArrayList<Term>();
+    List<Term> terms = new ArrayList<>();
     terms.add(new Term(fieldName, "19"));
     FixedBitSet bits = (FixedBitSet) termsFilter(random().nextBoolean(), terms).getDocIdSet(context, context.reader().getLiveDocs());
     assertNull("Must match nothing", bits);
@@ -142,7 +142,7 @@ public class TermsFilterTest extends LuceneTestCase {
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     int num = atLeast(3);
     int skip = random().nextInt(num);
-    List<Term> terms = new ArrayList<Term>();
+    List<Term> terms = new ArrayList<>();
     for (int i = 0; i < num; i++) {
       terms.add(new Term("field" + i, "content1"));
       Document doc = new Document();
@@ -173,7 +173,7 @@ public class TermsFilterTest extends LuceneTestCase {
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     int num = atLeast(10);
-    Set<Term> terms = new HashSet<Term>();
+    Set<Term> terms = new HashSet<>();
     for (int i = 0; i < num; i++) {
       String field = "field" + random().nextInt(100);
       terms.add(new Term(field, "content1"));
@@ -197,7 +197,7 @@ public class TermsFilterTest extends LuceneTestCase {
     w.close();
     assertEquals(1, reader.leaves().size());
     AtomicReaderContext context = reader.leaves().get(0);
-    TermsFilter tf = new TermsFilter(new ArrayList<Term>(terms));
+    TermsFilter tf = new TermsFilter(new ArrayList<>(terms));
     FixedBitSet bits = (FixedBitSet) tf.getDocIdSet(context, context.reader().getLiveDocs());
     assertEquals(context.reader().numDocs(), bits.cardinality());
@@ -210,7 +210,7 @@ public class TermsFilterTest extends LuceneTestCase {
     RandomIndexWriter w = new RandomIndexWriter(random(), dir);
     int num = atLeast(100);
     final boolean singleField = random().nextBoolean();
-    List<Term> terms = new ArrayList<Term>();
+    List<Term> terms = new ArrayList<>();
     for (int i = 0; i < num; i++) {
       String field = "field" + (singleField ? "1" : random().nextInt(100));
       String string = TestUtil.randomRealisticUnicodeString(random());
@@ -255,10 +255,10 @@ public class TermsFilterTest extends LuceneTestCase {

   private TermsFilter termsFilter(boolean singleField, Collection<Term> termList) {
     if (!singleField) {
-      return new TermsFilter(new ArrayList<Term>(termList));
+      return new TermsFilter(new ArrayList<>(termList));
     }
     final TermsFilter filter;
-    List<BytesRef> bytes = new ArrayList<BytesRef>();
+    List<BytesRef> bytes = new ArrayList<>();
     String field = null;
     for (Term term : termList) {
       bytes.add(term.bytes());
@@ -275,8 +275,8 @@ public class TermsFilterTest extends LuceneTestCase {
   public void testHashCodeAndEquals() {
     int num = atLeast(100);
     final boolean singleField = random().nextBoolean();
-    List<Term> terms = new ArrayList<Term>();
-    Set<Term> uniqueTerms = new HashSet<Term>();
+    List<Term> terms = new ArrayList<>();
+    Set<Term> uniqueTerms = new HashSet<>();
     for (int i = 0; i < num; i++) {
       String field = "field" + (singleField ? "1" : random().nextInt(100));
       String string = TestUtil.randomRealisticUnicodeString(random());
@@ -288,7 +288,7 @@ public class TermsFilterTest extends LuceneTestCase {
       assertEquals(right, left);
       assertEquals(right.hashCode(), left.hashCode());
       if (uniqueTerms.size() > 1) {
-        List<Term> asList = new ArrayList<Term>(uniqueTerms);
+        List<Term> asList = new ArrayList<>(uniqueTerms);
         asList.remove(0);
         TermsFilter notEqual = termsFilter(singleField ? random().nextBoolean() : false, asList);
         assertFalse(left.equals(notEqual));
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
index d976d805475..72101044db6 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java
@@ -343,7 +343,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
   // since custom scoring modifies the order of docs, map results
   // by doc ids so that we can later compare/verify them
   private Map<Integer,Float> topDocsToMap(TopDocs td) {
-    Map<Integer,Float> h = new HashMap<Integer,Float>();
+    Map<Integer,Float> h = new HashMap<>();
     for (int i=0; i<td.totalHits; i++) {
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java b/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
--- a/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
   private Map<String,Float> getOriginalValues() throws IOException {
-    Map<String,Float> originalValues = new HashMap<String,Float>();
+    Map<String,Float> originalValues = new HashMap<>();
     MoreLikeThis mlt = new MoreLikeThis(reader);
     mlt.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
     mlt.setMinDocFreq(1);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java
index 9b6e196bd4a..5626cdf3ab8 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/MultiFieldQueryParser.java
@@ -98,7 +98,7 @@ public class MultiFieldQueryParser extends QueryParser
   @Override
   protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
     if (field == null) {
-      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+      List<BooleanClause> clauses = new ArrayList<>();
       for (int i = 0; i < fields.length; i++) {
         Query q = super.getFieldQuery(fields[i], queryText, true);
         if (q != null) {
@@ -135,7 +135,7 @@ public class MultiFieldQueryParser extends QueryParser
   @Override
   protected Query getFieldQuery(String field, String queryText, boolean quoted) throws ParseException {
     if (field == null) {
-      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+      List<BooleanClause> clauses = new ArrayList<>();
       for (int i = 0; i < fields.length; i++) {
         Query q = super.getFieldQuery(fields[i], queryText, quoted);
         if (q != null) {
@@ -163,7 +163,7 @@ public class MultiFieldQueryParser extends QueryParser
   protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
   {
     if (field == null) {
-      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+      List<BooleanClause> clauses = new ArrayList<>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getFuzzyQuery(fields[i], termStr, minSimilarity),
             BooleanClause.Occur.SHOULD));
@@ -177,7 +177,7 @@ public class MultiFieldQueryParser extends QueryParser
   protected Query getPrefixQuery(String field, String termStr) throws ParseException
   {
     if (field == null) {
-      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+      List<BooleanClause> clauses = new ArrayList<>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getPrefixQuery(fields[i], termStr),
             BooleanClause.Occur.SHOULD));
@@ -190,7 +190,7 @@ public class MultiFieldQueryParser extends QueryParser
   @Override
   protected Query getWildcardQuery(String field, String termStr) throws ParseException {
     if (field == null) {
-      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+      List<BooleanClause> clauses = new ArrayList<>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getWildcardQuery(fields[i], termStr),
             BooleanClause.Occur.SHOULD));
@@ -204,7 +204,7 @@ public class MultiFieldQueryParser extends QueryParser
   @Override
   protected Query getRangeQuery(String field, String part1, String part2, boolean startInclusive, boolean endInclusive) throws ParseException {
     if (field == null) {
-      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+      List<BooleanClause> clauses = new ArrayList<>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getRangeQuery(fields[i], part1, part2, startInclusive, endInclusive),
             BooleanClause.Occur.SHOULD));
@@ -220,7 +220,7 @@ public class MultiFieldQueryParser extends QueryParser
   protected Query getRegexpQuery(String field, String termStr) throws ParseException
   {
     if (field == null) {
-      List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+      List<BooleanClause> clauses = new ArrayList<>();
       for (int i = 0; i < fields.length; i++) {
         clauses.add(new BooleanClause(getRegexpQuery(fields[i], termStr),
             BooleanClause.Occur.SHOULD));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
index 273ed8c0e64..2d7e29b29d4 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParser.java
@@ -174,7 +174,7 @@ public class QueryParser extends QueryParserBase implements QueryParserConstants
   }

   final public Query Query(String field) throws ParseException {
-    List<BooleanClause> clauses = new ArrayList<BooleanClause>();
+    List<BooleanClause> clauses = new ArrayList<>();
     Query q, firstQuery=null;
     int conj, mods;
     mods = Modifiers();
@@ -640,7 +640,7 @@ public class QueryParser extends QueryParserBase implements QueryParserConstants
     return (jj_ntk = jj_nt.kind);
   }

-  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
+  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<>();
   private int[] jj_expentry;
   private int jj_kind = -1;
   private int[] jj_lasttokens = new int[100];
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
index 4e0d5a62e61..0b8d803b17e 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/classic/QueryParserBase.java
@@ -351,7 +351,7 @@ public abstract class QueryParserBase extends QueryBuilder implements CommonQuer

     if (fieldToDateResolution == null) {
       // lazily initialize HashMap
-      fieldToDateResolution = new HashMap<String,DateTools.Resolution>();
+      fieldToDateResolution = new HashMap<>();
     }

     fieldToDateResolution.put(fieldName, dateResolution);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java
index 1adabae48d7..0f63fb71ad2 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/complexPhrase/ComplexPhraseQueryParser.java
@@ -104,7 +104,7 @@ public class ComplexPhraseQueryParser extends QueryParser {
     // First pass - parse the top-level query recording any PhraseQuerys
     // which will need to be resolved
-    complexPhrases = new ArrayList<ComplexPhraseQuery>();
+    complexPhrases = new ArrayList<>();
     Query q = super.parse(query);

     // Perform second pass, using this QueryParser to parse any nested
@@ -254,7 +254,7 @@ public class ComplexPhraseQueryParser extends QueryParser {
       }

       if (qc instanceof BooleanQuery) {
-        ArrayList<SpanQuery> sc = new ArrayList<SpanQuery>();
+        ArrayList<SpanQuery> sc = new ArrayList<>();
         addComplexPhraseClause(sc, (BooleanQuery) qc);
         if (sc.size() > 0) {
           allSpanClauses[i] = sc.get(0);
@@ -285,7 +285,7 @@ public class ComplexPhraseQueryParser extends QueryParser {
       // Complex case - we have mixed positives and negatives in the
       // sequence.
       // Need to return a SpanNotQuery
-      ArrayList<SpanQuery> positiveClauses = new ArrayList<SpanQuery>();
+      ArrayList<SpanQuery> positiveClauses = new ArrayList<>();
       for (int j = 0; j < allSpanClauses.length; j++) {
         if (!bclauses[j].getOccur().equals(BooleanClause.Occur.MUST_NOT)) {
           positiveClauses.add(allSpanClauses[j]);
@@ -312,8 +312,8 @@ public class ComplexPhraseQueryParser extends QueryParser {
   }

   private void addComplexPhraseClause(List<SpanQuery> spanClauses, BooleanQuery qc) {
-    ArrayList<SpanQuery> ors = new ArrayList<SpanQuery>();
-    ArrayList<SpanQuery> nots = new ArrayList<SpanQuery>();
+    ArrayList<SpanQuery> ors = new ArrayList<>();
+    ArrayList<SpanQuery> nots = new ArrayList<>();
     BooleanClause[] bclauses = qc.getClauses();

     // For all clauses e.g. one* two~
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java
index 1a82e98f15b..86e616b2a0c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/ext/Extensions.java
@@ -42,7 +42,7 @@ import java.util.Map;
  * @see ParserExtension
  */
 public class Extensions {
-  private final Map<String,ParserExtension> extensions = new HashMap<String,ParserExtension>();
+  private final Map<String,ParserExtension> extensions = new HashMap<>();
   private final char extensionFieldDelimiter;
   /**
    * The default extension field delimiter character. This constant is set to
@@ -122,11 +122,11 @@ public class Extensions {
       String field) {
     int indexOf = field.indexOf(this.extensionFieldDelimiter);
     if (indexOf < 0)
-      return new Pair<String,String>(field, null);
+      return new Pair<>(field, null);
     final String indexField = indexOf == 0 ? defaultField : field.substring(0, indexOf);
     final String extensionKey = field.substring(indexOf + 1);
-    return new Pair<String,String>(indexField, extensionKey);
+    return new Pair<>(indexField, extensionKey);
   }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java
index 7fc916ec663..567a326f227 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/builders/QueryTreeBuilder.java
@@ -79,7 +79,7 @@ public class QueryTreeBuilder implements QueryBuilder {
   public void setBuilder(CharSequence fieldName, QueryBuilder builder) {

     if (this.fieldNameBuilders == null) {
-      this.fieldNameBuilders = new HashMap<String,QueryBuilder>();
+      this.fieldNameBuilders = new HashMap<>();
     }

     this.fieldNameBuilders.put(fieldName.toString(), builder);
@@ -97,7 +97,7 @@ public class QueryTreeBuilder implements QueryBuilder {
       QueryBuilder builder) {

     if (this.queryNodeBuilders == null) {
-      this.queryNodeBuilders = new HashMap<Class<? extends QueryNode>, QueryBuilder>();
+      this.queryNodeBuilders = new HashMap<>();
     }

     this.queryNodeBuilders.put(queryNodeClass, builder);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java
index 0276e3332c4..f3f6c7edd7f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/AbstractQueryConfig.java
@@ -33,7 +33,7 @@ import java.util.HashMap;
 */
 public abstract class AbstractQueryConfig {

-  final private HashMap<ConfigurationKey<?>, Object> configMap = new HashMap<ConfigurationKey<?>, Object>();
+  final private HashMap<ConfigurationKey<?>, Object> configMap = new HashMap<>();

   AbstractQueryConfig() {
     // although this class is public, it can only be constructed from package
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java
index 1ced76d6109..5689a9a537f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/ConfigurationKey.java
@@ -36,7 +36,7 @@ final public class ConfigurationKey<T> {
    * @return a new instance
    */
   public static <T> ConfigurationKey<T> newInstance() {
-    return new ConfigurationKey<T>();
+    return new ConfigurationKey<>();
   }

 }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java
index 03e6110bc2a..3efb435b741 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/config/QueryConfigHandler.java
@@ -40,7 +40,7 @@ import org.apache.lucene.queryparser.flexible.core.util.StringUtils;
 */
 public abstract class QueryConfigHandler extends AbstractQueryConfig {

-  final private LinkedList<FieldConfigListener> listeners = new LinkedList<FieldConfigListener>();
+  final private LinkedList<FieldConfigListener> listeners = new LinkedList<>();

   /**
    * Returns an implementation of
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java
index 2214467ca60..0eff69a2969 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/GroupQueryNode.java
@@ -73,7 +73,7 @@ public class GroupQueryNode extends QueryNodeImpl {
   }

   public void setChild(QueryNode child) {
-    List<QueryNode> list = new ArrayList<QueryNode>();
+    List<QueryNode> list = new ArrayList<>();
     list.add(child);
     this.set(list);
   }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java
index 9791cf60fce..a88667eecde 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/ModifierQueryNode.java
@@ -151,7 +151,7 @@ public class ModifierQueryNode extends QueryNodeImpl {
   }

   public void setChild(QueryNode child) {
-    List<QueryNode> list = new ArrayList<QueryNode>();
+    List<QueryNode> list = new ArrayList<>();
     list.add(child);
     this.set(list);
   }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java
index fd54d4b0289..2235ad2b179 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/PathQueryNode.java
@@ -162,7 +162,7 @@ public class PathQueryNode extends QueryNodeImpl {
    * @return a List QueryText element from position startIndex
    */
   public List<QueryText> getPathElements(int startIndex) {
-    List<QueryText> rValues = new ArrayList<QueryText>();
+    List<QueryText> rValues = new ArrayList<>();
     for (int i = startIndex; i < this.values.size(); i++) {
       try {
         rValues.add(this.values.get(i).clone());
@@ -209,7 +209,7 @@ public class PathQueryNode extends QueryNodeImpl {

     // copy children
     if (this.values != null) {
-      List<QueryText> localValues = new ArrayList<QueryText>();
+      List<QueryText> localValues = new ArrayList<>();
       for (QueryText value : this.values) {
         localValues.add(value.clone());
       }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java
index 81fd9bb64d2..6e3de271dfd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/nodes/QueryNodeImpl.java
@@ -41,14 +41,14 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable {

   private boolean isLeaf = true;

-  private Hashtable<String, Object> tags = new Hashtable<String, Object>();
+  private Hashtable<String, Object> tags = new Hashtable<>();

   private List<QueryNode> clauses = null;

   protected void allocate() {

     if (this.clauses == null) {
-      this.clauses = new ArrayList<QueryNode>();
+      this.clauses = new ArrayList<>();

     } else {
       this.clauses.clear();
@@ -106,7 +106,7 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable {
       child.removeFromParent();
     }

-    ArrayList<QueryNode> existingChildren = new ArrayList<QueryNode>(getChildren());
+    ArrayList<QueryNode> existingChildren = new ArrayList<>(getChildren());
     for (QueryNode existingChild : existingChildren) {
       existingChild.removeFromParent();
     }
@@ -124,11 +124,11 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable {
     clone.isLeaf = this.isLeaf;

     // Reset all tags
-    clone.tags = new Hashtable<String, Object>();
+    clone.tags = new Hashtable<>();

     // copy children
     if (this.clauses != null) {
-      List<QueryNode> localClauses = new ArrayList<QueryNode>();
+      List<QueryNode> localClauses = new ArrayList<>();
       for (QueryNode clause : this.clauses) {
         localClauses.add(clause.cloneTree());
       }
@@ -156,7 +156,7 @@ public abstract class QueryNodeImpl implements QueryNode, Cloneable {
     if (isLeaf() || this.clauses == null) {
       return null;
     }
-    return new ArrayList<QueryNode>(this.clauses);
+    return new ArrayList<>(this.clauses);
   }

   @Override
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java
index 965fb5185f3..92f0d6e631f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorImpl.java
@@ -73,7 +73,7 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 */
 public abstract class QueryNodeProcessorImpl implements QueryNodeProcessor {

-  private ArrayList<ChildrenList> childrenListPool = new ArrayList<ChildrenList>();
+  private ArrayList<ChildrenList> childrenListPool = new ArrayList<>();

   private QueryConfigHandler queryConfig;
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java
index 9b15207bfb0..b439798b470 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/processors/QueryNodeProcessorPipeline.java
@@ -40,7 +40,7 @@ import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
 public class QueryNodeProcessorPipeline implements QueryNodeProcessor,
     List<QueryNodeProcessor> {

-  private LinkedList<QueryNodeProcessor> processors = new LinkedList<QueryNodeProcessor>();
+  private LinkedList<QueryNodeProcessor> processors = new LinkedList<>();

   private QueryConfigHandler queryConfig;
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java
index 1c91c301083..fc1f09778e1 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/core/util/QueryNodeOperation.java
@@ -66,7 +66,7 @@ public final class QueryNodeOperation {
     QueryNode result = null;
     switch (op) {
     case NONE:
-      List<QueryNode> children = new ArrayList<QueryNode>();
+      List<QueryNode> children = new ArrayList<>();
       children.add(q1.cloneTree());
       children.add(q2.cloneTree());
       result = new AndQueryNode(children);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java
index df2f5f9bb48..06b65ca7769 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/messages/NLS.java
@@ -46,7 +46,7 @@ import java.util.ResourceBundle;
 public class NLS {

   private static Map<String, Class<? extends NLS>> bundles =
-    new HashMap<String, Class<? extends NLS>>(0);
+    new HashMap<>(0);

   protected NLS() {
     // Do not instantiate
@@ -129,7 +129,7 @@ public class NLS {

     // build a map of field names to Field objects
     final int len = fieldArray.length;
-    Map<String, Field> fields = new HashMap<String, Field>(len * 2);
+    Map<String, Field> fields = new HashMap<>(len * 2);
     for (int i = 0; i < len; i++) {
       fields.put(fieldArray[i].getName(), fieldArray[i]);
       loadfieldValue(fieldArray[i], isFieldAccessible, clazz);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java
index fe4fb13f8f0..85cdae9e115 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/precedence/processors/BooleanModifiersQueryNodeProcessor.java
@@ -46,7 +46,7 @@ import org.apache.lucene.queryparser.flexible.core.nodes.ModifierQueryNode.Modif
 */
 public class BooleanModifiersQueryNodeProcessor extends QueryNodeProcessorImpl {

-  private ArrayList<QueryNode> childrenBuffer = new ArrayList<QueryNode>();
+  private ArrayList<QueryNode> childrenBuffer = new ArrayList<>();

   private Boolean usingAnd = false;
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java
index 89923d741fb..7a695d90378 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/builders/MultiPhraseQueryNodeBuilder.java
@@ -49,7 +49,7 @@ public class MultiPhraseQueryNodeBuilder implements StandardQueryBuilder {
     List<QueryNode> children = phraseNode.getChildren();

     if (children != null) {
-      TreeMap<Integer, List<Term>> positionTermMap = new TreeMap<Integer, List<Term>>();
+      TreeMap<Integer, List<Term>> positionTermMap = new TreeMap<>();

       for (QueryNode child : children) {
         FieldQueryNode termNode = (FieldQueryNode) child;
@@ -59,7 +59,7 @@ public class MultiPhraseQueryNodeBuilder implements StandardQueryBuilder {
             .getPositionIncrement());

         if (termList == null) {
-          termList = new LinkedList<Term>();
+          termList = new LinkedList<>();
           positionTermMap.put(termNode.getPositionIncrement(), termList);
         }
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java
index 935db13c225..8e83fe01e8c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/nodes/AbstractRangeQueryNode.java
@@ -163,7 +163,7 @@ public class AbstractRangeQueryNode<T extends FieldValuePairQueryNode<?>>
     this.lowerInclusive = lowerInclusive;
     this.upperInclusive = upperInclusive;

-    ArrayList<QueryNode> children = new ArrayList<QueryNode>(2);
+    ArrayList<QueryNode> children = new ArrayList<>(2);
     children.add(lower);
     children.add(upper);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
index 4844ec86f10..5001f630e7a 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/parser/StandardSyntaxParser.java
@@ -185,7 +185,7 @@ public class StandardSyntaxParser implements SyntaxParser, StandardSyntaxParserC
     }
     c = DisjQuery(field);
     if (clauses == null) {
-      clauses = new Vector<QueryNode>();
+      clauses = new Vector<>();
       clauses.addElement(first);
     }
     clauses.addElement(c);
@@ -215,7 +215,7 @@ public class StandardSyntaxParser implements SyntaxParser, StandardSyntaxParserC
       jj_consume_token(OR);
       c = ConjQuery(field);
       if (clauses == null) {
-        clauses = new Vector<QueryNode>();
+        clauses = new Vector<>();
         clauses.addElement(first);
       }
       clauses.addElement(c);
@@ -245,7 +245,7 @@ public class StandardSyntaxParser implements SyntaxParser, StandardSyntaxParserC
       jj_consume_token(AND);
       c = ModClause(field);
       if (clauses == null) {
-        clauses = new Vector<QueryNode>();
+        clauses = new Vector<>();
         clauses.addElement(first);
       }
       clauses.addElement(c);
@@ -986,7 +986,7 @@ public class StandardSyntaxParser implements SyntaxParser, StandardSyntaxParserC
     return (jj_ntk = jj_nt.kind);
   }

-  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
+  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<>();
   private int[] jj_expentry;
   private int jj_kind = -1;
   private int[] jj_lasttokens = new int[100];
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
index c0b3e721bb1..ef680663121 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/AnalyzerQueryNodeProcessor.java
@@ -193,7 +193,7 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl {

       if (positionCount == 1) {
         // simple case: only one position, with synonyms
-        LinkedList<QueryNode> children = new LinkedList<QueryNode>();
+        LinkedList<QueryNode> children = new LinkedList<>();

         for (int i = 0; i < numTokens; i++) {
           String term = null;
@@ -257,7 +257,7 @@ public class AnalyzerQueryNodeProcessor extends QueryNodeProcessorImpl {
         // phrase query:
         MultiPhraseQueryNode mpq = new MultiPhraseQueryNode();

-        List<FieldQueryNode> multiTerms = new ArrayList<FieldQueryNode>();
+        List<FieldQueryNode> multiTerms = new ArrayList<>();
         int position = -1;
         int i = 0;
         int termGroupCount = 0;
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java
index 7f4d727ae59..db8f3082f9c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/BooleanQuery2ModifierNodeProcessor.java
@@ -68,7 +68,7 @@ public class BooleanQuery2ModifierNodeProcessor implements QueryNodeProcessor {

   QueryConfigHandler queryConfigHandler;

-  private final ArrayList<QueryNode> childrenBuffer = new ArrayList<QueryNode>();
+  private final ArrayList<QueryNode> childrenBuffer = new ArrayList<>();

   private Boolean usingAnd = false;
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/GroupQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/GroupQueryNodeProcessor.java
index 38deff02e3d..ef1228a7c5b 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/GroupQueryNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/GroupQueryNodeProcessor.java
@@ -80,7 +80,7 @@ public class GroupQueryNodeProcessor implements QueryNodeProcessor {
       queryTree = ((GroupQueryNode) queryTree).getChild();
     }

-    this.queryNodeList = new ArrayList<QueryNode>();
+    this.queryNodeList = new ArrayList<>();
     this.latestNodeVerified = false;
     readTree(queryTree);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java
index 99a121cbbdf..44f74e854ff 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/MultiFieldQueryNodeProcessor.java
@@ -93,7 +93,7 @@ public class MultiFieldQueryNodeProcessor extends QueryNodeProcessorImpl {
         return fieldNode;

       } else {
-        LinkedList<QueryNode> children = new LinkedList<QueryNode>();
+        LinkedList<QueryNode> children = new LinkedList<>();
         children.add(fieldNode);

         for (int i = 1; i < fields.length; i++) {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java
index addee51652b..0365d357248 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/flexible/standard/processors/RemoveEmptyNonLeafQueryNodeProcessor.java
@@ -42,7 +42,7 @@ import org.apache.lucene.queryparser.flexible.core.processors.QueryNodeProcessor
 public class RemoveEmptyNonLeafQueryNodeProcessor extends QueryNodeProcessorImpl {

-  private LinkedList<QueryNode> childrenBuffer = new LinkedList<QueryNode>();
+  private LinkedList<QueryNode> childrenBuffer = new LinkedList<>();

   public RemoveEmptyNonLeafQueryNodeProcessor() {
     // empty constructor
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
index 41ed459c903..1c429d68022 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/parser/QueryParser.java
@@ -185,7 +185,7 @@ public class QueryParser implements QueryParserConstants {
       fieldName = jj_consume_token(TERM);
       jj_consume_token(COLON);
       if (fieldNames == null) {
-        fieldNames = new ArrayList<String>();
+        fieldNames = new ArrayList<>();
       }
       fieldNames.add(fieldName.image);
     }
@@ -211,7 +211,7 @@ public class QueryParser implements QueryParserConstants {
       oprt = jj_consume_token(OR);
       /* keep only last used operator */
       if (queries == null) {
-        queries = new ArrayList<SrndQuery>();
+        queries = new ArrayList<>();
         queries.add(q);
       }
       q = AndQuery();
@@ -239,7 +239,7 @@ public class QueryParser implements QueryParserConstants {
       oprt = jj_consume_token(AND);
       /* keep only last used operator */
       if (queries == null) {
-        queries = new ArrayList<SrndQuery>();
+        queries = new ArrayList<>();
         queries.add(q);
       }
       q = NotQuery();
@@ -267,7 +267,7 @@ public class QueryParser implements QueryParserConstants {
       oprt = jj_consume_token(NOT);
       /* keep only last used operator */
       if (queries == null) {
-        queries = new ArrayList<SrndQuery>();
+        queries = new ArrayList<>();
         queries.add(q);
       }
       q = NQuery();
@@ -293,7 +293,7 @@ public class QueryParser implements QueryParserConstants {
         break label_5;
       }
       dt = jj_consume_token(N);
-      queries = new ArrayList<SrndQuery>();
+      queries = new ArrayList<>();
       queries.add(q); /* left associative */
       q = WQuery();
@@ -320,7 +320,7 @@ public class QueryParser implements QueryParserConstants {
         break label_6;
       }
       wt = jj_consume_token(W);
-      queries = new ArrayList<SrndQuery>();
+      queries = new ArrayList<>();
       queries.add(q); /* left associative */
       q = PrimaryQuery();
@@ -401,7 +401,7 @@ public class QueryParser implements QueryParserConstants {
   final public List<SrndQuery> FieldsQueryList() throws ParseException {
     SrndQuery q;
-    ArrayList<SrndQuery> queries = new ArrayList<SrndQuery>();
+    ArrayList<SrndQuery> queries = new ArrayList<>();
     jj_consume_token(LPAREN);
     q = FieldsQuery();
     queries.add(q);
@@ -644,7 +644,7 @@ public class QueryParser implements QueryParserConstants {
     return (jj_ntk = jj_nt.kind);
   }

-  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<int[]>();
+  private java.util.List<int[]> jj_expentries = new java.util.ArrayList<>();
   private int[] jj_expentry;
   private int jj_kind = -1;
   private int[] jj_lasttokens = new int[100];
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java
index 1ec65c8fd74..0874a1f0a1f 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/ComposedQuery.java
@@ -51,7 +51,7 @@ public abstract class ComposedQuery extends SrndQuery {
   public boolean isOperatorInfix() { return operatorInfix; } /* else prefix operator */

   public List<Query> makeLuceneSubQueriesField(String fn, BasicQueryFactory qf) {
-    List<Query> luceneSubQueries = new ArrayList<Query>();
+    List<Query> luceneSubQueries = new ArrayList<>();
     Iterator<SrndQuery> sqi = getSubQueriesIterator();
     while (sqi.hasNext()) {
       luceneSubQueries.add( (sqi.next()).makeLuceneQueryField(fn, qf));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java
index 7582711ffba..e0dfe742695 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/FieldsQuery.java
@@ -39,7 +39,7 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */

   public FieldsQuery(SrndQuery q, String fieldName, char fieldOp) {
     this.q = q;
-    fieldNames = new ArrayList<String>();
+    fieldNames = new ArrayList<>();
     fieldNames.add(fieldName);
     this.fieldOp = fieldOp;
   }
@@ -53,7 +53,7 @@ public class FieldsQuery extends SrndQuery { /* mostly untested */
     if (fieldNames.size() == 1) { /* single field name: no new queries needed */
       return q.makeLuceneQueryFieldNoBoost(fieldNames.get(0), qf);
     } else { /* OR query over the fields */
-      List<SrndQuery> queries = new ArrayList<SrndQuery>();
+      List<SrndQuery> queries = new ArrayList<>();
       Iterator<String> fni = getFieldNames().listIterator();
       SrndQuery qc;
       while (fni.hasNext()) {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java
index c3529b62009..f7255cb3522 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SimpleTermRewriteQuery.java
@@ -35,7 +35,7 @@ class SimpleTermRewriteQuery extends RewriteQuery<SimpleTerm> {
   @Override
   public Query rewrite(IndexReader reader) throws IOException {
-    final List<Query> luceneSubQueries = new ArrayList<Query>();
+    final List<Query> luceneSubQueries = new ArrayList<>();
     srndQuery.visitMatchingTerms(reader, fieldName,
         new SimpleTerm.MatchingTermVisitor() {
           @Override
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java
index ddbb61a97b7..bbc7ad106b6 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SpanNearClauseFactory.java
@@ -71,7 +71,7 @@ public class SpanNearClauseFactory { // FIXME: rename to SpanClauseFactory
   public SpanNearClauseFactory(IndexReader reader, String fieldName, BasicQueryFactory qf) {
     this.reader = reader;
     this.fieldName = fieldName;
-    this.weightBySpanQuery = new HashMap<SpanQuery, Float>();
+    this.weightBySpanQuery = new HashMap<>();
     this.qf = qf;
   }
   private IndexReader reader;
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java
index 1d804e10bff..1ab1bb8ae15 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/FilterBuilderFactory.java
@@ -29,7 +29,7 @@ import java.util.HashMap;
 */
 public class FilterBuilderFactory implements FilterBuilder {

-  HashMap<String, FilterBuilder> builders = new HashMap<String, FilterBuilder>();
+  HashMap<String, FilterBuilder> builders = new HashMap<>();

   @Override
   public Filter getFilter(Element n) throws ParserException {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java
index 23cc6e8cd03..94ab8f8b466 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryBuilderFactory.java
@@ -29,7 +29,7 @@ import java.util.HashMap;
 */
 public class QueryBuilderFactory implements QueryBuilder {

-  HashMap<String, QueryBuilder> builders = new HashMap<String, QueryBuilder>();
+  HashMap<String, QueryBuilder> builders = new HashMap<>();

   @Override
   public Query getQuery(Element n) throws ParserException {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryTemplateManager.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryTemplateManager.java
index 4abc2fd8211..3abdd27e7cd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryTemplateManager.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/QueryTemplateManager.java
@@ -48,7 +48,7 @@ public class QueryTemplateManager {
   static final DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
   static final TransformerFactory tFactory = TransformerFactory.newInstance();

-  HashMap<String, Templates> compiledTemplatesCache = new HashMap<String, Templates>();
+  HashMap<String, Templates> compiledTemplatesCache = new HashMap<>();
   Templates defaultCompiledTemplates = null;
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java
index b101ad51c5b..da01dc9e9bd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/CachedFilterBuilder.java
@@ -65,7 +65,7 @@ public class CachedFilterBuilder implements FilterBuilder {
     Element childElement = DOMUtils.getFirstChildOrFail(e);

     if (filterCache == null) {
-      filterCache = new LRUCache<Object, Filter>(cacheSize);
+      filterCache = new LRUCache<>(cacheSize);
     }

     // Test to see if child Element is a query or filter that needs to be
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
index 11565040589..d63f61ae2a4 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/LikeThisQueryBuilder.java
@@ -71,7 +71,7 @@ public class LikeThisQueryBuilder implements QueryBuilder {
     String stopWords = e.getAttribute("stopWords");
     Set<String> stopWordsSet = null;
     if ((stopWords != null) && (fields != null)) {
-      stopWordsSet = new HashSet<String>();
+      stopWordsSet = new HashSet<>();
       for (String field : fields) {
         try (TokenStream ts = analyzer.tokenStream(field, stopWords)) {
           CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java
index 548844e15a0..e26735fa2b0 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanNearBuilder.java
@@ -42,7 +42,7 @@ public class SpanNearBuilder extends SpanBuilderBase {
     String slopString = DOMUtils.getAttributeOrFail(e, "slop");
     int slop = Integer.parseInt(slopString);
     boolean inOrder = DOMUtils.getAttribute(e, "inOrder", false);
-    List<SpanQuery> spans = new ArrayList<SpanQuery>();
+    List<SpanQuery> spans = new ArrayList<>();
     for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling()) {
       if (kid.getNodeType() == Node.ELEMENT_NODE) {
         spans.add(factory.getSpanQuery((Element) kid));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java
index 54d0618f917..ce48d00a590 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrBuilder.java
@@ -39,7 +39,7 @@ public class SpanOrBuilder extends SpanBuilderBase {

   @Override
   public SpanQuery getSpanQuery(Element e) throws ParserException {
-    List<SpanQuery> clausesList = new ArrayList<SpanQuery>();
+    List<SpanQuery> clausesList = new ArrayList<>();
     for (Node kid = e.getFirstChild(); kid != null; kid = kid.getNextSibling()) {
       if (kid.getNodeType() == Node.ELEMENT_NODE) {
         SpanQuery clause = factory.getSpanQuery((Element) kid);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
index 5e316f5c0f3..e7978d130b4 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanOrTermsBuilder.java
@@ -49,7 +49,7 @@ public class SpanOrTermsBuilder extends SpanBuilderBase {
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
     String value = DOMUtils.getNonBlankTextOrFail(e);

-    List<SpanQuery> clausesList = new ArrayList<SpanQuery>();
+    List<SpanQuery> clausesList = new ArrayList<>();

     try (TokenStream ts = analyzer.tokenStream(fieldName, value)) {
       TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java
index 4c7f20084b6..69fd7ba2fdd 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/SpanQueryBuilderFactory.java
@@ -29,7 +29,7 @@ import java.util.Map;
 */
 public class SpanQueryBuilderFactory implements SpanQueryBuilder {

-  private final Map<String, SpanQueryBuilder> builders = new HashMap<String, SpanQueryBuilder>();
+  private final Map<String, SpanQueryBuilder> builders = new HashMap<>();

   @Override
   public Query getQuery(Element e) throws ParserException {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
index 6b97f728341..59f424dd87c 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/xml/builders/TermsFilterBuilder.java
@@ -50,7 +50,7 @@ public class TermsFilterBuilder implements FilterBuilder {
   */
   @Override
   public Filter getFilter(Element e) throws ParserException {
-    List<BytesRef> terms = new ArrayList<BytesRef>();
+    List<BytesRef> terms = new ArrayList<>();
     String text = DOMUtils.getNonBlankTextOrFail(e);
     String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java
index cf21eb2a6c6..126e07e6c64 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java
@@ -61,8 +61,8 @@ public class TestAnalyzingQueryParser extends LuceneTestCase {
   private String[] fuzzyInput;
   private String[] fuzzyExpected;

-  private Map<String, String> wildcardEscapeHits = new TreeMap<String, String>();
-  private Map<String, String> wildcardEscapeMisses = new TreeMap<String, String>();
+  private Map<String, String> wildcardEscapeHits = new TreeMap<>();
+  private Map<String, String> wildcardEscapeMisses = new TreeMap<>();

   @Override
   public void setUp() throws Exception {
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
index 5a2431679df..38ba07ab8f7 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java
@@ -132,7 +132,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
   }

   public void testBoostsSimple() throws Exception {
-    Map<String,Float> boosts = new HashMap<String,Float>();
+    Map<String,Float> boosts = new HashMap<>();
     boosts.put("b", Float.valueOf(5));
     boosts.put("t", Float.valueOf(10));
     String[] fields = {"b", "t"};
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
index 37ec3168256..ec7c20f550e 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java
@@ -91,7 +91,7 @@ public class TestComplexPhraseQuery extends LuceneTestCase {

     Query q = qp.parse(qString);

-    HashSet<String> expecteds = new HashSet<String>();
+    HashSet<String> expecteds = new HashSet<>();
     String[] vals = expectedVals.split(",");
     for (int i = 0; i < vals.length; i++) {
       if (vals[i].length() > 0)
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java
index 91cc1aeeb9a..a758a6ff2a8 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java
@@ -431,7 +431,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
     final String hourField = "hour";
     PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer(random()));

-    Map<CharSequence, DateTools.Resolution> fieldMap = new HashMap<CharSequence, DateTools.Resolution>();
+    Map<CharSequence, DateTools.Resolution> fieldMap = new HashMap<>();
     // set a field specific date resolution
     fieldMap.put(monthField, DateTools.Resolution.MONTH);
     qp.setDateResolution(fieldMap);
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
index db15646ebfb..debb484aef0 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java
@@ -145,7 +145,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
   }

   public void testBoostsSimple() throws Exception {
-    Map<String, Float> boosts = new HashMap<String, Float>();
+    Map<String, Float> boosts = new HashMap<>();
     boosts.put("b", Float.valueOf(5));
     boosts.put("t", Float.valueOf(10));
     String[] fields = { "b", "t" };
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java
index a209ce22b46..7dc8aebc7e9 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java
@@ -103,7 +103,7 @@ public class TestNumericQueryParser extends LuceneTestCase {

     qp = new StandardQueryParser(ANALYZER);

-    final HashMap<String,Number> randomNumberMap = new HashMap<String,Number>();
+    final HashMap<String,Number> randomNumberMap = new HashMap<>();

     SimpleDateFormat dateFormat;
     long randomDate;
@@ -194,8 +194,8 @@ public class TestNumericQueryParser extends LuceneTestCase {
         .setMergePolicy(newLogMergePolicy()));

     Document doc = new Document();
-    HashMap<String,NumericConfig> numericConfigMap = new HashMap<String,NumericConfig>();
-    HashMap<String,Field> numericFieldMap = new HashMap<String,Field>();
+    HashMap<String,NumericConfig> numericConfigMap = new HashMap<>();
+    HashMap<String,Field> numericFieldMap = new HashMap<>();
     qp.setNumericConfigMap(numericConfigMap);

     for (NumericType type : NumericType.values()) {
diff --git a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
index 177e6e1efa6..ab9d1dd54c8 100644
--- a/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
+++ b/lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
@@ -741,7 +741,7 @@ public class TestQPHelper extends LuceneTestCase {
     final String hourField = "hour";
     StandardQueryParser qp = new StandardQueryParser();

-    Map<CharSequence, DateTools.Resolution> dateRes = new HashMap<CharSequence, DateTools.Resolution>();
+    Map<CharSequence, DateTools.Resolution> dateRes = new HashMap<>();

     // set a field specific date resolution
     dateRes.put(monthField, DateTools.Resolution.MONTH);
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java
index 125a66c30fe..dcecf9fd2ae 100644
--- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexAndTaxonomyRevision.java
@@ -116,7 +116,7 @@ public class IndexAndTaxonomyRevision implements Revision {

   /** Returns a singleton map of the revision files from the given {@link IndexCommit}. */
   public static Map<String,List<RevisionFile>> revisionFiles(IndexCommit indexCommit, IndexCommit taxoCommit) throws IOException {
-    HashMap<String,List<RevisionFile>> files = new HashMap<String,List<RevisionFile>>();
+    HashMap<String,List<RevisionFile>> files = new HashMap<>();
     files.put(INDEX_SOURCE, IndexRevision.revisionFiles(indexCommit).values().iterator().next());
     files.put(TAXONOMY_SOURCE, IndexRevision.revisionFiles(taxoCommit).values().iterator().next());
     return files;
diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
index 325c96d0e60..299a6661488 100644
--- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
+++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexReplicationHandler.java
@@ -150,7 +150,7 @@ public class IndexReplicationHandler implements ReplicationHandler {
     // if there were any IO errors reading the expected commit point (i.e.
     // segments files mismatch), then ignore that commit either.
if (commit != null && commit.getSegmentsFileName().equals(segmentsFile)) { - Set commitFiles = new HashSet(); + Set commitFiles = new HashSet<>(); commitFiles.addAll(commit.getFileNames()); commitFiles.add(IndexFileNames.SEGMENTS_GEN); Matcher matcher = IndexFileNames.CODEC_FILE_PATTERN.matcher(""); diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java index d135a3ddb1e..c43d331cb79 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/IndexRevision.java @@ -69,7 +69,7 @@ public class IndexRevision implements Revision { /** Returns a singleton map of the revision files from the given {@link IndexCommit}. */ public static Map> revisionFiles(IndexCommit commit) throws IOException { Collection commitFiles = commit.getFileNames(); - List revisionFiles = new ArrayList(commitFiles.size()); + List revisionFiles = new ArrayList<>(commitFiles.size()); String segmentsFile = commit.getSegmentsFileName(); Directory dir = commit.getDirectory(); diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java b/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java index 4ab746c11f0..8dd9087b46a 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/LocalReplicator.java @@ -108,11 +108,11 @@ public class LocalReplicator implements Replicator { private volatile boolean closed = false; private final AtomicInteger sessionToken = new AtomicInteger(0); - private final Map sessions = new HashMap(); + private final Map sessions = new HashMap<>(); private void checkExpiredSessions() throws IOException { // make a "to-delete" list so we don't risk deleting from the map while iterating it - final ArrayList toExpire = new ArrayList(); + final ArrayList toExpire = new ArrayList<>(); for (ReplicationSession token : sessions.values()) { if (token.isExpired(expirationThresholdMilllis)) { toExpire.add(token); diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java b/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java index 93973514982..b7478535c6a 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/ReplicationClient.java @@ -188,8 +188,8 @@ public class ReplicationClient implements Closeable { private void doUpdate() throws IOException { SessionToken session = null; - final Map sourceDirectory = new HashMap(); - final Map> copiedFiles = new HashMap>(); + final Map sourceDirectory = new HashMap<>(); + final Map> copiedFiles = new HashMap<>(); boolean notify = false; try { final String version = handler.currentVersion(); @@ -209,7 +209,7 @@ public class ReplicationClient implements Closeable { String source = e.getKey(); Directory dir = factory.getDirectory(session.id, source); sourceDirectory.put(source, dir); - List cpFiles = new ArrayList(); + List cpFiles = new ArrayList<>(); copiedFiles.put(source, cpFiles); for (RevisionFile file : e.getValue()) { if (closed) { @@ -298,16 +298,16 @@ public class ReplicationClient implements Closeable { return newRevisionFiles; } - Map> requiredFiles = new HashMap>(); + Map> requiredFiles = new HashMap<>(); for (Entry> e : handlerRevisionFiles.entrySet()) { // put the handler 
files in a Set, for faster contains() checks later - Set handlerFiles = new HashSet(); + Set handlerFiles = new HashSet<>(); for (RevisionFile file : e.getValue()) { handlerFiles.add(file.fileName); } // make sure to preserve revisionFiles order - ArrayList res = new ArrayList(); + ArrayList res = new ArrayList<>(); String source = e.getKey(); assert newRevisionFiles.containsKey(source) : "source not found in newRevisionFiles: " + newRevisionFiles; for (RevisionFile file : newRevisionFiles.get(source)) { diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java index 90b6e41f9a9..955736e937e 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/SessionToken.java @@ -60,12 +60,12 @@ public final class SessionToken { public SessionToken(DataInput in) throws IOException { this.id = in.readUTF(); this.version = in.readUTF(); - this.sourceFiles = new HashMap>(); + this.sourceFiles = new HashMap<>(); int numSources = in.readInt(); while (numSources > 0) { String source = in.readUTF(); int numFiles = in.readInt(); - List files = new ArrayList(numFiles); + List files = new ArrayList<>(numFiles); for (int i = 0; i < numFiles; i++) { String fileName = in.readUTF(); RevisionFile file = new RevisionFile(fileName); diff --git a/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java b/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java index 4f62490bfc5..3815f8d218e 100644 --- a/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java +++ b/lucene/replicator/src/java/org/apache/lucene/replicator/http/ReplicationService.java @@ -106,7 +106,7 @@ public class ReplicationService { // than using String.split() since the latter may return empty elements in // the array StringTokenizer stok = new StringTokenizer(path.substring(startIdx), "/"); - ArrayList elements = new ArrayList(); + ArrayList elements = new ArrayList<>(); while (stok.hasMoreTokens()) { elements.add(stok.nextToken()); } diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java index 6d9f5a3f012..845ac62dc32 100644 --- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java +++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java @@ -60,7 +60,7 @@ public class FuzzyLikeThisQuery extends Query // provided to TermQuery, so that the general idea is agnostic to any scoring system... 
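
The ReplicationService hunk above keeps a comment explaining why the path is tokenized with StringTokenizer rather than String.split(): split can return empty elements. A small sketch of that difference (the path literal is made up for illustration):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.StringTokenizer;

    public class SplitVsTokenizer {
      public static void main(String[] args) {
        String path = "/replicate//session1/"; // leading, doubled and trailing '/'
        // String.split keeps leading and interior empty strings:
        System.out.println(Arrays.asList(path.split("/"))); // [, replicate, , session1]
        // StringTokenizer never returns empty tokens:
        StringTokenizer stok = new StringTokenizer(path, "/");
        List<String> elements = new ArrayList<>(); // diamond, as in the patch
        while (stok.hasMoreTokens()) {
          elements.add(stok.nextToken());
        }
        System.out.println(elements); // [replicate, session1]
      }
    }
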
static TFIDFSimilarity sim=new DefaultSimilarity(); Query rewrittenQuery=null; - ArrayList fieldVals=new ArrayList(); + ArrayList fieldVals=new ArrayList<>(); Analyzer analyzer; ScoreTermQueue q; @@ -201,7 +201,7 @@ public class FuzzyLikeThisQuery extends Query CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); int corpusNumDocs = reader.numDocs(); - HashSet processedTerms = new HashSet(); + HashSet processedTerms = new HashSet<>(); ts.reset(); while (ts.incrementToken()) { String term = termAtt.toString(); @@ -277,7 +277,7 @@ public class FuzzyLikeThisQuery extends Query //create BooleanQueries to hold the variants for each token/field pair and ensure it // has no coord factor //Step 1: sort the termqueries by term/field - HashMap> variantQueries=new HashMap>(); + HashMap> variantQueries=new HashMap<>(); int size = q.size(); for(int i = 0; i < size; i++) { @@ -285,7 +285,7 @@ public class FuzzyLikeThisQuery extends Query ArrayList l= variantQueries.get(st.fuzziedSourceTerm); if(l==null) { - l=new ArrayList(); + l=new ArrayList<>(); variantQueries.put(st.fuzziedSourceTerm,l); } l.add(st); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java index 260e85bb7f8..aa8cb87345d 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java @@ -83,7 +83,7 @@ public class DuplicateFilterTest extends LuceneTestCase { public void testDefaultFilter() throws Throwable { DuplicateFilter df = new DuplicateFilter(KEY_FIELD); - HashSet results = new HashSet(); + HashSet results = new HashSet<>(); ScoreDoc[] hits = searcher.search(tq, df, 1000).scoreDocs; for (ScoreDoc hit : hits) { @@ -95,7 +95,7 @@ public class DuplicateFilterTest extends LuceneTestCase { } public void testNoFilter() throws Throwable { - HashSet results = new HashSet(); + HashSet results = new HashSet<>(); ScoreDoc[] hits = searcher.search(tq, null, 1000).scoreDocs; assertTrue("Default searching should have found some matches", hits.length > 0); boolean dupsFound = false; @@ -113,7 +113,7 @@ public class DuplicateFilterTest extends LuceneTestCase { public void testFastFilter() throws Throwable { DuplicateFilter df = new DuplicateFilter(KEY_FIELD); df.setProcessingMode(DuplicateFilter.ProcessingMode.PM_FAST_INVALIDATION); - HashSet results = new HashSet(); + HashSet results = new HashSet<>(); ScoreDoc[] hits = searcher.search(tq, df, 1000).scoreDocs; assertTrue("Filtered searching should have found some matches", hits.length > 0); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java index 2b7070a41b0..edfc963b18c 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java @@ -81,7 +81,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer); flt.addTerms("smith", "name", 0.3f, 1); Query q = flt.rewrite(searcher.getIndexReader()); - HashSet queryTerms = new HashSet(); + HashSet queryTerms = new HashSet<>(); q.extractTerms(queryTerms); assertTrue("Should have variant smythe", queryTerms.contains(new Term("name", "smythe"))); assertTrue("Should have 
variant smith", queryTerms.contains(new Term("name", "smith"))); @@ -98,7 +98,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer); flt.addTerms("jonathin smoth", "name", 0.3f, 1); Query q = flt.rewrite(searcher.getIndexReader()); - HashSet queryTerms = new HashSet(); + HashSet queryTerms = new HashSet<>(); q.extractTerms(queryTerms); assertTrue("Should have variant jonathan", queryTerms.contains(new Term("name", "jonathan"))); assertTrue("Should have variant smith", queryTerms.contains(new Term("name", "smith"))); @@ -116,7 +116,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { flt.addTerms("jonathin smoth", "this field does not exist", 0.3f, 1); // don't fail here just because the field doesn't exits Query q = flt.rewrite(searcher.getIndexReader()); - HashSet queryTerms = new HashSet(); + HashSet queryTerms = new HashSet<>(); q.extractTerms(queryTerms); assertTrue("Should have variant jonathan", queryTerms.contains(new Term("name", "jonathan"))); assertTrue("Should have variant smith", queryTerms.contains(new Term("name", "smith"))); @@ -133,7 +133,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer); flt.addTerms("fernando smith", "name", 0.3f, 1); Query q = flt.rewrite(searcher.getIndexReader()); - HashSet queryTerms = new HashSet(); + HashSet queryTerms = new HashSet<>(); q.extractTerms(queryTerms); assertTrue("Should have variant smith", queryTerms.contains(new Term("name", "smith"))); TopDocs topDocs = searcher.search(flt, 1); diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java index 7ae01e27ddc..a2e517bf568 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java @@ -73,8 +73,8 @@ public class TestRegexQuery extends LuceneTestCase { } private int spanRegexQueryNrHits(String regex1, String regex2, int slop, boolean ordered) throws Exception { - SpanQuery srq1 = new SpanMultiTermQueryWrapper(new RegexQuery(newTerm(regex1))); - SpanQuery srq2 = new SpanMultiTermQueryWrapper(new RegexQuery(newTerm(regex2))); + SpanQuery srq1 = new SpanMultiTermQueryWrapper<>(new RegexQuery(newTerm(regex1))); + SpanQuery srq2 = new SpanMultiTermQueryWrapper<>(new RegexQuery(newTerm(regex2))); SpanNearQuery query = new SpanNearQuery( new SpanQuery[]{srq1, srq2}, slop, ordered); return searcher.search(query, null, 1000).totalHits; diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java index 36e3a0c9b45..0680686ace2 100644 --- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java +++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java @@ -70,7 +70,7 @@ public class TestSpanRegexQuery extends LuceneTestCase { IndexReader reader = DirectoryReader.open(directory); IndexSearcher searcher = newSearcher(reader); - SpanQuery srq = new SpanMultiTermQueryWrapper(new RegexQuery(new Term("field", "aut.*"))); + SpanQuery srq = new SpanMultiTermQueryWrapper<>(new RegexQuery(new Term("field", "aut.*"))); SpanFirstQuery sfq = new SpanFirstQuery(srq, 1); // SpanNearQuery query = new 
SpanNearQuery(new SpanQuery[] {srq, stq}, 6, // true); diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java index 297889c7874..31b9a855742 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeStrategy.java @@ -77,7 +77,7 @@ import java.util.concurrent.ConcurrentHashMap; */ public abstract class PrefixTreeStrategy extends SpatialStrategy { protected final SpatialPrefixTree grid; - private final Map provider = new ConcurrentHashMap(); + private final Map provider = new ConcurrentHashMap<>(); protected final boolean simplifyIndexedCells; protected int defaultFieldValuesArrayLen = 2; protected double distErrPct = SpatialArgs.DEFAULT_DISTERRPCT;// [ 0 TO 0.5 ] diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java index b5bcb3e9302..592f815ae6a 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/Cell.java @@ -174,7 +174,7 @@ public abstract class Cell implements Comparable { } //TODO change API to return a filtering iterator - List copy = new ArrayList(cells.size()); + List copy = new ArrayList<>(cells.size()); for (Cell cell : cells) { SpatialRelation rel = cell.getShape().relate(shapeFilter); if (rel == SpatialRelation.DISJOINT) diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java index 7085213c9db..5f2ca6d155c 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/GeohashPrefixTree.java @@ -111,7 +111,7 @@ public class GeohashPrefixTree extends SpatialPrefixTree { @Override public Collection getSubCells() { String[] hashes = GeohashUtils.getSubGeohashes(getGeohash());//sorted - List cells = new ArrayList(hashes.length); + List cells = new ArrayList<>(hashes.length); for (String hash : hashes) { cells.add(new GhCell(hash)); } diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java index 6330605a2e0..d2e16a18f95 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/QuadPrefixTree.java @@ -141,7 +141,7 @@ public class QuadPrefixTree extends SpatialPrefixTree { @Override public Cell getCell(Point p, int level) { - List cells = new ArrayList(1); + List cells = new ArrayList<>(1); build(xmid, ymid, 0, cells, new StringBuilder(), ctx.makePoint(p.getX(),p.getY()), level); return cells.get(0);//note cells could be longer if p on edge } @@ -240,7 +240,7 @@ public class QuadPrefixTree extends SpatialPrefixTree { @Override public Collection getSubCells() { - List cells = new ArrayList(4); + List cells = new ArrayList<>(4); cells.add(new QuadCell(getTokenString()+"A")); cells.add(new QuadCell(getTokenString()+"B")); cells.add(new QuadCell(getTokenString()+"C")); diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java 
b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java index 1b8261a3017..1fbd3965468 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/tree/SpatialPrefixTree.java @@ -164,7 +164,7 @@ public abstract class SpatialPrefixTree { if (shape instanceof Point) { return getCells((Point) shape, detailLevel, inclParents); } - List cells = new ArrayList(inclParents ? 4096 : 2048); + List cells = new ArrayList<>(inclParents ? 4096 : 2048); recursiveGetCells(getWorldCell(), shape, detailLevel, inclParents, simplify, cells); return cells; } @@ -226,7 +226,7 @@ public abstract class SpatialPrefixTree { String endToken = cell.getTokenString(); assert endToken.length() == detailLevel; - List cells = new ArrayList(detailLevel); + List cells = new ArrayList<>(detailLevel); for (int i = 1; i < detailLevel; i++) { cells.add(getCell(endToken.substring(0, i))); } @@ -238,7 +238,7 @@ public abstract class SpatialPrefixTree { * Will add the trailing leaf byte for leaves. This isn't particularly efficient. */ public static List cellsToTokenStrings(Collection cells) { - List tokens = new ArrayList((cells.size())); + List tokens = new ArrayList<>((cells.size())); for (Cell cell : cells) { final String token = cell.getTokenString(); if (cell.isLeaf()) { diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java b/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java index 4ebf2e8938f..363ee133062 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialArgsParser.java @@ -129,7 +129,7 @@ public class SpatialArgsParser { /** Parses "a=b c=d f" (whitespace separated) into name-value pairs. If there * is no '=' as in 'f' above then it's short for f=f. 
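
That parseMap contract is simple enough to sketch standalone. The following is not the Lucene implementation itself, only a sketch of the documented behavior, using the same diamond-inferred HashMap and StringTokenizer the patched method uses:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.StringTokenizer;

    public class ParseMapSketch {
      // Parses "a=b c=d f" into name-value pairs; a bare token maps to itself.
      static Map<String, String> parseMap(String body) {
        Map<String, String> map = new HashMap<>();
        StringTokenizer st = new StringTokenizer(body, " \n\t");
        while (st.hasMoreTokens()) {
          String a = st.nextToken();
          int idx = a.indexOf('=');
          if (idx > 0) {
            map.put(a.substring(0, idx), a.substring(idx + 1));
          } else {
            map.put(a, a); // "f" is short for "f=f"
          }
        }
        return map;
      }

      public static void main(String[] args) {
        // Prints the pairs a=b, c=d, f=f (HashMap iteration order is unspecified).
        System.out.println(parseMap("a=b c=d f"));
      }
    }
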
*/ protected static Map parseMap(String body) { - Map map = new HashMap(); + Map map = new HashMap<>(); StringTokenizer st = new StringTokenizer(body, " \n\t"); while (st.hasMoreTokens()) { String a = st.nextToken(); diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialOperation.java b/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialOperation.java index 7e772a9d455..7166649e6a8 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialOperation.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/query/SpatialOperation.java @@ -40,8 +40,8 @@ import java.util.Map; */ public abstract class SpatialOperation implements Serializable { // Private registry - private static final Map registry = new HashMap(); - private static final List list = new ArrayList(); + private static final Map registry = new HashMap<>(); + private static final List list = new ArrayList<>(); // Geometry Operations diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java b/lucene/spatial/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java index fa9fb857bb9..dd2b4118d24 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/util/CachingDoubleValueSource.java @@ -38,7 +38,7 @@ public class CachingDoubleValueSource extends ValueSource { public CachingDoubleValueSource( ValueSource source ) { this.source = source; - cache = new HashMap(); + cache = new HashMap<>(); } @Override diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java index 92c30b6e548..07e1ab19214 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCache.java @@ -44,7 +44,7 @@ public class ShapeFieldCache { public void add( int docid, T s ) { List list = cache[docid]; if( list == null ) { - list = cache[docid] = new ArrayList(defaultLength); + list = cache[docid] = new ArrayList<>(defaultLength); } list.add( s ); } diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java index c78ddd925c6..9458ff15ab4 100644 --- a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java +++ b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java @@ -39,7 +39,7 @@ public abstract class ShapeFieldCacheProvider { private Logger log = Logger.getLogger(getClass().getName()); // it may be a List or T - WeakHashMap> sidx = new WeakHashMap>(); + WeakHashMap> sidx = new WeakHashMap<>(); protected final int defaultSize; protected final String shapeField; @@ -59,7 +59,7 @@ public abstract class ShapeFieldCacheProvider { long startTime = System.currentTimeMillis(); log.fine("Building Cache [" + reader.maxDoc() + "]"); - idx = new ShapeFieldCache(reader.maxDoc(),defaultSize); + idx = new ShapeFieldCache<>(reader.maxDoc(),defaultSize); int count = 0; DocsEnum docs = null; Terms terms = reader.terms(shapeField); diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java index 30bad06b5f9..9f52dc2e7bd 100644 --- 
a/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/DistanceStrategyTest.java @@ -42,7 +42,7 @@ public class DistanceStrategyTest extends StrategyTestCase { @ParametersFactory public static Iterable parameters() { - List ctorArgs = new ArrayList(); + List ctorArgs = new ArrayList<>(); SpatialContext ctx = SpatialContext.GEO; SpatialPrefixTree grid; diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java b/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java index 6619d17a92f..cc3fb022cf0 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/PortedSolr3Test.java @@ -48,7 +48,7 @@ public class PortedSolr3Test extends StrategyTestCase { @ParametersFactory public static Iterable parameters() { - List ctorArgs = new ArrayList(); + List ctorArgs = new ArrayList<>(); SpatialContext ctx = SpatialContext.GEO; SpatialPrefixTree grid; @@ -176,7 +176,7 @@ public class PortedSolr3Test extends StrategyTestCase { SearchResults results = executeQuery(query, 100); assertEquals(""+shape,assertNumFound,results.numFound); if (assertIds != null) { - Set resultIds = new HashSet(); + Set resultIds = new HashSet<>(); for (SearchResult result : results.results) { resultIds.add(Integer.valueOf(result.document.get("id"))); } diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java index 913e88f60d6..3198e1b8d18 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/QueryEqualsHashCodeTest.java @@ -45,7 +45,7 @@ public class QueryEqualsHashCodeTest extends LuceneTestCase { final SpatialPrefixTree gridQuad = new QuadPrefixTree(ctx,10); final SpatialPrefixTree gridGeohash = new GeohashPrefixTree(ctx,10); - Collection strategies = new ArrayList(); + Collection strategies = new ArrayList<>(); strategies.add(new RecursivePrefixTreeStrategy(gridGeohash, "recursive_geohash")); strategies.add(new TermQueryPrefixTreeStrategy(gridQuad, "termquery_quad")); strategies.add(new PointVectorStrategy(ctx, "pointvector")); diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java index 51e4bc6b1c3..603f05e1d3a 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestCase.java @@ -119,7 +119,7 @@ public abstract class SpatialTestCase extends LuceneTestCase { try { TopDocs topDocs = indexSearcher.search(query, numDocs); - List results = new ArrayList(); + List results = new ArrayList<>(); for (ScoreDoc scoreDoc : topDocs.scoreDocs) { results.add(new SearchResult(scoreDoc.score, indexSearcher.doc(scoreDoc.doc))); } diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestData.java b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestData.java index 8087c92d4fd..eeaea3081c9 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestData.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestData.java @@ -42,7 +42,7 @@ public class SpatialTestData { * The stream is closed. 
*/ public static Iterator getTestData(InputStream in, SpatialContext ctx) throws IOException { - List results = new ArrayList(); + List results = new ArrayList<>(); BufferedReader bufInput = new BufferedReader(new InputStreamReader(in,"UTF-8")); try { String line; diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestQuery.java b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestQuery.java index 81705ec6e3e..4ccf469c022 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestQuery.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/SpatialTestQuery.java @@ -38,7 +38,7 @@ public class SpatialTestQuery { public String line; public int lineNumber = -1; public SpatialArgs args; - public List ids = new ArrayList(); + public List ids = new ArrayList<>(); /** * Get Test Queries. The InputStream is closed. @@ -49,7 +49,7 @@ public class SpatialTestQuery { final String name, final InputStream in ) throws IOException { - List results = new ArrayList(); + List results = new ArrayList<>(); BufferedReader bufInput = new BufferedReader(new InputStreamReader(in,"UTF-8")); try { diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java b/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java index 9cdab4e6d9d..52b0882fe86 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/StrategyTestCase.java @@ -89,7 +89,7 @@ public abstract class StrategyTestCase extends SpatialTestCase { } protected List getDocuments(Iterator sampleData) { - List documents = new ArrayList(); + List documents = new ArrayList<>(); while (sampleData.hasNext()) { SpatialTestData data = sampleData.next(); Document document = new Document(); @@ -161,7 +161,7 @@ public abstract class StrategyTestCase extends SpatialTestCase { } else { // We are looking at how the results overlap if (concern.resultsAreSuperset) { - Set found = new HashSet(); + Set found = new HashSet<>(); for (SearchResult r : got.results) { found.add(r.document.get("id")); } @@ -171,7 +171,7 @@ public abstract class StrategyTestCase extends SpatialTestCase { } } } else { - List found = new ArrayList(); + List found = new ArrayList<>(); for (SearchResult r : got.results) { found.add(r.document.get("id")); } @@ -237,7 +237,7 @@ public abstract class StrategyTestCase extends SpatialTestCase { protected void assertOperation(Map indexedDocs, SpatialOperation operation, Shape queryShape) { //Generate truth via brute force - Set expectedIds = new HashSet(); + Set expectedIds = new HashSet<>(); for (Map.Entry stringShapeEntry : indexedDocs.entrySet()) { if (operation.evaluate(stringShapeEntry.getValue(), queryShape)) expectedIds.add(stringShapeEntry.getKey()); @@ -245,7 +245,7 @@ public abstract class StrategyTestCase extends SpatialTestCase { SpatialTestQuery testQuery = new SpatialTestQuery(); testQuery.args = new SpatialArgs(operation, queryShape); - testQuery.ids = new ArrayList(expectedIds); + testQuery.ids = new ArrayList<>(expectedIds); runTestQuery(SpatialMatchConcern.FILTER, testQuery); } diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/TestTestFramework.java b/lucene/spatial/src/test/org/apache/lucene/spatial/TestTestFramework.java index b33a24d5d3b..82c5aa86505 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/TestTestFramework.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/TestTestFramework.java @@ -45,7 +45,7 @@ public class 
TestTestFramework extends LuceneTestCase { SpatialContext ctx = SpatialContext.GEO; Iterator iter = SpatialTestQuery.getTestQueries( new SpatialArgsParser(), ctx, name, in );//closes the InputStream - List tests = new ArrayList(); + List tests = new ArrayList<>(); while( iter.hasNext() ) { tests.add( iter.next() ); } diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java index 1eccf87c88c..6dbaa9b3832 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/JtsPolygonTest.java @@ -44,7 +44,7 @@ public class JtsPolygonTest extends StrategyTestCase { public JtsPolygonTest() { try { - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("spatialContextFactory", "com.spatial4j.core.context.jts.JtsSpatialContextFactory"); ctx = SpatialContextFactory.makeSpatialContext(args, getClass().getClassLoader()); diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java index 7dcdac40636..070c63dfa0e 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/SpatialOpRecursivePrefixTreeTest.java @@ -199,8 +199,8 @@ public class SpatialOpRecursivePrefixTreeTest extends StrategyTestCase { final boolean biasContains = (operation == SpatialOperation.Contains); - Map indexedShapes = new LinkedHashMap(); - Map indexedShapesGS = new LinkedHashMap();//grid snapped + Map indexedShapes = new LinkedHashMap<>(); + Map indexedShapesGS = new LinkedHashMap<>();//grid snapped final int numIndexedShapes = randomIntBetween(1, 6); for (int i = 0; i < numIndexedShapes; i++) { String id = "" + i; @@ -257,8 +257,8 @@ public class SpatialOpRecursivePrefixTreeTest extends StrategyTestCase { // We ensure true-positive matches (if the predicate on the raw shapes match // then the search should find those same matches). // approximations, false-positive matches - Set expectedIds = new LinkedHashSet();//true-positives - Set secondaryIds = new LinkedHashSet();//false-positives (unless disjoint) + Set expectedIds = new LinkedHashSet<>();//true-positives + Set secondaryIds = new LinkedHashSet<>();//false-positives (unless disjoint) for (Map.Entry entry : indexedShapes.entrySet()) { String id = entry.getKey(); Shape indexedShapeCompare = entry.getValue(); @@ -297,7 +297,7 @@ public class SpatialOpRecursivePrefixTreeTest extends StrategyTestCase { SpatialArgs args = new SpatialArgs(operation, queryShape); Query query = strategy.makeQuery(args); SearchResults got = executeQuery(query, 100); - Set remainingExpectedIds = new LinkedHashSet(expectedIds); + Set remainingExpectedIds = new LinkedHashSet<>(expectedIds); for (SearchResult result : got.results) { String id = result.getId(); boolean removed = remainingExpectedIds.remove(id); @@ -334,11 +334,11 @@ public class SpatialOpRecursivePrefixTreeTest extends StrategyTestCase { List cells = grid.getCells(snapMe, detailLevel, false, true); //calc bounding box of cells. 
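
Several of the spatial hunks above (new ArrayList<>(cells.size()), new ArrayList<>(4), new ArrayList<>(inclParents ? 4096 : 2048)) show that the diamond works unchanged when the constructor takes arguments such as a capacity hint. A small sketch; QuadCell here is an illustrative stand-in for the tree cell types, not the Lucene class:

    import java.util.ArrayList;
    import java.util.List;

    public class DiamondWithCapacity {
      // Illustrative stand-in for a prefix-tree cell identified by a token.
      static class QuadCell {
        final String token;
        QuadCell(String token) { this.token = token; }
        @Override public String toString() { return token; }
      }

      static List<QuadCell> subCells(String token) {
        // The capacity hint is an ordinary constructor argument; it does not
        // interfere with inference of the element type:
        List<QuadCell> cells = new ArrayList<>(4);
        for (char c : new char[] {'A', 'B', 'C', 'D'}) {
          cells.add(new QuadCell(token + c));
        }
        return cells;
      }

      public static void main(String[] args) {
        System.out.println(subCells("AB")); // [ABA, ABB, ABC, ABD]
      }
    }
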
- List cellShapes = new ArrayList(cells.size()); + List cellShapes = new ArrayList<>(cells.size()); for (Cell cell : cells) { cellShapes.add(cell.getShape()); } - return new ShapeCollection(cellShapes, ctx).getBoundingBox(); + return new ShapeCollection<>(cellShapes, ctx).getBoundingBox(); } /** diff --git a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java index 4b52e3a9438..4bd7f778377 100644 --- a/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java +++ b/lucene/spatial/src/test/org/apache/lucene/spatial/prefix/TestRecursivePrefixTreeStrategy.java @@ -123,7 +123,7 @@ public class TestRecursivePrefixTreeStrategy extends StrategyTestCase { for (double radiusDeg : radiusDegs) { //3. Index random points in this cluster circle deleteAll(); - List points = new ArrayList(); + List points = new ArrayList<>(); for(int i = 0; i < 20; i++) { //Note that this will not result in randomly distributed points in the // circle, they will be concentrated towards the center a little. But @@ -184,7 +184,7 @@ public class TestRecursivePrefixTreeStrategy extends StrategyTestCase { SearchResults got = executeQuery(strategy.makeQuery(args), 100); assertEquals("" + args, assertNumFound, got.numFound); if (assertIds != null) { - Set gotIds = new HashSet(); + Set gotIds = new HashSet<>(); for (SearchResult result : got.results) { gotIds.add(Integer.valueOf(result.document.get("id"))); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java index 4df757b05b8..adb453d51cd 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java @@ -354,7 +354,7 @@ public class DirectSpellChecker { // try ed=1 first, in case we get lucky terms = suggestSimilar(term, inspections, ir, docfreq, 1, accuracy, spare); if (maxEdits > 1 && terms.size() < inspections) { - HashSet moreTerms = new HashSet(); + HashSet moreTerms = new HashSet<>(); moreTerms.addAll(terms); moreTerms.addAll(suggestSimilar(term, inspections, ir, docfreq, maxEdits, accuracy, spare)); terms = moreTerms; @@ -409,7 +409,7 @@ public class DirectSpellChecker { return Collections.emptyList(); } FuzzyTermsEnum e = new FuzzyTermsEnum(terms, atts, term, editDistance, Math.max(minPrefix, editDistance-1), true); - final PriorityQueue stQueue = new PriorityQueue(); + final PriorityQueue stQueue = new PriorityQueue<>(); BytesRef queryTerm = new BytesRef(term.text()); BytesRef candidateTerm; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java index e61a2879114..27e720e1094 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java @@ -498,7 +498,7 @@ public class SpellChecker implements java.io.Closeable { final Directory dir = this.spellIndex; final IndexWriter writer = new IndexWriter(dir, config); IndexSearcher indexSearcher = obtainSearcher(); - final List termsEnums = new ArrayList(); + final List termsEnums = new ArrayList<>(); final IndexReader reader = searcher.getIndexReader(); if (reader.maxDoc() > 0) { diff --git 
a/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java index 30f26c95c39..e4309a77d37 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/WordBreakSpellChecker.java @@ -106,7 +106,7 @@ public class WordBreakSpellChecker { int queueInitialCapacity = maxSuggestions > 10 ? 10 : maxSuggestions; Comparator queueComparator = sortMethod == BreakSuggestionSortMethod.NUM_CHANGES_THEN_MAX_FREQUENCY ? new LengthThenMaxFreqComparator() : new LengthThenSumFreqComparator(); - Queue suggestions = new PriorityQueue( + Queue suggestions = new PriorityQueue<>( queueInitialCapacity, queueComparator); int origFreq = ir.docFreq(term); @@ -176,7 +176,7 @@ public class WordBreakSpellChecker { int queueInitialCapacity = maxSuggestions > 10 ? 10 : maxSuggestions; Comparator queueComparator = new CombinationsThenFreqComparator(); - Queue suggestions = new PriorityQueue( + Queue suggestions = new PriorityQueue<>( queueInitialCapacity, queueComparator); int thisTimeEvaluations = 0; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java index df3aa04b859..9333f7aac3e 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggester.java @@ -375,7 +375,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { } BooleanQuery query; - Set matchedTokens = new HashSet(); + Set matchedTokens = new HashSet<>(); String prefixToken = null; try (TokenStream ts = queryAnalyzer.tokenStream("", new StringReader(key.toString()))) { @@ -386,7 +386,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { String lastToken = null; query = new BooleanQuery(); int maxEndOffset = -1; - matchedTokens = new HashSet(); + matchedTokens = new HashSet<>(); while (ts.incrementToken()) { if (lastToken != null) { matchedTokens.add(lastToken); @@ -475,7 +475,7 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable { // This will just be null if app didn't pass payloads to build(): // TODO: maybe just stored fields? they compress... 
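
The WordBreakSpellChecker hunks above apply the diamond to a two-argument PriorityQueue constructor. One caveat worth noting: in Java 7 the diamond cannot be combined with an anonymous class (that only became legal in Java 9), which is why inline comparator declarations, and anonymous subclasses such as the TopNSearcher seen later in this patch, keep their explicit type arguments. A sketch with a stand-in suggestion type:

    import java.util.Comparator;
    import java.util.PriorityQueue;
    import java.util.Queue;

    public class DiamondPriorityQueue {
      // Illustrative stand-in for a spell-check suggestion.
      static class Suggestion {
        final String text;
        final int freq;
        Suggestion(String text, int freq) { this.text = text; this.freq = freq; }
      }

      public static void main(String[] args) {
        // new Comparator<>() {...} is a compile error in Java 7, so the
        // anonymous comparator stays fully typed:
        Comparator<Suggestion> byFreq = new Comparator<Suggestion>() {
          @Override public int compare(Suggestion a, Suggestion b) {
            return a.freq - b.freq;
          }
        };
        // The diamond itself is fine with a multi-argument constructor:
        Queue<Suggestion> suggestions = new PriorityQueue<>(10, byFreq);
        suggestions.add(new Suggestion("lucene", 42));
        suggestions.add(new Suggestion("search", 7));
        System.out.println(suggestions.peek().text); // search (lowest freq first)
      }
    }
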
BinaryDocValues payloadsDV = MultiDocValues.getBinaryValues(searcher.getIndexReader(), "payloads"); - List results = new ArrayList(); + List results = new ArrayList<>(); BytesRef scratch = new BytesRef(); for (int i=0;i=0;stateNumber--) { final State state = states[stateNumber]; - List newTransitions = new ArrayList(); + List newTransitions = new ArrayList<>(); for(Transition t : state.getTransitions()) { assert t.getMin() == t.getMax(); if (t.getMin() == TokenStreamToAutomaton.POS_SEP) { @@ -470,8 +470,8 @@ public class AnalyzingSuggester extends Lookup { reader = new OfflineSorter.ByteSequencesReader(tempSorted); - PairOutputs outputs = new PairOutputs(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()); - Builder> builder = new Builder>(FST.INPUT_TYPE.BYTE1, outputs); + PairOutputs outputs = new PairOutputs<>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()); + Builder> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); // Build FST: BytesRef previousAnalyzed = null; @@ -484,7 +484,7 @@ public class AnalyzingSuggester extends Lookup { // still index the highest-weight one). We clear // this when we see a new analyzed form, so it cannot // grow unbounded (at most 256 entries): - Set seenSurfaceForms = new HashSet(); + Set seenSurfaceForms = new HashSet<>(); int dedup = 0; while (reader.read(scratch)) { @@ -587,7 +587,7 @@ public class AnalyzingSuggester extends Lookup { @Override public boolean load(DataInput input) throws IOException { count = input.readVLong(); - this.fst = new FST>(input, new PairOutputs(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton())); + this.fst = new FST<>(input, new PairOutputs<>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton())); maxAnalyzedPathsForOneInput = input.readVInt(); hasPayloads = input.readByte() == 1; return true; @@ -674,9 +674,9 @@ public class AnalyzingSuggester extends Lookup { BytesReader bytesReader = fst.getBytesReader(); - FST.Arc> scratchArc = new FST.Arc>(); + FST.Arc> scratchArc = new FST.Arc<>(); - final List results = new ArrayList(); + final List results = new ArrayList<>(); List>> prefixPaths = FSTUtil.intersectPrefixPaths(convertAutomaton(lookupAutomaton), fst); @@ -694,7 +694,7 @@ public class AnalyzingSuggester extends Lookup { // Searcher just to find the single exact only // match, if present: Util.TopNSearcher> searcher; - searcher = new Util.TopNSearcher>(fst, count * maxSurfaceFormsPerAnalyzedForm, count * maxSurfaceFormsPerAnalyzedForm, weightComparator); + searcher = new Util.TopNSearcher<>(fst, count * maxSurfaceFormsPerAnalyzedForm, count * maxSurfaceFormsPerAnalyzedForm, weightComparator); // NOTE: we could almost get away with only using // the first start node. The only catch is if
The only catch is if @@ -742,7 +742,7 @@ public class AnalyzingSuggester extends Lookup { num - results.size(), num * maxAnalyzedPathsForOneInput, weightComparator) { - private final Set seen = new HashSet(); + private final Set seen = new HashSet<>(); @Override protected boolean acceptResult(IntsRef input, Pair output) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java index 3bb21c25a33..b9e886f4224 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FSTUtil.java @@ -70,13 +70,13 @@ public class FSTUtil { public static List> intersectPrefixPaths(Automaton a, FST fst) throws IOException { assert a.isDeterministic(); - final List> queue = new ArrayList>(); - final List> endNodes = new ArrayList>(); - queue.add(new Path(a.getInitialState(), fst + final List> queue = new ArrayList<>(); + final List> endNodes = new ArrayList<>(); + queue.add(new Path<>(a.getInitialState(), fst .getFirstArc(new FST.Arc()), fst.outputs.getNoOutput(), new IntsRef())); - final FST.Arc scratchArc = new FST.Arc(); + final FST.Arc scratchArc = new FST.Arc<>(); final FST.BytesReader fstReader = fst.getBytesReader(); while (queue.size() != 0) { @@ -100,7 +100,7 @@ public class FSTUtil { newInput.copyInts(currentInput); newInput.ints[currentInput.length] = t.getMin(); newInput.length = currentInput.length + 1; - queue.add(new Path(t.getDest(), new FST.Arc() + queue.add(new Path<>(t.getDest(), new FST.Arc() .copyFrom(nextArc), fst.outputs .add(path.output, nextArc.output), newInput)); } @@ -122,7 +122,7 @@ public class FSTUtil { newInput.copyInts(currentInput); newInput.ints[currentInput.length] = nextArc.label; newInput.length = currentInput.length + 1; - queue.add(new Path(t.getDest(), new FST.Arc() + queue.add(new Path<>(t.getDest(), new FST.Arc() .copyFrom(nextArc), fst.outputs .add(path.output, nextArc.output), newInput)); final int label = nextArc.label; // used in assert diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java index f425235e272..d2c2f61cad3 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java @@ -343,7 +343,7 @@ public class FreeTextSuggester extends Lookup { TermsEnum termsEnum = terms.iterator(null); Outputs outputs = PositiveIntOutputs.getSingleton(); - Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); IntsRef scratchInts = new IntsRef(); while (true) { @@ -424,7 +424,7 @@ public class FreeTextSuggester extends Lookup { } totTokens = input.readVLong(); - fst = new FST(input, PositiveIntOutputs.getSingleton()); + fst = new FST<>(input, PositiveIntOutputs.getSingleton()); return true; } @@ -527,7 +527,7 @@ public class FreeTextSuggester extends Lookup { lastTokens[0] = new BytesRef(); } - Arc arc = new Arc(); + Arc arc = new Arc<>(); BytesReader bytesReader = fst.getBytesReader(); @@ -535,12 +535,12 @@ public class FreeTextSuggester extends Lookup { // results, return that; else, fallback: double backoff = 1.0; - List results = new ArrayList(num); + List results = new ArrayList<>(num); // We only add a 
given suffix once, from the highest // order model that saw it; for subsequent lower order // models we skip it: - final Set seen = new HashSet(); + final Set seen = new HashSet<>(); for(int gram=grams-1;gram>=0;gram--) { BytesRef token = lastTokens[gram]; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java index eea10423442..51b8c18cbc5 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java @@ -73,7 +73,7 @@ public class FSTCompletion { * An empty result. Keep this an {@link ArrayList} to keep all the returned * lists of single type (monomorphic calls). */ - private static final ArrayList EMPTY_RESULT = new ArrayList(); + private static final ArrayList EMPTY_RESULT = new ArrayList<>(); /** * Finite state automaton encoding all the lookup terms. See class notes for @@ -137,12 +137,12 @@ public class FSTCompletion { @SuppressWarnings({"unchecked","rawtypes"}) private static Arc[] cacheRootArcs(FST automaton) { try { - List> rootArcs = new ArrayList>(); - Arc arc = automaton.getFirstArc(new Arc()); + List> rootArcs = new ArrayList<>(); + Arc arc = automaton.getFirstArc(new Arc<>()); FST.BytesReader fstReader = automaton.getBytesReader(); automaton.readFirstTargetArc(arc, arc, fstReader); while (true) { - rootArcs.add(new Arc().copyFrom(arc)); + rootArcs.add(new Arc<>().copyFrom(arc)); if (arc.isLast()) break; automaton.readNextArc(arc, fstReader); } @@ -172,7 +172,7 @@ public class FSTCompletion { int rootArcIndex, BytesRef utf8) { // Get the UTF-8 bytes representation of the input key. try { - final FST.Arc scratch = new FST.Arc(); + final FST.Arc scratch = new FST.Arc<>(); FST.BytesReader fstReader = automaton.getBytesReader(); for (; rootArcIndex < rootArcs.length; rootArcIndex++) { final FST.Arc rootArc = rootArcs[rootArcIndex]; @@ -261,12 +261,12 @@ public class FSTCompletion { // Don't overallocate the results buffers. This also serves the purpose of // allowing the user of this class to request all matches using Integer.MAX_VALUE as // the number of results. - final ArrayList res = new ArrayList(Math.min(10, num)); + final ArrayList res = new ArrayList<>(Math.min(10, num)); final BytesRef output = BytesRef.deepCopyOf(key); for (int i = 0; i < rootArcs.length; i++) { final FST.Arc rootArc = rootArcs[i]; - final FST.Arc arc = new FST.Arc().copyFrom(rootArc); + final FST.Arc arc = new FST.Arc<>().copyFrom(rootArc); // Descend into the automaton using the key as prefix. if (descendWithPrefix(arc, key)) { @@ -370,7 +370,7 @@ public class FSTCompletion { if (res.size() >= num) return true; } else { int save = output.length; - if (collect(res, num, bucket, output, new Arc().copyFrom(arc))) { + if (collect(res, num, bucket, output, new Arc<>().copyFrom(arc))) { return true; } output.length = save; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java index 8029cfd973e..dbfb3ce10a3 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionBuilder.java @@ -237,7 +237,7 @@ public class FSTCompletionBuilder { // Build the automaton. 
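
The FST-related hunks (PairOutputs, Builder, FST.Arc) are where the old syntax was most verbose, since the type arguments are themselves generic. Those classes are Lucene-internal, so here is a JDK-only sketch of the same nesting depth; SimpleEntry plays the role of Lucene's Pair:

    import java.util.AbstractMap.SimpleEntry;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    public class DiamondDeepNesting {
      public static void main(String[] args) {
        // JDK 6 style required new ArrayList<SimpleEntry<Long, List<String>>>(),
        // spelling the nested type twice. With the diamond it appears once:
        List<SimpleEntry<Long, List<String>>> paths = new ArrayList<>();
        List<String> outputs = new ArrayList<>();
        outputs.add("suggestion");
        paths.add(new SimpleEntry<>(42L, outputs)); // diamond on the entry too
        for (Map.Entry<Long, List<String>> e : paths) {
          System.out.println(e.getKey() + " -> " + e.getValue()); // 42 -> [suggestion]
        }
      }
    }
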
final Outputs outputs = NoOutputs.getSingleton(); final Object empty = outputs.getNoOutput(); - final Builder builder = new Builder( + final Builder builder = new Builder<>( FST.INPUT_TYPE.BYTE1, 0, 0, true, true, shareMaxTailLength, outputs, null, false, PackedInts.DEFAULT, true, 15); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java index 0f410c69970..a24d316e8b6 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletionLookup.java @@ -251,7 +251,7 @@ public class FSTCompletionLookup extends Lookup { completions = normalCompletion.lookup(key, num); } - final ArrayList results = new ArrayList(completions.size()); + final ArrayList results = new ArrayList<>(completions.size()); CharsRef spare = new CharsRef(); for (Completion c : completions) { spare.grow(c.utf8.length); @@ -283,7 +283,7 @@ public class FSTCompletionLookup extends Lookup { @Override public synchronized boolean load(DataInput input) throws IOException { count = input.readVLong(); - this.higherWeightsCompletion = new FSTCompletion(new FST( + this.higherWeightsCompletion = new FSTCompletion(new FST<>( input, NoOutputs.getSingleton())); this.normalCompletion = new FSTCompletion( higherWeightsCompletion.getFST(), false, exactMatchFirst); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java index d654f182e48..be4213db475 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/WFSTCompletionLookup.java @@ -102,7 +102,7 @@ public class WFSTCompletionLookup extends Lookup { IntsRef scratchInts = new IntsRef(); BytesRef previous = null; PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, outputs); + Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs); while ((scratch = iter.next()) != null) { long cost = iter.weight(); @@ -134,7 +134,7 @@ public class WFSTCompletionLookup extends Lookup { @Override public boolean load(DataInput input) throws IOException { count = input.readVLong(); - this.fst = new FST(input, PositiveIntOutputs.getSingleton()); + this.fst = new FST<>(input, PositiveIntOutputs.getSingleton()); return true; } @@ -152,7 +152,7 @@ public class WFSTCompletionLookup extends Lookup { BytesRef scratch = new BytesRef(key); int prefixLength = scratch.length; - Arc arc = new Arc(); + Arc arc = new Arc<>(); // match the prefix portion exactly Long prefixOutput = null; @@ -161,10 +161,10 @@ public class WFSTCompletionLookup extends Lookup { } catch (IOException bogus) { throw new RuntimeException(bogus); } if (prefixOutput == null) { - return Collections.emptyList(); + return Collections.emptyList(); } - List results = new ArrayList(num); + List results = new ArrayList<>(num); CharsRef spare = new CharsRef(); if (exactFirst && arc.isFinal()) { spare.grow(scratch.length); @@ -225,7 +225,7 @@ public class WFSTCompletionLookup extends Lookup { if (fst == null) { return null; } - Arc arc = new Arc(); + Arc arc = new Arc<>(); Long result = null; try { result = lookupPrefix(new BytesRef(key), arc); diff --git 
a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java index 941df306aea..28cc39aa69f 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellLookup.java @@ -96,7 +96,7 @@ public class JaspellLookup extends Lookup { @Override public List lookup(CharSequence key, boolean onlyMorePopular, int num) { - List res = new ArrayList(); + List res = new ArrayList<>(); List list; int count = onlyMorePopular ? num * 2 : num; if (usePrefix) { diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java index 474f5dac1c5..2bffc11db35 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/jaspell/JaspellTernarySearchTrie.java @@ -682,7 +682,7 @@ public class JaspellTernarySearchTrie { *@return A List with the results */ public List matchPrefix(CharSequence prefix, int numReturnValues) { - Vector sortKeysResult = new Vector(); + Vector sortKeysResult = new Vector<>(); TSTNode startNode = getNode(prefix); if (startNode == null) { return sortKeysResult; diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java index d18840292ae..ae454b7ddd2 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTAutocomplete.java @@ -114,7 +114,7 @@ public class TSTAutocomplete { CharSequence s, int x) { TernaryTreeNode p = root; - ArrayList suggest = new ArrayList(); + ArrayList suggest = new ArrayList<>(); while (p != null) { if (s.charAt(x) < p.splitchar) { @@ -143,7 +143,7 @@ public class TSTAutocomplete { } p = p.eqKid; - Stack st = new Stack(); + Stack st = new Stack<>(); st.push(p); while (!st.empty()) { TernaryTreeNode top = st.peek(); diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java index f47c80875b7..4b4d61edbc3 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/tst/TSTLookup.java @@ -60,8 +60,8 @@ public class TSTLookup extends Lookup { // make sure it's sorted and the comparator uses UTF16 sort order iterator = new SortedInputIterator(iterator, BytesRef.getUTF8SortedAsUTF16Comparator()); count = 0; - ArrayList tokens = new ArrayList(); - ArrayList vals = new ArrayList(); + ArrayList tokens = new ArrayList<>(); + ArrayList vals = new ArrayList<>(); BytesRef spare; CharsRef charsSpare = new CharsRef(); while ((spare = iterator.next()) != null) { @@ -119,7 +119,7 @@ public class TSTLookup extends Lookup { @Override public List lookup(CharSequence key, boolean onlyMorePopular, int num) { List list = autocomplete.prefixCompletion(root, key, 0); - List res = new ArrayList(); + List res = new ArrayList<>(); if (list == null || list.size() == 0) { return res; } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java 
b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java index e5dfc45d140..de79db131ed 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/spell/TestWordBreakSpellChecker.java @@ -272,8 +272,8 @@ public class TestWordBreakSpellChecker extends LuceneTestCase { writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)); int maxLength = TestUtil.nextInt(random(), 5, 50); - List originals = new ArrayList(numDocs); - List breaks = new ArrayList(numDocs); + List originals = new ArrayList<>(numDocs); + List breaks = new ArrayList<>(numDocs); for (int i = 0; i < numDocs; i++) { String orig = ""; if (random().nextBoolean()) { diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java index c0a5185df50..b78c85644b7 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/DocumentDictionaryTest.java @@ -95,7 +95,7 @@ public class DocumentDictionaryTest extends LuceneTestCase { docs.put(term, doc); } - return new SimpleEntry, Map>(invalidDocTerms, docs); + return new SimpleEntry<>(invalidDocTerms, docs); } @Test diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java index ba4e675dcfa..2d880f587a5 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/FileDictionaryTest.java @@ -53,7 +53,7 @@ public class FileDictionaryTest extends LuceneTestCase { entryValues.add(payload); } sb.append("\n"); - return new SimpleEntry, String>(entryValues, sb.toString()); + return new SimpleEntry<>(entryValues, sb.toString()); } private Map.Entry>,String> generateFileInput(int count, String fieldDelimiter, boolean hasWeights, boolean hasPayloads) { @@ -68,7 +68,7 @@ public class FileDictionaryTest extends LuceneTestCase { entries.add(entrySet.getKey()); sb.append(entrySet.getValue()); } - return new SimpleEntry>, String>(entries, sb.toString()); + return new SimpleEntry<>(entries, sb.toString()); } @Test diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java index b2471ef6c5c..7d99824d8b9 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/LookupBenchmarkTest.java @@ -98,7 +98,7 @@ public class LookupBenchmarkTest extends LuceneTestCase { * Collect the multilingual input for benchmarks/ tests. 
*/ public static List readTop50KWiki() throws Exception { - List input = new ArrayList(); + List input = new ArrayList<>(); URL resource = LookupBenchmarkTest.class.getResource("Top50KWiki.utf8"); assert resource != null : "Resource missing: Top50KWiki.utf8"; @@ -211,7 +211,7 @@ public class LookupBenchmarkTest extends LuceneTestCase { for (Class cls : benchmarkClasses) { final Lookup lookup = buildLookup(cls, dictionaryInput); - final List input = new ArrayList(benchmarkInput.size()); + final List input = new ArrayList<>(benchmarkInput.size()); for (Input tf : benchmarkInput) { String s = tf.term.utf8ToString(); String sub = s.substring(0, Math.min(s.length(), @@ -246,7 +246,7 @@ public class LookupBenchmarkTest extends LuceneTestCase { final double NANOS_PER_MS = 1000000; try { - List times = new ArrayList(); + List times = new ArrayList<>(); for (int i = 0; i < warmup + rounds; i++) { final long start = System.nanoTime(); guard = callable.call().intValue(); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java index 147ee3b1b61..86da9e06020 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/AnalyzingInfixSuggesterTest.java @@ -148,7 +148,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); ts.reset(); - List fragments = new ArrayList(); + List fragments = new ArrayList<>(); int upto = 0; while (ts.incrementToken()) { String token = termAtt.toString(); @@ -492,11 +492,11 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase { int iters = atLeast(1000); int visibleUpto = 0; - Set usedWeights = new HashSet(); - Set usedKeys = new HashSet(); + Set usedWeights = new HashSet<>(); + Set usedKeys = new HashSet<>(); - List inputs = new ArrayList(); - List pendingUpdates = new ArrayList(); + List inputs = new ArrayList<>(); + List pendingUpdates = new ArrayList<>(); for(int iter=0;iter expected = new ArrayList(); + List expected = new ArrayList<>(); for(int i=0;i slowCompletor = new ArrayList(); - final TreeSet allPrefixes = new TreeSet(); - final Set seen = new HashSet(); + final List slowCompletor = new ArrayList<>(); + final TreeSet allPrefixes = new TreeSet<>(); + final Set seen = new HashSet<>(); boolean doPayloads = random().nextBoolean(); @@ -742,7 +742,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { if (VERBOSE) { // Don't just sort original list, to avoid VERBOSE // altering the test: - List sorted = new ArrayList(slowCompletor); + List sorted = new ArrayList<>(slowCompletor); Collections.sort(sorted); for(TermFreq2 ent : sorted) { System.out.println(" surface='" + ent.surfaceForm + "' analyzed='" + ent.analyzedForm + "' weight=" + ent.weight); @@ -768,7 +768,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); // 2. 
go thru whole set to find suggestions: - List matches = new ArrayList(); + List matches = new ArrayList<>(); // "Analyze" the key: String[] tokens = prefix.split(" "); @@ -1194,7 +1194,7 @@ public class AnalyzingSuggesterTest extends LuceneTestCase { @SafeVarargs public final Iterable shuffle(T...values) { - final List asList = new ArrayList(values.length); + final List asList = new ArrayList<>(values.length); for (T value : values) { asList.add(value); } diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java index daac881e424..6a0a58c2e72 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/FuzzySuggesterTest.java @@ -54,7 +54,7 @@ import org.apache.lucene.util.fst.Util; public class FuzzySuggesterTest extends LuceneTestCase { public void testRandomEdits() throws IOException { - List keys = new ArrayList(); + List keys = new ArrayList<>(); int numTerms = atLeast(100); for (int i = 0; i < numTerms; i++) { keys.add(new Input("boo" + TestUtil.randomSimpleString(random()), 1 + random().nextInt(100))); } @@ -75,7 +75,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { } public void testNonLatinRandomEdits() throws IOException { - List keys = new ArrayList(); + List keys = new ArrayList<>(); int numTerms = atLeast(100); for (int i = 0; i < numTerms; i++) { keys.add(new Input("буу" + TestUtil.randomSimpleString(random()), 1 + random().nextInt(100))); } @@ -596,9 +596,9 @@ public class FuzzySuggesterTest extends LuceneTestCase { int numQueries = atLeast(100); - final List slowCompletor = new ArrayList(); - final TreeSet allPrefixes = new TreeSet(); - final Set seen = new HashSet(); + final List slowCompletor = new ArrayList<>(); + final TreeSet allPrefixes = new TreeSet<>(); + final Set seen = new HashSet<>(); Input[] keys = new Input[numQueries]; @@ -674,7 +674,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { if (VERBOSE) { // Don't just sort original list, to avoid VERBOSE // altering the test: - List sorted = new ArrayList(slowCompletor); + List sorted = new ArrayList<>(slowCompletor); Collections.sort(sorted); for(TermFreqPayload2 ent : sorted) { System.out.println(" surface='" + ent.surfaceForm + "' analyzed='" + ent.analyzedForm + "' weight=" + ent.weight); } @@ -696,7 +696,7 @@ public class FuzzySuggesterTest extends LuceneTestCase { List r = suggester.lookup(TestUtil.stringToCharSequence(prefix, random()), false, topN); // 2.
go thru whole set to find suggestions: - List matches = new ArrayList(); + List matches = new ArrayList<>(); // "Analyze" the key: String[] tokens = prefix.split(" "); @@ -929,8 +929,8 @@ public class FuzzySuggesterTest extends LuceneTestCase { public void testRandom2() throws Throwable { final int NUM = atLeast(200); - final List answers = new ArrayList(); - final Set seen = new HashSet(); + final List answers = new ArrayList<>(); + final Set seen = new HashSet<>(); for(int i=0;i slowFuzzyMatch(int prefixLen, int maxEdits, boolean allowTransposition, List answers, String frag) { - final List results = new ArrayList(); + final List results = new ArrayList<>(); final int fragLen = frag.length(); for(Input tf : answers) { //System.out.println(" check s=" + tf.term.utf8ToString()); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java index 6ca17dee9e6..151d465d280 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/analyzing/TestFreeTextSuggester.java @@ -293,7 +293,7 @@ public class TestFreeTextSuggester extends LuceneTestCase { public void testRandom() throws IOException { String[] terms = new String[TestUtil.nextInt(random(), 2, 10)]; - Set seen = new HashSet(); + Set seen = new HashSet<>(); while (seen.size() < terms.length) { String token = TestUtil.randomSimpleString(random(), 1, 5); if (!seen.contains(token)) { @@ -367,12 +367,12 @@ public class TestFreeTextSuggester extends LuceneTestCase { }); // Build inefficient but hopefully correct model: - List> gramCounts = new ArrayList>(grams); + List> gramCounts = new ArrayList<>(grams); for(int gram=0;gram map = new HashMap(); + final HashMap map = new HashMap<>(); // create a map up front. 
// then with multiple threads, generate sort keys for all the keys in the map diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java index b963dd384ac..e664026a852 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/LookaheadTokenFilter.java @@ -59,7 +59,7 @@ public abstract class LookaheadTokenFilter inputTokens = new ArrayList(); + public final List inputTokens = new ArrayList<>(); // Next buffered token to be returned to consumer: public int nextRead; diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java index 8fe142dc53f..1ab9ef0bc31 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockAnalyzer.java @@ -48,7 +48,7 @@ public final class MockAnalyzer extends Analyzer { private int positionIncrementGap; private Integer offsetGap; private final Random random; - private Map previousMappings = new HashMap(); + private Map previousMappings = new HashMap<>(); private boolean enableChecks = true; private int maxTokenLength = MockTokenizer.DEFAULT_MAX_TOKEN_LENGTH; diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java index fc620281b5c..5e88a60ef40 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/MockCharFilter.java @@ -100,5 +100,5 @@ public class MockCharFilter extends CharFilter { corrections.put(off, cumulativeDiff); } - TreeMap corrections = new TreeMap(); + TreeMap corrections = new TreeMap<>(); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java b/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java index a7d6456c8ff..bd406430246 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/analysis/ValidatingTokenFilter.java @@ -44,8 +44,8 @@ public final class ValidatingTokenFilter extends TokenFilter { private int lastStartOffset; // Maps position to the start/end offset: - private final Map posToStartOffset = new HashMap(); - private final Map posToEndOffset = new HashMap(); + private final Map posToStartOffset = new HashMap<>(); + private final Map posToEndOffset = new HashMap<>(); private final PositionIncrementAttribute posIncAtt = getAttrIfExists(PositionIncrementAttribute.class); private final PositionLengthAttribute posLenAtt = getAttrIfExists(PositionLengthAttribute.class); diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java index 90c161cf696..2a300397df5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene40/Lucene40DocValuesWriter.java @@ -155,7 +155,7 @@ class Lucene40DocValuesWriter extends DocValuesConsumer { @Override public void addBinaryField(FieldInfo field, Iterable 
values) throws IOException { // examine the values to determine best type to use - HashSet uniqueValues = new HashSet(); + HashSet uniqueValues = new HashSet<>(); int minLength = Integer.MAX_VALUE; int maxLength = Integer.MIN_VALUE; for (BytesRef b : values) { @@ -314,7 +314,7 @@ class Lucene40DocValuesWriter extends DocValuesConsumer { Lucene40DocValuesFormat.BYTES_FIXED_DEREF_VERSION_CURRENT); // deduplicate - TreeSet dictionary = new TreeSet(); + TreeSet dictionary = new TreeSet<>(); for (BytesRef v : values) { dictionary.add(v == null ? new BytesRef() : BytesRef.deepCopyOf(v)); } @@ -354,7 +354,7 @@ class Lucene40DocValuesWriter extends DocValuesConsumer { Lucene40DocValuesFormat.BYTES_VAR_DEREF_VERSION_CURRENT); // deduplicate - TreeSet dictionary = new TreeSet(); + TreeSet dictionary = new TreeSet<>(); for (BytesRef v : values) { dictionary.add(v == null ? new BytesRef() : BytesRef.deepCopyOf(v)); } @@ -362,7 +362,7 @@ class Lucene40DocValuesWriter extends DocValuesConsumer { /* values */ long startPosition = data.getFilePointer(); long currentAddress = 0; - HashMap valueToAddress = new HashMap(); + HashMap valueToAddress = new HashMap<>(); for (BytesRef v : dictionary) { currentAddress = data.getFilePointer() - startPosition; valueToAddress.put(v, currentAddress); diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java index ac8aeafd33c..78680f1dd33 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/lucene42/Lucene42DocValuesConsumer.java @@ -144,7 +144,7 @@ class Lucene42DocValuesConsumer extends DocValuesConsumer { } else { meta.writeByte(TABLE_COMPRESSED); // table-compressed Long[] decode = uniqueValues.toArray(new Long[uniqueValues.size()]); - final HashMap encode = new HashMap(); + final HashMap encode = new HashMap<>(); data.writeVInt(decode.length); for (int i = 0; i < decode.length; i++) { data.writeLong(decode[i]); @@ -252,7 +252,7 @@ class Lucene42DocValuesConsumer extends DocValuesConsumer { meta.writeByte(FST); meta.writeLong(data.getFilePointer()); PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(); - Builder builder = new Builder(INPUT_TYPE.BYTE1, outputs); + Builder builder = new Builder<>(INPUT_TYPE.BYTE1, outputs); IntsRef scratch = new IntsRef(); long ord = 0; for (BytesRef v : values) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java index eacde8e79db..f761565bcd6 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/mockrandom/MockRandomPostingsFormat.java @@ -96,7 +96,7 @@ public final class MockRandomPostingsFormat extends PostingsFormat { // Chooses random IntStreamFactory depending on file's extension private static class MockIntStreamFactory extends IntStreamFactory { private final int salt; - private final List delegates = new ArrayList(); + private final List delegates = new ArrayList<>(); public MockIntStreamFactory(Random random) { salt = random.nextInt(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java 
b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java index 9fcaab73f3a..a83b5b6a8d7 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java +++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java @@ -65,7 +65,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { // Postings state: static class RAMPostings extends FieldsProducer { - final Map fieldToTerms = new TreeMap(); + final Map fieldToTerms = new TreeMap<>(); @Override public Terms terms(String field) { @@ -98,7 +98,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { static class RAMField extends Terms { final String field; - final SortedMap termToDocs = new TreeMap(); + final SortedMap termToDocs = new TreeMap<>(); long sumTotalTermFreq; long sumDocFreq; int docCount; @@ -167,7 +167,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { static class RAMTerm { final String term; long totalTermFreq; - final List docs = new ArrayList(); + final List docs = new ArrayList<>(); public RAMTerm(String term) { this.term = term; } @@ -599,7 +599,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat { } // Holds all indexes created, keyed by the ID assigned in fieldsConsumer - private final Map state = new HashMap(); + private final Map state = new HashMap<>(); private final AtomicInteger nextID = new AtomicInteger(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java index 119dcb94b65..6a2c1175f1a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseCompressingDocValuesFormatTestCase.java @@ -48,7 +48,7 @@ public abstract class BaseCompressingDocValuesFormatTestCase extends BaseDocValu final IndexWriter iwriter = new IndexWriter(dir, iwc); final int uniqueValueCount = TestUtil.nextInt(random(), 1, 256); - final List values = new ArrayList(); + final List values = new ArrayList<>(); final Document doc = new Document(); final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java index aab13f07a85..422eb1d433f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java @@ -1138,7 +1138,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { RandomIndexWriter w = new RandomIndexWriter(random(), dir, cfg); int numDocs = atLeast(100); BytesRefHash hash = new BytesRefHash(); - Map docToString = new HashMap(); + Map docToString = new HashMap<>(); int maxLength = TestUtil.nextInt(random(), 1, 50); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); @@ -2086,7 +2086,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { } int numValues = TestUtil.nextInt(random(), 0, maxValuesPerDoc); // create a random set of strings - Set values = new TreeSet(); + Set values = new TreeSet<>(); for (int v = 0; v < numValues; v++) { values.add(TestUtil.randomSimpleString(random(), length)); } @@ -2097,7 
+2097,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { } // add in any order to the dv field - ArrayList unordered = new ArrayList(values); + ArrayList unordered = new ArrayList<>(values); Collections.shuffle(unordered, random()); for (String v : unordered) { doc.add(new SortedSetDocValuesField("dv", new BytesRef(v))); @@ -2303,20 +2303,20 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { } int numValues = random().nextInt(17); // create a random list of strings - List values = new ArrayList(); + List values = new ArrayList<>(); for (int v = 0; v < numValues; v++) { values.add(TestUtil.randomSimpleString(random(), length)); } // add in any order to the indexed field - ArrayList unordered = new ArrayList(values); + ArrayList unordered = new ArrayList<>(values); Collections.shuffle(unordered, random()); for (String v : values) { doc.add(newStringField("indexed", v, Field.Store.NO)); } // add in any order to the dv field - ArrayList unordered2 = new ArrayList(values); + ArrayList unordered2 = new ArrayList<>(values); Collections.shuffle(unordered2, random()); for (String v : unordered2) { doc.add(new SortedSetDocValuesField("dv", new BytesRef(v))); @@ -2628,7 +2628,7 @@ public abstract class BaseDocValuesFormatTestCase extends LuceneTestCase { numDocs = TestUtil.nextInt(random(), 100, 200); } IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); - List docBytes = new ArrayList(); + List docBytes = new ArrayList<>(); long totalBytes = 0; for(int docID=0;docID docBytes = new ArrayList(); + List docBytes = new ArrayList<>(); long totalBytes = 0; for(int docID=0;docID values = new TreeSet(); + Set values = new TreeSet<>(); for (int j = 0; j < numSortedSetFields; j++) { values.add(TestUtil.randomSimpleString(random())); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java index d34ce2f293f..a0550c7cb7e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java @@ -341,7 +341,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { public static void createPostings() throws IOException { totalPostings = 0; totalPayloadBytes = 0; - fields = new TreeMap>(); + fields = new TreeMap<>(); final int numFields = TestUtil.nextInt(random(), 1, 5); if (VERBOSE) { @@ -362,9 +362,9 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { null, DocValuesType.NUMERIC, null); fieldUpto++; - SortedMap postings = new TreeMap(); + SortedMap postings = new TreeMap<>(); fields.put(field, postings); - Set seenTerms = new HashSet(); + Set seenTerms = new HashSet<>(); int numTerms; if (random().nextInt(10) == 7) { @@ -422,7 +422,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { } } - allTerms = new ArrayList(); + allTerms = new ArrayList<>(); for(Map.Entry> fieldEnt : fields.entrySet()) { String field = fieldEnt.getKey(); for(Map.Entry termEnt : fieldEnt.getValue().entrySet()) { @@ -1103,8 +1103,8 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { ThreadState threadState = new ThreadState(); // Test random terms/fields: - List termStates = new ArrayList(); - List termStateTerms = new ArrayList(); + List termStates = new ArrayList<>(); + List termStateTerms = new 
ArrayList<>(); Collections.shuffle(allTerms, random()); int upto = 0; @@ -1387,7 +1387,7 @@ public abstract class BasePostingsFormatTestCase extends LuceneTestCase { // while up to one thread flushes, and each of those // threads iterates over the map while the flushing // thread might be adding to it: - final Map termFreqs = new ConcurrentHashMap(); + final Map termFreqs = new ConcurrentHashMap<>(); final AtomicLong sumDocFreq = new AtomicLong(); final AtomicLong sumTotalTermFreq = new AtomicLong(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java index 1c9a8267cac..a2d9234e0ff 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseStoredFieldsFormatTestCase.java @@ -99,7 +99,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends LuceneTestCase { final int docCount = atLeast(200); final int fieldCount = TestUtil.nextInt(rand, 1, 5); - final List fieldIDs = new ArrayList(); + final List fieldIDs = new ArrayList<>(); FieldType customType = new FieldType(TextField.TYPE_STORED); customType.setTokenized(false); @@ -109,7 +109,7 @@ public abstract class BaseStoredFieldsFormatTestCase extends LuceneTestCase { fieldIDs.add(i); } - final Map docs = new HashMap(); + final Map docs = new HashMap<>(); if (VERBOSE) { System.out.println("TEST: build index docCount=" + docCount); @@ -439,8 +439,8 @@ public abstract class BaseStoredFieldsFormatTestCase extends LuceneTestCase { final IndexSearcher searcher = new IndexSearcher(rd); final int concurrentReads = atLeast(5); final int readsPerThread = atLeast(50); - final List readThreads = new ArrayList(); - final AtomicReference ex = new AtomicReference(); + final List readThreads = new ArrayList<>(); + final AtomicReference ex = new AtomicReference<>(); for (int i = 0; i < concurrentReads; ++i) { readThreads.add(new Thread() { diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java index 5b0bdd02abc..76760d00f09 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java @@ -102,7 +102,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { } protected Options randomOptions() { - return RandomPicks.randomFrom(random(), new ArrayList(validOptions())); + return RandomPicks.randomFrom(random(), new ArrayList<>(validOptions())); } protected FieldType fieldType(Options options) { @@ -245,8 +245,8 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { } } - positionToTerms = new HashMap>(len); - startOffsetToTerms = new HashMap>(len); + positionToTerms = new HashMap<>(len); + startOffsetToTerms = new HashMap<>(len); for (int i = 0; i < len; ++i) { if (!positionToTerms.containsKey(positions[i])) { positionToTerms.put(positions[i], new HashSet(1)); @@ -258,7 +258,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { startOffsetToTerms.get(startOffsets[i]).add(i); } - freqs = new HashMap(); + freqs = new HashMap<>(); for (String term : terms) { if (freqs.containsKey(term)) { freqs.put(term, freqs.get(term) + 1); @@ -314,7 +314,7 @@ public abstract 
class BaseTermVectorsFormatTestCase extends LuceneTestCase { fieldTypes = new FieldType[fieldCount]; tokenStreams = new RandomTokenStream[fieldCount]; Arrays.fill(fieldTypes, fieldType(options)); - final Set usedFileNames = new HashSet(); + final Set usedFileNames = new HashSet<>(); for (int i = 0; i < fieldCount; ++i) { do { this.fieldNames[i] = RandomPicks.randomFrom(random(), fieldNames); @@ -341,7 +341,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { private final BytesRef[] termBytes; protected RandomDocumentFactory(int distinctFieldNames, int disctinctTerms) { - final Set fieldNames = new HashSet(); + final Set fieldNames = new HashSet<>(); while (fieldNames.size() < distinctFieldNames) { fieldNames.add(TestUtil.randomSimpleString(random())); fieldNames.remove("id"); @@ -365,8 +365,8 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { // compare field names assertEquals(doc == null, fields == null); assertEquals(doc.fieldNames.length, fields.size()); - final Set fields1 = new HashSet(); - final Set fields2 = new HashSet(); + final Set fields1 = new HashSet<>(); + final Set fields2 = new HashSet<>(); for (int i = 0; i < doc.fieldNames.length; ++i) { fields1.add(doc.fieldNames[i]); } @@ -389,19 +389,19 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { } // to test reuse - private final ThreadLocal termsEnum = new ThreadLocal(); - private final ThreadLocal docsEnum = new ThreadLocal(); - private final ThreadLocal docsAndPositionsEnum = new ThreadLocal(); + private final ThreadLocal termsEnum = new ThreadLocal<>(); + private final ThreadLocal docsEnum = new ThreadLocal<>(); + private final ThreadLocal docsAndPositionsEnum = new ThreadLocal<>(); protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException { assertEquals(1, terms.getDocCount()); - final int termCount = new HashSet(Arrays.asList(tk.terms)).size(); + final int termCount = new HashSet<>(Arrays.asList(tk.terms)).size(); assertEquals(termCount, terms.size()); assertEquals(termCount, terms.getSumDocFreq()); assertEquals(ft.storeTermVectorPositions(), terms.hasPositions()); assertEquals(ft.storeTermVectorOffsets(), terms.hasOffsets()); assertEquals(ft.storeTermVectorPayloads() && tk.hasPayloads(), terms.hasPayloads()); - final Set uniqueTerms = new HashSet(); + final Set uniqueTerms = new HashSet<>(); for (String term : tk.freqs.keySet()) { uniqueTerms.add(new BytesRef(term)); } @@ -638,7 +638,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { final RandomDocumentFactory docFactory = new RandomDocumentFactory(5, 20); final int numDocs = atLeast(100); final int numDeletes = random().nextInt(numDocs); - final Set deletes = new HashSet(); + final Set deletes = new HashSet<>(); while (deletes.size() < numDeletes) { deletes.add(random().nextInt(numDocs)); } @@ -694,7 +694,7 @@ public abstract class BaseTermVectorsFormatTestCase extends LuceneTestCase { assertEquals(docs[i], reader.getTermVectors(docID)); } - final AtomicReference exception = new AtomicReference(); + final AtomicReference exception = new AtomicReference<>(); final Thread[] threads = new Thread[2]; for (int i = 0; i < threads.length; ++i) { threads[i] = new Thread() { diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java index ca741f267dc..01588ed7ce7 100644 --- 
a/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/DocHelper.java @@ -175,16 +175,16 @@ class DocHelper { largeLazyField//placeholder for large field, since this is null. It must always be last }; - public static Map all =new HashMap(); - public static Map indexed =new HashMap(); - public static Map stored =new HashMap(); - public static Map unstored=new HashMap(); - public static Map unindexed=new HashMap(); - public static Map termvector=new HashMap(); - public static Map notermvector=new HashMap(); - public static Map lazy= new HashMap(); - public static Map noNorms=new HashMap(); - public static Map noTf=new HashMap(); + public static Map all =new HashMap<>(); + public static Map indexed =new HashMap<>(); + public static Map stored =new HashMap<>(); + public static Map unstored=new HashMap<>(); + public static Map unindexed=new HashMap<>(); + public static Map termvector=new HashMap<>(); + public static Map notermvector=new HashMap<>(); + public static Map lazy= new HashMap<>(); + public static Map noNorms=new HashMap<>(); + public static Map noTf=new HashMap<>(); static { //Initialize the large Lazy Field @@ -227,7 +227,7 @@ class DocHelper { static { - nameValues = new HashMap(); + nameValues = new HashMap<>(); nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT); nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT); nameValues.put(TEXT_FIELD_3_KEY, FIELD_3_TEXT); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java index 66c24874b55..5f073e8c6fc 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterAtomicReader.java @@ -39,7 +39,7 @@ public final class FieldFilterAtomicReader extends FilterAtomicReader { super(in); this.fields = fields; this.negate = negate; - ArrayList filteredInfos = new ArrayList(); + ArrayList filteredInfos = new ArrayList<>(); for (FieldInfo fi : in.getFieldInfos()) { if (hasField(fi.name)) { filteredInfos.add(fi); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java b/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java index d4ec400c89e..9d8e0ee8798 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/MockRandomMergePolicy.java @@ -46,7 +46,7 @@ public class MockRandomMergePolicy extends MergePolicy { int numSegments = segmentInfos.size(); - List segments = new ArrayList(); + List segments = new ArrayList<>(); final Collection merging = writer.get().getMergingSegments(); for(SegmentCommitInfo sipc : segmentInfos) { @@ -75,7 +75,7 @@ public class MockRandomMergePolicy extends MergePolicy { SegmentInfos segmentInfos, int maxSegmentCount, Map segmentsToMerge) throws IOException { - final List eligibleSegments = new ArrayList(); + final List eligibleSegments = new ArrayList<>(); for(SegmentCommitInfo info : segmentInfos) { if (segmentsToMerge.containsKey(info)) { eligibleSegments.add(info); diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java index 5e3911fa373..8da536dd63a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java +++ 
b/lucene/test-framework/src/java/org/apache/lucene/index/RandomCodec.java @@ -68,16 +68,16 @@ import org.apache.lucene.util.TestUtil; */ public class RandomCodec extends Lucene46Codec { /** Shuffled list of postings formats to use for new mappings */ - private List formats = new ArrayList(); + private List formats = new ArrayList<>(); /** Shuffled list of docvalues formats to use for new mappings */ - private List dvFormats = new ArrayList(); + private List dvFormats = new ArrayList<>(); /** unique set of format names this codec knows about */ - public Set formatNames = new HashSet(); + public Set formatNames = new HashSet<>(); /** unique set of docvalues format names this codec knows about */ - public Set dvFormatNames = new HashSet(); + public Set dvFormatNames = new HashSet<>(); /** memorized field->postingsformat mappings */ // note: we have to sync this map even though its just for debugging/toString, diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java index bbf00a544f6..8beff389228 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java @@ -125,8 +125,8 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas @Override public void run() { // TODO: would be better if this were cross thread, so that we make sure one thread deleting anothers added docs works: - final List toDeleteIDs = new ArrayList(); - final List toDeleteSubDocs = new ArrayList(); + final List toDeleteIDs = new ArrayList<>(); + final List toDeleteSubDocs = new ArrayList<>(); while(System.currentTimeMillis() < stopTime && !failed.get()) { try { @@ -180,9 +180,9 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } final Field packIDField = newStringField("packID", packID, Field.Store.YES); - final List docIDs = new ArrayList(); + final List docIDs = new ArrayList<>(); final SubDocs subDocs = new SubDocs(packID, docIDs); - final List docsList = new ArrayList(); + final List docsList = new ArrayList<>(); allSubDocs.add(subDocs); doc.add(packIDField); diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java index b396954a31b..705a8a072af 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java @@ -40,7 +40,7 @@ public class AssertingScorer extends Scorer { return other; } final AssertingScorer assertScorer = new AssertingScorer(random, other); - ASSERTING_INSTANCES.put(other, new WeakReference(assertScorer)); + ASSERTING_INSTANCES.put(other, new WeakReference<>(assertScorer)); return assertScorer; } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java index 730f253bc8b..034396c9a05 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/CheckHits.java @@ -59,7 +59,7 @@ public class CheckHits { throws IOException { String d = q.toString(defaultFieldName); - Set ignore = new TreeSet(); + Set ignore = new TreeSet<>(); for (int i = 0; i < 
results.length; i++) { ignore.add(Integer.valueOf(results[i])); } @@ -98,11 +98,11 @@ public class CheckHits { QueryUtils.check(random,query,searcher); - Set correct = new TreeSet(); + Set correct = new TreeSet<>(); for (int i = 0; i < results.length; i++) { correct.add(Integer.valueOf(results[i])); } - final Set actual = new TreeSet(); + final Set actual = new TreeSet<>(); final Collector c = new SetCollector(actual); searcher.search(query, c); @@ -168,12 +168,12 @@ public class CheckHits { ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; - Set correct = new TreeSet(); + Set correct = new TreeSet<>(); for (int i = 0; i < results.length; i++) { correct.add(Integer.valueOf(results[i])); } - Set actual = new TreeSet(); + Set actual = new TreeSet<>(); for (int i = 0; i < hits.length; i++) { actual.add(Integer.valueOf(hits[i].doc)); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java b/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java index d7aca2dc00f..5f68765b9fc 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/RandomSimilarityProvider.java @@ -65,7 +65,7 @@ import org.apache.lucene.search.similarities.Similarity; public class RandomSimilarityProvider extends PerFieldSimilarityWrapper { final DefaultSimilarity defaultSim = new DefaultSimilarity(); final List knownSims; - Map previousMappings = new HashMap(); + Map previousMappings = new HashMap<>(); final int perFieldSeed; final int coordType; // 0 = no coord, 1 = coord, 2 = crazy coord final boolean shouldQueryNorm; @@ -74,7 +74,7 @@ public class RandomSimilarityProvider extends PerFieldSimilarityWrapper { perFieldSeed = random.nextInt(); coordType = random.nextInt(3); shouldQueryNorm = random.nextBoolean(); - knownSims = new ArrayList(allSims); + knownSims = new ArrayList<>(allSims); Collections.shuffle(knownSims, random); } @@ -138,7 +138,7 @@ public class RandomSimilarityProvider extends PerFieldSimilarityWrapper { }; static List allSims; static { - allSims = new ArrayList(); + allSims = new ArrayList<>(); allSims.add(new DefaultSimilarity()); allSims.add(new BM25Similarity()); for (BasicModel basicModel : BASIC_MODELS) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java index b0f46f4c34a..43b33e9a631 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java @@ -178,7 +178,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { // term stats from remote node Map getNodeTermStats(Set terms, int nodeID, long version) throws IOException { final NodeState node = nodes[nodeID]; - final Map stats = new HashMap(); + final Map stats = new HashMap<>(); final IndexSearcher s = node.searchers.acquire(version); if (s == null) { throw new SearcherExpiredException("node=" + nodeID + " version=" + version); @@ -207,8 +207,8 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { // local cache...? And still LRU otherwise (for the // still-live searchers). 
- private final Map collectionStatsCache = new ConcurrentHashMap(); - private final Map termStatsCache = new ConcurrentHashMap(); + private final Map collectionStatsCache = new ConcurrentHashMap<>(); + private final Map termStatsCache = new ConcurrentHashMap<>(); /** Matches docs in the local shard but scores based on * aggregated stats ("mock distributed scoring") from all @@ -229,7 +229,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { @Override public Query rewrite(Query original) throws IOException { final Query rewritten = super.rewrite(original); - final Set terms = new HashSet(); + final Set terms = new HashSet<>(); rewritten.extractTerms(terms); // Make a single request to remote nodes for term @@ -239,7 +239,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { continue; } - final Set missing = new HashSet(); + final Set missing = new HashSet<>(); for(Term term : terms) { final TermAndShardVersion key = new TermAndShardVersion(nodeID, nodeVersions[nodeID], term); if (!termStatsCache.containsKey(key)) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java index e8eb0290d24..2f2acacb13e 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java +++ b/lucene/test-framework/src/java/org/apache/lucene/store/MockDirectoryWrapper.java @@ -76,7 +76,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { boolean wrapLockFactory = true; private Set unSyncedFiles; private Set createdFiles; - private Set openFilesForWrite = new HashSet(); + private Set openFilesForWrite = new HashSet<>(); Set openLocks = Collections.synchronizedSet(new HashSet()); volatile boolean crashed; private ThrottledIndexOutput throttledOutput; @@ -101,14 +101,14 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { private synchronized void init() { if (openFiles == null) { - openFiles = new HashMap(); - openFilesDeleted = new HashSet(); + openFiles = new HashMap<>(); + openFilesDeleted = new HashSet<>(); } if (createdFiles == null) - createdFiles = new HashSet(); + createdFiles = new HashSet<>(); if (unSyncedFiles == null) - unSyncedFiles = new HashSet(); + unSyncedFiles = new HashSet<>(); } public MockDirectoryWrapper(Random random, Directory delegate) { @@ -207,14 +207,14 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { * unsynced files. */ public synchronized void crash() throws IOException { crashed = true; - openFiles = new HashMap(); - openFilesForWrite = new HashSet(); - openFilesDeleted = new HashSet(); + openFiles = new HashMap<>(); + openFilesForWrite = new HashSet<>(); + openFilesDeleted = new HashSet<>(); Iterator it = unSyncedFiles.iterator(); - unSyncedFiles = new HashSet(); + unSyncedFiles = new HashSet<>(); // first force-close all files, so we can corrupt on windows etc. // clone the file map, as these guys want to remove themselves on close. 
- Map m = new IdentityHashMap(openFileHandles); + Map m = new IdentityHashMap<>(openFileHandles); for (Closeable f : m.keySet()) { try { f.close(); @@ -441,7 +441,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } public synchronized Set getOpenDeletedFiles() { - return new HashSet(openFilesDeleted); + return new HashSet<>(openFilesDeleted); } private boolean failOnCreateOutput = true; @@ -629,11 +629,11 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { public synchronized void close() throws IOException { // files that we tried to delete, but couldn't because readers were open. // all that matters is that we tried! (they will eventually go away) - Set pendingDeletions = new HashSet(openFilesDeleted); + Set pendingDeletions = new HashSet<>(openFilesDeleted); maybeYield(); if (openFiles == null) { - openFiles = new HashMap(); - openFilesDeleted = new HashSet(); + openFiles = new HashMap<>(); + openFilesDeleted = new HashSet<>(); } if (openFiles.size() > 0) { // print the first one as its very verbose otherwise @@ -666,7 +666,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { // TODO: factor this out / share w/ TestIW.assertNoUnreferencedFiles if (assertNoUnreferencedFilesOnClose) { // now look for unreferenced files: discount ones that we tried to delete but could not - Set allFiles = new HashSet(Arrays.asList(listAll())); + Set allFiles = new HashSet<>(Arrays.asList(listAll())); allFiles.removeAll(pendingDeletions); String[] startFiles = allFiles.toArray(new String[0]); IndexWriterConfig iwc = new IndexWriterConfig(LuceneTestCase.TEST_VERSION_CURRENT, null); @@ -674,8 +674,8 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { new IndexWriter(in, iwc).rollback(); String[] endFiles = in.listAll(); - Set startSet = new TreeSet(Arrays.asList(startFiles)); - Set endSet = new TreeSet(Arrays.asList(endFiles)); + Set startSet = new TreeSet<>(Arrays.asList(startFiles)); + Set endSet = new TreeSet<>(Arrays.asList(endFiles)); if (pendingDeletions.contains("segments.gen") && endSet.contains("segments.gen")) { // this is possible if we hit an exception while writing segments.gen, we try to delete it @@ -703,7 +703,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { } try { - Set ghosts = new HashSet(sis.files(in, false)); + Set ghosts = new HashSet<>(sis.files(in, false)); for (String s : ghosts) { if (endSet.contains(s) && !startSet.contains(s)) { assert pendingDeletions.contains(s); @@ -725,14 +725,14 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { endFiles = endSet.toArray(new String[0]); if (!Arrays.equals(startFiles, endFiles)) { - List removed = new ArrayList(); + List removed = new ArrayList<>(); for(String fileName : startFiles) { if (!endSet.contains(fileName)) { removed.add(fileName); } } - List added = new ArrayList(); + List added = new ArrayList<>(); for(String fileName : endFiles) { if (!startSet.contains(fileName)) { added.add(fileName); @@ -841,7 +841,7 @@ public class MockDirectoryWrapper extends BaseDirectoryWrapper { */ synchronized public void failOn(Failure fail) { if (failures == null) { - failures = new ArrayList(); + failures = new ArrayList<>(); } failures.add(fail); } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java b/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java index 881da275145..fc82bacaf9f 100644 --- 
a/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/AbstractBeforeAfterRule.java @@ -39,7 +39,7 @@ abstract class AbstractBeforeAfterRule implements TestRule { return new Statement() { @Override public void evaluate() throws Throwable { - final ArrayList errors = new ArrayList(); + final ArrayList errors = new ArrayList<>(); try { before(); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java index 063dec381a9..33d9d1d6917 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LineFileDocs.java @@ -197,7 +197,7 @@ public class LineFileDocs implements Closeable { } } - private final ThreadLocal threadDocs = new ThreadLocal(); + private final ThreadLocal threadDocs = new ThreadLocal<>(); /** Note: Document instance is re-used per-thread */ public Document nextDoc() throws IOException { diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java index bbc9831cbbf..47ba67f5cf7 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnit3MethodProvider.java @@ -34,7 +34,7 @@ public final class LuceneJUnit3MethodProvider implements TestMethodProvider { @Override public Collection getTestMethods(Class suiteClass, ClassModel classModel) { Map methods = classModel.getMethods(); - ArrayList result = new ArrayList(); + ArrayList result = new ArrayList<>(); for (MethodModel mm : methods.values()) { // Skip any methods that have overrides/shadows. if (mm.getDown() != null) continue; diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java index 52e8032e353..153211a64c8 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java @@ -319,11 +319,11 @@ public abstract class LuceneTestCase extends Assert { /** All {@link Directory} implementations. */ private static final List CORE_DIRECTORIES; static { - CORE_DIRECTORIES = new ArrayList(FS_DIRECTORIES); + CORE_DIRECTORIES = new ArrayList<>(FS_DIRECTORIES); CORE_DIRECTORIES.add("RAMDirectory"); }; - protected static final Set doesntSupportOffsets = new HashSet(Arrays.asList( + protected static final Set doesntSupportOffsets = new HashSet<>(Arrays.asList( "Lucene3x", "MockFixedIntBlock", "MockVariableIntBlock", @@ -392,7 +392,7 @@ public abstract class LuceneTestCase extends Assert { } ignoreAfterMaxFailuresDelegate = - new AtomicReference( + new AtomicReference<>( new TestRuleIgnoreAfterMaxFailures(maxFailures)); ignoreAfterMaxFailures = TestRuleDelegate.of(ignoreAfterMaxFailuresDelegate); } @@ -414,7 +414,7 @@ public abstract class LuceneTestCase extends Assert { /** By-name list of ignored types like loggers etc.
*/ private final static Set STATIC_LEAK_IGNORED_TYPES = - Collections.unmodifiableSet(new HashSet(Arrays.asList( + Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "org.slf4j.Logger", "org.apache.solr.SolrLogFormatter", EnumSet.class.getName()))); @@ -691,7 +691,7 @@ public abstract class LuceneTestCase extends Assert { */ @SafeVarargs @SuppressWarnings("varargs") public static Set asSet(T... args) { - return new HashSet(Arrays.asList(args)); + return new HashSet<>(Arrays.asList(args)); } /** @@ -1203,13 +1203,13 @@ public abstract class LuceneTestCase extends Assert { break; case 3: final AtomicReader ar = SlowCompositeReaderWrapper.wrap(r); - final List allFields = new ArrayList(); + final List allFields = new ArrayList<>(); for (FieldInfo fi : ar.getFieldInfos()) { allFields.add(fi.name); } Collections.shuffle(allFields, random); final int end = allFields.isEmpty() ? 0 : random.nextInt(allFields.size()); - final Set fields = new HashSet(allFields.subList(0, end)); + final Set fields = new HashSet<>(allFields.subList(0, end)); // will create no FC insanity as ParallelAtomicReader has own cache key: r = new ParallelAtomicReader( new FieldFilterAtomicReader(ar, fields, false), @@ -1735,7 +1735,7 @@ public abstract class LuceneTestCase extends Assert { Random random = random(); // collect this number of terms from the left side - HashSet tests = new HashSet(); + HashSet tests = new HashSet<>(); int numPasses = 0; while (numPasses < 10 && tests.size() < numTests) { leftEnum = leftTerms.iterator(leftEnum); @@ -1778,7 +1778,7 @@ public abstract class LuceneTestCase extends Assert { rightEnum = rightTerms.iterator(rightEnum); - ArrayList shuffledTests = new ArrayList(tests); + ArrayList shuffledTests = new ArrayList<>(tests); Collections.shuffle(shuffledTests, random); for (BytesRef b : shuffledTests) { @@ -1897,7 +1897,7 @@ public abstract class LuceneTestCase extends Assert { } private static Set getDVFields(IndexReader reader) { - Set fields = new HashSet(); + Set fields = new HashSet<>(); for(FieldInfo fi : MultiFields.getMergedFieldInfos(reader)) { if (fi.hasDocValues()) { fields.add(fi.name); @@ -2050,8 +2050,8 @@ public abstract class LuceneTestCase extends Assert { FieldInfos rightInfos = MultiFields.getMergedFieldInfos(rightReader); // TODO: would be great to verify more than just the names of the fields! - TreeSet left = new TreeSet(); - TreeSet right = new TreeSet(); + TreeSet left = new TreeSet<>(); + TreeSet right = new TreeSet<>(); for (FieldInfo fi : leftInfos) { left.add(fi.name); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java b/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java index 32f2cc0b802..6041d22e96a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/RunListenerPrintReproduceInfo.java @@ -42,7 +42,7 @@ public final class RunListenerPrintReproduceInfo extends RunListener { * A list of all test suite classes executed so far in this JVM (ehm, * under this class's classloader). */ - private static List testClassesRun = new ArrayList(); + private static List testClassesRun = new ArrayList<>(); /** * The currently executing scope. 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java index 08d969ac590..53d1d430385 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleDelegate.java @@ -40,6 +40,6 @@ final class TestRuleDelegate implements TestRule { } static TestRuleDelegate of(AtomicReference delegate) { - return new TestRuleDelegate(delegate); + return new TestRuleDelegate<>(delegate); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java index 5d61527649f..121c1c0371a 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestRuleSetupAndRestoreClassEnv.java @@ -72,7 +72,7 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { /** * Restore these system property values. */ - private HashMap restoreProperties = new HashMap(); + private HashMap restoreProperties = new HashMap<>(); private Codec savedCodec; private Locale savedLocale; @@ -149,7 +149,7 @@ final class TestRuleSetupAndRestoreClassEnv extends AbstractBeforeAfterRule { } Class targetClass = RandomizedContext.current().getTargetClass(); - avoidCodecs = new HashSet(); + avoidCodecs = new HashSet<>(); if (targetClass.isAnnotationPresent(SuppressCodecs.class)) { SuppressCodecs a = targetClass.getAnnotation(SuppressCodecs.class); avoidCodecs.addAll(Arrays.asList(a.value())); diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java index 0f4337a5c2e..e7718da3eda 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java @@ -799,7 +799,7 @@ public final class TestUtil { * @param reflectedValues contains a map with "AttributeClass#key" as values */ public static void assertAttributeReflection(final AttributeImpl att, Map reflectedValues) { - final Map map = new HashMap(); + final Map map = new HashMap<>(); att.reflectWith(new AttributeReflector() { @Override public void reflect(Class attClass, String key, Object value) { diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java index 67f5eaae829..e55d8ed2bd5 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/automaton/AutomatonTestUtil.java @@ -157,11 +157,11 @@ public class AutomatonTestUtil { // must use IdentityHashmap because two Transitions w/ // different start nodes can be considered the same - leadsToAccept = new IdentityHashMap(); - final Map> allArriving = new HashMap>(); + leadsToAccept = new IdentityHashMap<>(); + final Map> allArriving = new HashMap<>(); - final LinkedList q = new LinkedList(); - final Set seen = new HashSet(); + final LinkedList q = new LinkedList<>(); + final Set seen = new HashSet<>(); // reverse map the transitions, so we can quickly look // up all arriving transitions to a given state @@ -170,7 +170,7 @@ public class AutomatonTestUtil { final Transition t = 
s.transitionsArray[i]; List tl = allArriving.get(t.to); if (tl == null) { - tl = new ArrayList(); + tl = new ArrayList<>(); allArriving.put(t.to, tl); } tl.add(new ArrivingTransition(s, t)); @@ -201,7 +201,7 @@ public class AutomatonTestUtil { public int[] getRandomAcceptedString(Random r) { - final List soFar = new ArrayList(); + final List soFar = new ArrayList<>(); if (a.isSingleton()) { // accepts only one final String s = a.singleton; @@ -239,7 +239,7 @@ public class AutomatonTestUtil { if (cheat) { // pick a transition that we know is the fastest // path to an accept state - List toAccept = new ArrayList(); + List toAccept = new ArrayList<>(); for(int i=0;i initialset = new HashSet(); + Set initialset = new HashSet<>(); initialset.add(a.initial); determinizeSimple(a, initialset); } @@ -346,9 +346,9 @@ public class AutomatonTestUtil { public static void determinizeSimple(Automaton a, Set initialset) { int[] points = a.getStartPoints(); // subset construction - Map, Set> sets = new HashMap, Set>(); - LinkedList> worklist = new LinkedList>(); - Map, State> newstate = new HashMap, State>(); + Map, Set> sets = new HashMap<>(); + LinkedList> worklist = new LinkedList<>(); + Map, State> newstate = new HashMap<>(); sets.put(initialset, initialset); worklist.add(initialset); a.initial = new State(); @@ -362,7 +362,7 @@ public class AutomatonTestUtil { break; } for (int n = 0; n < points.length; n++) { - Set p = new HashSet(); + Set p = new HashSet<>(); for (State q : s) for (Transition t : q.getTransitions()) if (t.min <= points[n] && points[n] <= t.max) diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java b/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java index bf5c8acc50e..bb72ab0dd64 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java +++ b/lucene/test-framework/src/java/org/apache/lucene/util/fst/FSTTester.java @@ -236,7 +236,7 @@ public class FSTTester { private T randomAcceptedWord(FST fst, IntsRef in) throws IOException { FST.Arc arc = fst.getFirstArc(new FST.Arc()); - final List> arcs = new ArrayList>(); + final List> arcs = new ArrayList<>(); in.length = 0; in.offset = 0; final T NO_OUTPUT = fst.outputs.getNoOutput(); @@ -281,7 +281,7 @@ public class FSTTester { final boolean willRewrite = random.nextBoolean(); - final Builder builder = new Builder(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, + final Builder builder = new Builder<>(inputMode == 0 ? FST.INPUT_TYPE.BYTE1 : FST.INPUT_TYPE.BYTE4, prune1, prune2, prune1==0 && prune2==0, allowRandomSuffixSharing ? 
random.nextBoolean() : true, @@ -320,7 +320,7 @@ public class FSTTester { out.close(); IndexInput in = dir.openInput("fst.bin", context); try { - fst = new FST(in, outputs); + fst = new FST<>(in, outputs); } finally { in.close(); dir.deleteFile("fst.bin"); @@ -366,7 +366,7 @@ public class FSTTester { if (doReverseLookup) { @SuppressWarnings("unchecked") FST fstLong0 = (FST) fst; fstLong = fstLong0; - validOutputs = new HashSet(); + validOutputs = new HashSet<>(); for(InputOutput pair: pairs) { Long output = (Long) pair.output; maxLong = Math.max(maxLong, output); @@ -402,7 +402,7 @@ public class FSTTester { System.out.println("TEST: check valid terms/next()"); } { - IntsRefFSTEnum fstEnum = new IntsRefFSTEnum(fst); + IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); for(InputOutput pair : pairs) { IntsRef term = pair.input; if (LuceneTestCase.VERBOSE) { @@ -421,7 +421,7 @@ public class FSTTester { assertNull(fstEnum.next()); } - final Map termsMap = new HashMap(); + final Map termsMap = new HashMap<>(); for(InputOutput pair : pairs) { termsMap.put(pair.input, pair.output); } @@ -464,7 +464,7 @@ public class FSTTester { if (LuceneTestCase.VERBOSE) { System.out.println("TEST: verify seek"); } - IntsRefFSTEnum fstEnum = new IntsRefFSTEnum(fst); + IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); num = LuceneTestCase.atLeast(random, 100); for(int iter=0;iter { System.out.println("TEST: iter " + iter); } // reset: - fstEnum = new IntsRefFSTEnum(fst); + fstEnum = new IntsRefFSTEnum<>(fst); int upto = -1; while(true) { boolean isDone = false; @@ -682,7 +682,7 @@ public class FSTTester { //System.out.println("TEST: tally prefixes"); // build all prefixes - final Map> prefixes = new HashMap>(); + final Map> prefixes = new HashMap<>(); final IntsRef scratch = new IntsRef(10); for(InputOutput pair: pairs) { scratch.copyInts(pair.input); @@ -690,7 +690,7 @@ public class FSTTester { scratch.length = idx; CountMinOutput cmo = prefixes.get(scratch); if (cmo == null) { - cmo = new CountMinOutput(); + cmo = new CountMinOutput<>(); cmo.count = 1; cmo.output = pair.output; prefixes.put(IntsRef.deepCopyOf(scratch), cmo); @@ -787,7 +787,7 @@ public class FSTTester { if (LuceneTestCase.VERBOSE) { System.out.println("TEST: check pruned enum"); } - IntsRefFSTEnum fstEnum = new IntsRefFSTEnum(fst); + IntsRefFSTEnum fstEnum = new IntsRefFSTEnum<>(fst); IntsRefFSTEnum.InputOutput current; while((current = fstEnum.next()) != null) { if (LuceneTestCase.VERBOSE) { diff --git a/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java b/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java index 553d73bbea0..33402d03414 100644 --- a/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java +++ b/lucene/tools/src/java/org/apache/lucene/dependencies/GetMavenDependenciesTask.java @@ -89,9 +89,9 @@ public class GetMavenDependenciesTask extends Task { private static final String DEPENDENCY_MANAGEMENT_PROPERTY = "lucene.solr.dependency.management"; private static final String IVY_USER_DIR_PROPERTY = "ivy.default.ivy.user.dir"; private static final Properties allProperties = new Properties(); - private static final Set modulesWithSeparateCompileAndTestPOMs = new HashSet(); + private static final Set modulesWithSeparateCompileAndTestPOMs = new HashSet<>(); - private static final Set optionalExternalDependencies = new HashSet(); + private static final Set optionalExternalDependencies = new HashSet<>(); static { // Add modules here that have split 
compile and test POMs // - they need compile-scope deps to also be test-scope deps. @@ -106,13 +106,13 @@ public class GetMavenDependenciesTask extends Task { private final XPath xpath = XPathFactory.newInstance().newXPath(); private final SortedMap> internalCompileScopeDependencies - = new TreeMap>(); - private final Set nonJarDependencies = new HashSet(); - private final Map> dependencyClassifiers = new HashMap>(); - private final Map> interModuleExternalCompileScopeDependencies = new HashMap>(); - private final Map> interModuleExternalTestScopeDependencies = new HashMap>(); + = new TreeMap<>(); + private final Set nonJarDependencies = new HashSet<>(); + private final Map> dependencyClassifiers = new HashMap<>(); + private final Map> interModuleExternalCompileScopeDependencies = new HashMap<>(); + private final Map> interModuleExternalTestScopeDependencies = new HashMap<>(); private final Map> allExternalDependencies - = new HashMap>(); + = new HashMap<>(); private final DocumentBuilder documentBuilder; private File ivyCacheDir; private Pattern internalJarPattern; @@ -257,7 +257,7 @@ public class GetMavenDependenciesTask extends Task { private void addSharedExternalDependencies() { // Delay adding shared compile-scope dependencies until after all have been processed, // so dependency sharing is limited to a depth of one. - Map> sharedDependencies = new HashMap>(); + Map> sharedDependencies = new HashMap<>(); for (String artifactId : interModuleExternalCompileScopeDependencies.keySet()) { TreeSet deps = new TreeSet<>(); sharedDependencies.put(artifactId, deps); @@ -278,7 +278,7 @@ public class GetMavenDependenciesTask extends Task { for (String artifactId : interModuleExternalTestScopeDependencies.keySet()) { SortedSet deps = sharedDependencies.get(artifactId); if (null == deps) { - deps = new TreeSet(); + deps = new TreeSet<>(); sharedDependencies.put(artifactId, deps); } Set moduleDependencies = interModuleExternalTestScopeDependencies.get(artifactId); @@ -311,7 +311,7 @@ public class GetMavenDependenciesTask extends Task { for (String artifactId : sharedDependencies.keySet()) { SortedSet deps = allExternalDependencies.get(artifactId); if (null == deps) { - deps = new TreeSet(); + deps = new TreeSet<>(); allExternalDependencies.put(artifactId, deps); } deps.addAll(sharedDependencies.get(artifactId)); @@ -360,7 +360,7 @@ public class GetMavenDependenciesTask extends Task { private void setGrandparentDependencyManagementProperty() { StringBuilder builder = new StringBuilder(); appendAllInternalDependencies(builder); - Map versionsMap = new HashMap(); + Map versionsMap = new HashMap<>(); appendAllExternalDependencies(builder, versionsMap); builder.setLength(builder.length() - 1); // drop trailing newline allProperties.setProperty(DEPENDENCY_MANAGEMENT_PROPERTY, builder.toString()); @@ -377,7 +377,7 @@ public class GetMavenDependenciesTask extends Task { */ private void appendAllInternalDependencies(StringBuilder builder) { for (String artifactId : internalCompileScopeDependencies.keySet()) { - List exclusions = new ArrayList(); + List exclusions = new ArrayList<>(); exclusions.addAll(internalCompileScopeDependencies.get(artifactId)); SortedSet extDeps = allExternalDependencies.get(artifactId); if (null != extDeps) { @@ -427,7 +427,7 @@ public class GetMavenDependenciesTask extends Task { log("Loading centralized ivy versions from: " + centralizedVersionsFile, verboseLevel); ivyCacheDir = getIvyCacheDir(); Properties versions = loadPropertiesFile(centralizedVersionsFile); - SortedSet 
sortedEntries = new TreeSet(new Comparator() { + SortedSet sortedEntries = new TreeSet<>(new Comparator() { @Override public int compare(Map.Entry o1, Map.Entry o2) { return ((String)o1.getKey()).compareTo((String)o2.getKey()); } @@ -465,7 +465,7 @@ public class GetMavenDependenciesTask extends Task { */ private Collection getTransitiveDependenciesFromIvyCache (String groupId, String artifactId, String version) { - SortedSet transitiveDependencies = new TreeSet(); + SortedSet transitiveDependencies = new TreeSet<>(); // E.g. ~/.ivy2/cache/xerces/xercesImpl/ivy-2.9.1.xml File ivyXmlFile = new File(new File(new File(ivyCacheDir, groupId), artifactId), "ivy-" + version + ".xml"); if ( ! ivyXmlFile.exists()) { @@ -500,8 +500,8 @@ public class GetMavenDependenciesTask extends Task { private void setInternalDependencyProperties() { log("Loading module dependencies from: " + moduleDependenciesPropertiesFile, verboseLevel); Properties moduleDependencies = loadPropertiesFile(moduleDependenciesPropertiesFile); - Map> testScopeDependencies = new HashMap>(); - Map testScopePropertyKeys = new HashMap(); + Map> testScopeDependencies = new HashMap<>(); + Map testScopePropertyKeys = new HashMap<>(); for (Map.Entry entry : moduleDependencies.entrySet()) { String newPropertyKey = (String)entry.getKey(); StringBuilder newPropertyValue = new StringBuilder(); @@ -527,7 +527,7 @@ public class GetMavenDependenciesTask extends Task { String origModuleDir = antProjectName.replace("analyzers-", "analysis/"); Pattern unwantedInternalDependencies = Pattern.compile ("(?:lucene/build/|solr/build/(?:contrib/)?)" + origModuleDir + "|" + UNWANTED_INTERNAL_DEPENDENCIES); - SortedSet sortedDeps = new TreeSet(); + SortedSet sortedDeps = new TreeSet<>(); for (String dependency : value.split(",")) { matcher = SHARED_EXTERNAL_DEPENDENCIES_PATTERN.matcher(dependency); if (matcher.find()) { @@ -542,7 +542,7 @@ public class GetMavenDependenciesTask extends Task { = isTest ? interModuleExternalTestScopeDependencies : interModuleExternalCompileScopeDependencies; Set sharedSet = sharedDeps.get(artifactName); if (null == sharedSet) { - sharedSet = new HashSet(); + sharedSet = new HashSet<>(); sharedDeps.put(artifactName, sharedSet); } if (isTestScope) { @@ -675,7 +675,7 @@ public class GetMavenDependenciesTask extends Task { boolean isOptional = optionalExternalDependencies.contains(dependencyCoordinate); SortedSet deps = allExternalDependencies.get(module); if (null == deps) { - deps = new TreeSet(); + deps = new TreeSet<>(); allExternalDependencies.put(module, deps); } NodeList artifacts = null; diff --git a/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java b/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java index f5f234b8094..7741fb6e971 100644 --- a/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java +++ b/lucene/tools/src/java/org/apache/lucene/validation/LibVersionsCheckTask.java @@ -98,7 +98,7 @@ public class LibVersionsCheckTask extends Task { * All /org/name version keys found in ivy-versions.properties, and whether they * are referenced in any ivy.xml file. */ - private Map referencedCoordinateKeys = new LinkedHashMap(); + private Map referencedCoordinateKeys = new LinkedHashMap<>(); /** * Adds a set of ivy.xml resources to check. 
@@ -320,7 +320,7 @@ public class LibVersionsCheckTask extends Task { private class DependencyRevChecker extends DefaultHandler { private final File ivyXmlFile; - private final Stack tags = new Stack(); + private final Stack tags = new Stack<>(); public boolean fail = false; diff --git a/lucene/tools/src/java/org/apache/lucene/validation/LicenseCheckTask.java b/lucene/tools/src/java/org/apache/lucene/validation/LicenseCheckTask.java index 21494ded771..7ee4a6a598e 100644 --- a/lucene/tools/src/java/org/apache/lucene/validation/LicenseCheckTask.java +++ b/lucene/tools/src/java/org/apache/lucene/validation/LicenseCheckTask.java @@ -235,8 +235,8 @@ public class LicenseCheckTask extends Task { } // Get the expected license path base from the mapper and search for license files. - Map foundLicenses = new LinkedHashMap(); - List expectedLocations = new ArrayList(); + Map foundLicenses = new LinkedHashMap<>(); + List expectedLocations = new ArrayList<>(); outer: for (String mappedPath : licenseMapper.mapFileName(jarFile.getName())) { for (LicenseType licenseType : LicenseType.values()) { diff --git a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java index c1b2ce7c09d..bed79e6ffb9 100644 --- a/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java +++ b/solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java @@ -287,7 +287,7 @@ public class ICUCollationField extends FieldType { @Override public List createFields(SchemaField field, Object value, float boost) { if (field.hasDocValues()) { - List fields = new ArrayList(); + List fields = new ArrayList<>(); fields.add(createField(field, value, boost)); final BytesRef bytes = getCollationKey(field.getName(), value.toString()); if (field.multiValued()) { diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java index 9d045436bfd..1ed20f3511f 100644 --- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java +++ b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java @@ -210,7 +210,7 @@ public class ClusteringComponent extends SearchComponent implements SolrCoreAwar if( fields == null || fields.size() == 0 ) return; StringBuilder sb = new StringBuilder(); String[] flparams = fl.split( "[,\\s]+" ); - Set flParamSet = new HashSet(flparams.length); + Set flParamSet = new HashSet<>(flparams.length); for( String flparam : flparams ){ // no need trim() because of split() by \s+ flParamSet.add(flparam); diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java index 6147fb4956f..3440ae69068 100644 --- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java +++ b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java @@ -123,7 +123,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine { final SolrParams initParams = SolrParams.toSolrParams(config); // Initialization attributes for Carrot2 controller. 
- HashMap initAttributes = new HashMap(); + HashMap initAttributes = new HashMap<>(); // Customize Carrot2's resource lookup to first look for resources // using Solr's resource loader. If that fails, try loading from the classpath. @@ -221,7 +221,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine { Map docIds, SolrQueryRequest sreq) { try { // Prepare attributes for Carrot2 clustering call - Map attributes = new HashMap(); + Map attributes = new HashMap<>(); List documents = getDocuments(solrDocList, docIds, query, sreq); attributes.put(AttributeNames.DOCUMENTS, documents); attributes.put(AttributeNames.QUERY, query.toString()); @@ -350,7 +350,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine { } Iterator docsIter = solrDocList.iterator(); - List result = new ArrayList(solrDocList.size()); + List result = new ArrayList<>(solrDocList.size()); float[] scores = {1.0f}; int[] docsHolder = new int[1]; @@ -500,7 +500,7 @@ public class CarrotClusteringEngine extends SearchClusteringEngine { private void clustersToNamedList(List outputClusters, List> parent, boolean outputSubClusters, int maxLabels) { for (Cluster outCluster : outputClusters) { - NamedList cluster = new SimpleOrderedMap(); + NamedList cluster = new SimpleOrderedMap<>(); parent.add(cluster); // Add labels diff --git a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2StemmerFactory.java b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2StemmerFactory.java index d0e7abd7ca8..82887a16c4f 100644 --- a/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2StemmerFactory.java +++ b/solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2StemmerFactory.java @@ -84,7 +84,7 @@ public class LuceneCarrot2StemmerFactory implements IStemmerFactory { */ private static HashMap> snowballStemmerClasses; static { - snowballStemmerClasses = new HashMap>(); + snowballStemmerClasses = new HashMap<>(); snowballStemmerClasses.put(LanguageCode.DANISH, DanishStemmer.class); snowballStemmerClasses.put(LanguageCode.DUTCH, DutchStemmer.class); snowballStemmerClasses.put(LanguageCode.ENGLISH, EnglishStemmer.class); diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/ClusteringComponentTest.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/ClusteringComponentTest.java index e671d771d0c..791fc05c5f7 100644 --- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/ClusteringComponentTest.java +++ b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/ClusteringComponentTest.java @@ -52,7 +52,7 @@ public class ClusteringComponentTest extends AbstractClusteringTestCase { SolrRequestHandler handler = core.getRequestHandler("standard"); SolrQueryResponse rsp; rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); + rsp.add("responseHeader", new SimpleOrderedMap<>()); SolrQueryRequest req = new LocalSolrQueryRequest(core, params); handler.handleRequest(req, rsp); NamedList values = rsp.getValues(); @@ -70,7 +70,7 @@ public class ClusteringComponentTest extends AbstractClusteringTestCase { handler = core.getRequestHandler("docClustering"); rsp = new SolrQueryResponse(); - rsp.add("responseHeader", new SimpleOrderedMap()); + rsp.add("responseHeader", new SimpleOrderedMap<>()); req = new LocalSolrQueryRequest(core, params); handler.handleRequest(req, rsp); values = 
rsp.getValues(); diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java index bc0c9830f89..db540f1d8dc 100644 --- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java +++ b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/MockDocumentClusteringEngine.java @@ -27,11 +27,11 @@ import org.apache.solr.search.DocSet; public class MockDocumentClusteringEngine extends DocumentClusteringEngine { @Override public NamedList cluster(DocSet docs, SolrParams solrParams) { - return new NamedList(); + return new NamedList<>(); } @Override public NamedList cluster(SolrParams solrParams) { - return new NamedList(); + return new NamedList<>(); } } diff --git a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngineTest.java b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngineTest.java index 6334d631b04..2a55f65aec2 100644 --- a/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngineTest.java +++ b/solr/contrib/clustering/src/test/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngineTest.java @@ -472,7 +472,7 @@ public class CarrotClusteringEngineTest extends AbstractClusteringTestCase { // Perform clustering LocalSolrQueryRequest req = new LocalSolrQueryRequest(h.getCore(), solrParams); - Map docIds = new HashMap(docList.size()); + Map docIds = new HashMap<>(docList.size()); SolrDocumentList solrDocList = SolrPluginUtils.docListToSolrDocumentList( docList, searcher, engine.getFieldsToLoad(req), docIds ); @SuppressWarnings("unchecked") diff --git a/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java b/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java index 1c4d0213fb6..756b76409ed 100644 --- a/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java +++ b/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/MailEntityProcessor.java @@ -140,7 +140,7 @@ public class MailEntityProcessor extends EntityProcessorBase { } private Map getDocumentFromMail(Message mail) { - Map row = new HashMap(); + Map row = new HashMap<>(); try { addPartToDocument(mail, row, true); return row; @@ -201,7 +201,7 @@ public class MailEntityProcessor extends EntityProcessorBase { if ((adresses = mail.getFrom()) != null && adresses.length > 0) row.put(FROM, adresses[0].toString()); - List to = new ArrayList(); + List to = new ArrayList<>(); if ((adresses = mail.getRecipients(Message.RecipientType.TO)) != null) addAddressToList(adresses, to); if ((adresses = mail.getRecipients(Message.RecipientType.CC)) != null) @@ -219,7 +219,7 @@ public class MailEntityProcessor extends EntityProcessorBase { row.put(SENT_DATE, d); } - List flags = new ArrayList(); + List flags = new ArrayList<>(); for (Flags.Flag flag : mail.getFlags().getSystemFlags()) { if (flag == Flags.Flag.ANSWERED) flags.add(FLAG_ANSWERED); @@ -319,7 +319,7 @@ public class MailEntityProcessor extends EntityProcessorBase { public FolderIterator(Store mailBox) { this.mailbox = mailBox; - folders = new ArrayList(); + folders = new ArrayList<>(); getTopLevelFolders(mailBox); } @@ -529,8 +529,8 @@ public class 
MailEntityProcessor extends EntityProcessorBase { private String protocol; private String folderNames; - private List exclude = new ArrayList(); - private List include = new ArrayList(); + private List exclude = new ArrayList<>(); + private List include = new ArrayList<>(); private boolean recurse; private int batchSize; @@ -550,7 +550,7 @@ public class MailEntityProcessor extends EntityProcessorBase { private boolean connected = false; private FolderIterator folderIter; private MessageIterator msgIter; - private List filters = new ArrayList(); + private List filters = new ArrayList<>(); private static FetchProfile fp = new FetchProfile(); private static final Logger LOG = LoggerFactory.getLogger(DataImporter.class); diff --git a/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java b/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java index 3e957ca3357..20121d47f55 100644 --- a/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java +++ b/solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java @@ -107,7 +107,7 @@ public class TikaEntityProcessor extends EntityProcessorBase { @Override public Map nextRow() { if(done) return null; - Map row = new HashMap(); + Map row = new HashMap<>(); DataSource dataSource = context.getDataSource(); InputStream is = dataSource.getData(context.getResolvedEntityAttribute(URL)); ContentHandler contentHandler = null; diff --git a/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java b/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java index a6273c062da..e595c1ee6ec 100644 --- a/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java +++ b/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java @@ -53,7 +53,7 @@ public class TestMailEntityProcessor extends AbstractDataImportHandlerTestCase { private static final String host = "host"; private static final String protocol = "imaps"; - private static Map paramMap = new HashMap(); + private static Map paramMap = new HashMap<>(); @Test @Ignore("Needs a Mock Mail Server to work") @@ -172,7 +172,7 @@ public class TestMailEntityProcessor extends AbstractDataImportHandlerTestCase { } static class SolrWriterImpl extends SolrWriter { - List docs = new ArrayList(); + List docs = new ArrayList<>(); Boolean deleteAllCalled; Boolean commitCalled; diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java index 690e9dbd70c..ac0d3939585 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java @@ -47,7 +47,7 @@ public class ClobTransformer extends Transformer { Object o = aRow.get(srcCol); if (o instanceof List) { List inputs = (List) o; - List results = new ArrayList(); + List results = new ArrayList<>(); for (Object input : inputs) { if (input instanceof Clob) { Clob clob = (Clob) input; diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ConfigParseUtil.java 
b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ConfigParseUtil.java index 70a4df97050..179df231526 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ConfigParseUtil.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ConfigParseUtil.java @@ -34,7 +34,7 @@ public class ConfigParseUtil { } public static HashMap getAllAttributes(Element e) { - HashMap m = new HashMap(); + HashMap m = new HashMap<>(); NamedNodeMap nnm = e.getAttributes(); for (int i = 0; i < nnm.getLength(); i++) { m.put(nnm.item(i).getNodeName(), nnm.item(i).getNodeValue()); @@ -61,7 +61,7 @@ public class ConfigParseUtil { } public static List getChildNodes(Element e, String byName) { - List result = new ArrayList(); + List result = new ArrayList<>(); NodeList l = e.getChildNodes(); for (int i = 0; i < l.getLength(); i++) { if (e.equals(l.item(i).getParentNode()) diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java index c06401bb618..b233b7b12c1 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java @@ -138,7 +138,7 @@ public class ContextImpl extends Context { } if (Context.SCOPE_ENTITY.equals(scope)) { if (entitySession == null) { - entitySession = new HashMap(); + entitySession = new HashMap<>(); } entitySession.put(name, val); } else if (Context.SCOPE_GLOBAL.equals(scope)) { diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java index caa75d99860..d5e7385965b 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java @@ -36,7 +36,7 @@ public class DIHCacheSupport { .getLogger(DIHCacheSupport.class); private String cacheForeignKey; private String cacheImplName; - private Map queryVsCache = new HashMap(); + private Map queryVsCache = new HashMap<>(); private Map>> queryVsCacheIterator; private Iterator> dataSourceRowCache; private boolean cacheDoKeyLookup; @@ -94,7 +94,7 @@ public class DIHCacheSupport { public void initNewParent(Context context) { dataSourceRowCache = null; - queryVsCacheIterator = new HashMap>>(); + queryVsCacheIterator = new HashMap<>(); for (Map.Entry entry : queryVsCache.entrySet()) { queryVsCacheIterator.put(entry.getKey(), entry.getValue().iterator()); } diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriterBase.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriterBase.java index 12cd563df21..a33a202aab7 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriterBase.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriterBase.java @@ -27,7 +27,7 @@ public abstract class DIHWriterBase implements DIHWriter { @Override public void setDeltaKeys(Set> passedInDeltaKeys) { - deltaKeys = new HashSet(); + deltaKeys = new HashSet<>(); for (Map aMap : passedInDeltaKeys) { if (aMap.size() > 0) { Object key = null; diff --git 
a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java index 4171bb69d4f..e5b74f3518b 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java @@ -210,7 +210,7 @@ public class DataImportHandler extends RequestHandlerBase implements private Map getParamsMap(SolrParams params) { Iterator names = params.getParameterNamesIterator(); - Map result = new HashMap(); + Map result = new HashMap<>(); while (names.hasNext()) { String s = names.next(); String[] val = params.getParams(s); diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java index 9862ba4f276..72512cd5923 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java @@ -80,12 +80,12 @@ public class DataImporter { private DIHConfiguration config; private Date indexStartTime; private Properties store = new Properties(); - private Map> requestLevelDataSourceProps = new HashMap>(); + private Map> requestLevelDataSourceProps = new HashMap<>(); private IndexSchema schema; public DocBuilder docBuilder; public DocBuilder.Statistics cumulativeStatistics = new DocBuilder.Statistics(); private SolrCore core; - private Map coreScopeSession = new ConcurrentHashMap(); + private Map coreScopeSession = new ConcurrentHashMap<>(); private ReentrantLock importLock = new ReentrantLock(); private boolean isDeltaImportSupported = false; private final String handlerName; @@ -131,7 +131,7 @@ public class DataImporter { success = true; } - Map> dsProps = new HashMap>(); + Map> dsProps = new HashMap<>(); if(defaultParams!=null) { int position = 0; while (position < defaultParams.size()) { @@ -143,7 +143,7 @@ public class DataImporter { success = true; NamedList dsConfig = (NamedList) defaultParams.getVal(position); LOG.info("Getting configuration for Global Datasource..."); - Map props = new HashMap(); + Map props = new HashMap<>(); for (int i = 0; i < dsConfig.size(); i++) { props.put(dsConfig.getName(i), dsConfig.getVal(i).toString()); } @@ -225,9 +225,9 @@ public class DataImporter { public DIHConfiguration readFromXml(Document xmlDocument) { DIHConfiguration config; - List> functions = new ArrayList>(); + List> functions = new ArrayList<>(); Script script = null; - Map> dataSources = new HashMap>(); + Map> dataSources = new HashMap<>(); NodeList dataConfigTags = xmlDocument.getElementsByTagName("dataConfig"); if(dataConfigTags == null || dataConfigTags.getLength() == 0) { @@ -263,7 +263,7 @@ public class DataImporter { List dataSourceTags = ConfigParseUtil.getChildNodes(e, ConfigNameConstants.DATA_SRC); if (!dataSourceTags.isEmpty()) { for (Element element : dataSourceTags) { - Map p = new HashMap(); + Map p = new HashMap<>(); HashMap attrs = ConfigParseUtil.getAllAttributes(element); for (Map.Entry entry : attrs.entrySet()) { p.put(entry.getKey(), entry.getValue()); @@ -294,7 +294,7 @@ public class DataImporter { } else { Element pwElement = propertyWriterTags.get(0); String type = null; - Map params = new HashMap(); + Map params = new HashMap<>(); for (Map.Entry entry : 
ConfigParseUtil.getAllAttributes( pwElement).entrySet()) { if (TYPE.equals(entry.getKey())) { @@ -494,7 +494,7 @@ public class DataImporter { //this map object is a Collections.synchronizedMap(new LinkedHashMap()). if we // synchronize on the object it must be safe to iterate through the map Map statusMessages = (Map) retrieve(STATUS_MSGS); - Map result = new LinkedHashMap(); + Map result = new LinkedHashMap<>(); if (statusMessages != null) { synchronized (statusMessages) { for (Object o : statusMessages.entrySet()) { @@ -520,7 +520,7 @@ public class DataImporter { * used by tests. */ Map getEvaluators(List> fn) { - Map evaluators = new HashMap(); + Map evaluators = new HashMap<>(); evaluators.put(Evaluator.DATE_FORMAT_EVALUATOR, new DateFormatEvaluator()); evaluators.put(Evaluator.SQL_ESCAPE_EVALUATOR, new SqlEscapingEvaluator()); evaluators.put(Evaluator.URL_ENCODE_EVALUATOR, new UrlEvaluator()); diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatEvaluator.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatEvaluator.java index a1f03fdddbd..84bab52ba98 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatEvaluator.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatEvaluator.java @@ -52,9 +52,9 @@ import org.apache.solr.util.DateMathParser; public class DateFormatEvaluator extends Evaluator { public static final String DEFAULT_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss"; - Map cache = new WeakHashMap(); - Map availableLocales = new HashMap(); - Set availableTimezones = new HashSet(); + Map cache = new WeakHashMap<>(); + Map availableLocales = new HashMap<>(); + Set availableTimezones = new HashSet<>(); class DateFormatCacheKey { DateFormatCacheKey(Locale l, TimeZone tz, String df) { diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java index f67dbfa0782..dfc30e5d619 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java @@ -40,7 +40,7 @@ import org.slf4j.LoggerFactory; * @since solr 1.3 */ public class DateFormatTransformer extends Transformer { - private Map fmtCache = new HashMap(); + private Map fmtCache = new HashMap<>(); private static final Logger LOG = LoggerFactory .getLogger(DateFormatTransformer.class); @@ -66,7 +66,7 @@ public class DateFormatTransformer extends Transformer { Object o = aRow.get(srcCol); if (o instanceof List) { List inputs = (List) o; - List results = new ArrayList(); + List results = new ArrayList<>(); for (Object input : inputs) { results.add(process(input, fmt, locale)); } diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugInfo.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugInfo.java index b35fe9110f3..f58dc6eaafe 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugInfo.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugInfo.java @@ -26,12 +26,12 @@ import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.StrUtils; public class DebugInfo { - public List debugDocuments = new 
ArrayList(0); + public List debugDocuments = new ArrayList<>(0); public NamedList debugVerboseOutput = null; public boolean verbose; public DebugInfo(Map requestParams) { verbose = StrUtils.parseBool((String) requestParams.get("verbose"), false); - debugVerboseOutput = new NamedList(); + debugVerboseOutput = new NamedList<>(); } } diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java index e41d51382d5..1d01a6e2731 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java @@ -74,9 +74,9 @@ public class DocBuilder { boolean verboseDebug = false; - Map session = new HashMap(); + Map session = new HashMap<>(); - static final ThreadLocal INSTANCE = new ThreadLocal(); + static final ThreadLocal INSTANCE = new ThreadLocal<>(); private Map persistedProperties; private DIHProperties propWriter; @@ -132,7 +132,7 @@ public class DocBuilder { resolver = new VariableResolver(); } resolver.setEvaluators(dataImporter.getEvaluators()); - Map indexerNamespace = new HashMap(); + Map indexerNamespace = new HashMap<>(); if (persistedProperties.get(LAST_INDEX_TIME) != null) { indexerNamespace.put(LAST_INDEX_TIME, persistedProperties.get(LAST_INDEX_TIME)); } else { @@ -140,9 +140,9 @@ public class DocBuilder { indexerNamespace.put(LAST_INDEX_TIME, epoch); } indexerNamespace.put(INDEX_START_TIME, dataImporter.getIndexStartTime()); - indexerNamespace.put("request", new HashMap(reqParams.getRawParams())); + indexerNamespace.put("request", new HashMap<>(reqParams.getRawParams())); for (Entity entity : dataImporter.getConfig().getEntities()) { - Map entityNamespace = new HashMap(); + Map entityNamespace = new HashMap<>(); String key = SolrWriter.LAST_INDEX_KEY; Object lastIndex = persistedProperties.get(entity.getName() + "." 
+ key); if (lastIndex != null) { @@ -213,10 +213,10 @@ public class DocBuilder { } AtomicBoolean fullCleanDone = new AtomicBoolean(false); //we must not do a delete of *:* multiple times if there are multiple root entities to be run - Map lastIndexTimeProps = new HashMap(); + Map lastIndexTimeProps = new HashMap<>(); lastIndexTimeProps.put(LAST_INDEX_KEY, dataImporter.getIndexStartTime()); - epwList = new ArrayList(config.getEntities().size()); + epwList = new ArrayList<>(config.getEntities().size()); for (Entity e : config.getEntities()) { epwList.add(getEntityProcessorWrapper(e)); } @@ -342,7 +342,7 @@ public class DocBuilder { addStatusMessage("Identifying Delta"); LOG.info("Starting delta collection."); - Set> deletedKeys = new HashSet>(); + Set> deletedKeys = new HashSet<>(); Set> allPks = collectDelta(currentEntityProcessorWrapper, resolver, deletedKeys); if (stop.get()) return; @@ -411,7 +411,7 @@ public class DocBuilder { private void buildDocument(VariableResolver vr, DocWrapper doc, Map pk, EntityProcessorWrapper epw, boolean isRoot, ContextImpl parentCtx) { - List entitiesToDestroy = new ArrayList(); + List entitiesToDestroy = new ArrayList<>(); try { buildDocument(vr, doc, pk, epw, isRoot, parentCtx, entitiesToDestroy); } catch (Exception e) { @@ -565,7 +565,7 @@ public class DocBuilder { Map session; public void setSessionAttribute(String key, Object val){ - if(session == null) session = new HashMap(); + if(session == null) session = new HashMap<>(); session.put(key, val); } @@ -768,7 +768,7 @@ public class DocBuilder { ContextImpl context1 = new ContextImpl(epw, resolver, null, Context.FIND_DELTA, session, null, this); epw.init(context1); - Set> myModifiedPks = new HashSet>(); + Set> myModifiedPks = new HashSet<>(); @@ -781,7 +781,7 @@ public class DocBuilder { } // identifying the modified rows for this entity - Map> deltaSet = new HashMap>(); + Map> deltaSet = new HashMap<>(); LOG.info("Running ModifiedRowKey() for Entity: " + epw.getEntity().getName()); //get the modified rows in this entity String pk = epw.getEntity().getPk(); @@ -804,7 +804,7 @@ public class DocBuilder { return new HashSet(); } //get the deleted rows for this entity - Set> deletedSet = new HashSet>(); + Set> deletedSet = new HashSet<>(); while (true) { Map row = epw.nextDeletedRowKey(); if (row == null) @@ -834,7 +834,7 @@ public class DocBuilder { LOG.info("Completed DeletedRowKey for Entity: " + epw.getEntity().getName() + " rows obtained : " + deletedSet.size()); myModifiedPks.addAll(deltaSet.values()); - Set> parentKeyList = new HashSet>(); + Set> parentKeyList = new HashSet<>(); //all that we have captured is useless (in a sub-entity) if no rows in the parent is modified because of these //propogate up the changes in the chain if (epw.getEntity().getParentEntity() != null) { @@ -862,7 +862,7 @@ public class DocBuilder { // Do not use entity.isDocRoot here because one of descendant entities may set rootEntity="true" return epw.getEntity().getParentEntity() == null ? 
- myModifiedPks : new HashSet>(parentKeyList); + myModifiedPks : new HashSet<>(parentKeyList); } private void getModifiedParentRows(VariableResolver resolver, @@ -949,7 +949,7 @@ public class DocBuilder { } public Map getStatsSnapshot() { - Map result = new HashMap(); + Map result = new HashMap<>(); result.put("docCount", docCount.get()); result.put("deletedDocCount", deletedDocCount.get()); result.put("rowCount", rowsCount.get()); diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java index 682e08af831..a09efb1b0f8 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java @@ -43,7 +43,7 @@ public class EntityProcessorWrapper extends EntityProcessor { private EntityProcessor delegate; private Entity entity; private DataSource datasource; - private List children = new ArrayList(); + private List children = new ArrayList<>(); private DocBuilder docBuilder; private boolean initalized; private String onError; @@ -176,7 +176,7 @@ public class EntityProcessorWrapper extends EntityProcessor { if (stopTransform) break; try { if (rows != null) { - List> tmpRows = new ArrayList>(); + List> tmpRows = new ArrayList<>(); for (Map map : rows) { resolver.addNamespace(entityName, map); Object o = t.transformRow(map, context); diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java index 2801167a018..45168950e03 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java @@ -71,7 +71,7 @@ public abstract class Evaluator { * @return a List of objects which can either be a string, number or a variable wrapper */ List parseParams(String expression, VariableResolver vr) { - List result = new ArrayList(); + List result = new ArrayList<>(); expression = expression.trim(); String[] ss = expression.split(","); for (int i = 0; i < ss.length; i++) { diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java index 4a63e87bc4f..2ea0a4fb451 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java @@ -200,7 +200,7 @@ public class FileListEntityProcessor extends EntityProcessorBase { public Map nextRow() { if (rowIterator != null) return getNext(); - List> fileDetails = new ArrayList>(); + List> fileDetails = new ArrayList<>(); File dir = new File(baseDir); String dateStr = context.getEntityAttribute(NEWER_THAN); @@ -243,7 +243,7 @@ public class FileListEntityProcessor extends EntityProcessorBase { } private void addDetails(List> files, File dir, String name) { - Map details = new HashMap(); + Map details = new HashMap<>(); File aFile = new File(dir, name); if (aFile.isDirectory()) return; long sz = aFile.length(); diff --git 
a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java index c42a201a391..f6eb5efadeb 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java @@ -49,7 +49,7 @@ public class JdbcDataSource extends private Connection conn; - private Map fieldNameVsType = new HashMap(); + private Map fieldNameVsType = new HashMap<>(); private boolean convertType = false; @@ -245,7 +245,7 @@ public class JdbcDataSource extends private List readFieldNames(ResultSetMetaData metaData) throws SQLException { - List colNames = new ArrayList(); + List colNames = new ArrayList<>(); int count = metaData.getColumnCount(); for (int i = 0; i < count; i++) { colNames.add(metaData.getColumnLabel(i + 1)); @@ -309,7 +309,7 @@ public class JdbcDataSource extends private Map getARow() { if (resultSet == null) return null; - Map result = new HashMap(); + Map result = new HashMap<>(); for (String colName : colNames) { try { if (!convertType) { diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java index 5b919bb1690..99bbea13e57 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java @@ -125,7 +125,7 @@ public class LineEntityProcessor extends EntityProcessorBase { if (acceptLineRegex != null && ! 
acceptLineRegex.matcher(line).find()) continue; if (skipLineRegex != null && skipLineRegex.matcher(line).find()) continue; // Contruct the 'row' of fields - Map row = new HashMap(); + Map row = new HashMap<>(); row.put("rawLine", line); return row; } diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java index 469f582c0a2..5036a50c59e 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java @@ -33,7 +33,7 @@ import java.util.Properties; public class MockDataSource extends DataSource>> { - private static Map>> cache = new HashMap>>(); + private static Map>> cache = new HashMap<>(); public static void setIterator(String query, Iterator> iter) { diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java index 857865c59bc..ecc9aab2caa 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java @@ -69,7 +69,7 @@ public class PlainTextEntityProcessor extends EntityProcessorBase { if (len <= 0) break; sw.append(new String(buf, 0, len)); } - Map row = new HashMap(); + Map row = new HashMap<>(); row.put(PLAIN_TEXT, sw.toString()); ended = true; IOUtils.closeQuietly(r); diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java index babd3e3e071..24bf9df4236 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java @@ -75,7 +75,7 @@ public class RegexTransformer extends Transformer { Map.Entry entry = (Map.Entry) e; List l = results; if(!col.equals(entry.getKey())){ - if(otherVars == null) otherVars = new HashMap(); + if(otherVars == null) otherVars = new HashMap<>(); l = otherVars.get(entry.getKey()); if(l == null){ l = new ArrayList(); @@ -131,7 +131,7 @@ public class RegexTransformer extends Transformer { @SuppressWarnings("unchecked") private List readBySplit(String splitBy, String value) { String[] vals = value.split(splitBy); - List l = new ArrayList(); + List l = new ArrayList<>(); l.addAll(Arrays.asList(vals)); return l; } @@ -151,7 +151,7 @@ public class RegexTransformer extends Transformer { if(groupNames == null){ l = new ArrayList(); } else { - map = new HashMap(); + map = new HashMap<>(); } for (int i = 1; i <= m.groupCount(); i++) { try { @@ -186,7 +186,7 @@ public class RegexTransformer extends Transformer { return result; } - private HashMap PATTERN_CACHE = new HashMap(); + private HashMap PATTERN_CACHE = new HashMap<>(); public static final String REGEX = "regex"; diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RequestInfo.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RequestInfo.java index 6a4543885a8..d3f1a56d078 100644 --- 
a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RequestInfo.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RequestInfo.java @@ -91,11 +91,11 @@ public class RequestInfo { List modifiableEntities = null; if(o != null) { if (o instanceof String) { - modifiableEntities = new ArrayList(); + modifiableEntities = new ArrayList<>(); modifiableEntities.add((String) o); } else if (o instanceof List) { @SuppressWarnings("unchecked") - List modifiableEntities1 = new ArrayList((List) o); + List modifiableEntities1 = new ArrayList<>((List) o); modifiableEntities = modifiableEntities1; } entitiesToRun = Collections.unmodifiableList(modifiableEntities); @@ -110,7 +110,7 @@ public class RequestInfo { dataConfigParam = null; } dataConfig = dataConfigParam; - this.rawParams = Collections.unmodifiableMap(new HashMap(requestParams)); + this.rawParams = Collections.unmodifiableMap(new HashMap<>(requestParams)); } public String getCommand() { diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java index 7967558a852..08209f27c0b 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java @@ -171,7 +171,7 @@ public class SimplePropertiesWriter extends DIHProperties { * already converted them. */ protected Map propertiesToMap(Properties p) { - Map theMap = new HashMap(); + Map theMap = new HashMap<>(); for(Map.Entry entry : p.entrySet()) { String key = entry.getKey().toString(); Object val = entry.getValue().toString(); diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrEntityProcessor.java index f2b5e939acd..2684f729198 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrEntityProcessor.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrEntityProcessor.java @@ -222,7 +222,7 @@ public class SolrEntityProcessor extends EntityProcessorBase { public Map next() { SolrDocument solrDocument = solrDocumentIterator.next(); - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); Collection fields = solrDocument.getFieldNames(); for (String field : fields) { Object fieldValue = solrDocument.getFieldValue(field); diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java index 109b6d5ba85..aaf38d7033d 100644 --- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java +++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java @@ -60,7 +60,7 @@ public class SortedMapBackedCache implements DIHCache { } List> thisKeysRecs = theMap.get(pk); if (thisKeysRecs == null) { - thisKeysRecs = new ArrayList>(); + thisKeysRecs = new ArrayList<>(); theMap.put(pk, thisKeysRecs); } thisKeysRecs.add(rec); @@ -131,7 +131,7 @@ public class SortedMapBackedCache implements DIHCache { return null; } if(key instanceof Iterable) { - List> vals = new ArrayList>(); + List> vals = new 
ArrayList<>();
       Iterator iter = ((Iterable) key).iterator();
       while(iter.hasNext()) {
         List> val = theMap.get(iter.next());
@@ -220,7 +220,7 @@ public class SortedMapBackedCache implements DIHCache {
     checkOpen(false);
     isOpen = true;
     if (theMap == null) {
-      theMap = new TreeMap>>();
+      theMap = new TreeMap<>();
     }
     String pkName = CachePropertyUtil.getAttributeValueAsString(context,
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/TemplateTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/TemplateTransformer.java
index 4b7ca30a69c..7d83ea3b5cf 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/TemplateTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/TemplateTransformer.java
@@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory;
 public class TemplateTransformer extends Transformer {
   private static final Logger LOG = LoggerFactory.getLogger(TemplateTransformer.class);
-  private Map> templateVsVars = new HashMap>();
+  private Map> templateVsVars = new HashMap<>();
   @Override
   @SuppressWarnings("unchecked")
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java
index f594118d71a..76930e2309d 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolver.java
@@ -54,30 +54,30 @@ public class VariableResolver {
       .compile("^(\\w*?)\\((.*?)\\)$");
   private Map rootNamespace;
   private Map evaluators;
-  private Map cache = new WeakHashMap();
+  private Map cache = new WeakHashMap<>();
   class Resolved {
-    List startIndexes = new ArrayList(2);
-    List endOffsets = new ArrayList(2);
-    List variables = new ArrayList(2);
+    List startIndexes = new ArrayList<>(2);
+    List endOffsets = new ArrayList<>(2);
+    List variables = new ArrayList<>(2);
   }
   public static final String FUNCTIONS_NAMESPACE = "dataimporter.functions.";
   public static final String FUNCTIONS_NAMESPACE_SHORT = "dih.functions.";
   public VariableResolver() {
-    rootNamespace = new HashMap();
+    rootNamespace = new HashMap<>();
   }
   public VariableResolver(Properties defaults) {
-    rootNamespace = new HashMap();
+    rootNamespace = new HashMap<>();
     for (Map.Entry entry : defaults.entrySet()) {
       rootNamespace.put(entry.getKey().toString(), entry.getValue());
     }
   }
   public VariableResolver(Map defaults) {
-    rootNamespace = new HashMap(defaults);
+    rootNamespace = new HashMap<>(defaults);
   }
   /**
@@ -184,7 +184,7 @@ public class VariableResolver {
     if (r == null) {
       return Collections.emptyList();
     }
-    return new ArrayList(r.variables);
+    return new ArrayList<>(r.variables);
   }
   public void addNamespace(String name, Map newMap) {
@@ -221,7 +221,7 @@ public class VariableResolver {
       Object o = currentLevel.get(keyParts[i]);
       if (o == null) {
         if(i == j-1) {
-          Map nextLevel = new HashMap();
+          Map nextLevel = new HashMap<>();
           currentLevel.put(keyParts[i], nextLevel);
           currentLevel = nextLevel;
         } else {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java
index 4819e75089c..b50cdae533e 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathEntityProcessor.java
@@ -57,7 +57,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
   private static final Logger LOG = LoggerFactory.getLogger(XPathEntityProcessor.class);
   private static final XMLErrorLogger xmllog = new XMLErrorLogger(LOG);
-  private static final Map END_MARKER = new HashMap();
+  private static final Map END_MARKER = new HashMap<>();
   protected List placeHolderVariables;
@@ -179,14 +179,14 @@ public class XPathEntityProcessor extends EntityProcessorBase {
     for (String s : l) {
       if (s.startsWith(entityName + ".")) {
         if (placeHolderVariables == null)
-          placeHolderVariables = new ArrayList();
+          placeHolderVariables = new ArrayList<>();
         placeHolderVariables.add(s.substring(entityName.length() + 1));
       }
     }
     for (Map fld : context.getAllEntityFields()) {
       if (fld.get(COMMON_FIELD) != null && "true".equals(fld.get(COMMON_FIELD))) {
         if (commonFields == null)
-          commonFields = new ArrayList();
+          commonFields = new ArrayList<>();
         commonFields.add(fld.get(DataImporter.COLUMN));
       }
     }
@@ -249,8 +249,8 @@
   }
   private void addNamespace() {
-    Map namespace = new HashMap();
-    Set allNames = new HashSet();
+    Map namespace = new HashMap<>();
+    Set allNames = new HashSet<>();
     if (commonFields != null) allNames.addAll(commonFields);
     if (placeHolderVariables != null) allNames.addAll(placeHolderVariables);
     if(allNames.isEmpty()) return;
@@ -278,7 +278,7 @@ private void initQuery(String s) {
     Reader data = null;
     try {
-      final List> rows = new ArrayList>();
+      final List> rows = new ArrayList<>();
       try {
         data = dataSource.getData(s);
       } catch (Exception e) {
@@ -329,7 +329,7 @@
           wrapAndThrow(SEVERE, e, msg);
         } else if (SKIP.equals(onError)) {
           LOG.warn(msg, e);
-          Map map = new HashMap();
+          Map map = new HashMap<>();
           map.put(DocBuilder.SKIP_DOC, Boolean.TRUE);
           rows.add(map);
         } else if (CONTINUE.equals(onError)) {
@@ -357,7 +357,7 @@
     if (useSolrAddXml) {
       List names = (List) record.get("name");
       List values = (List) record.get("value");
-      Map row = new HashMap();
+      Map row = new HashMap<>();
       for (int i = 0; i < names.size() && i < values.size(); i++) {
         if (row.containsKey(names.get(i))) {
           Object existing = row.get(names.get(i));
@@ -417,8 +417,8 @@
   private Iterator> getRowIterator(final Reader data, final String s) {
     //nothing atomic about it. I just needed a StrongReference
-    final AtomicReference exp = new AtomicReference();
-    final BlockingQueue> blockingQueue = new ArrayBlockingQueue>(blockingQueueSize);
+    final AtomicReference exp = new AtomicReference<>();
+    final BlockingQueue> blockingQueue = new ArrayBlockingQueue<>(blockingQueueSize);
     final AtomicBoolean isEnd = new AtomicBoolean(false);
     final AtomicBoolean throwExp = new AtomicBoolean(true);
     publisherThread = new Thread() {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathRecordReader.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathRecordReader.java
index 23f4755fb3d..8d2af7ed763 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathRecordReader.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathRecordReader.java
@@ -162,7 +162,7 @@ public class XPathRecordReader {
    * @return results a List of emitted records
    */
   public List> getAllRecords(Reader r) {
-    final List> results = new ArrayList>();
+    final List> results = new ArrayList<>();
     streamRecords(r, new Handler() {
       @Override
       public void handle(Map record, String s) {
@@ -249,7 +249,7 @@
         // prepare for the clean up that will occur when the record
         // is emitted after its END_ELEMENT is matched
         recordStarted = true;
-        valuesAddedinThisFrame = new HashSet();
+        valuesAddedinThisFrame = new HashSet<>();
         stack.push(valuesAddedinThisFrame);
       } else if (recordStarted) {
         // This node is a child of some parent which matched against forEach
@@ -274,7 +274,7 @@
       }
     }
-    Set childrenFound = new HashSet();
+    Set childrenFound = new HashSet<>();
     int event = -1;
     int flattenedStarts=0; // our tag depth when flattening elements
     StringBuilder text = new StringBuilder();
@@ -342,7 +342,7 @@
       Stack> stack, boolean recordStarted)
       throws IOException, XMLStreamException {
     Node n = getMatchingNode(parser,childNodes);
-    Map decends=new HashMap();
+    Map decends=new HashMap<>();
     if (n != null) {
       childrenFound.add(n);
       n.parse(parser, handler, values, stack, recordStarted);
@@ -466,7 +466,7 @@
       if (multiValued) {
         List v = (List) values.get(fieldName);
         if (v == null) {
-          v = new ArrayList();
+          v = new ArrayList<>();
           values.put(fieldName, v);
         }
         v.add(value);
@@ -510,7 +510,7 @@
         // we have reached end of element portion of Xpath and can now only
         // have an element attribute. Add it to this node's list of attributes
         if (attributes == null) {
-          attributes = new ArrayList();
+          attributes = new ArrayList<>();
         }
         xpseg = xpseg.substring(1); // strip the '@'
         attributes.add(new Node(xpseg, fieldName, multiValued));
@@ -518,7 +518,7 @@
       else if ( xpseg.length() == 0) {
         // we have a '//' selector for all descendants of the current nodes
         xpseg = paths.remove(0); // shift out next Xpath segment
-        if (wildCardNodes == null) wildCardNodes = new ArrayList();
+        if (wildCardNodes == null) wildCardNodes = new ArrayList<>();
         Node n = getOrAddNode(xpseg, wildCardNodes);
         if (paths.isEmpty()) {
           // We are currently a leaf node.
@@ -535,7 +535,7 @@
       }
       else {
         if (childNodes == null)
-          childNodes = new ArrayList();
+          childNodes = new ArrayList<>();
         // does this "name" already exist as a child node.
         Node n = getOrAddNode(xpseg,childNodes);
         if (paths.isEmpty()) {
@@ -572,13 +572,13 @@
         n.name = m.group(1);
         int start = m.start(2);
         while (true) {
-          HashMap attribs = new HashMap();
+          HashMap attribs = new HashMap<>();
           if (!m.find(start))
             break;
           attribs.put(m.group(3), m.group(5));
           start = m.end(6);
           if (n.attribAndValues == null)
-            n.attribAndValues = new ArrayList>();
+            n.attribAndValues = new ArrayList<>();
           n.attribAndValues.addAll(attribs.entrySet());
         }
       }
@@ -592,7 +592,7 @@
    * deep-copied for thread safety
    */
   private static Map getDeepCopy(Map values) {
-    Map result = new HashMap();
+    Map result = new HashMap<>();
     for (Map.Entry entry : values.entrySet()) {
       if (entry.getValue() instanceof List) {
         result.put(entry.getKey(), new ArrayList((List) entry.getValue()));
@@ -616,7 +616,7 @@
    * separator or if a sequence of multiple separators appear.
    */
   private static List splitEscapeQuote(String str) {
-    List result = new LinkedList();
+    List result = new LinkedList<>();
     String[] ss = str.split("/");
     for (int i=0; i RESERVED_WORDS;
   static{
-    Set rw = new HashSet();
+    Set rw = new HashSet<>();
     rw.add(IMPORTER_NS);
     rw.add(IMPORTER_NS_SHORT);
     rw.add("request");
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/ConfigParseUtil.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/ConfigParseUtil.java
index 96be947c0e3..5c833fab89e 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/ConfigParseUtil.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/ConfigParseUtil.java
@@ -41,7 +41,7 @@ public class ConfigParseUtil {
   }
   public static HashMap getAllAttributes(Element e) {
-    HashMap m = new HashMap();
+    HashMap m = new HashMap<>();
     NamedNodeMap nnm = e.getAttributes();
     for (int i = 0; i < nnm.getLength(); i++) {
       m.put(nnm.item(i).getNodeName(), nnm.item(i).getNodeValue());
@@ -68,7 +68,7 @@
   }
   public static List getChildNodes(Element e, String byName) {
-    List result = new ArrayList();
+    List result = new ArrayList<>();
     NodeList l = e.getChildNodes();
     for (int i = 0; i < l.getLength(); i++) {
       if (e.equals(l.item(i).getParentNode())
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/DIHConfiguration.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/DIHConfiguration.java
index bf07e40f16b..ce302f9f2f7 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/DIHConfiguration.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/DIHConfiguration.java
@@ -71,7 +71,7 @@ public class DIHConfiguration {
     this.deleteQuery = ConfigParseUtil.getStringAttribute(element, "deleteQuery", null);
     this.onImportStart = ConfigParseUtil.getStringAttribute(element, "onImportStart", null);
     this.onImportEnd = ConfigParseUtil.getStringAttribute(element, "onImportEnd", null);
-    List modEntities = new ArrayList();
+    List modEntities = new ArrayList<>();
     List l = ConfigParseUtil.getChildNodes(element, "entity");
     boolean docRootFound = false;
     for (Element e : l) {
@@ -84,7 +84,7 @@
     if(functions==null) {
       functions = Collections.emptyList();
     }
-    List> modFunc = new ArrayList>(functions.size());
+    List> modFunc = new ArrayList<>(functions.size());
     for(Map f : functions) {
       modFunc.add(Collections.unmodifiableMap(f));
     }
@@ -119,7 +119,7 @@ {
   }
   private Map gatherAllFields(DataImporter di, Entity e) {
-    Map fields = new HashMap();
+    Map fields = new HashMap<>();
     if (e.getFields() != null) {
       for (EntityField f : e.getFields()) {
         fields.put(f.getName(), f);
@@ -132,7 +132,7 @@
   }
   private Map loadSchemaFieldMap() {
-    Map modLnvsf = new HashMap();
+    Map modLnvsf = new HashMap<>();
     for (Map.Entry entry : schema.getFields().entrySet()) {
       modLnvsf.put(entry.getKey().toLowerCase(Locale.ROOT), entry.getValue());
     }
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Entity.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Entity.java
index 14726fb1ae0..6d660642bff 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Entity.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Entity.java
@@ -83,9 +83,9 @@ public class Entity {
     this.allAttributes = Collections.unmodifiableMap(modAttributes);
     List n = ConfigParseUtil.getChildNodes(element, "field");
-    List modFields = new ArrayList(n.size());
-    Map> modColNameVsField = new HashMap>();
-    List> modAllFieldAttributes = new ArrayList>();
+    List modFields = new ArrayList<>(n.size());
+    Map> modColNameVsField = new HashMap<>();
+    List> modAllFieldAttributes = new ArrayList<>();
     for (Element elem : n) {
       EntityField.Builder fieldBuilder = new EntityField.Builder(elem);
       if (config.getSchema() != null) {
@@ -115,7 +115,7 @@
       }
       Set fieldSet = modColNameVsField.get(fieldBuilder.column);
       if (fieldSet == null) {
-        fieldSet = new HashSet();
+        fieldSet = new HashSet<>();
         modColNameVsField.put(fieldBuilder.column, fieldSet);
       }
       fieldBuilder.allAttributes.put("boost", Float
@@ -128,7 +128,7 @@
       fieldSet.add(field);
       modFields.add(field);
     }
-    Map> modColNameVsField1 = new HashMap>();
+    Map> modColNameVsField1 = new HashMap<>();
     for (Map.Entry> entry : modColNameVsField
         .entrySet()) {
       if (entry.getValue().size() > 0) {
@@ -161,7 +161,7 @@
     }
     pkMappingFromSchema = modPkMappingFromSchema;
     n = ConfigParseUtil.getChildNodes(element, "entity");
-    List modEntities = new ArrayList();
+    List modEntities = new ArrayList<>();
     for (Element elem : n) {
       modEntities.add(new Entity((docRootFound || this.docRoot), elem, di, config, this));
     }
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/EntityField.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/EntityField.java
index 806150c690e..adef4127eac 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/EntityField.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/EntityField.java
@@ -46,7 +46,7 @@ public class EntityField {
     this.multiValued = b.multiValued;
     this.dynamicName = b.dynamicName;
     this.entity = b.entity;
-    this.allAttributes = Collections.unmodifiableMap(new HashMap(b.allAttributes));
+    this.allAttributes = Collections.unmodifiableMap(new HashMap<>(b.allAttributes));
   }
   public String getName() {
@@ -89,7 +89,7 @@
     public boolean multiValued = false;
     public boolean dynamicName = false;
     public Entity entity;
-    public Map allAttributes = new HashMap();
+    public Map allAttributes = new HashMap<>();
     public Builder(Element e) {
       this.name = ConfigParseUtil.getStringAttribute(e, DataImporter.NAME, null);
@@ -98,7 +98,7 @@ {
       throw new DataImportHandlerException(SEVERE, "Field must have a column attribute");
       }
       this.boost = Float.parseFloat(ConfigParseUtil.getStringAttribute(e, "boost", "1.0f"));
-      this.allAttributes = new HashMap(ConfigParseUtil.getAllAttributes(e));
+      this.allAttributes = new HashMap<>(ConfigParseUtil.getAllAttributes(e));
     }
     public String getNameOrColumn() {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Field.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Field.java
index da0851dbfa4..43318113a33 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Field.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/Field.java
@@ -46,7 +46,7 @@ public class Field {
     this.multiValued = b.multiValued;
     this.dynamicName = b.dynamicName;
     this.entity = b.entity;
-    this.allAttributes = Collections.unmodifiableMap(new HashMap(b.allAttributes));
+    this.allAttributes = Collections.unmodifiableMap(new HashMap<>(b.allAttributes));
   }
   public String getName() {
@@ -89,7 +89,7 @@
     public boolean multiValued = false;
     public boolean dynamicName;
     public Entity entity;
-    public Map allAttributes = new HashMap();
+    public Map allAttributes = new HashMap<>();
     public Builder(Element e) {
       this.name = ConfigParseUtil.getStringAttribute(e, DataImporter.NAME, null);
@@ -98,7 +98,7 @@ {
       throw new DataImportHandlerException(SEVERE, "Field must have a column attribute");
       }
       this.boost = Float.parseFloat(ConfigParseUtil.getStringAttribute(e, "boost", "1.0f"));
-      this.allAttributes = new HashMap(ConfigParseUtil.getAllAttributes(e));
+      this.allAttributes = new HashMap<>(ConfigParseUtil.getAllAttributes(e));
     }
     public String getNameOrColumn() {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/PropertyWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/PropertyWriter.java
index 5925131331a..811cce044b4 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/PropertyWriter.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/config/PropertyWriter.java
@@ -27,7 +27,7 @@ public class PropertyWriter {
   public PropertyWriter(String type, Map parameters) {
     this.type = type;
-    this.parameters = Collections.unmodifiableMap(new HashMap(parameters));
+    this.parameters = Collections.unmodifiableMap(new HashMap<>(parameters));
   }
   public Map getParameters() {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
index 6c290f21582..88e1ed890d4 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDIHCacheTestCase.java
@@ -42,7 +42,7 @@ public class AbstractDIHCacheTestCase {
   protected static final Date Feb21_2011 = new Date(1298268000000l);
   protected final String[] fieldTypes = { "INTEGER", "BIGDECIMAL", "STRING", "STRING", "FLOAT", "DATE", "CLOB" };
   protected final String[] fieldNames = { "a_id", "PI", "letter", "examples", "a_float", "a_date", "DESCRIPTION" };
-  protected List data = new ArrayList();
+  protected List data = new ArrayList<>();
   protected Clob APPLE = null;
   @Before
@@ -55,7 +55,7 @@ public class AbstractDIHCacheTestCase {
     // The first row needs to have all non-null fields,
     // otherwise we would have to always send the fieldTypes & fieldNames as CacheProperties when building.
-    data = new ArrayList();
+    data = new ArrayList<>();
     data.add(new ControlData(new Object[] { new Integer(1), new BigDecimal(Math.PI), "A", "Apple", new Float(1.11), Feb21_2011, APPLE }));
     data.add(new ControlData(new Object[] { new Integer(2), new BigDecimal(Math.PI), "B", "Ball", new Float(2.22), Feb21_2011, null }));
     data.add(new ControlData(new Object[] { new Integer(4), new BigDecimal(Math.PI), "D", "Dog", new Float(4.44), Feb21_2011, null }));
@@ -102,7 +102,7 @@ public class AbstractDIHCacheTestCase {
   }
   protected List extractDataInKeyOrder(DIHCache cache, String[] theFieldNames) {
-    List data = new ArrayList();
+    List data = new ArrayList<>();
     Iterator> cacheIter = cache.iterator();
     while (cacheIter.hasNext()) {
       data.add(mapToObjectArray(cacheIter.next(), theFieldNames));
@@ -114,7 +114,7 @@
   //It will look for id's sequentially until one is skipped, then will stop.
   protected List extractDataByKeyLookup(DIHCache cache, String[] theFieldNames) {
     int recId = 1;
-    List data = new ArrayList();
+    List data = new ArrayList<>();
     while (true) {
       Iterator> listORecs = cache.iterator(recId);
       if (listORecs == null) {
@@ -130,7 +130,7 @@
   }
   protected List listToControlData(List data) {
-    List returnData = new ArrayList(data.size());
+    List returnData = new ArrayList<>(data.size());
     for (int i = 0; i < data.size(); i++) {
       returnData.add(new ControlData(data.get(i)));
     }
@@ -147,7 +147,7 @@
   protected void compareData(List theControl, List test) {
     // The test data should come back primarily in Key order and secondarily in insertion order.
-    List control = new ArrayList(theControl);
+    List control = new ArrayList<>(theControl);
     Collections.sort(control);
     StringBuilder errors = new StringBuilder();
@@ -189,9 +189,9 @@
   protected Map controlDataToMap(ControlData cd, String[] theFieldNames, boolean keepOrdered) {
     Map rec = null;
     if (keepOrdered) {
-      rec = new LinkedHashMap();
+      rec = new LinkedHashMap<>();
     } else {
-      rec = new HashMap();
+      rec = new HashMap<>();
     }
     for (int i = 0; i < cd.data.length; i++) {
       String fieldName = theFieldNames[i];
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java
index a39d14d67fd..3f3d59670aa 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractDataImportHandlerTestCase.java
@@ -113,7 +113,7 @@ public abstract class AbstractDataImportHandlerTestCase extends
    * @throws Exception in case of any error
    */
   protected void runFullImport(String dataConfig, Map extraParams) throws Exception {
-    HashMap params = new HashMap();
+    HashMap params = new HashMap<>();
     params.put("command", "full-import");
     params.put("debug", "on");
     params.put("dataConfig", dataConfig);
@@ -174,7 +174,7 @@
   public static Map getField(String col, String type, String re,
       String srcCol, String splitBy) {
-    HashMap vals = new HashMap();
+    HashMap vals = new HashMap<>();
     vals.put("column", col);
     vals.put("type", type);
     vals.put("regex", re);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java
index 94543e32e18..0077005d2ff 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/AbstractSqlEntityProcessorTestCase.java
@@ -81,7 +81,7 @@ public abstract class AbstractSqlEntityProcessorTestCase extends
   }
   protected void logPropertiesFile() {
-    Map init = new HashMap();
+    Map init = new HashMap<>();
     init.put("filename", fileName);
     init.put("directory", fileLocation);
     SimplePropertiesWriter spw = new SimplePropertiesWriter();
@@ -339,7 +339,7 @@
     conn = newConnection();
     s = conn.createStatement();
     rs = s.executeQuery(query);
-    List results = new ArrayList();
+    List results = new ArrayList<>();
     while (rs.next()) {
       results.add(rs.getString(1));
     }
@@ -409,9 +409,9 @@
   public IntChanges modifySomePeople() throws Exception {
     underlyingDataModified = true;
     int numberToChange = random().nextInt(people.length + 1);
-    Set changeSet = new HashSet();
-    Set deleteSet = new HashSet();
-    Set addSet = new HashSet();
+    Set changeSet = new HashSet<>();
+    Set deleteSet = new HashSet<>();
+    Set addSet = new HashSet<>();
     Connection conn = null;
     PreparedStatement change = null;
     PreparedStatement delete = null;
@@ -484,7 +484,7 @@
   public String[] modifySomeCountries() throws Exception {
     underlyingDataModified = true;
     int numberToChange = random().nextInt(countries.length + 1);
-    Set changeSet = new HashSet();
+    Set changeSet = new HashSet<>();
     Connection conn = null;
     PreparedStatement change = null;
     // One second in the future ensures a change time after the last import (DIH
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/DestroyCountCache.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/DestroyCountCache.java
index 8b83f5f6f3c..2cd6e8ae1d8 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/DestroyCountCache.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/DestroyCountCache.java
@@ -25,7 +25,7 @@ import java.util.Map;
 import org.junit.Assert;
 public class DestroyCountCache extends SortedMapBackedCache {
-  static Map destroyed = new IdentityHashMap();
+  static Map destroyed = new IdentityHashMap<>();
   @Override
   public void destroy() {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/MockInitialContextFactory.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/MockInitialContextFactory.java
index dc44d0280ea..ca6f13c1387 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/MockInitialContextFactory.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/MockInitialContextFactory.java
@@ -28,7 +28,7 @@ import org.easymock.IAnswer;
 import org.easymock.IMocksControl;
 public class MockInitialContextFactory implements InitialContextFactory {
-  private static final Map objects = new HashMap();
+  private static final Map objects = new HashMap<>();
   private final IMocksControl mockControl;
   private final javax.naming.Context context;
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestBuiltInEvaluators.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestBuiltInEvaluators.java
index f07d4abacab..7b8d632ad54 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestBuiltInEvaluators.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestBuiltInEvaluators.java
@@ -44,7 +44,7 @@ public class TestBuiltInEvaluators extends AbstractDataImportHandlerTestCase {
     super.setUp();
     resolver = new VariableResolver();
-    sqlTests = new HashMap();
+    sqlTests = new HashMap<>();
     sqlTests.put("foo\"", "foo\"\"");
     sqlTests.put("foo\\", "foo\\\\");
@@ -53,7 +53,7 @@
     sqlTests.put("'foo\"", "''foo\"\"");
     sqlTests.put("\"Albert D'souza\"", "\"\"Albert D''souza\"\"");
-    urlTests = new HashMap();
+    urlTests = new HashMap<>();
     urlTests.put("*:*", URLEncoder.encode("*:*", ENCODING));
     urlTests.put("price:[* TO 200]", URLEncoder.encode("price:[* TO 200]",
@@ -78,7 +78,7 @@
   @Test
   public void parseParams() {
-    Map m = new HashMap();
+    Map m = new HashMap<>();
     m.put("b","B");
     VariableResolver vr = new VariableResolver();
     vr.addNamespace("a",m);
@@ -97,7 +97,7 @@
   @Test
   public void testEscapeSolrQueryFunction() {
     final VariableResolver resolver = new VariableResolver();
-    Map m= new HashMap();
+    Map m= new HashMap<>();
     m.put("query","c:t");
     resolver.setEvaluators(new DataImporter().getEvaluators(Collections.>emptyList()));
@@ -147,7 +147,7 @@
     }
     Date d = new Date();
-    Map map = new HashMap();
+    Map map = new HashMap<>();
     map.put("key", d);
     resolver.addNamespace("A", map);
@@ -174,7 +174,7 @@
   private void runTests(Map tests, Evaluator evaluator) {
     ContextImpl ctx = new ContextImpl(null, resolver, null, Context.FULL_DUMP, Collections.emptyMap(), null, null);
     for (Map.Entry entry : tests.entrySet()) {
-      Map values = new HashMap();
+      Map values = new HashMap<>();
       values.put("key", entry.getKey());
       resolver.addNamespace("A", values);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestClobTransformer.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestClobTransformer.java
index d3919318317..fe00d491d04 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestClobTransformer.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestClobTransformer.java
@@ -35,8 +35,8 @@ import java.util.*;
 public class TestClobTransformer extends AbstractDataImportHandlerTestCase {
   @Test
   public void simple() throws Exception {
-    List> flds = new ArrayList>();
-    Map f = new HashMap();
+    List> flds = new ArrayList<>();
+    Map f = new HashMap<>();
     //
     f.put(DataImporter.COLUMN, "dsc");
     f.put(ClobTransformer.CLOB, "true");
@@ -44,7 +44,7 @@
     flds.add(f);
     Context ctx = getContext(null, new VariableResolver(), null, Context.FULL_DUMP, flds, Collections.EMPTY_MAP);
     Transformer t = new ClobTransformer();
-    Map row = new HashMap();
+    Map row = new HashMap<>();
     Clob clob = (Clob) Proxy.newProxyInstance(this.getClass().getClassLoader(), new Class[]{Clob.class}, new InvocationHandler() {
       @Override
       public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDateFormatTransformer.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDateFormatTransformer.java
index 769ef5ac7df..717eccededa 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDateFormatTransformer.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDateFormatTransformer.java
@@ -34,7 +34,7 @@ public class TestDateFormatTransformer extends AbstractDataImportHandlerTestCase
   @Test
   @SuppressWarnings("unchecked")
   public void testTransformRow_SingleRow() throws Exception {
-    List> fields = new ArrayList>();
+    List> fields = new ArrayList<>();
     fields.add(createMap(DataImporter.COLUMN, "lastModified"));
     fields.add(createMap(DataImporter.COLUMN, "dateAdded", RegexTransformer.SRC_COL_NAME, "lastModified",
@@ -57,7 +57,7 @@
   @Test
   @SuppressWarnings("unchecked")
   public void testTransformRow_MultipleRows() throws Exception {
-    List> fields = new ArrayList>();
+    List> fields = new ArrayList<>();
     fields.add(createMap(DataImporter.COLUMN, "lastModified"));
     fields.add(createMap(DataImporter.COLUMN, "dateAdded", RegexTransformer.SRC_COL_NAME, "lastModified",
@@ -67,8 +67,8 @@
     Date now1 = format.parse(format.format(new Date()));
     Date now2 = format.parse(format.format(new Date()));
-    Map row = new HashMap();
-    List list = new ArrayList();
+    Map row = new HashMap<>();
+    List list = new ArrayList<>();
     list.add(format.format(now1));
     list.add(format.format(now2));
     row.put("lastModified", list);
@@ -79,7 +79,7 @@
     Context context = getContext(null, resolver, null, Context.FULL_DUMP, fields, null);
     new DateFormatTransformer().transformRow(row, context);
-    List output = new ArrayList();
+    List output = new ArrayList<>();
     output.add(now1);
     output.add(now2);
     assertEquals(output, row.get("dateAdded"));
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java
index 4023fa2eed9..d40d41a0f42 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java
@@ -95,7 +95,7 @@ public class TestDocBuilder extends AbstractDataImportHandlerTestCase {
     di.loadAndInit(dc_singleEntity);
     DIHConfiguration cfg = di.getConfig();
     Entity ent = cfg.getEntities().get(0);
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("id", 1, "desc", "one"));
     MockDataSource.setIterator("select * from x", l.iterator());
     RequestInfo rp = new RequestInfo(null, createMap("command", "full-import"), null);
@@ -124,7 +124,7 @@
     di.loadAndInit(dc_singleEntity);
     DIHConfiguration cfg = di.getConfig();
     Entity ent = cfg.getEntities().get(0);
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("id", 1, "desc", "one"));
     MockDataSource.setIterator("select * from x", l.iterator());
     RequestInfo rp = new RequestInfo(null, createMap("command", "import"), null);
@@ -154,7 +154,7 @@
     DIHConfiguration cfg = di.getConfig();
     Entity ent = cfg.getEntities().get(0);
     RequestInfo rp = new RequestInfo(null, createMap("command", "full-import"), null);
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("id", 1, "desc", "one"));
     l.add(createMap("id", 2, "desc", "two"));
     l.add(createMap("id", 3, "desc", "three"));
@@ -180,7 +180,7 @@
   }
   static class SolrWriterImpl extends SolrWriter {
-    List docs = new ArrayList();
+    List docs = new ArrayList<>();
     Boolean deleteAllCalled = Boolean.FALSE;
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java
index bf16bbc82d2..623e49e0b01 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEntityProcessorBase.java
@@ -35,8 +35,8 @@ public class TestEntityProcessorBase extends AbstractDataImportHandlerTestCase {
   @Test
   public void multiTransformer() {
-    List> fields = new ArrayList>();
-    Map entity = new HashMap();
+    List> fields = new ArrayList<>();
+    Map entity = new HashMap<>();
     entity.put("transformer", T1.class.getName() + "," + T2.class.getName() + "," + T3.class.getName());
     fields.add(getField("A", null, null, null, null));
@@ -44,7 +44,7 @@
     Context context = getContext(null, null, new MockDataSource(), Context.FULL_DUMP, fields, entity);
-    Map src = new HashMap();
+    Map src = new HashMap<>();
     src.put("A", "NA");
     src.put("B", "NA");
     EntityProcessorWrapper sep = new EntityProcessorWrapper(new SqlEntityProcessor(), null, null);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestErrorHandling.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestErrorHandling.java
index 5edc9d862de..bbf66e310bb 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestErrorHandling.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestErrorHandling.java
@@ -83,7 +83,7 @@ public class TestErrorHandling extends AbstractDataImportHandlerTestCase {
   public void testTransformerErrorContinue() throws Exception {
     StringDataSource.xml = wellformedXml;
-    List> rows = new ArrayList>();
+    List> rows = new ArrayList<>();
     rows.add(createMap("id", "3", "desc", "exception-transformer"));
     MockDataSource.setIterator("select * from foo", rows.iterator());
     runFullImport(dataConfigWithTransformer);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFieldReader.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFieldReader.java
index 06f6a8397b7..cac2c28d8fd 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFieldReader.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFieldReader.java
@@ -37,7 +37,7 @@ public class TestFieldReader extends AbstractDataImportHandlerTestCase {
     di.loadAndInit(config);
     TestDocBuilder.SolrWriterImpl sw = new TestDocBuilder.SolrWriterImpl();
     RequestInfo rp = new RequestInfo(null, createMap("command", "full-import"), null);
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("xml", xml));
     MockDataSource.setIterator("select * from a", l.iterator());
     di.runCmd(rp, sw);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFileListEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFileListEntityProcessor.java
index 91ebd1ae41c..654065582e0 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFileListEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFileListEntityProcessor.java
@@ -51,7 +51,7 @@ public class TestFileListEntityProcessor extends AbstractDataImportHandlerTestCa
         new VariableResolver(), null, Context.FULL_DUMP, Collections.EMPTY_LIST, attrs);
     FileListEntityProcessor fileListEntityProcessor = new FileListEntityProcessor();
     fileListEntityProcessor.init(c);
-    List fList = new ArrayList();
+    List fList = new ArrayList<>();
     while (true) {
       Map f = fileListEntityProcessor.nextRow();
       if (f == null)
@@ -93,10 +93,10 @@
         FileListEntityProcessor.BIGGER_THAN, String.valueOf(minLength));
     List fList = getFiles(null, attrs);
     assertEquals(2, fList.size());
-    Set l = new HashSet();
+    Set l = new HashSet<>();
     l.add(new File(tmpdir, "a.xml").getAbsolutePath());
     l.add(new File(tmpdir, "b.xml").getAbsolutePath());
-    assertEquals(l, new HashSet(fList));
+    assertEquals(l, new HashSet<>(fList));
     attrs = createMap(
         FileListEntityProcessor.FILE_NAME, ".*",
         FileListEntityProcessor.BASE_DIR, tmpdir.getAbsolutePath(),
@@ -104,7 +104,7 @@
     fList = getFiles(null, attrs);
     l.clear();
     l.add(new File(tmpdir, smallestFile).getAbsolutePath());
-    assertEquals(l, new HashSet(fList));
+    assertEquals(l, new HashSet<>(fList));
     attrs = createMap(
         FileListEntityProcessor.FILE_NAME, ".*",
         FileListEntityProcessor.BASE_DIR, tmpdir.getAbsolutePath(),
@@ -112,7 +112,7 @@
     VariableResolver resolver = new VariableResolver();
     resolver.addNamespace("a", createMap("x", "4"));
     fList = getFiles(resolver, attrs);
-    assertEquals(l, new HashSet(fList));
+    assertEquals(l, new HashSet<>(fList));
   }
   @SuppressWarnings("unchecked")
@@ -121,7 +121,7 @@
         resolver, null, Context.FULL_DUMP, Collections.EMPTY_LIST, attrs);
     FileListEntityProcessor fileListEntityProcessor = new FileListEntityProcessor();
     fileListEntityProcessor.init(c);
-    List fList = new ArrayList();
+    List fList = new ArrayList<>();
     while (true) {
       Map f = fileListEntityProcessor.nextRow();
       if (f == null)
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
index da3c0a5c8df..2725b776360 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestJdbcDataSource.java
@@ -49,7 +49,7 @@ public class TestJdbcDataSource extends AbstractDataImportHandlerTestCase {
   private Connection connection;
   private IMocksControl mockControl;
   private JdbcDataSource jdbcDataSource = new JdbcDataSource();
-  List> fields = new ArrayList>();
+  List> fields = new ArrayList<>();
   Context context = AbstractDataImportHandlerTestCase.getContext(null, null,
       jdbcDataSource, Context.FULL_DUMP, fields, null);
@@ -207,12 +207,12 @@
     p.put("user", "root");
     p.put("password", "");
-    List> flds = new ArrayList>();
-    Map f = new HashMap();
+    List> flds = new ArrayList<>();
+    Map f = new HashMap<>();
     f.put("column", "trim_id");
     f.put("type", "long");
     flds.add(f);
-    f = new HashMap();
+    f = new HashMap<>();
     f.put("column", "msrp");
     f.put("type", "float");
     flds.add(f);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java
index f79ed9a415c..0cb07d304af 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestLineEntityProcessor.java
@@ -62,7 +62,7 @@ public class TestLineEntityProcessor extends AbstractDataImportHandlerTestCase {
     /// call the entity processor to the list of lines
     if (VERBOSE) System.out.print("\n");
-    List fList = new ArrayList();
+    List fList = new ArrayList<>();
     while (true) {
       Map f = ep.nextRow();
       if (f == null) break;
@@ -101,7 +101,7 @@ public class TestLineEntityProcessor extends AbstractDataImportHandlerTestCase {
     ep.init(c);
     /// call the entity processor to the list of lines
-    List fList = new ArrayList();
+    List fList = new ArrayList<>();
     while (true) {
       Map f = ep.nextRow();
       if (f == null) break;
@@ -139,7 +139,7 @@
     ep.init(c);
     /// call the entity processor to walk the directory
-    List fList = new ArrayList();
+    List fList = new ArrayList<>();
     while (true) {
       Map f = ep.nextRow();
       if (f == null) break;
@@ -175,7 +175,7 @@
     ep.init(c);
     /// call the entity processor to walk the directory
-    List fList = new ArrayList();
+    List fList = new ArrayList<>();
     while (true) {
       Map f = ep.nextRow();
       if (f == null) break;
@@ -195,7 +195,7 @@
       String rw, // DIH regex attribute 'replaceWith'
       String gn // DIH regex attribute 'groupNames'
       ) {
-    HashMap vals = new HashMap();
+    HashMap vals = new HashMap<>();
     vals.put("column", col);
     vals.put("type", type);
     vals.put("sourceColName", srcCol);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestNumberFormatTransformer.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestNumberFormatTransformer.java
index 723b5c2b8cf..6f76fb856b1 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestNumberFormatTransformer.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestNumberFormatTransformer.java
@@ -40,7 +40,7 @@ public class TestNumberFormatTransformer extends AbstractDataImportHandlerTestCa
   @Test
   public void testTransformRow_SingleNumber() {
     char GERMAN_GROUPING_SEP = new DecimalFormatSymbols(Locale.GERMANY).getGroupingSeparator();
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.NUMBER));
     l.add(createMap("column", "localizedNum",
@@ -55,13 +55,13 @@
   @Test
   @SuppressWarnings("unchecked")
   public void testTransformRow_MultipleNumbers() throws Exception {
-    List> fields = new ArrayList>();
+    List> fields = new ArrayList<>();
     fields.add(createMap(DataImporter.COLUMN, "inputs"));
     fields.add(createMap(DataImporter.COLUMN, "outputs", RegexTransformer.SRC_COL_NAME, "inputs",
         NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.NUMBER));
-    List inputs = new ArrayList();
+    List inputs = new ArrayList<>();
     inputs.add("123" + GROUPING_SEP + "567");
     inputs.add("245" + GROUPING_SEP + "678");
     Map row = createMap("inputs", inputs);
@@ -72,7 +72,7 @@
     Context context = getContext(null, resolver, null, Context.FULL_DUMP, fields, null);
     new NumberFormatTransformer().transformRow(row, context);
-    List output = new ArrayList();
+    List output = new ArrayList<>();
     output.add(new Long(123567));
     output.add(new Long(245678));
     Map outputRow = createMap("inputs", inputs, "outputs", output);
@@ -83,7 +83,7 @@
   @Test(expected = DataImportHandlerException.class)
   @SuppressWarnings("unchecked")
   public void testTransformRow_InvalidInput1_Number() {
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.NUMBER));
     Context c = getContext(null, null, null, Context.FULL_DUMP, l, null);
@@ -94,7 +94,7 @@
   @Test(expected = DataImportHandlerException.class)
   @SuppressWarnings("unchecked")
   public void testTransformRow_InvalidInput2_Number() {
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.NUMBER));
     Context c = getContext(null, null, null, Context.FULL_DUMP, l, null);
@@ -105,7 +105,7 @@
   @Test(expected = DataImportHandlerException.class)
   @SuppressWarnings("unchecked")
   public void testTransformRow_InvalidInput2_Currency() {
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.CURRENCY));
     Context c = getContext(null, null, null, Context.FULL_DUMP, l, null);
@@ -116,7 +116,7 @@
   @Test(expected = DataImportHandlerException.class)
   @SuppressWarnings("unchecked")
   public void testTransformRow_InvalidInput1_Percent() {
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.PERCENT));
     Context c = getContext(null, null, null, Context.FULL_DUMP, l, null);
@@ -127,7 +127,7 @@
   @Test(expected = DataImportHandlerException.class)
   @SuppressWarnings("unchecked")
   public void testTransformRow_InvalidInput3_Currency() {
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.CURRENCY));
     Context c = getContext(null, null, null, Context.FULL_DUMP, l, null);
@@ -138,7 +138,7 @@
   @Test(expected = DataImportHandlerException.class)
   @SuppressWarnings("unchecked")
   public void testTransformRow_InvalidInput3_Number() {
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.NUMBER));
     Context c = getContext(null, null, null, Context.FULL_DUMP, l, null);
@@ -149,7 +149,7 @@
   @Test
   @SuppressWarnings("unchecked")
   public void testTransformRow_MalformedInput_Number() {
-    List> l = new ArrayList>();
+    List> l = new ArrayList<>();
     l.add(createMap("column", "num", NumberFormatTransformer.FORMAT_STYLE, NumberFormatTransformer.NUMBER));
     Context c = getContext(null, null, null, Context.FULL_DUMP, l, null);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestRegexTransformer.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestRegexTransformer.java
index bd7eccfb907..50da4f164ec 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestRegexTransformer.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestRegexTransformer.java
@@ -38,12 +38,12 @@ public class TestRegexTransformer extends AbstractDataImportHandlerTestCase {
   @Test
   public void testCommaSeparated() {
-    List> fields = new ArrayList>();
+    List> fields = new ArrayList<>();
     //
     fields.add(getField("col1", "string", null, "a", ","));
     Context context = getContext(null, null, null, Context.FULL_DUMP, fields, null);
-    Map src = new HashMap();
+    Map src = new HashMap<>();
     src.put("a", "a,bb,cc,d");
     Map result = new RegexTransformer().transformRow(src, context);
@@ -54,21 +54,21 @@ public class TestRegexTransformer extends AbstractDataImportHandlerTestCase {
   @Test
   public void testGroupNames() {
-    List> fields = new ArrayList>();
+    List> fields = new ArrayList<>();
     //
-    Map m = new HashMap();
+    Map m = new HashMap<>();
     m.put(COLUMN,"fullName");
     m.put(GROUP_NAMES,",firstName,lastName");
     m.put(REGEX,"(\\w*) (\\w*) (\\w*)");
     fields.add(m);
     Context context = getContext(null, null, null, Context.FULL_DUMP, fields, null);
-    Map src = new HashMap();
+    Map src = new HashMap<>();
     src.put("fullName", "Mr Noble Paul");
     Map result = new RegexTransformer().transformRow(src, context);
     assertEquals("Noble", result.get("firstName"));
     assertEquals("Paul", result.get("lastName"));
-    src= new HashMap();
+    src= new HashMap<>();
     List l= new ArrayList();
     l.add("Mr Noble Paul") ;
     l.add("Mr Shalin Mangar") ;
@@ -84,14 +84,14 @@ public class TestRegexTransformer extends AbstractDataImportHandlerTestCase {
   @Test
   public void testReplaceWith() {
-    List> fields = new ArrayList>();
+    List> fields = new ArrayList<>();
     //
     Map fld = getField("name", "string", "'", null, null);
     fld.put(REPLACE_WITH, "''");
     fields.add(fld);
     Context context = getContext(null, null, null, Context.FULL_DUMP, fields, null);
-    Map src = new HashMap();
+    Map src = new HashMap<>();
     String s = "D'souza";
     src.put("name", s);
@@ -130,7 +130,7 @@ public class TestRegexTransformer extends AbstractDataImportHandlerTestCase {
     fld.put(GROUP_NAMES,"t4,t5");
     fields.add(fld);
-    Map row = new HashMap();
+    Map row = new HashMap<>();
     String s = "Fuel Economy Range: 26 mpg Hwy, 19 mpg City";
     row.put("rowdata", s);
@@ -150,14 +150,14 @@ public class TestRegexTransformer extends AbstractDataImportHandlerTestCase {
   @Test
   public void testMultiValuedRegex(){
-    List> fields = new ArrayList>();
+    List> fields = new ArrayList<>();
     //
     Map fld = getField("participant", null, "(.*)", "person", null);
     fields.add(fld);
     Context context = getContext(null, null, null, Context.FULL_DUMP, fields, null);
-    ArrayList strings = new ArrayList();
+    ArrayList strings = new ArrayList<>();
     strings.add("hello");
     strings.add("world");
     Map result = new RegexTransformer().transformRow(createMap("person", strings), context);
@@ -165,7 +165,7 @@ public class TestRegexTransformer extends AbstractDataImportHandlerTestCase {
   }
   public static List> getFields() {
-    List> fields = new ArrayList>();
     //
     map = new HashMap();
+    Map map = new HashMap<>();
     map.put("name", "Scott");
     EntityProcessorWrapper sep = new EntityProcessorWrapper(new SqlEntityProcessor(), null, null);
     sep.init(context);
@@ -62,8 +62,8 @@
   }
   private Context getContext(String funcName, String script) {
-    List> fields = new ArrayList>();
-    Map entity = new HashMap();
+    List> fields = new ArrayList<>();
+    Map entity = new HashMap<>();
     entity.put("name", "hello");
     entity.put("transformer", "script:" + funcName);
@@ -81,7 +81,7 @@
         + "row.put('name','Hello ' + row.get('name'));"
         + "return row;\n" + "}";
     Context context = getContext("f1", script);
-    Map map = new HashMap();
+    Map map = new HashMap<>();
     map.put("name", "Scott");
     EntityProcessorWrapper sep = new EntityProcessorWrapper(new SqlEntityProcessor(), null, null);
     sep.init(context);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSimplePropertiesWriter.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSimplePropertiesWriter.java
index 1aed8146c3f..3cf21a70a86 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSimplePropertiesWriter.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSimplePropertiesWriter.java
@@ -84,13 +84,13 @@
     SimpleDateFormat df = new SimpleDateFormat(dateFormat, Locale.ROOT);
     Date oneSecondAgo = new Date(System.currentTimeMillis() - 1000);
-    Map init = new HashMap();
+    Map init = new HashMap<>();
     init.put("dateFormat", dateFormat);
     init.put("filename", fileName);
     init.put("directory", fileLocation);
     SimplePropertiesWriter spw = new SimplePropertiesWriter();
     spw.init(new DataImporter(), init);
-    Map props = new HashMap();
+    Map props = new HashMap<>();
     props.put("SomeDates.last_index_time", oneSecondAgo);
     props.put("last_index_time", oneSecondAgo);
     spw.persist(props);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorEndToEnd.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorEndToEnd.java
index 92a0b9c4011..5aa55e71241 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorEndToEnd.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorEndToEnd.java
@@ -53,17 +53,17 @@
   private static final String DEAD_SOLR_SERVER = "http://[ff01::114]:33332/solr";
-  private static final List> DB_DOCS = new ArrayList>();
-  private static final List> SOLR_DOCS = new ArrayList>();
+  private static final List> DB_DOCS = new ArrayList<>();
+  private static final List> SOLR_DOCS = new ArrayList<>();
   static {
     // dynamic fields in the destination schema
-    Map dbDoc = new HashMap();
+    Map dbDoc = new HashMap<>();
     dbDoc.put("dbid_s", "1");
     dbDoc.put("dbdesc_s", "DbDescription");
     DB_DOCS.add(dbDoc);
-    Map solrDoc = new HashMap();
+    Map solrDoc = new HashMap<>();
     solrDoc.put("id", "1");
     solrDoc.put("desc", "SolrDescription");
     SOLR_DOCS.add(solrDoc);
@@ -161,7 +161,7 @@
     try {
       addDocumentsToSolr(generateSolrDocuments(30));
-      Map map = new HashMap();
+      Map map = new HashMap<>();
       map.put("rows", "50");
       runFullImport(generateDIHConfig("query='*:*' fq='desc:Description1*,desc:Description*2' rows='2'", false), map);
     } catch (Exception e) {
@@ -202,15 +202,15 @@
     assertQ(req("*:*"), "//result[@numFound='0']");
     try {
-      List> DOCS = new ArrayList>(DB_DOCS);
-      Map doc = new HashMap();
+      List> DOCS = new ArrayList<>(DB_DOCS);
+      Map doc = new HashMap<>();
       doc.put("dbid_s", "2");
       doc.put("dbdesc_s", "DbDescription2");
       DOCS.add(doc);
       MockDataSource.setIterator("select * from x", DOCS.iterator());
-      DOCS = new ArrayList>(SOLR_DOCS);
-      Map solrDoc = new HashMap();
+      DOCS = new ArrayList<>(SOLR_DOCS);
+      Map solrDoc = new HashMap<>();
       solrDoc.put("id", "2");
       solrDoc.put("desc", "SolrDescription2");
       DOCS.add(solrDoc);
@@ -261,9 +261,9 @@
   }
   private static List> generateSolrDocuments(int num) {
-    List> docList = new ArrayList>();
+    List> docList = new ArrayList<>();
     for (int i = 1; i <= num; i++) {
-      Map map = new HashMap();
+      Map map = new HashMap<>();
       map.put("id", i);
       map.put("desc", "Description" + i);
       docList.add(map);
@@ -272,7 +272,7 @@
   }
   private void addDocumentsToSolr(List> docs) throws SolrServerException, IOException {
-    List sidl = new ArrayList();
+    List sidl = new ArrayList<>();
     for (Map doc : docs) {
       SolrInputDocument sd = new SolrInputDocument();
       for (Entry entry : doc.entrySet()) {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorUnit.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorUnit.java
index afd9450d362..cab62414bef 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorUnit.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSolrEntityProcessorUnit.java
@@ -46,8 +46,8 @@ public class TestSolrEntityProcessorUnit extends AbstractDataImportHandlerTestCa
   }
   public void testMultiValuedFields() {
-    List docs = new ArrayList();
-    List types = new ArrayList();
+    List docs = new ArrayList<>();
+    List types = new ArrayList<>();
     types.add(new FldType(ID, ONE_ONE, new SVal('A', 'Z', 4, 4)));
     types.add(new FldType("description", new IRange(3, 3), new SVal('a', 'c', 1, 1)));
     Doc testDoc = createDoc(types);
@@ -66,12 +66,12 @@
   }
   private List generateUniqueDocs(int numDocs) {
-    List types = new ArrayList();
+    List types = new ArrayList<>();
     types.add(new FldType(ID, ONE_ONE, new SVal('A', 'Z', 4, 40)));
     types.add(new FldType("description", new IRange(1, 3), new SVal('a', 'c', 1, 1)));
-    Set previousIds = new HashSet();
-    List docs = new ArrayList(numDocs);
+    Set previousIds = new HashSet<>();
+    List docs = new ArrayList<>(numDocs);
     for (int i = 0; i < numDocs; i++) {
       Doc doc = createDoc(types);
       while (previousIds.contains(doc.id)) {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java
index 023831aea64..29e1df65fb7 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java
@@ -79,11 +79,11 @@ public class TestSortedMapBackedCache extends AbstractDIHCacheTestCase {
     DIHCache cache = null;
     try {
       cache = new SortedMapBackedCache();
-      Map cacheProps = new HashMap();
+      Map cacheProps = new HashMap<>();
       cacheProps.put(DIHCacheSupport.CACHE_PRIMARY_KEY, "a_id");
       cache.open(getContext(cacheProps));
-      Map data = new HashMap();
+      Map data = new HashMap<>();
       data.put("a_id", null);
       data.put("bogus", "data");
       cache.add(data);
@@ -108,7 +108,7 @@
   public void testCacheReopensWithUpdate() {
     DIHCache cache = null;
     try {
-      Map cacheProps = new HashMap();
+      Map cacheProps = new HashMap<>();
       cacheProps.put(DIHCacheSupport.CACHE_PRIMARY_KEY, "a_id");
       cache = new SortedMapBackedCache();
@@ -120,7 +120,7 @@
       // Close the cache.
       cache.close();
-      List newControlData = new ArrayList();
+      List newControlData = new ArrayList<>();
       Object[] newIdEqualsThree = null;
       int j = 0;
       for (int i = 0; i < data.size(); i++) {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestURLDataSource.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestURLDataSource.java
index 7d8f1b86e07..c1acc5405a1 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestURLDataSource.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestURLDataSource.java
@@ -25,7 +25,7 @@ import java.util.Properties;
 import org.junit.Test;
 public class TestURLDataSource extends AbstractDataImportHandlerTestCase {
-  private List> fields = new ArrayList>();
+  private List> fields = new ArrayList<>();
   private URLDataSource dataSource = new URLDataSource();
   private VariableResolver variableResolver = new VariableResolver();
   private Context context = AbstractDataImportHandlerTestCase.getContext(null, variableResolver,
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestVariableResolver.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestVariableResolver.java
index 2766f82a785..e7ff2e698c0 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestVariableResolver.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestVariableResolver.java
@@ -36,7 +36,7 @@ public class TestVariableResolver extends AbstractDataImportHandlerTestCase {
   @Test
   public void testSimpleNamespace() {
     VariableResolver vri = new VariableResolver();
-    Map ns = new HashMap();
+    Map ns = new HashMap<>();
     ns.put("world", "WORLD");
     vri.addNamespace("hello", ns);
     assertEquals("WORLD", vri.resolve("hello.world"));
@@ -61,10 +61,10 @@
   @Test
   public void testNestedNamespace() {
     VariableResolver vri = new VariableResolver();
-    Map ns = new HashMap();
+    Map ns = new HashMap<>();
     ns.put("world", "WORLD");
     vri.addNamespace("hello", ns);
-    ns = new HashMap();
+    ns = new HashMap<>();
     ns.put("world1", "WORLD1");
     vri.addNamespace("hello.my", ns);
     assertEquals("WORLD1", vri.resolve("hello.my.world1"));
@@ -73,10 +73,10 @@
   @Test
   public void test3LevelNestedNamespace() {
     VariableResolver vri = new VariableResolver();
-    Map ns = new HashMap();
+    Map ns = new HashMap<>();
     ns.put("world", "WORLD");
     vri.addNamespace("hello", ns);
-    ns = new HashMap();
+    ns = new HashMap<>();
     ns.put("world1", "WORLD1");
     vri.addNamespace("hello.my.new", ns);
     assertEquals("WORLD1", vri.resolve("hello.my.new.world1"));
@@ -87,7 +87,7 @@
     VariableResolver vri = new VariableResolver();
     vri.setEvaluators(new DataImporter().getEvaluators(Collections
        .> emptyList()));
-    Map ns = new HashMap();
+    Map ns = new HashMap<>();
     Date d = new Date();
     ns.put("dt", d);
     vri.addNamespace("A", ns);
@@ -115,7 +115,7 @@ public class TestVariableResolver extends AbstractDataImportHandlerTestCase {
   @Test
   public void testDefaultNamespace() {
     VariableResolver vri = new VariableResolver();
-    Map ns = new HashMap();
+    Map ns = new HashMap<>();
     ns.put("world", "WORLD");
     vri.addNamespace(null, ns);
     assertEquals("WORLD", vri.resolve("world"));
@@ -124,7 +124,7 @@
   @Test
   public void testDefaultNamespace1() {
     VariableResolver vri = new VariableResolver();
-    Map ns = new HashMap();
+    Map ns = new HashMap<>();
     ns.put("world", "WORLD");
     vri.addNamespace(null, ns);
     assertEquals("WORLD", vri.resolve("world"));
@@ -133,8 +133,8 @@
   @Test
   public void testFunctionNamespace1() throws Exception {
     VariableResolver resolver = new VariableResolver();
-    final List> l = new ArrayList>();
-    Map m = new HashMap();
+    final List> l = new ArrayList<>();
+    Map m = new HashMap<>();
     m.put("name", "test");
     m.put("class", E.class.getName());
     l.add(m);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java
index 3d105f67327..dfbdcbfa979 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathEntityProcessor.java
@@ -57,7 +57,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase
         new VariableResolver(), getDataSource(cdData), Context.FULL_DUMP, fields, entityAttrs);
     XPathEntityProcessor xPathEntityProcessor = new XPathEntityProcessor();
     xPathEntityProcessor.init(c);
-    List> result = new ArrayList>();
+    List> result = new ArrayList<>();
     while (true) {
       Map row = xPathEntityProcessor.nextRow();
       if (row == null)
@@ -80,7 +80,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase
         new VariableResolver(), getDataSource(testXml), Context.FULL_DUMP, fields, entityAttrs);
     XPathEntityProcessor xPathEntityProcessor = new XPathEntityProcessor();
     xPathEntityProcessor.init(c);
-    List> result = new ArrayList>();
+    List> result = new ArrayList<>();
     while (true) {
       Map row = xPathEntityProcessor.nextRow();
       if (row == null)
@@ -109,7 +109,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase
         new VariableResolver(), getDataSource(textMultipleDocuments), Context.FULL_DUMP, fields, entityAttrs);
     XPathEntityProcessor xPathEntityProcessor = new XPathEntityProcessor();
     xPathEntityProcessor.init(c);
-    List> result = new ArrayList>();
+    List> result = new ArrayList<>();
     while (true) {
       Map row = xPathEntityProcessor.nextRow();
       if (row == null)
@@ -276,7 +276,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase
     xPathEntityProcessor.blockingQueueTimeOutUnits = TimeUnit.MICROSECONDS;
     xPathEntityProcessor.init(c);
-    List> result = new ArrayList>();
+    List> result = new ArrayList<>();
     while (true) {
       if (rowsToRead >= 0 && result.size() >= rowsToRead) {
         Thread.currentThread().interrupt();
@@ -346,7 +346,7 @@ public class TestXPathEntityProcessor extends AbstractDataImportHandlerTestCase
         new VariableResolver(), getDataSource(cdData), Context.FULL_DUMP, null, entityAttrs);
     XPathEntityProcessor xPathEntityProcessor = new XPathEntityProcessor();
     xPathEntityProcessor.init(c);
-    List> result = new ArrayList>();
+    List> result = new ArrayList<>();
     while (true) {
       Map row = xPathEntityProcessor.nextRow();
       if (row == null)
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathRecordReader.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathRecordReader.java
index 7eabcc9846e..f3a2f5ab582 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathRecordReader.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestXPathRecordReader.java
@@ -136,8 +136,8 @@ public class TestXPathRecordReader extends AbstractDataImportHandlerTestCase {
     rr.addField("a", "/root/x/b/@a", false);
     rr.addField("b", "/root/x/b/@b", false);
-    final List> a = new ArrayList>();
-    final List> x = new ArrayList>();
+    final List> a = new ArrayList<>();
+    final List> x = new ArrayList<>();
     rr.streamRecords(new StringReader(xml), new XPathRecordReader.Handler() {
       @Override
       public void handle(Map record, String xpath) {
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java
index 79474314115..cc964072e98 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestZKPropertiesWriter.java
@@ -106,11 +106,11 @@ public class TestZKPropertiesWriter extends AbstractDataImportHandlerTestCase {
     SimpleDateFormat df = new SimpleDateFormat(dateFormat, Locale.ROOT);
     Date oneSecondAgo = new Date(System.currentTimeMillis() - 1000);
-    Map init = new HashMap();
+    Map init = new HashMap<>();
     init.put("dateFormat", dateFormat);
     ZKPropertiesWriter spw = new ZKPropertiesWriter();
     spw.init(new DataImporter(h.getCore(), "dataimport"), init);
-    Map props = new HashMap();
+    Map props = new HashMap<>();
     props.put("SomeDates.last_index_time", oneSecondAgo);
     props.put("last_index_time", oneSecondAgo);
     spw.persist(props);
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TripleThreatTransformer.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TripleThreatTransformer.java
index 380ef92219e..63f3e253bf4 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TripleThreatTransformer.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TripleThreatTransformer.java
@@ -36,17 +36,17 @@
 */
 public class TripleThreatTransformer {
   public Object transformRow(Map row) {
-    List> rows = new ArrayList>(3);
+    List> rows = new ArrayList<>(3);
     rows.add(row);
     rows.add(addDuplicateBackwardsValues(row));
-    rows.add(new LinkedHashMap(row));
+    rows.add(new LinkedHashMap<>(row));
     rows.get(2).put("AddAColumn_s", "Added");
     modifyIdColumn(rows.get(1), 1);
     modifyIdColumn(rows.get(2), 2);
     return rows;
   }
   private LinkedHashMap addDuplicateBackwardsValues(Map row) {
-    LinkedHashMap n = new LinkedHashMap();
+    LinkedHashMap n = new LinkedHashMap<>();
     for(Map.Entry entry : row.entrySet()) {
       String key = entry.getKey();
       if(!"id".equalsIgnoreCase(key)) {
diff --git a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java
index 360259a2166..172d3b1fb86 100644
---
a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java +++ b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java @@ -82,7 +82,7 @@ public class ExtractingRequestHandler extends ContentStreamHandlerBase implement } NamedList configDateFormats = (NamedList) initArgs.get(DATE_FORMATS); if (configDateFormats != null && configDateFormats.size() > 0) { - dateFormats = new HashSet(); + dateFormats = new HashSet<>(); Iterator it = configDateFormats.iterator(); while (it.hasNext()) { String format = (String) it.next().getValue(); diff --git a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/RegexRulesPasswordProvider.java b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/RegexRulesPasswordProvider.java index 76b8262b3b9..35b8f3c8c6e 100644 --- a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/RegexRulesPasswordProvider.java +++ b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/RegexRulesPasswordProvider.java @@ -41,7 +41,7 @@ import org.slf4j.LoggerFactory; public class RegexRulesPasswordProvider implements PasswordProvider { private static final Logger log = LoggerFactory.getLogger(RegexRulesPasswordProvider.class); - private LinkedHashMap passwordMap = new LinkedHashMap(); + private LinkedHashMap passwordMap = new LinkedHashMap<>(); private String explicitPassword; @Override @@ -72,7 +72,7 @@ public class RegexRulesPasswordProvider implements PasswordProvider { * @param is input stream for the file */ public static LinkedHashMap parseRulesFile(InputStream is) { - LinkedHashMap rules = new LinkedHashMap(); + LinkedHashMap rules = new LinkedHashMap<>(); BufferedReader br = new BufferedReader(IOUtils.getDecodingReader(is, IOUtils.CHARSET_UTF_8)); String line; try { diff --git a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/SolrContentHandler.java b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/SolrContentHandler.java index f0cc5d0a57e..e4bd7e0b727 100644 --- a/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/SolrContentHandler.java +++ b/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/SolrContentHandler.java @@ -57,7 +57,7 @@ public class SolrContentHandler extends DefaultHandler implements ExtractingPara protected StringBuilder catchAllBuilder = new StringBuilder(2048); protected IndexSchema schema; protected Map fieldBuilders = Collections.emptyMap(); - private LinkedList bldrStack = new LinkedList(); + private LinkedList bldrStack = new LinkedList<>(); protected boolean captureAttribs; protected boolean lowerNames; @@ -89,7 +89,7 @@ public class SolrContentHandler extends DefaultHandler implements ExtractingPara this.defaultField = params.get(DEFAULT_FIELD, ""); String[] captureFields = params.getParams(CAPTURE_ELEMENTS); if (captureFields != null && captureFields.length > 0) { - fieldBuilders = new HashMap(); + fieldBuilders = new HashMap<>(); for (int i = 0; i < captureFields.length; i++) { fieldBuilders.put(captureFields[i], new StringBuilder()); } @@ -158,7 +158,7 @@ public class SolrContentHandler extends DefaultHandler implements ExtractingPara */ protected void addLiterals() { Iterator paramNames = params.getParameterNamesIterator(); - literalFieldNames = new HashSet(); + literalFieldNames = new HashSet<>(); while (paramNames.hasNext()) { String pname = paramNames.next(); if (!pname.startsWith(LITERALS_PREFIX)) continue; diff --git 
a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java index 2f59ec6e507..4bc476afef3 100644 --- a/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java +++ b/solr/contrib/extraction/src/test/org/apache/solr/handler/extraction/ExtractingRequestHandlerTest.java @@ -616,7 +616,7 @@ public class ExtractingRequestHandlerTest extends SolrTestCaseJ4 { try { // TODO: stop using locally defined streams once stream.file and // stream.body work everywhere - List cs = new ArrayList(); + List cs = new ArrayList<>(); cs.add(new ContentStreamBase.FileStream(getFile(filename))); req.setContentStreams(cs); return h.queryAndResponse("/update/extract", req); diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessor.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessor.java index 83e949cad16..2b6d121fe89 100644 --- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessor.java +++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessor.java @@ -53,7 +53,7 @@ public class LangDetectLanguageIdentifierUpdateProcessor extends LanguageIdentif Detector detector = DetectorFactory.create(); detector.append(content); ArrayList langlist = detector.getProbabilities(); - ArrayList solrLangList = new ArrayList(); + ArrayList solrLangList = new ArrayList<>(); for (Language l: langlist) { solrLangList.add(new DetectedLanguage(l.lang, l.prob)); } diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessorFactory.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessorFactory.java index 2b20143bca2..9a50840f578 100644 --- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessorFactory.java +++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessorFactory.java @@ -125,7 +125,7 @@ public class LangDetectLanguageIdentifierUpdateProcessorFactory extends return; } loaded = true; - List profileData = new ArrayList(); + List profileData = new ArrayList<>(); Charset encoding = Charset.forName("UTF-8"); for (String language : languages) { InputStream stream = LangDetectLanguageIdentifierUpdateProcessor.class.getResourceAsStream("langdetect-profiles/" + language); diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java index 1717c486783..2fcd02f4a1e 100644 --- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java +++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java @@ -107,7 +107,7 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro fallbackFields = params.get(FALLBACK_FIELDS).split(","); } overwrite = params.getBool(OVERWRITE, false); - langWhitelist = new HashSet(); + langWhitelist = new HashSet<>(); threshold = params.getDouble(THRESHOLD, DOCID_THRESHOLD_DEFAULT); if(params.get(LANG_WHITELIST, "").length() > 0) 
{ for(String lang : params.get(LANG_WHITELIST, "").split(",")) { @@ -133,15 +133,15 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro } else { mapIndividualFields = mapFields; } - mapIndividualFieldsSet = new HashSet(Arrays.asList(mapIndividualFields)); + mapIndividualFieldsSet = new HashSet<>(Arrays.asList(mapIndividualFields)); // Compile a union of the lists of fields to map - allMapFieldsSet = new HashSet(Arrays.asList(mapFields)); + allMapFieldsSet = new HashSet<>(Arrays.asList(mapFields)); if(Arrays.equals(mapFields, mapIndividualFields)) { allMapFieldsSet.addAll(mapIndividualFieldsSet); } // Normalize detected langcode onto normalized langcode - lcMap = new HashMap(); + lcMap = new HashMap<>(); if(params.get(LCMAP) != null) { for(String mapping : params.get(LCMAP).split("[, ]")) { String[] keyVal = mapping.split(":"); @@ -154,7 +154,7 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro } // Language Code mapping - mapLcMap = new HashMap(); + mapLcMap = new HashMap<>(); if(params.get(MAP_LCMAP) != null) { for(String mapping : params.get(MAP_LCMAP).split("[, ]")) { String[] keyVal = mapping.split(":"); @@ -199,7 +199,7 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro */ protected SolrInputDocument process(SolrInputDocument doc) { String docLang = null; - HashSet docLangs = new HashSet(); + HashSet docLangs = new HashSet<>(); String fallbackLang = getFallbackLang(doc, fallbackFields, fallbackValue); if(langField == null || !doc.containsKey(langField) || (doc.containsKey(langField) && overwrite)) { @@ -323,7 +323,7 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro * @return a string of the chosen language */ protected String resolveLanguage(String language, String fallbackLang) { - List l = new ArrayList(); + List l = new ArrayList<>(); l.add(new DetectedLanguage(language, 1.0)); return resolveLanguage(l, fallbackLang); } diff --git a/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java b/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java index 4ba326db42f..5399cf4e75c 100644 --- a/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java +++ b/solr/contrib/langid/src/java/org/apache/solr/update/processor/TikaLanguageIdentifierUpdateProcessor.java @@ -41,7 +41,7 @@ public class TikaLanguageIdentifierUpdateProcessor extends LanguageIdentifierUpd @Override protected List detectLanguage(String content) { - List languages = new ArrayList(); + List languages = new ArrayList<>(); if(content.trim().length() != 0) { LanguageIdentifier identifier = new LanguageIdentifier(content); // FIXME: Hack - we get the distance from toString and calculate our own certainty score diff --git a/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java b/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java index d845d182aac..46bfa4f2b9b 100644 --- a/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java +++ b/solr/contrib/langid/src/test/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessorFactoryTestCase.java @@ -124,7 +124,7 @@ public abstract class LanguageIdentifierUpdateProcessorFactoryTestCase extends S assertEquals("zh", 
liProcessor.resolveLanguage("zh_cn", "NA")); assertEquals("zh", liProcessor.resolveLanguage("zh_tw", "NA")); assertEquals("no", liProcessor.resolveLanguage("no", "NA")); - List langs = new ArrayList(); + List langs = new ArrayList<>(); langs.add(new DetectedLanguage("zh_cn", 0.8)); assertEquals("zh", liProcessor.resolveLanguage(langs, "NA")); } @@ -246,7 +246,7 @@ public abstract class LanguageIdentifierUpdateProcessorFactoryTestCase extends S liProcessor = createLangIdProcessor(parameters); // No detected languages - langs = new ArrayList(); + langs = new ArrayList<>(); assertEquals("", liProcessor.resolveLanguage(langs, null)); assertEquals("fallback", liProcessor.resolveLanguage(langs, "fallback")); @@ -255,7 +255,7 @@ public abstract class LanguageIdentifierUpdateProcessorFactoryTestCase extends S assertEquals("one", liProcessor.resolveLanguage(langs, "fallback")); // One detected language under default threshold - langs = new ArrayList(); + langs = new ArrayList<>(); langs.add(new DetectedLanguage("under", 0.1)); assertEquals("fallback", liProcessor.resolveLanguage(langs, "fallback")); } diff --git a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/BatchWriter.java b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/BatchWriter.java index 1bae0f99dfb..34701d52209 100644 --- a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/BatchWriter.java +++ b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/BatchWriter.java @@ -81,7 +81,7 @@ class BatchWriter { private UpdateResponse result; public Batch(Collection batch) { - documents = new ArrayList(batch); + documents = new ArrayList<>(batch); } public void run() { @@ -111,7 +111,7 @@ class BatchWriter { protected void reset(List documents) { if (this.documents == null) { - this.documents = new ArrayList(documents); + this.documents = new ArrayList<>(documents); } else { this.documents.clear(); this.documents.addAll(documents); @@ -121,7 +121,7 @@ class BatchWriter { protected void reset(SolrInputDocument document) { if (this.documents == null) { - this.documents = new ArrayList(); + this.documents = new ArrayList<>(); } else { this.documents.clear(); } diff --git a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/GoLive.java b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/GoLive.java index 79cc7c36114..6eddb9b5c35 100644 --- a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/GoLive.java +++ b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/GoLive.java @@ -59,8 +59,8 @@ class GoLive { new LinkedBlockingQueue()); try { - CompletionService completionService = new ExecutorCompletionService(executor); - Set> pending = new HashSet>(); + CompletionService completionService = new ExecutorCompletionService<>(executor); + Set> pending = new HashSet<>(); int cnt = -1; for (final FileStatus dir : outDirs) { diff --git a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/MapReduceIndexerTool.java b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/MapReduceIndexerTool.java index 68b2634b7f4..0d94c5395f6 100644 --- a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/MapReduceIndexerTool.java +++ b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/MapReduceIndexerTool.java @@ -541,7 +541,7 @@ public class MapReduceIndexerTool extends Configured implements Tool { static List> buildShardUrls(List urls, Integer numShards) { if (urls == null) return null; - List> shardUrls = new ArrayList>(urls.size()); + List> shardUrls = new ArrayList<>(urls.size()); List list = null; int sz; @@ 
-551,7 +551,7 @@ public class MapReduceIndexerTool extends Configured implements Tool { sz = (int) Math.ceil(urls.size() / (float)numShards); for (int i = 0; i < urls.size(); i++) { if (i % sz == 0) { - list = new ArrayList(); + list = new ArrayList<>(); shardUrls.add(list); } list.add((String) urls.get(i)); diff --git a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrOutputFormat.java b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrOutputFormat.java index 97b2b79404e..3de00b8d445 100644 --- a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrOutputFormat.java +++ b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrOutputFormat.java @@ -158,7 +158,7 @@ public class SolrOutputFormat extends FileOutputFormat { Utils.getLogConfigFile(context.getConfiguration()); Path workDir = getDefaultWorkFile(context, ""); int batchSize = getBatchSize(context.getConfiguration()); - return new SolrRecordWriter(context, workDir, batchSize); + return new SolrRecordWriter<>(context, workDir, batchSize); } public static void setupSolrHomeCache(File solrHomeDir, Job job) throws IOException{ @@ -202,7 +202,7 @@ public class SolrOutputFormat extends FileOutputFormat { } private static void createZip(File dir, File out) throws IOException { - HashSet files = new HashSet(); + HashSet files = new HashSet<>(); // take only conf/ and lib/ for (String allowedDirectory : SolrRecordWriter .getAllowedConfigDirectories()) { diff --git a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrRecordWriter.java b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrRecordWriter.java index 501b98235d5..3919278256d 100644 --- a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrRecordWriter.java +++ b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/SolrRecordWriter.java @@ -53,10 +53,10 @@ class SolrRecordWriter extends RecordWriter { private static final Logger LOG = LoggerFactory.getLogger(SolrRecordWriter.class); - public final static List allowedConfigDirectories = new ArrayList( + public final static List allowedConfigDirectories = new ArrayList<>( Arrays.asList(new String[] { "conf", "lib", "solr.xml" })); - public final static Set requiredConfigDirectories = new HashSet(); + public final static Set requiredConfigDirectories = new HashSet<>(); static { requiredConfigDirectories.add("conf"); @@ -101,7 +101,7 @@ class SolrRecordWriter extends RecordWriter { private long numDocsWritten = 0; private long nextLogTime = System.nanoTime(); - private static HashMap.Context> contextMap = new HashMap.Context>(); + private static HashMap.Context> contextMap = new HashMap<>(); public SolrRecordWriter(TaskAttemptContext context, Path outputShardDir, int batchSize) { this.batchSize = batchSize; diff --git a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/ZooKeeperInspector.java b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/ZooKeeperInspector.java index c8de94cda5d..e97debde508 100644 --- a/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/ZooKeeperInspector.java +++ b/solr/contrib/map-reduce/src/java/org/apache/solr/hadoop/ZooKeeperInspector.java @@ -55,14 +55,14 @@ final class ZooKeeperInspector { DocCollection docCollection = extractDocCollection(zkHost, collection); List slices = getSortedSlices(docCollection.getSlices()); - List> solrUrls = new ArrayList>(slices.size()); + List> solrUrls = new ArrayList<>(slices.size()); for (Slice slice : slices) { if (slice.getLeader() == null) { throw new IllegalArgumentException("Cannot find 
SolrCloud slice leader. " + "It looks like not all of your shards are registered in ZooKeeper yet"); } Collection replicas = slice.getReplicas(); - List urls = new ArrayList(replicas.size()); + List urls = new ArrayList<>(replicas.size()); for (Replica replica : replicas) { ZkCoreNodeProps props = new ZkCoreNodeProps(replica); urls.add(props.getCoreUrl()); diff --git a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/LineRandomizerMapperReducerTest.java b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/LineRandomizerMapperReducerTest.java index 379e60a4dc9..2354fdd6633 100644 --- a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/LineRandomizerMapperReducerTest.java +++ b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/LineRandomizerMapperReducerTest.java @@ -49,12 +49,12 @@ public class LineRandomizerMapperReducerTest extends Assert { @Test public void testMapReduce2Items() throws IOException { mapReduceDriver.withAll(Arrays.asList( - new Pair(new LongWritable(0), new Text("hello")), - new Pair(new LongWritable(1), new Text("world")) + new Pair<>(new LongWritable(0), new Text("hello")), + new Pair<>(new LongWritable(1), new Text("world")) )); mapReduceDriver.withAllOutput(Arrays.asList( - new Pair(new Text("world"), NullWritable.get()), - new Pair(new Text("hello"), NullWritable.get()) + new Pair<>(new Text("world"), NullWritable.get()), + new Pair<>(new Text("hello"), NullWritable.get()) )); mapReduceDriver.runTest(); } @@ -62,14 +62,14 @@ public class LineRandomizerMapperReducerTest extends Assert { @Test public void testMapReduce3Items() throws IOException { mapReduceDriver.withAll(Arrays.asList( - new Pair(new LongWritable(0), new Text("hello")), - new Pair(new LongWritable(1), new Text("world")), - new Pair(new LongWritable(2), new Text("nadja")) + new Pair<>(new LongWritable(0), new Text("hello")), + new Pair<>(new LongWritable(1), new Text("world")), + new Pair<>(new LongWritable(2), new Text("nadja")) )); mapReduceDriver.withAllOutput(Arrays.asList( - new Pair(new Text("nadja"), NullWritable.get()), - new Pair(new Text("world"), NullWritable.get()), - new Pair(new Text("hello"), NullWritable.get()) + new Pair<>(new Text("nadja"), NullWritable.get()), + new Pair<>(new Text("world"), NullWritable.get()), + new Pair<>(new Text("hello"), NullWritable.get()) )); mapReduceDriver.runTest(); } @@ -77,16 +77,16 @@ public class LineRandomizerMapperReducerTest extends Assert { @Test public void testMapReduce4Items() throws IOException { mapReduceDriver.withAll(Arrays.asList( - new Pair(new LongWritable(0), new Text("hello")), - new Pair(new LongWritable(1), new Text("world")), - new Pair(new LongWritable(2), new Text("nadja")), - new Pair(new LongWritable(3), new Text("basti")) + new Pair<>(new LongWritable(0), new Text("hello")), + new Pair<>(new LongWritable(1), new Text("world")), + new Pair<>(new LongWritable(2), new Text("nadja")), + new Pair<>(new LongWritable(3), new Text("basti")) )); mapReduceDriver.withAllOutput(Arrays.asList( - new Pair(new Text("nadja"), NullWritable.get()), - new Pair(new Text("world"), NullWritable.get()), - new Pair(new Text("basti"), NullWritable.get()), - new Pair(new Text("hello"), NullWritable.get()) + new Pair<>(new Text("nadja"), NullWritable.get()), + new Pair<>(new Text("world"), NullWritable.get()), + new Pair<>(new Text("basti"), NullWritable.get()), + new Pair<>(new Text("hello"), NullWritable.get()) )); mapReduceDriver.runTest(); } diff --git 
a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java index c00d7016b7b..c0db9d69854 100644 --- a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java +++ b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineGoLiveMiniMRTest.java @@ -245,7 +245,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { public void testBuildShardUrls() throws Exception { // 2x3 Integer numShards = 2; - List urls = new ArrayList(); + List urls = new ArrayList<>(); urls.add("shard1"); urls.add("shard2"); urls.add("shard3"); @@ -303,7 +303,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { // null shards 3x1 numShards = null; - urls = new ArrayList(); + urls = new ArrayList<>(); urls.add("shard1"); urls.add("shard2"); urls.add("shard3"); @@ -318,7 +318,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { // 2x(2,3) off balance numShards = 2; - urls = new ArrayList(); + urls = new ArrayList<>(); urls.add("shard1"); urls.add("shard2"); urls.add("shard3"); @@ -328,7 +328,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { assertEquals(shardUrls.toString(), 2, shardUrls.size()); - Set counts = new HashSet(); + Set counts = new HashSet<>(); counts.add(shardUrls.get(0).size()); counts.add(shardUrls.get(1).size()); @@ -388,7 +388,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { "--go-live" }; args = prependInitialArgs(args); - List argList = new ArrayList(); + List argList = new ArrayList<>(); getShardUrlArgs(argList); args = concat(args, argList.toArray(new String[0])); @@ -418,7 +418,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { "--go-live-threads", Integer.toString(random().nextInt(15) + 1) }; args = prependInitialArgs(args); - argList = new ArrayList(); + argList = new ArrayList<>(); getShardUrlArgs(argList); args = concat(args, argList.toArray(new String[0])); @@ -608,7 +608,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase { }; args = prependInitialArgs(args); - argList = new ArrayList(); + argList = new ArrayList<>(); getShardUrlArgs(argList, replicatedCollection); args = concat(args, argList.toArray(new String[0])); diff --git a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineReducerTest.java b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineReducerTest.java index 665ef04f3ea..663ff2a7818 100644 --- a/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineReducerTest.java +++ b/solr/contrib/map-reduce/src/test/org/apache/solr/hadoop/MorphlineReducerTest.java @@ -106,7 +106,7 @@ public class MorphlineReducerTest extends MRUnitBase { Configuration config = reduceDriver.getConfiguration(); setupHadoopConfig(config); - List values = new ArrayList(); + List values = new ArrayList<>(); SolrInputDocument sid = new SolrInputDocument(); String id = "myid1"; sid.addField("id", id); diff --git a/solr/contrib/morphlines-cell/src/java/org/apache/solr/morphlines/cell/SolrCellBuilder.java b/solr/contrib/morphlines-cell/src/java/org/apache/solr/morphlines/cell/SolrCellBuilder.java index 84a85cbe2f7..23e9bf615ec 100644 --- a/solr/contrib/morphlines-cell/src/java/org/apache/solr/morphlines/cell/SolrCellBuilder.java +++ 
b/solr/contrib/morphlines-cell/src/java/org/apache/solr/morphlines/cell/SolrCellBuilder.java @@ -151,7 +151,7 @@ public final class SolrCellBuilder implements CommandBuilder { cellParams.put(ExtractingParams.XPATH_EXPRESSION, xpathExpr); } - this.dateFormats = getConfigs().getStringList(config, "dateFormats", new ArrayList(DateUtil.DEFAULT_DATE_FORMATS)); + this.dateFormats = getConfigs().getStringList(config, "dateFormats", new ArrayList<>(DateUtil.DEFAULT_DATE_FORMATS)); String handlerStr = getConfigs().getString(config, "solrContentHandlerFactory", TrimSolrContentHandlerFactory.class.getName()); Class factoryClass; @@ -165,7 +165,7 @@ public final class SolrCellBuilder implements CommandBuilder { this.locale = getLocale(getConfigs().getString(config, "locale", "")); - this.mediaTypeToParserMap = new HashMap(); + this.mediaTypeToParserMap = new HashMap<>(); //MimeTypes mimeTypes = MimeTypes.getDefaultMimeTypes(); // FIXME getMediaTypeRegistry.normalize() List parserConfigs = getConfigs().getConfigList(config, "parsers"); diff --git a/solr/contrib/morphlines-cell/src/test/org/apache/solr/morphlines/cell/SolrCellMorphlineTest.java b/solr/contrib/morphlines-cell/src/test/org/apache/solr/morphlines/cell/SolrCellMorphlineTest.java index 8b1a261c364..62aed7e59cd 100644 --- a/solr/contrib/morphlines-cell/src/test/org/apache/solr/morphlines/cell/SolrCellMorphlineTest.java +++ b/solr/contrib/morphlines-cell/src/test/org/apache/solr/morphlines/cell/SolrCellMorphlineTest.java @@ -37,8 +37,8 @@ import org.junit.Test; public class SolrCellMorphlineTest extends AbstractSolrMorphlineTestBase { - private Map expectedRecords = new HashMap(); - private Map> expectedRecordContents = new HashMap>(); + private Map expectedRecords = new HashMap<>(); + private Map> expectedRecordContents = new HashMap<>(); @BeforeClass public static void beforeClass2() { assumeFalse("FIXME: Morphlines currently has issues with Windows paths", Constants.WINDOWS); diff --git a/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/CollectingDocumentLoader.java b/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/CollectingDocumentLoader.java index ed58cffff6e..a4973936548 100644 --- a/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/CollectingDocumentLoader.java +++ b/solr/contrib/morphlines-core/src/test/org/apache/solr/morphlines/solr/CollectingDocumentLoader.java @@ -31,8 +31,8 @@ import org.slf4j.LoggerFactory; class CollectingDocumentLoader implements DocumentLoader { private final int batchSize; - private final List batch = new ArrayList (); - private List results = new ArrayList (); + private final List batch = new ArrayList<> (); + private List results = new ArrayList<> (); private static final Logger LOGGER = LoggerFactory.getLogger(CollectingDocumentLoader.class); diff --git a/solr/contrib/uima/src/java/org/apache/solr/uima/processor/SolrUIMAConfigurationReader.java b/solr/contrib/uima/src/java/org/apache/solr/uima/processor/SolrUIMAConfigurationReader.java index e8e252ac7f2..68fdc48eb1e 100644 --- a/solr/contrib/uima/src/java/org/apache/solr/uima/processor/SolrUIMAConfigurationReader.java +++ b/solr/contrib/uima/src/java/org/apache/solr/uima/processor/SolrUIMAConfigurationReader.java @@ -65,7 +65,7 @@ public class SolrUIMAConfigurationReader { @SuppressWarnings("rawtypes") private Map> readTypesFeaturesFieldsMapping() { - Map> map = new HashMap>(); + Map> map = new HashMap<>(); NamedList fieldMappings = (NamedList) args.get("fieldMappings"); /* iterate over UIMA 
types */ @@ -73,7 +73,7 @@ public class SolrUIMAConfigurationReader { NamedList type = (NamedList) fieldMappings.get("type", i); String typeName = (String)type.get("name"); - Map subMap = new HashMap(); + Map subMap = new HashMap<>(); /* iterate over mapping definitions */ for(int j = 0; j < type.size() - 1; j++){ NamedList mapping = (NamedList) type.get("mapping", j + 1); @@ -96,7 +96,7 @@ public class SolrUIMAConfigurationReader { @SuppressWarnings("rawtypes") private Map readAEOverridingParameters() { - Map runtimeParameters = new HashMap(); + Map runtimeParameters = new HashMap<>(); NamedList runtimeParams = (NamedList) args.get("runtimeParameters"); for (int i = 0; i < runtimeParams.size(); i++) { String name = runtimeParams.getName(i); diff --git a/solr/contrib/uima/src/test/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java b/solr/contrib/uima/src/test/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java index 3f4a8f2da19..af41d2463c1 100644 --- a/solr/contrib/uima/src/test/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java +++ b/solr/contrib/uima/src/test/org/apache/solr/uima/processor/UIMAUpdateRequestProcessorTest.java @@ -192,7 +192,7 @@ public class UIMAUpdateRequestProcessorTest extends SolrTestCaseJ4 { } private void addDoc(String chain, String doc) throws Exception { - Map params = new HashMap(); + Map params = new HashMap<>(); params.put(UpdateParams.UPDATE_CHAIN, new String[] { chain }); MultiMapSolrParams mmparams = new MultiMapSolrParams(params); SolrQueryRequestBase req = new SolrQueryRequestBase(h.getCore(), (SolrParams) mmparams) { @@ -200,7 +200,7 @@ public class UIMAUpdateRequestProcessorTest extends SolrTestCaseJ4 { UpdateRequestHandler handler = new UpdateRequestHandler(); handler.init(null); - ArrayList streams = new ArrayList(2); + ArrayList streams = new ArrayList<>(2); streams.add(new ContentStreamBase.StringStream(doc)); req.setContentStreams(streams); handler.handleRequestBody(req, new SolrQueryResponse()); diff --git a/solr/contrib/velocity/src/java/org/apache/solr/response/SolrParamResourceLoader.java b/solr/contrib/velocity/src/java/org/apache/solr/response/SolrParamResourceLoader.java index ab486bb6bcd..54d86ddc1d3 100644 --- a/solr/contrib/velocity/src/java/org/apache/solr/response/SolrParamResourceLoader.java +++ b/solr/contrib/velocity/src/java/org/apache/solr/response/SolrParamResourceLoader.java @@ -31,7 +31,7 @@ import java.util.Iterator; import java.util.Map; public class SolrParamResourceLoader extends ResourceLoader { - private Map templates = new HashMap(); + private Map templates = new HashMap<>(); public SolrParamResourceLoader(SolrQueryRequest request) { super(); diff --git a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java b/solr/core/src/java/org/apache/solr/SolrLogFormatter.java index 55c01e1eef5..ff1cf222c63 100644 --- a/solr/core/src/java/org/apache/solr/SolrLogFormatter.java +++ b/solr/core/src/java/org/apache/solr/SolrLogFormatter.java @@ -47,7 +47,7 @@ public class SolrLogFormatter extends Formatter { long startTime = System.currentTimeMillis(); long lastTime = startTime; - Map methodAlias = new HashMap(); + Map methodAlias = new HashMap<>(); public static class Method { public String className; @@ -106,9 +106,9 @@ public class SolrLogFormatter extends Formatter { Map coreProps; } - Map coreInfoMap = new WeakHashMap(); // TODO: use something that survives across a core reload? + Map coreInfoMap = new WeakHashMap<>(); // TODO: use something that survives across a core reload? 
- public Map classAliases = new HashMap(); + public Map classAliases = new HashMap<>(); @Override public String format(LogRecord record) { @@ -379,7 +379,7 @@ sb.append("(group_name=").append(tg.getName()).append(")"); - static ThreadLocal threadLocal = new ThreadLocal(); + static ThreadLocal threadLocal = new ThreadLocal<>(); public static void main(String[] args) throws Exception { diff --git a/solr/core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java b/solr/core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java index b6481423112..515f8b7207b 100644 --- a/solr/core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java +++ b/solr/core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java @@ -775,7 +775,7 @@ public class LegacyHTMLStripCharFilter extends BaseCharFilter { private static final HashMap entityTable; static { - entityTable = new HashMap(); + entityTable = new HashMap<>(); // entityName and entityVal generated from the python script // included in comments at the end of this file. final String[] entityName={ "zwnj","aring","gt","yen","ograve","Chi","delta","rang","sup","trade","Ntilde","xi","upsih","nbsp","Atilde","radic","otimes","aelig","oelig","equiv","ni","infin","Psi","auml","cup","Epsilon","otilde","lt","Icirc","Eacute","Lambda","sbquo","Prime","prime","psi","Kappa","rsaquo","Tau","uacute","ocirc","lrm","zwj","cedil","Alpha","not","amp","AElig","oslash","acute","lceil","alefsym","laquo","shy","loz","ge","Igrave","nu","Ograve","lsaquo","sube","euro","rarr","sdot","rdquo","Yacute","lfloor","lArr","Auml","Dagger","brvbar","Otilde","szlig","clubs","diams","agrave","Ocirc","Iota","Theta","Pi","zeta","Scaron","frac14","egrave","sub","iexcl","frac12","ordf","sum","prop","Uuml","ntilde","atilde","asymp","uml","prod","nsub","reg","rArr","Oslash","emsp","THORN","yuml","aacute","Mu","hArr","le","thinsp","dArr","ecirc","bdquo","Sigma","Aring","tilde","nabla","mdash","uarr","times","Ugrave","Eta","Agrave","chi","real","circ","eth","rceil","iuml","gamma","lambda","harr","Egrave","frac34","dagger","divide","Ouml","image","ndash","hellip","igrave","Yuml","ang","alpha","frasl","ETH","lowast","Nu","plusmn","bull","sup1","sup2","sup3","Aacute","cent","oline","Beta","perp","Delta","there4","pi","iota","empty","euml","notin","iacute","para","epsilon","weierp","OElig","uuml","larr","icirc","Upsilon","omicron","upsilon","copy","Iuml","Oacute","Xi","kappa","ccedil","Ucirc","cap","mu","scaron","lsquo","isin","Zeta","minus","deg","and","tau","pound","curren","int","ucirc","rfloor","ensp","crarr","ugrave","exist","cong","theta","oplus","permil","Acirc","piv","Euml","Phi","Iacute","quot","Uacute","Omicron","ne","iquest","eta","rsquo","yacute","Rho","darr","Ecirc","Omega","acirc","sim","phi","sigmaf","macr","thetasym","Ccedil","ordm","uArr","forall","beta","fnof","rho","micro","eacute","omega","middot","Gamma","rlm","lang","spades","supe","thorn","ouml","or","raquo","part","sect","ldquo","hearts","sigma","oacute"}; diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java index 6a9232f9db5..1e3a2db1719 100644 --- a/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java +++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/BasicAccumulator.java @@ -103,7 +103,7 @@ public class BasicAccumulator extends ValueAccumulator { } public NamedList export(){ - NamedList base = new NamedList(); + NamedList base = 
new NamedList<>(); for (int count = 0; count < expressions.length; count++) { if (!hiddenExpressions.contains(expressionNames[count])) { base.add(expressionNames[count], expressions[count].getValue()); diff --git a/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java b/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java index f22c345e424..c23e63364c9 100644 --- a/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java +++ b/solr/core/src/java/org/apache/solr/analytics/accumulator/FacetingAccumulator.java @@ -98,14 +98,14 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA List rangeFreqs = request.getRangeFacets(); List queryFreqs = request.getQueryFacets(); - this.fieldFacetExpressions = new LinkedHashMap>(fieldFreqs.size()); - this.rangeFacetExpressions = new LinkedHashMap>(rangeFreqs.size()); - this.queryFacetExpressions = new LinkedHashMap>(queryFreqs.size()); - this.fieldFacetCollectors = new LinkedHashMap>(fieldFreqs.size()); - this.rangeFacetCollectors = new LinkedHashMap>(rangeFreqs.size()); - this.queryFacetCollectors = new LinkedHashMap>(queryFreqs.size()); - this.facetAccumulators = new ArrayList(); - this.hiddenFieldFacets = new HashSet(); + this.fieldFacetExpressions = new LinkedHashMap<>(fieldFreqs.size()); + this.rangeFacetExpressions = new LinkedHashMap<>(rangeFreqs.size()); + this.queryFacetExpressions = new LinkedHashMap<>(queryFreqs.size()); + this.fieldFacetCollectors = new LinkedHashMap<>(fieldFreqs.size()); + this.rangeFacetCollectors = new LinkedHashMap<>(rangeFreqs.size()); + this.queryFacetCollectors = new LinkedHashMap<>(queryFreqs.size()); + this.facetAccumulators = new ArrayList<>(); + this.hiddenFieldFacets = new HashSet<>(); /** * For each field facet request add a bucket to the {@link Expression} map and {@link StatsCollector} map. @@ -130,13 +130,13 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA * are not created initially. 
*/ for( RangeFacetRequest freq : rangeFreqs ){ - if( rangeFacets == null ) rangeFacets = new ArrayList(); + if( rangeFacets == null ) rangeFacets = new ArrayList<>(); rangeFacets.add(freq); rangeFacetExpressions.put(freq.getName(), new LinkedHashMap() ); rangeFacetCollectors.put(freq.getName(), new LinkedHashMap()); } for( QueryFacetRequest freq : queryFreqs ){ - if( queryFacets == null ) queryFacets = new ArrayList(); + if( queryFacets == null ) queryFacets = new ArrayList<>(); queryFacets.add(freq); queryFacetExpressions.put(freq.getName(), new LinkedHashMap() ); queryFacetCollectors.put(freq.getName(), new LinkedHashMap()); @@ -442,7 +442,7 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA @SuppressWarnings("unchecked") public NamedList export() { final NamedList base = (NamedList)super.export(); - NamedList> facetList = new NamedList>(); + NamedList> facetList = new NamedList<>(); // Add the field facet buckets to the output base.add("fieldFacets",facetList); @@ -452,7 +452,7 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA continue; } final Map buckets = fieldFacetExpressions.get(name); - final NamedList bucketBase = new NamedList(); + final NamedList bucketBase = new NamedList<>(); Iterable> iter = buckets.entrySet(); @@ -471,7 +471,7 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA final Expression first = buckets.values().iterator().next()[sortPlace]; final Comparator comp = (Comparator) first.comparator(sort.getDirection()); - final List> sorted = new ArrayList>(buckets.size()); + final List> sorted = new ArrayList<>(buckets.size()); Iterables.addAll(sorted, iter); Collections.sort(sorted, new EntryComparator(comp,sortPlace)); iter = sorted; @@ -493,12 +493,12 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA } // Add the range facet buckets to the output - facetList = new NamedList>(); + facetList = new NamedList<>(); base.add("rangeFacets",facetList); for( RangeFacetRequest freq : request.getRangeFacets() ){ final String name = freq.getName(); final Map buckets = rangeFacetExpressions.get(name); - final NamedList bucketBase = new NamedList(); + final NamedList bucketBase = new NamedList<>(); Iterable> iter = buckets.entrySet(); @@ -510,12 +510,12 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA } // Add the query facet buckets to the output - facetList = new NamedList>(); + facetList = new NamedList<>(); base.add("queryFacets",facetList); for( QueryFacetRequest freq : request.getQueryFacets() ){ final String name = freq.getName(); final Map buckets = queryFacetExpressions.get(name); - final NamedList bucketBase = new NamedList(); + final NamedList bucketBase = new NamedList<>(); Iterable> iter = buckets.entrySet(); @@ -535,7 +535,7 @@ public class FacetingAccumulator extends BasicAccumulator implements FacetValueA * @return named list of expressions */ public NamedList export(Expression[] expressionArr) { - NamedList base = new NamedList(); + NamedList base = new NamedList<>(); for (int count = 0; count < expressionArr.length; count++) { if (!hiddenExpressions.contains(expressionNames[count])) { base.add(expressionNames[count], expressionArr[count].getValue()); diff --git a/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java b/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java index 5da5fb011c3..0fd9db01a2e 100644 --- 
a/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java +++ b/solr/core/src/java/org/apache/solr/analytics/expression/ExpressionFactory.java @@ -158,7 +158,7 @@ public class ExpressionFactory { String[] strings = new String[1]; int stack = 0; int start = 0; - List arguments = new ArrayList(); + List arguments = new ArrayList<>(); char[] chars = expression.toCharArray(); for (int count = 0; count < expression.length(); count++) { char c = chars[count]; diff --git a/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java b/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java index 74db91dfa94..a57c546ff43 100644 --- a/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java +++ b/solr/core/src/java/org/apache/solr/analytics/plugin/AnalyticsStatisticsCollector.java @@ -88,7 +88,7 @@ public class AnalyticsStatisticsCollector { } public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap(); + NamedList lst = new SimpleOrderedMap<>(); Snapshot snapshot = requestTimes.getSnapshot(); lst.add("requests", numRequests.longValue()); lst.add("analyticsRequests", numAnalyticsRequests.longValue()); diff --git a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java index 1f038bae37b..db21094b219 100644 --- a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java +++ b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsContentHandler.java @@ -177,28 +177,28 @@ public class AnalyticsContentHandler implements ContentHandler { } // Initiate Range Facet classes - gaps = new ArrayList(); + gaps = new ArrayList<>(); includeBoundaries = EnumSet.noneOf(FacetRangeInclude.class); otherRanges = EnumSet.noneOf(FacetRangeOther.class); inRangeFacet = true; } else if (localName.equals(QUERY_FACET)) { // Start a Query Facet Request - queries = new ArrayList(); + queries = new ArrayList<>(); inQueryFacet = true; } } else if (localName.equals(ANALYTICS_REQUEST)){ // Start an Analytics Request // Renew each list. 
- fieldFacetList = new ArrayList(); - rangeFacetList = new ArrayList(); - queryFacetList = new ArrayList(); - expressionList = new ArrayList(); + fieldFacetList = new ArrayList<>(); + rangeFacetList = new ArrayList<>(); + queryFacetList = new ArrayList<>(); + expressionList = new ArrayList<>(); inRequest = true; } } else if (localName.equals(ANALYTICS_REQUEST_ENVELOPE)){ //Begin the parsing of the Analytics Requests - requests = new ArrayList(); + requests = new ArrayList<>(); inEnvelope = true; } } diff --git a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java index 8e8282c58e6..2f249994bd1 100644 --- a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java +++ b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequest.java @@ -38,11 +38,11 @@ public class AnalyticsRequest { public AnalyticsRequest(String name) { this.name = name; - expressions = new ArrayList(); - hiddenExpressions = new HashSet(); - fieldFacets = new ArrayList(); - rangeFacets = new ArrayList(); - queryFacets = new ArrayList(); + expressions = new ArrayList<>(); + hiddenExpressions = new HashSet<>(); + fieldFacets = new ArrayList<>(); + rangeFacets = new ArrayList<>(); + queryFacets = new ArrayList<>(); } public String getName() { diff --git a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java index 62fa7345601..3e2e994e062 100644 --- a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java +++ b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsRequestFactory.java @@ -51,14 +51,14 @@ public class AnalyticsRequestFactory implements AnalyticsParams { public static final Pattern queryFacetParamPattern = Pattern.compile("^o(?:lap)?\\.([^\\.]+)\\.(?:"+QUERY_FACET+")\\.([^\\.]+)\\.("+QUERY+"|"+DEPENDENCY+")$", Pattern.CASE_INSENSITIVE); public static List parse(IndexSchema schema, SolrParams params) { - Map requestMap = new HashMap(); - Map> fieldFacetMap = new HashMap>(); - Map> fieldFacetSet = new HashMap>(); - Map> rangeFacetMap = new HashMap>(); - Map> rangeFacetSet = new HashMap>(); - Map> queryFacetMap = new HashMap>(); - Map> queryFacetSet = new HashMap>(); - List requestList = new ArrayList(); + Map requestMap = new HashMap<>(); + Map> fieldFacetMap = new HashMap<>(); + Map> fieldFacetSet = new HashMap<>(); + Map> rangeFacetMap = new HashMap<>(); + Map> rangeFacetSet = new HashMap<>(); + Map> queryFacetMap = new HashMap<>(); + Map> queryFacetSet = new HashMap<>(); + List requestList = new ArrayList<>(); Iterator paramsIterator = params.getParameterNamesIterator(); while (paramsIterator.hasNext()) { @@ -115,7 +115,7 @@ public class AnalyticsRequestFactory implements AnalyticsParams { } for (String reqName : requestMap.keySet()) { AnalyticsRequest ar = requestMap.get(reqName); - List ffrs = new ArrayList(); + List ffrs = new ArrayList<>(); if (fieldFacetSet.get(reqName)!=null) { for (String field : fieldFacetSet.get(reqName)) { ffrs.add(fieldFacetMap.get(reqName).get(field)); @@ -123,7 +123,7 @@ public class AnalyticsRequestFactory implements AnalyticsParams { } ar.setFieldFacets(ffrs); - List rfrs = new ArrayList(); + List rfrs = new ArrayList<>(); if (rangeFacetSet.get(reqName)!=null) { for (String field : rangeFacetSet.get(reqName)) { RangeFacetRequest rfr = rangeFacetMap.get(reqName).get(field); @@ -134,7 +134,7 @@ public class 
AnalyticsRequestFactory implements AnalyticsParams { } ar.setRangeFacets(rfrs); - List qfrs = new ArrayList(); + List qfrs = new ArrayList<>(); if (queryFacetSet.get(reqName)!=null) { for (String name : queryFacetSet.get(reqName)) { QueryFacetRequest qfr = queryFacetMap.get(reqName).get(name); @@ -157,12 +157,12 @@ public class AnalyticsRequestFactory implements AnalyticsParams { private static void makeFieldFacet(IndexSchema schema, Map> fieldFacetMap, Map> fieldFacetSet, String requestName, String[] fields) { Map facetMap = fieldFacetMap.get(requestName); if (facetMap == null) { - facetMap = new HashMap(); + facetMap = new HashMap<>(); fieldFacetMap.put(requestName, facetMap); } Set set = fieldFacetSet.get(requestName); if (set == null) { - set = new HashSet(); + set = new HashSet<>(); fieldFacetSet.put(requestName, set); } for (String field : fields) { @@ -176,7 +176,7 @@ public class AnalyticsRequestFactory implements AnalyticsParams { private static void setFieldFacetParam(IndexSchema schema, Map> fieldFacetMap, String requestName, String field, String paramType, String[] params) { Map facetMap = fieldFacetMap.get(requestName); if (facetMap == null) { - facetMap = new HashMap(); + facetMap = new HashMap<>(); fieldFacetMap.put(requestName, facetMap); } FieldFacetRequest fr = facetMap.get(field); @@ -202,7 +202,7 @@ public class AnalyticsRequestFactory implements AnalyticsParams { private static void makeRangeFacet(IndexSchema schema, Map> rangeFacetSet, String requestName, String[] fields) { Set set = rangeFacetSet.get(requestName); if (set == null) { - set = new HashSet(); + set = new HashSet<>(); rangeFacetSet.put(requestName, set); } for (String field : fields) { @@ -213,7 +213,7 @@ public class AnalyticsRequestFactory implements AnalyticsParams { private static void setRangeFacetParam(IndexSchema schema, Map> rangeFacetMap, String requestName, String field, String paramType, String[] params) { Map facetMap = rangeFacetMap.get(requestName); if (facetMap == null) { - facetMap = new HashMap(); + facetMap = new HashMap<>(); rangeFacetMap.put(requestName, facetMap); } RangeFacetRequest rr = facetMap.get(field); @@ -243,7 +243,7 @@ public class AnalyticsRequestFactory implements AnalyticsParams { private static void makeQueryFacet(IndexSchema schema,Map> queryFacetSet, String requestName, String[] names) { Set set = queryFacetSet.get(requestName); if (set == null) { - set = new HashSet(); + set = new HashSet<>(); queryFacetSet.put(requestName, set); } for (String name : names) { @@ -254,7 +254,7 @@ public class AnalyticsRequestFactory implements AnalyticsParams { private static void setQueryFacetParam(IndexSchema schema, Map> queryFacetMap, String requestName, String name, String paramType, String[] params) { Map facetMap = queryFacetMap.get(requestName); if (facetMap == null) { - facetMap = new HashMap(); + facetMap = new HashMap<>(); queryFacetMap.put(requestName, facetMap); } QueryFacetRequest qr = facetMap.get(name); diff --git a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java index a740fadfd5c..e019569fb0f 100644 --- a/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java +++ b/solr/core/src/java/org/apache/solr/analytics/request/AnalyticsStats.java @@ -60,7 +60,7 @@ public class AnalyticsStats { */ public NamedList execute() throws IOException { statsCollector.startRequest(); - NamedList res = new NamedList(); + NamedList res = new NamedList<>(); List requests; 
diff --git a/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java b/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java
index 6d36d58cb8b..b02740cc70c 100644
--- a/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java
+++ b/solr/core/src/java/org/apache/solr/analytics/request/QueryFacetRequest.java
@@ -31,13 +31,13 @@ public class QueryFacetRequest implements FacetRequest {
   private Set dependencies;
 
   public QueryFacetRequest() {
-    dependencies = new HashSet();
+    dependencies = new HashSet<>();
   }
 
   public QueryFacetRequest(String name) {
     this.name = name;
-    this.queries = new ArrayList();
-    dependencies = new HashSet();
+    this.queries = new ArrayList<>();
+    dependencies = new HashSet<>();
   }
 
   public List getQueries() {
diff --git a/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java b/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java
index c8f9ee064bc..80955367f60 100644
--- a/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java
+++ b/solr/core/src/java/org/apache/solr/analytics/statistics/MedianStatsCollector.java
@@ -28,7 +28,7 @@ import org.apache.solr.analytics.util.MedianCalculator;
  */
 public class MedianStatsCollector extends AbstractDelegatingStatsCollector{
 
-  private final List values = new ArrayList();
+  private final List values = new ArrayList<>();
   protected double median;
 
   public MedianStatsCollector(StatsCollector delegate) {
diff --git a/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java b/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java
index 88e1c748d31..2ddfb99e081 100644
--- a/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java
+++ b/solr/core/src/java/org/apache/solr/analytics/statistics/PercentileStatsCollector.java
@@ -30,7 +30,7 @@ import com.google.common.collect.Iterables;
  */
 @SuppressWarnings("rawtypes")
 public class PercentileStatsCollector extends AbstractDelegatingStatsCollector{
-  public final List values = new ArrayList();
+  public final List values = new ArrayList<>();
   public static final Pattern PERCENTILE_PATTERN = Pattern.compile("perc(?:entile)?_(\\d+)",Pattern.CASE_INSENSITIVE);
   protected final double[] percentiles;
   protected final String[] percentileNames;
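Note the @SuppressWarnings("rawtypes") on the class above: the diamond only recovers type arguments that the target type declares, so assigning a diamond expression to a raw variable simply infers Object and the rawtypes warning remains. A sketch of the distinction (hypothetical, assuming Java 7 inference rules):

    import java.util.ArrayList;
    import java.util.List;

    class RawTarget {
      @SuppressWarnings("rawtypes")
      List raw = new ArrayList<>();           // legal, but inferred as ArrayList<Object>

      List<Double> typed = new ArrayList<>(); // inference is driven by the declared type
    }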
diff --git a/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java b/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java
index c4dea1b7ab9..7b2d14b74af 100644
--- a/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java
+++ b/solr/core/src/java/org/apache/solr/analytics/statistics/StatsCollectorSupplierFactory.java
@@ -88,9 +88,9 @@ public class StatsCollectorSupplierFactory {
    */
   @SuppressWarnings("unchecked")
   public static Supplier create(IndexSchema schema, AnalyticsRequest request) {
-    final Map> collectorStats = new HashMap>();
-    final Map> collectorPercs = new HashMap>();
-    final Map collectorSources = new HashMap();
+    final Map> collectorStats = new HashMap<>();
+    final Map> collectorPercs = new HashMap<>();
+    final Map collectorSources = new HashMap<>();
 
     // Iterate through all expression request to make a list of ValueSource strings
     // and statistics that need to be calculated on those ValueSources.
@@ -121,7 +121,7 @@ public class StatsCollectorSupplierFactory {
         source = arguments[1];
         Set percs = collectorPercs.get(source);
         if (percs == null) {
-          percs = new HashSet();
+          percs = new HashSet<>();
           collectorPercs.put(source, percs);
         }
         try {
@@ -143,7 +143,7 @@ public class StatsCollectorSupplierFactory {
       // each ValueSource, even across different expression requests
       Set stats = collectorStats.get(source);
       if (stats == null) {
-        stats = new HashSet();
+        stats = new HashSet<>();
         collectorStats.put(source, stats);
       }
       stats.add(stat);
@@ -244,7 +244,7 @@ public class StatsCollectorSupplierFactory {
    * @return The set of statistics (sum, mean, median, etc.) found in the expression
    */
   public static Set getStatistics(String expression) {
-    HashSet set = new HashSet();
+    HashSet set = new HashSet<>();
     int firstParen = expression.indexOf('(');
     if (firstParen>0) {
       String topOperation = expression.substring(0,firstParen).trim();
@@ -511,7 +511,7 @@ public class StatsCollectorSupplierFactory {
     } else if (operation.equals(AnalyticsParams.FILTER)) {
       return buildFilterSource(schema, operands, NUMBER_TYPE);
     }
-    List subExpressions = new ArrayList();
+    List subExpressions = new ArrayList<>();
     for (String argument : arguments) {
       ValueSource argSource = buildNumericSource(schema, argument);
       if (argSource == null) {
@@ -577,7 +577,7 @@ public class StatsCollectorSupplierFactory {
       return buildFilterSource(schema, operands, DATE_TYPE);
     }
     if (operation.equals(AnalyticsParams.DATE_MATH)) {
-      List subExpressions = new ArrayList();
+      List subExpressions = new ArrayList<>();
       boolean first = true;
       for (String argument : arguments) {
         ValueSource argSource;
@@ -632,7 +632,7 @@ public class StatsCollectorSupplierFactory {
       }
       return new ReverseStringFunction(buildStringSource(schema, operands));
     }
-    List subExpressions = new ArrayList();
+    List subExpressions = new ArrayList<>();
     for (String argument : arguments) {
       subExpressions.add(buildSourceTree(schema, argument));
     }
diff --git a/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java b/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java
index ca8d2ab47fb..a06a09345ca 100644
--- a/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java
+++ b/solr/core/src/java/org/apache/solr/analytics/statistics/UniqueStatsCollector.java
@@ -24,7 +24,7 @@ import java.util.Set;
 * UniqueValueCounter computes the number of unique values.
 */
 public class UniqueStatsCollector extends AbstractDelegatingStatsCollector{
-  private final Set uniqueValues = new HashSet();
+  private final Set uniqueValues = new HashSet<>();
 
   public UniqueStatsCollector(StatsCollector delegate) {
     super(delegate);
diff --git a/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java b/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java
index a98ed0c4d8e..714575ef269 100644
--- a/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/util/PercentileCalculator.java
@@ -46,7 +46,7 @@ public class PercentileCalculator {
       throw new IllegalArgumentException();
     }
 
-    List results = new ArrayList(percs.length);
+    List results = new ArrayList<>(percs.length);
 
     distributeAndFind(list, percentiles, 0, percentiles.length - 1);
diff --git a/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java b/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java
index 50e45c0ce81..83c9b7c5709 100644
--- a/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java
+++ b/solr/core/src/java/org/apache/solr/analytics/util/RangeEndpointCalculator.java
@@ -151,7 +151,7 @@ public abstract class RangeEndpointCalculator> {
 
     T low = start;
 
-    List ranges = new ArrayList();
+    List ranges = new ArrayList<>();
 
     int gapCounter = 0;
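As in new ArrayList<>(percs.length) above, constructor arguments coexist with the diamond: inference still comes from the declared target type, so capacity hints and comparator arguments pass through unchanged. A small sketch (hypothetical names, assuming Java 7):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.TreeMap;

    class DiamondWithArgs {
      // Initial-capacity argument, diamond still infers <Double>.
      List<Double> sized = new ArrayList<>(32);

      // Comparator argument, diamond infers <String, Integer>.
      TreeMap<String, Integer> reversed =
          new TreeMap<>(Collections.<String>reverseOrder());
    }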
diff --git a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
index d2fffb6861d..bc298e90596 100644
--- a/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
+++ b/solr/core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@@ -90,7 +90,7 @@ public class JettySolrRunner {
   private String coreNodeName;
 
   /** Maps servlet holders (i.e. factories: class + init params) to path specs */
-  private SortedMap extraServlets = new TreeMap();
+  private SortedMap extraServlets = new TreeMap<>();
   private SortedMap extraRequestFilters;
   private LinkedList extraFilters;
@@ -106,7 +106,7 @@ public class JettySolrRunner {
   }
 
   // TODO: keep track of certain number of last requests
-  private LinkedList requests = new LinkedList();
+  private LinkedList requests = new LinkedList<>();
 
   @Override
@@ -188,7 +188,7 @@ public class JettySolrRunner {
       SortedMap extraRequestFilters) {
     if (null != extraServlets) { this.extraServlets.putAll(extraServlets); }
     if (null != extraRequestFilters) {
-      this.extraRequestFilters = new TreeMap(extraRequestFilters.comparator());
+      this.extraRequestFilters = new TreeMap<>(extraRequestFilters.comparator());
       this.extraRequestFilters.putAll(extraRequestFilters);
     }
     this.solrConfigFilename = solrConfigFilename;
@@ -316,7 +316,7 @@ public class JettySolrRunner {
         // FilterHolder fh = new FilterHolder(filter);
         debugFilter = root.addFilter(DebugFilter.class, "*", EnumSet.of(DispatcherType.REQUEST) );
         if (extraRequestFilters != null) {
-          extraFilters = new LinkedList();
+          extraFilters = new LinkedList<>();
           for (Class filterClass : extraRequestFilters.keySet()) {
             extraFilters.add(root.addFilter(filterClass, extraRequestFilters.get(filterClass),
                 EnumSet.of(DispatcherType.REQUEST)));
diff --git a/solr/core/src/java/org/apache/solr/cloud/Assign.java b/solr/core/src/java/org/apache/solr/cloud/Assign.java
index 62f62fc3ab1..e231e04a0d2 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Assign.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Assign.java
@@ -86,7 +86,7 @@ public class Assign {
       return "shard1";
     }
 
-    List shardIdNames = new ArrayList(sliceMap.keySet());
+    List shardIdNames = new ArrayList<>(sliceMap.keySet());
 
     if (shardIdNames.size() < numShards) {
       return "shard" + (shardIdNames.size() + 1);
@@ -95,7 +95,7 @@ public class Assign {
     // TODO: don't need to sort to find shard with fewest replicas!
 
     // else figure out which shard needs more replicas
-    final Map map = new HashMap();
+    final Map map = new HashMap<>();
     for (String shardId : shardIdNames) {
       int cnt = sliceMap.get(shardId).getReplicasMap().size();
       map.put(shardId, cnt);
@@ -135,12 +135,12 @@ public class Assign {
 
     Set nodes = clusterState.getLiveNodes();
 
-    List nodeList = new ArrayList(nodes.size());
+    List nodeList = new ArrayList<>(nodes.size());
     nodeList.addAll(nodes);
     if (createNodeList != null) nodeList.retainAll(createNodeList);
 
-    HashMap nodeNameVsShardCount = new HashMap();
+    HashMap nodeNameVsShardCount = new HashMap<>();
     for (String s : nodeList) nodeNameVsShardCount.put(s,new Node(s));
     for (String s : clusterState.getCollections()) {
       DocCollection c = clusterState.getCollection(s);
diff --git a/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java b/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java
index f763b027ffe..d6fc0aaf652 100644
--- a/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java
+++ b/solr/core/src/java/org/apache/solr/cloud/DistributedQueue.java
@@ -82,7 +82,7 @@ public class DistributedQueue {
    */
   private TreeMap orderedChildren(Watcher watcher)
       throws KeeperException, InterruptedException {
-    TreeMap orderedChildren = new TreeMap();
+    TreeMap orderedChildren = new TreeMap<>();
 
     List childNames = null;
     try {
diff --git a/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java b/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
index 488ae8f7f7a..d1f5f96b955 100644
--- a/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
+++ b/solr/core/src/java/org/apache/solr/cloud/LeaderElector.java
@@ -212,7 +212,7 @@ public class LeaderElector {
    * @return int seqs
    */
   private List getSeqs(List seqs) {
-    List intSeqs = new ArrayList(seqs.size());
+    List intSeqs = new ArrayList<>(seqs.size());
     for (String seq : seqs) {
       intSeqs.add(getSeq(seq));
     }
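One case a sweep like this has to leave alone under Java 7: the diamond cannot be combined with an anonymous inner class (that combination was only permitted in Java 9). A sketch of the distinction (hypothetical example, not from the patch):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    class AnonymousLimitation {
      // Fine: ordinary instantiation with a diamond.
      List<String> names = new ArrayList<>();

      // Java 7 rejects "new Comparator<>() { ... }", so anonymous
      // classes keep their explicit type argument:
      Comparator<String> byNaturalOrder = new Comparator<String>() {
        @Override
        public int compare(String a, String b) {
          return a.compareTo(b);
        }
      };
    }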
diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
index 0704ccccd49..ffc182d8191 100644
--- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java
+++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java
@@ -337,7 +337,7 @@ public class Overseer {
         return clusterState;
       }
 
-      ArrayList shardNames = new ArrayList();
+      ArrayList shardNames = new ArrayList<>();
 
       if(ImplicitDocRouter.NAME.equals( message.getStr("router.name",DocRouter.DEFAULT_NAME))){
         getShardNames(shardNames,message.getStr("shards",DocRouter.DEFAULT_NAME));
@@ -392,10 +392,10 @@ public class Overseer {
 
       Map routingRules = slice.getRoutingRules();
       if (routingRules == null)
-        routingRules = new HashMap();
+        routingRules = new HashMap<>();
       RoutingRule r = routingRules.get(routeKey);
       if (r == null) {
-        Map map = new HashMap();
+        Map map = new HashMap<>();
         map.put("routeRanges", range);
         map.put("targetCollection", targetCollection);
         map.put("expireAt", expireAt);
@@ -463,7 +463,7 @@ public class Overseer {
       Slice slice = clusterState.getSlice(collection, shardId);
       if (slice == null) {
         Map replicas = Collections.EMPTY_MAP;
-        Map sliceProps = new HashMap();
+        Map sliceProps = new HashMap<>();
         String shardRange = message.getStr(ZkStateReader.SHARD_RANGE_PROP);
         String shardState = message.getStr(ZkStateReader.SHARD_STATE_PROP);
         String shardParent = message.getStr(ZkStateReader.SHARD_PARENT_PROP);
@@ -530,7 +530,7 @@ public class Overseer {
       Integer numShards = message.getInt(ZkStateReader.NUM_SHARDS_PROP, null);
       log.info("Update state numShards={} message={}", numShards, message);
 
-      List shardNames = new ArrayList();
+      List shardNames = new ArrayList<>();
 
       //collection does not yet exist, create placeholders if num shards is specified
       boolean collectionExists = clusterState.hasCollection(collection);
@@ -574,7 +574,7 @@ public class Overseer {
 
       Slice slice = clusterState.getSlice(collection, sliceName);
 
-      Map replicaProps = new LinkedHashMap();
+      Map replicaProps = new LinkedHashMap<>();
 
       replicaProps.putAll(message.getProperties());
       // System.out.println("########## UPDATE MESSAGE: " + JSONUtil.toJSON(message));
@@ -594,7 +594,7 @@ public class Overseer {
 
       // remove any props with null values
       Set> entrySet = replicaProps.entrySet();
-      List removeKeys = new ArrayList();
+      List removeKeys = new ArrayList<>();
       for (Entry entry : entrySet) {
         if (entry.getValue() == null) {
           removeKeys.add(entry.getKey());
@@ -624,8 +624,8 @@ public class Overseer {
         sliceProps = slice.getProperties();
         replicas = slice.getReplicasCopy();
       } else {
-        replicas = new HashMap(1);
-        sliceProps = new HashMap();
+        replicas = new HashMap<>(1);
+        sliceProps = new HashMap<>();
         sliceProps.put(Slice.RANGE, shardRange);
         sliceProps.put(Slice.STATE, shardState);
         sliceProps.put(Slice.PARENT, shardParent);
@@ -661,8 +661,8 @@ public class Overseer {
       if (allActive)  {
         log.info("Shard: {} - all replicas are active. Finding status of fellow sub-shards", sliceName);
         // find out about other sub shards
-        Map allSlicesCopy = new HashMap(state.getSlicesMap(collection));
-        List subShardSlices = new ArrayList();
+        Map allSlicesCopy = new HashMap<>(state.getSlicesMap(collection));
+        List subShardSlices = new ArrayList<>();
         outer:
         for (Entry entry : allSlicesCopy.entrySet()) {
           if (sliceName.equals(entry.getKey()))
@@ -688,7 +688,7 @@ public class Overseer {
           log.info("Shard: {} - All replicas across all fellow sub-shards are now ACTIVE. Preparing to switch shard states.", sliceName);
           String parentSliceName = (String) sliceProps.remove(Slice.PARENT);
 
-          Map propMap = new HashMap();
+          Map propMap = new HashMap<>();
           propMap.put(Overseer.QUEUE_OPERATION, "updateshardstate");
           propMap.put(parentSliceName, Slice.INACTIVE);
           propMap.put(sliceName, Slice.ACTIVE);
@@ -717,7 +717,7 @@ public class Overseer {
 
 //      Map newCollections = new LinkedHashMap();
-      Map newSlices = new LinkedHashMap();
+      Map newSlices = new LinkedHashMap<>();
 //      newCollections.putAll(state.getCollectionStates());
       for (int i = 0; i < shards.size(); i++) {
         String sliceName = shards.get(i);
       /*}
       else {
         for (int i = 0; i < numShards; i++) {
           final String sliceName = "shard" + (i+1);*/
 
-        Map sliceProps = new LinkedHashMap(1);
+        Map sliceProps = new LinkedHashMap<>(1);
         sliceProps.put(Slice.RANGE, ranges == null ? null : ranges.get(i));
         newSlices.put(sliceName, new Slice(sliceName, null, sliceProps));
       }
 
       // TODO: fill in with collection properties read from the /collections/ node
-      Map collectionProps = new HashMap();
+      Map collectionProps = new HashMap<>();
 
       for (Entry e : OverseerCollectionProcessor.COLL_PROPS.entrySet()) {
         Object val = message.get(e.getKey());
@@ -791,7 +791,7 @@ public class Overseer {
       private ClusterState updateSlice(ClusterState state, String collectionName, Slice slice) {
 //        System.out.println("###!!!### OLD CLUSTERSTATE: " + JSONUtil.toJSON(state.getCollectionStates()));
 //        System.out.println("Updating slice:" + slice);
-        Map newCollections = new LinkedHashMap(state.getCollectionStates());  // make a shallow copy
+        Map newCollections = new LinkedHashMap<>(state.getCollectionStates());  // make a shallow copy
         DocCollection coll = newCollections.get(collectionName);
         Map slices;
         Map props;
@@ -800,14 +800,14 @@ public class Overseer {
         if (coll == null) {
           //  when updateSlice is called on a collection that doesn't exist, it's currently when a core is publishing itself
           // without explicitly creating a collection.  In this current case, we assume custom sharding with an "implicit" router.
-          slices = new HashMap(1);
-          props = new HashMap(1);
+          slices = new HashMap<>(1);
+          props = new HashMap<>(1);
           props.put(DocCollection.DOC_ROUTER, ZkNodeProps.makeMap("name",ImplicitDocRouter.NAME));
           router = new ImplicitDocRouter();
         } else {
           props = coll.getProperties();
           router = coll.getRouter();
-          slices = new LinkedHashMap(coll.getSlicesMap()); // make a shallow copy
+          slices = new LinkedHashMap<>(coll.getSlicesMap()); // make a shallow copy
         }
         slices.put(slice.getName(), slice);
         DocCollection newCollection = new DocCollection(collectionName, slices, props, router);
@@ -820,7 +820,7 @@ public class Overseer {
 
       private ClusterState setShardLeader(ClusterState state, String collectionName, String sliceName, String leaderUrl) {
 
-        final Map newCollections = new LinkedHashMap(state.getCollectionStates());
+        final Map newCollections = new LinkedHashMap<>(state.getCollectionStates());
         DocCollection coll = newCollections.get(collectionName);
         if(coll == null) {
           log.error("Could not mark shard leader for non existing collection:" + collectionName);
@@ -829,7 +829,7 @@ public class Overseer {
         Map slices = coll.getSlicesMap();
         // make a shallow copy and add it to the new collection
-        slices = new LinkedHashMap(slices);
+        slices = new LinkedHashMap<>(slices);
 
         Slice slice = slices.get(sliceName);
         if (slice == null) {
@@ -844,7 +844,7 @@ public class Overseer {
 
         Replica oldLeader = slice.getLeader();
 
-        final Map newReplicas = new LinkedHashMap();
+        final Map newReplicas = new LinkedHashMap<>();
 
         for (Replica replica : slice.getReplicas()) {
 
@@ -852,11 +852,11 @@ public class Overseer {
           String coreURL = ZkCoreNodeProps.getCoreUrl(replica.getStr(ZkStateReader.BASE_URL_PROP), replica.getStr(ZkStateReader.CORE_NAME_PROP));
 
           if (replica == oldLeader && !coreURL.equals(leaderUrl)) {
-            Map replicaProps = new LinkedHashMap(replica.getProperties());
+            Map replicaProps = new LinkedHashMap<>(replica.getProperties());
             replicaProps.remove(Slice.LEADER);
             replica = new Replica(replica.getName(), replicaProps);
           } else if (coreURL.equals(leaderUrl)) {
-            Map replicaProps = new LinkedHashMap(replica.getProperties());
+            Map replicaProps = new LinkedHashMap<>(replica.getProperties());
             replicaProps.put(Slice.LEADER, "true");  // TODO: allow booleans instead of strings
             replica = new Replica(replica.getName(), replicaProps);
           }
@@ -901,7 +901,7 @@ public class Overseer {
       DocCollection coll = clusterState.getCollection(collection);
 
-      Map newSlices = new LinkedHashMap(coll.getSlicesMap());
+      Map newSlices = new LinkedHashMap<>(coll.getSlicesMap());
       newSlices.remove(sliceId);
 
       DocCollection newCollection = new DocCollection(coll.getName(), newSlices, coll.getProperties(), coll.getRouter());
@@ -916,7 +916,7 @@ public class Overseer {
       final String collection = message.getStr(ZkStateReader.COLLECTION_PROP);
       if (!checkCollectionKeyExistence(message)) return clusterState;
 
-//      final Map newCollections = new LinkedHashMap(clusterState.getCollectionStates()); // shallow copy
+//      final Map newCollections = new LinkedHashMap<>(clusterState.getCollectionStates()); // shallow copy
 //      DocCollection coll = newCollections.get(collection);
       DocCollection coll = clusterState.getCollectionOrNull(collection) ;
       if (coll == null) {
@@ -933,7 +933,7 @@ public class Overseer {
         return clusterState;
       }
 
-      Map newSlices = new LinkedHashMap();
+      Map newSlices = new LinkedHashMap<>();
       boolean lastSlice = false;
       for (Slice slice : coll.getSlices()) {
         Replica replica = slice.getReplica(cnn);
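The raw Collections.EMPTY_MAP left untouched in the hunks above is the adjacent case: constants like it predate generics, while the generic factory methods (Collections.emptyMap() and friends) get their type arguments inferred much as the diamond does. A sketch of the difference (hypothetical field names, Java 7 inference):

    import java.util.Collections;
    import java.util.Map;

    class EmptyMaps {
      @SuppressWarnings("unchecked")
      Map<String, String> legacy = Collections.EMPTY_MAP;   // raw constant: unchecked warning

      Map<String, String> inferred = Collections.emptyMap(); // generic method: inferred, no warning
    }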
diff --git a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
index bee9e1848bc..b36f47ff187 100644
--- a/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
+++ b/solr/core/src/java/org/apache/solr/cloud/OverseerCollectionProcessor.java
@@ -245,7 +245,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
 //      ArrayList nodesTobePushedBack =  new ArrayList<>();
       //ensure that the node right behind the leader , i.r at position 1 is a Overseer
-      List availableDesignates = new ArrayList();
+      List availableDesignates = new ArrayList<>();
 
       log.info("sorted nodes {}", nodeNames);//TODO to be removed
       for (int i = 0; i < nodeNames.size(); i++) {
@@ -322,7 +322,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
       children = zk.getChildren(OverseerElectionContext.PATH + LeaderElector.ELECTION_NODE, null, true);
     } catch (Exception e) {
       log.warn("error ", e);
-      return new ArrayList();
+      return new ArrayList<>();
     }
     LeaderElector.sortSeqs(children);
     ArrayList nodeNames = new ArrayList<>(children.size());
@@ -487,7 +487,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
     }
     Replica replica = slice.getReplica(replicaName);
     if(replica == null){
-      ArrayList l = new ArrayList();
+      ArrayList l = new ArrayList<>();
       for (Replica r : slice.getReplicas()) l.add(r.getName());
       throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid replica : " + replicaName + " in shard/collection : " + shard + "/"+ collectionName + " available replicas are "+ StrUtils.join(l,','));
@@ -607,8 +607,8 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
     String aliasName = message.getStr("name");
     String collections = message.getStr("collections");
 
-    Map> newAliasesMap = new HashMap>();
-    Map newCollectionAliasesMap = new HashMap();
+    Map> newAliasesMap = new HashMap<>();
+    Map newCollectionAliasesMap = new HashMap<>();
     Map prevColAliases = aliases.getCollectionAliasMap();
     if (prevColAliases != null) {
       newCollectionAliasesMap.putAll(prevColAliases);
@@ -678,8 +678,8 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
   private void deleteAlias(Aliases aliases, ZkNodeProps message) {
     String aliasName = message.getStr("name");
 
-    Map> newAliasesMap = new HashMap>();
-    Map newCollectionAliasesMap = new HashMap();
+    Map> newAliasesMap = new HashMap<>();
+    Map newCollectionAliasesMap = new HashMap<>();
     newCollectionAliasesMap.putAll(aliases.getCollectionAliasMap());
     newCollectionAliasesMap.remove(aliasName);
     newAliasesMap.put("collection", newCollectionAliasesMap);
@@ -839,7 +839,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
       if (ranges.length == 0 || ranges.length == 1) {
         throw new SolrException(ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
       } else {
-        subRanges = new ArrayList(ranges.length);
+        subRanges = new ArrayList<>(ranges.length);
         for (int i = 0; i < ranges.length; i++) {
           String r = ranges[i];
           try {
@@ -852,7 +852,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
                 "Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
           }
         }
-        List temp = new ArrayList(subRanges); // copy to preserve original order
+        List temp = new ArrayList<>(subRanges); // copy to preserve original order
         Collections.sort(temp);
         if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
           throw new SolrException(ErrorCode.BAD_REQUEST,
@@ -894,8 +894,8 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
       }
 
       try {
-        List subSlices = new ArrayList(subRanges.size());
-        List subShardNames = new ArrayList(subRanges.size());
+        List subSlices = new ArrayList<>(subRanges.size());
+        List subShardNames = new ArrayList<>(subRanges.size());
         String nodeName = parentShardLeader.getNodeName();
         for (int i = 0; i < subRanges.size(); i++) {
           String subSlice = slice + "_" + i;
@@ -911,7 +911,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
             // delete the shards
             for (String sub : subSlices) {
               log.info("Sub-shard: {} already exists therefore requesting its deletion", sub);
-              Map propMap = new HashMap();
+              Map propMap = new HashMap<>();
               propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
               propMap.put(COLLECTION_PROP, collectionName);
               propMap.put(SHARD_ID_PROP, sub);
@@ -940,7 +940,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
               + subSlice + " of collection " + collectionName + " on "
               + nodeName);
 
-          Map propMap = new HashMap();
+          Map propMap = new HashMap<>();
           propMap.put(Overseer.QUEUE_OPERATION, "createshard");
           propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
           propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
@@ -1037,7 +1037,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
         // node?
         // for now we just go random
         Set nodes = clusterState.getLiveNodes();
-        List nodeList = new ArrayList(nodes.size());
+        List nodeList = new ArrayList<>(nodes.size());
         nodeList.addAll(nodes);
 
         Collections.shuffle(nodeList);
@@ -1101,7 +1101,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
           // switch sub shard states to 'active'
           log.info("Replication factor is 1 so switching shard states");
           DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
-          Map propMap = new HashMap();
+          Map propMap = new HashMap<>();
           propMap.put(Overseer.QUEUE_OPERATION, "updateshardstate");
           propMap.put(slice, Slice.INACTIVE);
           for (String subSlice : subSlices) {
@@ -1113,7 +1113,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
         } else {
           log.info("Requesting shard state be set to 'recovery'");
           DistributedQueue inQueue = Overseer.getInQueue(zkStateReader.getZkClient());
-          Map propMap = new HashMap();
+          Map propMap = new HashMap<>();
           propMap.put(Overseer.QUEUE_OPERATION, "updateshardstate");
           for (String subSlice : subSlices) {
             propMap.put(subSlice, Slice.RECOVERY);
@@ -1576,7 +1576,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
       // node?
       // for now we just go random
       Set nodes = clusterState.getLiveNodes();
-      List nodeList = new ArrayList(nodes.size());
+      List nodeList = new ArrayList<>(nodes.size());
       nodeList.addAll(nodes);
       if (createNodeList != null) nodeList.retainAll(createNodeList);
 
       Collections.shuffle(nodeList);
@@ -1628,7 +1628,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
         throw new SolrException(ErrorCode.SERVER_ERROR, "Could not fully createcollection: " + message.getStr("name"));
 
       log.info("Creating SolrCores for new collection, shardNames {} , replicationFactor : {}", shardNames, repFactor);
-      Map coresToCreate = new LinkedHashMap();
+      Map coresToCreate = new LinkedHashMap<>();
       for (int i = 1; i <= shardNames.size(); i++) {
         String sliceName = shardNames.get(i-1);
         for (int j = 1; j <= repFactor; j++) {
@@ -1708,7 +1708,7 @@ public class OverseerCollectionProcessor implements Runnable, ClosableThread {
   }
 
   private Map waitToSeeReplicasInState(String collectionName, Collection coreNames) throws InterruptedException {
-    Map result = new HashMap();
+    Map result = new HashMap<>();
     long endTime = System.nanoTime() + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
     while (true) {
       DocCollection coll = zkStateReader.getClusterState().getCollection(
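The "return new ArrayList<>();" in the hunks above shows that the diamond also infers from a method's declared return type, not only from a variable initializer. A sketch of the same idea (hypothetical method, Java 7 rules):

    import java.util.ArrayList;
    import java.util.List;

    class ReturnInference {
      // The diamond picks up <String> from the declared return type.
      static List<String> emptyNames() {
        return new ArrayList<>();
      }
    }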
core=" + coreName, e); - recentVersions = new ArrayList(0); + recentVersions = new ArrayList<>(0); } } diff --git a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java index 270bb0a1204..54ae5f6c6af 100644 --- a/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java +++ b/solr/core/src/java/org/apache/solr/cloud/SyncStrategy.java @@ -158,7 +158,7 @@ public class SyncStrategy { return true; } - List syncWith = new ArrayList(); + List syncWith = new ArrayList<>(); for (ZkCoreNodeProps node : nodes) { syncWith.add(node.getCoreUrl()); } diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkController.java b/solr/core/src/java/org/apache/solr/cloud/ZkController.java index 185a50ccb24..b0781d36eda 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ZkController.java +++ b/solr/core/src/java/org/apache/solr/cloud/ZkController.java @@ -578,7 +578,7 @@ public final class ZkController { ClusterState clusterState = zkStateReader.getClusterState(); Set collections = clusterState.getCollections(); - List updatedNodes = new ArrayList(); + List updatedNodes = new ArrayList<>(); for (String collectionName : collections) { DocCollection collection = clusterState.getCollection(collectionName); Collection slices = collection.getSlices(); @@ -755,7 +755,7 @@ public final class ZkController { String shardId = cloudDesc.getShardId(); - Map props = new HashMap(); + Map props = new HashMap<>(); // we only put a subset of props into the leader node props.put(ZkStateReader.BASE_URL_PROP, baseUrl); props.put(ZkStateReader.CORE_NAME_PROP, coreName); @@ -948,7 +948,7 @@ public final class ZkController { String shardId = cd.getCloudDescriptor().getShardId(); - Map props = new HashMap(); + Map props = new HashMap<>(); // we only put a subset of props into the leader node props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl()); props.put(ZkStateReader.CORE_NAME_PROP, cd.getName()); @@ -1131,7 +1131,7 @@ public final class ZkController { SolrParams params = cd.getParams(); try { - Map collectionProps = new HashMap(); + Map collectionProps = new HashMap<>(); // TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that? 
       String defaultConfigName = System.getProperty(COLLECTION_PARAM_PREFIX+CONFIGNAME_PROP, collection);
@@ -1540,7 +1540,7 @@ public final class ZkController {
       ZkNodeProps props = null;
       if(data != null) {
         props = ZkNodeProps.load(data);
-        Map newProps = new HashMap();
+        Map newProps = new HashMap<>();
         newProps.putAll(props.getProperties());
         newProps.put(CONFIGNAME_PROP, confSetName);
         props = new ZkNodeProps(newProps);
diff --git a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
index 5cbcdbe839f..c77af49a505 100644
--- a/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
+++ b/solr/core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@@ -76,8 +76,8 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
     public boolean closeCacheValueCalled = false;
     public boolean doneWithDir = false;
     private boolean deleteAfterCoreClose = false;
-    public Set removeEntries = new HashSet();
-    public Set closeEntries = new HashSet();
+    public Set removeEntries = new HashSet<>();
+    public Set closeEntries = new HashSet<>();
 
     public void setDeleteOnClose(boolean deleteOnClose, boolean deleteAfterCoreClose) {
       if (deleteOnClose) {
@@ -96,13 +96,13 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
   private static Logger log = LoggerFactory
       .getLogger(CachingDirectoryFactory.class);
 
-  protected Map byPathCache = new HashMap();
+  protected Map byPathCache = new HashMap<>();
 
-  protected Map byDirectoryCache = new IdentityHashMap();
+  protected Map byDirectoryCache = new IdentityHashMap<>();
 
-  protected Map> closeListeners = new HashMap>();
+  protected Map> closeListeners = new HashMap<>();
 
-  protected Set removeEntries = new HashSet();
+  protected Set removeEntries = new HashSet<>();
 
   private Double maxWriteMBPerSecFlush;
@@ -129,7 +129,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
       }
       List listeners = closeListeners.get(dir);
       if (listeners == null) {
-        listeners = new ArrayList();
+        listeners = new ArrayList<>();
         closeListeners.put(dir, listeners);
       }
       listeners.add(closeListener);
@@ -192,7 +192,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
       }
 
       values = byDirectoryCache.values();
-      Set closedDirs = new HashSet();
+      Set closedDirs = new HashSet<>();
       for (CacheValue val : values) {
         try {
           for (CacheValue v : val.closeEntries) {
@@ -248,7 +248,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
 
     // see if we are a subpath
     Collection values = byPathCache.values();
 
-    Collection cacheValues = new ArrayList(values);
+    Collection cacheValues = new ArrayList<>(values);
     cacheValues.remove(cacheValue);
     for (CacheValue otherCacheValue : cacheValues) {
       // if we are a parent path and a sub path is not already closed, get a sub path to close us later
@@ -556,7 +556,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
    * @lucene.internal
    */
   public synchronized Set getLivePaths() {
-    HashSet livePaths = new HashSet();
+    HashSet livePaths = new HashSet<>();
     for (CacheValue val : byPathCache.values()) {
       if (!val.doneWithDir) {
         livePaths.add(val.path);
diff --git a/solr/core/src/java/org/apache/solr/core/Config.java b/solr/core/src/java/org/apache/solr/core/Config.java
index 2e79b315163..8d1ef498c5b 100644
--- a/solr/core/src/java/org/apache/solr/core/Config.java
+++ b/solr/core/src/java/org/apache/solr/core/Config.java
@@ -301,14 +301,14 @@ public class Config {
    * or null if all attributes are known.
    */
   public Set getUnknownAttributes(Element element, String... knownAttributes) {
-    Set knownAttributeSet = new HashSet(Arrays.asList(knownAttributes));
+    Set knownAttributeSet = new HashSet<>(Arrays.asList(knownAttributes));
     Set unknownAttributeSet = null;
     NamedNodeMap attributes = element.getAttributes();
     for (int i = 0 ; i < attributes.getLength() ; ++i) {
       final String attributeName = attributes.item(i).getNodeName();
       if ( ! knownAttributeSet.contains(attributeName)) {
         if (null == unknownAttributeSet) {
-          unknownAttributeSet = new HashSet();
+          unknownAttributeSet = new HashSet<>();
         }
         unknownAttributeSet.add(attributeName);
       }
@@ -321,7 +321,7 @@ public class Config {
    * contains an attribute name that is not among knownAttributes.
    */
   public void complainAboutUnknownAttributes(String elementXpath, String... knownAttributes) {
-    SortedMap> problems = new TreeMap>();
+    SortedMap> problems = new TreeMap<>();
     NodeList nodeList = getNodeList(elementXpath, false);
     for (int i = 0 ; i < nodeList.getLength() ; ++i) {
       Element element = (Element)nodeList.item(i);
@@ -330,7 +330,7 @@ public class Config {
         String elementName = element.getNodeName();
         SortedSet allUnknownAttributes = problems.get(elementName);
         if (null == allUnknownAttributes) {
-          allUnknownAttributes = new TreeSet();
+          allUnknownAttributes = new TreeSet<>();
           problems.put(elementName, allUnknownAttributes);
         }
         allUnknownAttributes.addAll(unknownAttributes);
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSolr.java b/solr/core/src/java/org/apache/solr/core/ConfigSolr.java
index e4e0b254006..893cc2a93e0 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSolr.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSolr.java
@@ -269,7 +269,7 @@ public abstract class ConfigSolr {
   }
 
   protected Config config;
-  protected Map propMap = new HashMap();
+  protected Map propMap = new HashMap<>();
 
   public ConfigSolr(Config config) {
     this.config = config;
diff --git a/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java b/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
index f33bf620327..0b0dbbf348f 100644
--- a/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
+++ b/solr/core/src/java/org/apache/solr/core/ConfigSolrXmlOld.java
@@ -186,8 +186,8 @@ public class ConfigSolrXmlOld extends ConfigSolr {
     coreNodes = (NodeList) config.evaluate("solr/cores/core",
         XPathConstants.NODESET);
     // Check a couple of error conditions
-    Set names = new HashSet(); // for duplicate names
-    Map dirs = new HashMap(); // for duplicate
+    Set names = new HashSet<>(); // for duplicate names
+    Map dirs = new HashMap<>(); // for duplicate
                                  // data dirs.
     for (int idx = 0; idx < coreNodes.getLength(); ++idx) {
@@ -236,7 +236,7 @@ public class ConfigSolrXmlOld extends ConfigSolr {
   }
 
   public List getAllCoreNames() {
-    List ret = new ArrayList();
+    List ret = new ArrayList<>();
 
     synchronized (coreNodes) {
       for (int idx = 0; idx < coreNodes.getLength(); ++idx) {
diff --git a/solr/core/src/java/org/apache/solr/core/CoreContainer.java b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
index be8ecba8f36..ccf44058d78 100644
--- a/solr/core/src/java/org/apache/solr/core/CoreContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/CoreContainer.java
@@ -206,7 +206,7 @@ public class CoreContainer {
 
     shareSchema = cfg.hasSchemaCache();
 
     if (shareSchema) {
-      indexSchemaCache = new ConcurrentHashMap();
+      indexSchemaCache = new ConcurrentHashMap<>();
     }
 
     hostName = cfg.getHost();
@@ -227,10 +227,10 @@ public class CoreContainer {
         new DefaultSolrThreadFactory("coreLoadExecutor") );
 
     try {
-      CompletionService completionService = new ExecutorCompletionService(
+      CompletionService completionService = new ExecutorCompletionService<>(
           coreLoadExecutor);
 
-      Set> pending = new HashSet>();
+      Set> pending = new HashSet<>();
 
       List cds = coresLocator.discover(this);
       checkForDuplicateCoreNames(cds);
@@ -657,7 +657,7 @@ public class CoreContainer {
    */
   public Map getCoreInitFailures() {
     synchronized ( coreInitFailures ) {
-      return Collections.unmodifiableMap(new LinkedHashMap
+      return Collections.unmodifiableMap(new LinkedHashMap<>
                                          (coreInitFailures));
     }
   }
diff --git a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
index 8e7013afd52..f48c9daf41b 100644
--- a/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
+++ b/solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
@@ -44,10 +44,10 @@ import java.util.concurrent.atomic.AtomicInteger;
  */
 public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
   private final IndexDeletionPolicy deletionPolicy;
-  private volatile Map solrVersionVsCommits = new ConcurrentHashMap();
-  private final Map reserves = new ConcurrentHashMap();
+  private volatile Map solrVersionVsCommits = new ConcurrentHashMap<>();
+  private final Map reserves = new ConcurrentHashMap<>();
   private volatile IndexCommit latestCommit;
-  private final ConcurrentHashMap savedCommits = new ConcurrentHashMap();
+  private final ConcurrentHashMap savedCommits = new ConcurrentHashMap<>();
 
   public IndexDeletionPolicyWrapper(IndexDeletionPolicy deletionPolicy) {
     this.deletionPolicy = deletionPolicy;
@@ -102,7 +102,7 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
   }
 
   private List wrap(List list) {
-    List result = new ArrayList();
+    List result = new ArrayList<>();
     for (IndexCommit indexCommit : list) result.add(new IndexCommitWrapper(indexCommit));
     return result;
   }
@@ -232,7 +232,7 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
   }
 
   private void updateCommitPoints(List list) {
-    Map map = new ConcurrentHashMap();
+    Map map = new ConcurrentHashMap<>();
     for (IndexCommitWrapper wrapper : list) {
       if (!wrapper.isDeleted())
        map.put(wrapper.delegate.getGeneration(), wrapper.delegate);
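Copy constructors such as the new LinkedHashMap<>(coreInitFailures) above work the same way: the diamond is inferred from the surrounding target, while the argument just seeds the contents. A sketch mirroring that defensive-copy idiom (hypothetical names):

    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    class CopySnapshot {
      static Map<String, Exception> snapshot(Map<String, Exception> live) {
        // Copy constructor plus diamond; <> infers <String, Exception>.
        Map<String, Exception> copy = new LinkedHashMap<>(live);
        return Collections.unmodifiableMap(copy);
      }
    }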
diff --git a/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java b/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java
index 384fe0896da..7a96d629aec 100644
--- a/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java
+++ b/solr/core/src/java/org/apache/solr/core/JmxMonitoredMap.java
@@ -182,7 +182,7 @@ public class JmxMonitoredMap extends
   private ObjectName getObjectName(String key, SolrInfoMBean infoBean)
           throws MalformedObjectNameException {
-    Hashtable map = new Hashtable();
+    Hashtable map = new Hashtable<>();
     map.put("type", key);
     if (infoBean.getName() != null && !"".equals(infoBean.getName())) {
       map.put("id", infoBean.getName());
@@ -208,7 +208,7 @@ public class JmxMonitoredMap extends
 
     public SolrDynamicMBean(String coreHashCode, SolrInfoMBean managedResource) {
       this.infoBean = managedResource;
-      staticStats = new HashSet();
+      staticStats = new HashSet<>();
 
       // For which getters are already available in SolrInfoMBean
       staticStats.add("name");
@@ -221,7 +221,7 @@ public class JmxMonitoredMap extends
 
     @Override
     public MBeanInfo getMBeanInfo() {
-      ArrayList attrInfoList = new ArrayList();
+      ArrayList attrInfoList = new ArrayList<>();
 
       for (String stat : staticStats) {
         attrInfoList.add(new MBeanAttributeInfo(stat, String.class.getName(),
diff --git a/solr/core/src/java/org/apache/solr/core/PluginInfo.java b/solr/core/src/java/org/apache/solr/core/PluginInfo.java
index 2ecb617ad7d..19bbf9054e1 100644
--- a/solr/core/src/java/org/apache/solr/core/PluginInfo.java
+++ b/solr/core/src/java/org/apache/solr/core/PluginInfo.java
@@ -55,7 +55,7 @@ public class PluginInfo {
   }
 
   private List loadSubPlugins(Node node) {
-    List children = new ArrayList();
+    List children = new ArrayList<>();
     //if there is another sub tag with a non namedlist tag that has to be another plugin
     NodeList nlst = node.getChildNodes();
     for (int i = 0; i < nlst.getLength(); i++) {
@@ -99,13 +99,13 @@ public class PluginInfo {
    */
   public List getChildren(String type){
     if(children.isEmpty()) return children;
-    List result = new ArrayList();
+    List result = new ArrayList<>();
     for (PluginInfo child : children) if(type.equals(child.type)) result.add(child);
     return result;
   }
 
   public static final PluginInfo EMPTY_INFO = new PluginInfo("",Collections.emptyMap(), new NamedList(),Collections.emptyList());
 
-  private static final HashSet NL_TAGS = new HashSet
+  private static final HashSet NL_TAGS = new HashSet<>
     (Arrays.asList("lst", "arr",
        "bool",
        "str",
diff --git a/solr/core/src/java/org/apache/solr/core/RequestHandlers.java b/solr/core/src/java/org/apache/solr/core/RequestHandlers.java
index 63844cc9801..90fac1b4e07 100644
--- a/solr/core/src/java/org/apache/solr/core/RequestHandlers.java
+++ b/solr/core/src/java/org/apache/solr/core/RequestHandlers.java
@@ -45,7 +45,7 @@ public final class RequestHandlers {
   // Use a synchronized map - since the handlers can be changed at runtime,
   // the map implementation should be thread safe
   private final Map handlers =
-      new ConcurrentHashMap() ;
+      new ConcurrentHashMap<>() ;
 
   /**
    * Trim the trailing '/' if its there, and convert null to empty string.
@@ -80,7 +80,7 @@ public final class RequestHandlers {
    * @return a Map of all registered handlers of the specified type.
    */
   public Map getAll(Class clazz) {
-    Map result = new HashMap(7);
+    Map result = new HashMap<>(7);
     for (Map.Entry e : handlers.entrySet()) {
       if(clazz.isInstance(e.getValue())) result.put(e.getKey(), clazz.cast(e.getValue()));
     }
@@ -137,7 +137,7 @@ public final class RequestHandlers {
   void initHandlersFromConfig(SolrConfig config ){
     // use link map so we iterate in the same order
-    Map handlers = new LinkedHashMap();
+    Map handlers = new LinkedHashMap<>();
     for (PluginInfo info : config.getPluginInfos(SolrRequestHandler.class.getName())) {
       try {
         SolrRequestHandler requestHandler;
@@ -317,7 +317,7 @@ public final class RequestHandlers {
       if( _handler != null ) {
         return _handler.getStatistics();
       }
-      NamedList lst = new SimpleOrderedMap();
+      NamedList lst = new SimpleOrderedMap<>();
       lst.add("note", "not initialized yet" );
       return lst;
     }
diff --git a/solr/core/src/java/org/apache/solr/core/SolrConfig.java b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
index 45dcfeecfe9..5681667fa21 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrConfig.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrConfig.java
@@ -193,7 +193,7 @@ public class SolrConfig extends Config {
     documentCacheConfig = CacheConfig.getConfig(this, "query/documentCache");
     CacheConfig conf = CacheConfig.getConfig(this, "query/fieldValueCache");
     if (conf == null) {
-      Map args = new HashMap();
+      Map args = new HashMap<>();
       args.put("name","fieldValueCache");
       args.put("size","10000");
       args.put("initialSize","10");
@@ -318,7 +318,7 @@ public class SolrConfig extends Config {
   }
 
   public List readPluginInfos(String tag, boolean requireName, boolean requireClass) {
-    ArrayList result = new ArrayList();
+    ArrayList result = new ArrayList<>();
     NodeList nodes = (NodeList) evaluate(tag, XPathConstants.NODESET);
     for (int i=0; i<nodes.getLength(); i++) {
 
-  private Map> pluginStore = new LinkedHashMap>();
+  private Map> pluginStore = new LinkedHashMap<>();
 
   public final int maxWarmingSearchers;
   public final boolean unlockOnStartup;
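The diamond is not special to the JDK collections: it applies to any generic class, which is why Solr's own NamedList and SimpleOrderedMap pick it up throughout this patch. A sketch with a stand-in generic class (hypothetical, not Solr's actual NamedList):

    class Holder<T> {
      private T value;
      void set(T value) { this.value = value; }
      T get() { return value; }
    }

    class UserDefinedDiamond {
      // Works for user-defined generics exactly as for java.util types.
      Holder<String> title = new Holder<>();
    }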
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCore.java b/solr/core/src/java/org/apache/solr/core/SolrCore.java
index a091273a698..a87e2c3fd9a 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCore.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCore.java
@@ -360,8 +360,8 @@ public final class SolrCore implements SolrInfoMBean {
     }
   }
 
-  final List firstSearcherListeners = new ArrayList();
-  final List newSearcherListeners = new ArrayList();
+  final List firstSearcherListeners = new ArrayList<>();
+  final List newSearcherListeners = new ArrayList<>();
 
   /**
    * NOTE: this function is not thread safe.  However, it is safe to call within the
@@ -464,7 +464,7 @@ public final class SolrCore implements SolrInfoMBean {
   }
 
   // protect via synchronized(SolrCore.class)
-  private static Set dirs = new HashSet();
+  private static Set dirs = new HashSet<>();
 
   void initIndex(boolean reload) throws IOException {
@@ -716,7 +716,7 @@ public final class SolrCore implements SolrInfoMBean {
       infoRegistry = new JmxMonitoredMap(name, String.valueOf(this.hashCode()), config.jmxConfig);
     } else  {
       log.info("JMX monitoring not detected for core: " + name);
-      infoRegistry = new ConcurrentHashMap();
+      infoRegistry = new ConcurrentHashMap<>();
     }
 
     infoRegistry.put("fieldCache", new SolrFieldCacheMBean());
@@ -923,7 +923,7 @@ public final class SolrCore implements SolrInfoMBean {
    * Load the request processors
    */
   private Map loadUpdateProcessorChains() {
-    Map map = new HashMap();
+    Map map = new HashMap<>();
     UpdateRequestProcessorChain def = initPlugins(map,UpdateRequestProcessorChain.class, UpdateRequestProcessorChain.class.getName());
     if(def == null){
       def = map.get(null);
@@ -1141,7 +1141,7 @@ public final class SolrCore implements SolrInfoMBean {
 
   public void addCloseHook( CloseHook hook )
   {
     if( closeHooks == null ) {
-      closeHooks = new ArrayList();
+      closeHooks = new ArrayList<>();
     }
     closeHooks.add( hook );
   }
@@ -1218,7 +1218,7 @@ public final class SolrCore implements SolrInfoMBean {
    */
   private Map loadSearchComponents()
   {
-    Map components = new HashMap();
+    Map components = new HashMap<>();
     initPlugins(components,SearchComponent.class);
     for (Map.Entry e : components.entrySet()) {
       SearchComponent c = e.getValue();
@@ -1303,8 +1303,8 @@ public final class SolrCore implements SolrInfoMBean {
 
   // All of the normal open searchers.  Don't access this directly.
   // protected by synchronizing on searcherLock.
-  private final LinkedList> _searchers = new LinkedList>();
-  private final LinkedList> _realtimeSearchers = new LinkedList>();
+  private final LinkedList> _searchers = new LinkedList<>();
+  private final LinkedList> _realtimeSearchers = new LinkedList<>();
 
   final ExecutorService searcherExecutor = Executors.newSingleThreadExecutor(
       new DefaultSolrThreadFactory("searcherExecutor"));
@@ -1940,7 +1940,7 @@ public final class SolrCore implements SolrInfoMBean {
   public static void preDecorateResponse(SolrQueryRequest req, SolrQueryResponse rsp) {
     // setup response header
-    final NamedList responseHeader = new SimpleOrderedMap();
+    final NamedList responseHeader = new SimpleOrderedMap<>();
     rsp.add("responseHeader", responseHeader);
 
     // toLog is a local ref to the same NamedList used by the response
@@ -2010,10 +2010,10 @@ public final class SolrCore implements SolrInfoMBean {
 
   private QueryResponseWriter defaultResponseWriter;
 
-  private final Map responseWriters = new HashMap();
+  private final Map responseWriters = new HashMap<>();
   public static final Map DEFAULT_RESPONSE_WRITERS ;
   static{
-    HashMap m= new HashMap();
+    HashMap m= new HashMap<>();
     m.put("xml", new XMLResponseWriter());
     m.put("standard", m.get("xml"));
     m.put("json", new JSONResponseWriter());
@@ -2032,7 +2032,7 @@ public final class SolrCore implements SolrInfoMBean {
    * writers may also be configured.
    */
   private void initWriters() {
     // use link map so we iterate in the same order
-    Map writers = new LinkedHashMap();
+    Map writers = new LinkedHashMap<>();
     for (PluginInfo info : solrConfig.getPluginInfos(QueryResponseWriter.class.getName())) {
       try {
         QueryResponseWriter writer;
@@ -2112,7 +2112,7 @@ public final class SolrCore implements SolrInfoMBean {
     return getQueryResponseWriter(request.getParams().get(CommonParams.WT));
   }
 
-  private final Map qParserPlugins = new HashMap();
+  private final Map qParserPlugins = new HashMap<>();
 
   /** Configure the query parsers. */
   private void initQParsers() {
@@ -2140,7 +2140,7 @@ public final class SolrCore implements SolrInfoMBean {
     throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown query parser '"+parserName+"'");
   }
 
-  private final HashMap valueSourceParsers = new HashMap();
+  private final HashMap valueSourceParsers = new HashMap<>();
 
   /** Configure the ValueSource (function) plugins */
   private void initValueSourceParsers() {
@@ -2161,7 +2161,7 @@ public final class SolrCore implements SolrInfoMBean {
   }
 
-  private final HashMap transformerFactories = new HashMap();
+  private final HashMap transformerFactories = new HashMap<>();
 
   /** Configure the TransformerFactory plugins */
   private void initTransformerFactories() {
@@ -2220,7 +2220,7 @@ public final class SolrCore implements SolrInfoMBean {
    */
   public List initPlugins(List pluginInfos, Class type, String defClassName) {
     if(pluginInfos.isEmpty()) return Collections.emptyList();
-    List result = new ArrayList();
+    List result = new ArrayList<>();
     for (PluginInfo info : pluginInfos) result.add(createInitInstance(info,type, type.getSimpleName(), defClassName));
     return result;
   }
@@ -2251,10 +2251,10 @@ public final class SolrCore implements SolrInfoMBean {
           "solrconfig.xml uses deprecated , Please "+
           "update your config to use the ShowFileRequestHandler." );
       if( getRequestHandler( "/admin/file" ) == null ) {
-        NamedList invariants = new NamedList();
+        NamedList invariants = new NamedList<>();
 
         // Hide everything...
-        Set hide = new HashSet();
+        Set hide = new HashSet<>();
 
         for (String file : solrConfig.getResourceLoader().listConfigDir()) {
           hide.add(file.toUpperCase(Locale.ROOT));
@@ -2269,7 +2269,7 @@ public final class SolrCore implements SolrInfoMBean {
           invariants.add( ShowFileRequestHandler.HIDDEN, s );
         }
 
-        NamedList args = new NamedList();
+        NamedList args = new NamedList<>();
         args.add( "invariants", invariants );
         ShowFileRequestHandler handler = new ShowFileRequestHandler();
         handler.init( args );
@@ -2330,7 +2330,7 @@ public final class SolrCore implements SolrInfoMBean {
   @Override
   public NamedList getStatistics() {
-    NamedList lst = new SimpleOrderedMap();
+    NamedList lst = new SimpleOrderedMap<>();
     lst.add("coreName", name==null ? "(null)" : name);
     lst.add("startTime", new Date(startTime));
     lst.add("refCount", getOpenCount());
diff --git a/solr/core/src/java/org/apache/solr/core/SolrCores.java b/solr/core/src/java/org/apache/solr/core/SolrCores.java
index a22c1c4f423..dc7c232d104 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrCores.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrCores.java
@@ -36,16 +36,16 @@ import java.util.concurrent.ConcurrentHashMap;
 class SolrCores {
   private static Object modifyLock = new Object(); // for locking around manipulating any of the core maps.
-  private final Map cores = new LinkedHashMap(); // For "permanent" cores
+  private final Map cores = new LinkedHashMap<>(); // For "permanent" cores
   //WARNING! The _only_ place you put anything into the list of transient cores is with the putTransientCore method!
-  private Map transientCores = new LinkedHashMap(); // For "lazily loaded" cores
+  private Map transientCores = new LinkedHashMap<>(); // For "lazily loaded" cores
 
-  private final Map dynamicDescriptors = new LinkedHashMap();
+  private final Map dynamicDescriptors = new LinkedHashMap<>();
 
-  private final Map createdCores = new LinkedHashMap();
+  private final Map createdCores = new LinkedHashMap<>();
 
-  private Map coreToOrigName = new ConcurrentHashMap();
+  private Map coreToOrigName = new ConcurrentHashMap<>();
 
   private final CoreContainer container;
@@ -53,11 +53,11 @@ class SolrCores {
 
   // This map will hold objects that are being currently operated on. The core (value) may be null in the case of
   // initial load. The rule is, never to any operation on a core that is currently being operated upon.
-  private static final Set pendingCoreOps = new HashSet();
+  private static final Set pendingCoreOps = new HashSet<>();
 
   // Due to the fact that closes happen potentially whenever anything is _added_ to the transient core list, we need
   // to essentially queue them up to be handled via pendingCoreOps.
-  private static final List pendingCloses = new ArrayList();
+  private static final List pendingCloses = new ArrayList<>();
 
   SolrCores(CoreContainer container) {
     this.container = container;
@@ -95,7 +95,7 @@ class SolrCores {
   // We are shutting down. You can't hold the lock on the various lists of cores while they shut down, so we need to
   // make a temporary copy of the names and shut them down outside the lock.
   protected void close() {
-    Collection coreList = new ArrayList();
+    Collection coreList = new ArrayList<>();
 
     // It might be possible for one of the cores to move from one list to another while we're closing them. So
     // loop through the lists until they're all empty. In particular, the core could have moved from the transient
@@ -145,7 +145,7 @@ class SolrCores {
   }
 
   List getCores() {
-    List lst = new ArrayList();
+    List lst = new ArrayList<>();
 
     synchronized (modifyLock) {
       lst.addAll(cores.values());
@@ -154,7 +154,7 @@ class SolrCores {
   }
 
   Set getCoreNames() {
-    Set set = new TreeSet();
+    Set set = new TreeSet<>();
 
     synchronized (modifyLock) {
       set.addAll(cores.keySet());
@@ -164,7 +164,7 @@ class SolrCores {
   }
 
   List getCoreNames(SolrCore core) {
-    List lst = new ArrayList();
+    List lst = new ArrayList<>();
 
     synchronized (modifyLock) {
       for (Map.Entry entry : cores.entrySet()) {
@@ -187,7 +187,7 @@ class SolrCores {
   /**
    * @return all cores names, whether loaded or unloaded.
    */
   public Collection getAllCoreNames() {
-    Set set = new TreeSet();
+    Set set = new TreeSet<>();
     synchronized (modifyLock) {
       set.addAll(cores.keySet());
       set.addAll(transientCores.keySet());
diff --git a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
index 5d6e2895426..df5921e9824 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java
@@ -390,7 +390,7 @@ public class SolrResourceLoader implements ResourceLoader,Closeable
   /*
    * A static map of short class name to fully qualified class name
    */
-  private static final Map classNameCache = new ConcurrentHashMap();
+  private static final Map classNameCache = new ConcurrentHashMap<>();
 
   // Using this pattern, legacy analysis components from previous Solr versions are identified and delegated to SPI loader:
   private static final Pattern legacyAnalysisPattern =
@@ -742,7 +742,7 @@ public class SolrResourceLoader implements ResourceLoader,Closeable
    */
   private static final Map awareCompatibility;
   static {
-    awareCompatibility = new HashMap();
+    awareCompatibility = new HashMap<>();
     awareCompatibility.put(
       SolrCoreAware.class, new Class[] {
         CodecFactory.class,
diff --git a/solr/core/src/java/org/apache/solr/core/SolrXMLCoresLocator.java b/solr/core/src/java/org/apache/solr/core/SolrXMLCoresLocator.java
index 632b4d6fef6..bd59ad80f4d 100644
--- a/solr/core/src/java/org/apache/solr/core/SolrXMLCoresLocator.java
+++ b/solr/core/src/java/org/apache/solr/core/SolrXMLCoresLocator.java
@@ -143,7 +143,7 @@ public class SolrXMLCoresLocator implements CoresLocator {
   @Override
   public synchronized final void persist(CoreContainer cc, CoreDescriptor... coreDescriptors) {
-    List cds = new ArrayList(cc.getCoreDescriptors().size() + coreDescriptors.length);
+    List cds = new ArrayList<>(cc.getCoreDescriptors().size() + coreDescriptors.length);
 
     cds.addAll(cc.getCoreDescriptors());
     cds.addAll(Arrays.asList(coreDescriptors));
diff --git a/solr/core/src/java/org/apache/solr/core/ZkContainer.java b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
index e0bcd45feaa..e992aaf8b9c 100644
--- a/solr/core/src/java/org/apache/solr/core/ZkContainer.java
+++ b/solr/core/src/java/org/apache/solr/core/ZkContainer.java
@@ -141,7 +141,7 @@ public class ZkContainer {
         @Override
         public List getCurrentDescriptors() {
-          List descriptors = new ArrayList(
+          List descriptors = new ArrayList<>(
               cc.getCoreNames().size());
           Collection cores = cc.getCores();
           for (SolrCore core : cores) {
TokenizerFactory tfac = tokenizerChain.getTokenizerFactory(); TokenFilterFactory[] filtfacs = tokenizerChain.getTokenFilterFactories(); - NamedList namedList = new NamedList(); + NamedList namedList = new NamedList<>(); if( cfiltfacs != null ){ String source = value; @@ -144,7 +144,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase { */ protected Set getQueryTokenSet(String query, Analyzer analyzer) { try (TokenStream tokenStream = analyzer.tokenStream("", query)){ - final Set tokens = new HashSet(); + final Set tokens = new HashSet<>(); final TermToBytesRefAttribute bytesAtt = tokenStream.getAttribute(TermToBytesRefAttribute.class); final BytesRef bytes = bytesAtt.getBytesRef(); @@ -170,7 +170,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase { * @return List of tokens produced from the TokenStream */ private List analyzeTokenStream(TokenStream tokenStream) { - final List tokens = new ArrayList(); + final List tokens = new ArrayList<>(); final PositionIncrementAttribute posIncrAtt = tokenStream.addAttribute(PositionIncrementAttribute.class); final TokenTrackingAttribute trackerAtt = tokenStream.addAttribute(TokenTrackingAttribute.class); // for backwards compatibility, add all "common" attributes @@ -212,7 +212,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase { * @return List of NamedLists containing the relevant information taken from the tokens */ private List convertTokensToNamedLists(final List tokenList, AnalysisContext context) { - final List tokensNamedLists = new ArrayList(); + final List tokensNamedLists = new ArrayList<>(); final FieldType fieldType = context.getFieldType(); final AttributeSource[] tokens = tokenList.toArray(new AttributeSource[tokenList.size()]); @@ -241,7 +241,7 @@ public abstract class AnalysisRequestHandlerBase extends RequestHandlerBase { for (int i = 0; i < tokens.length; i++) { AttributeSource token = tokens[i]; - final NamedList tokenNamedList = new SimpleOrderedMap(); + final NamedList tokenNamedList = new SimpleOrderedMap<>(); final TermToBytesRefAttribute termAtt = token.getAttribute(TermToBytesRefAttribute.class); BytesRef rawBytes = termAtt.getBytesRef(); termAtt.fillBytesRef(); diff --git a/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java index 6e8c53ae7d3..a75857e65c2 100644 --- a/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java @@ -198,11 +198,11 @@ public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase { NamedList handleAnalysisRequest(DocumentAnalysisRequest request, IndexSchema schema) { SchemaField uniqueKeyField = schema.getUniqueKeyField(); - NamedList result = new SimpleOrderedMap(); + NamedList result = new SimpleOrderedMap<>(); for (SolrInputDocument document : request.getDocuments()) { - NamedList theTokens = new SimpleOrderedMap(); + NamedList theTokens = new SimpleOrderedMap<>(); result.add(document.getFieldValue(uniqueKeyField.getName()).toString(), theTokens); for (String name : document.getFieldNames()) { @@ -212,7 +212,7 @@ public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase { continue; } - NamedList fieldTokens = new SimpleOrderedMap(); + NamedList fieldTokens = new SimpleOrderedMap<>(); theTokens.add(name, fieldTokens); FieldType fieldType = 
schema.getFieldType(name); @@ -241,7 +241,7 @@ public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase { AnalysisContext analysisContext = new AnalysisContext(fieldType, analyzer, termsToMatch); Collection fieldValues = document.getFieldValues(name); NamedList> indexTokens - = new SimpleOrderedMap>(); + = new SimpleOrderedMap<>(); for (Object fieldValue : fieldValues) { indexTokens.add(String.valueOf(fieldValue), analyzeValue(fieldValue.toString(), analysisContext)); diff --git a/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java index 7556c304ec6..a539d7c92fe 100644 --- a/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/DumpRequestHandler.java @@ -38,10 +38,10 @@ public class DumpRequestHandler extends RequestHandlerBase // Write the streams... if( req.getContentStreams() != null ) { - ArrayList> streams = new ArrayList>(); + ArrayList> streams = new ArrayList<>(); // Cycle through each stream for( ContentStream content : req.getContentStreams() ) { - NamedList stream = new SimpleOrderedMap(); + NamedList stream = new SimpleOrderedMap<>(); stream.add( "name", content.getName() ); stream.add( "sourceInfo", content.getSourceInfo() ); stream.add( "size", content.getSize() ); diff --git a/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java index 5774aa3479c..7bd82a55968 100644 --- a/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/FieldAnalysisRequestHandler.java @@ -173,9 +173,9 @@ public class FieldAnalysisRequestHandler extends AnalysisRequestHandlerBase { * @return The analysis breakdown as a named list. */ protected NamedList handleAnalysisRequest(FieldAnalysisRequest request, IndexSchema schema) { - NamedList analysisResults = new SimpleOrderedMap(); + NamedList analysisResults = new SimpleOrderedMap<>(); - NamedList fieldTypeAnalysisResults = new SimpleOrderedMap(); + NamedList fieldTypeAnalysisResults = new SimpleOrderedMap<>(); if (request.getFieldTypes() != null) { for (String fieldTypeName : request.getFieldTypes()) { FieldType fieldType = schema.getFieldTypes().get(fieldTypeName); @@ -183,7 +183,7 @@ public class FieldAnalysisRequestHandler extends AnalysisRequestHandlerBase { } } - NamedList fieldNameAnalysisResults = new SimpleOrderedMap(); + NamedList fieldNameAnalysisResults = new SimpleOrderedMap<>(); if (request.getFieldNames() != null) { for (String fieldName : request.getFieldNames()) { FieldType fieldType = schema.getFieldType(fieldName); @@ -215,7 +215,7 @@ public class FieldAnalysisRequestHandler extends AnalysisRequestHandlerBase { ? 
getQueryTokenSet(queryValue, fieldType.getQueryAnalyzer()) : EMPTY_BYTES_SET; - NamedList analyzeResults = new SimpleOrderedMap(); + NamedList analyzeResults = new SimpleOrderedMap<>(); if (analysisRequest.getFieldValue() != null) { AnalysisContext context = new AnalysisContext(fieldName, fieldType, fieldType.getAnalyzer(), termsToMatch); NamedList analyzedTokens = analyzeValue(analysisRequest.getFieldValue(), context); diff --git a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java b/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java index 763f4267e58..280e6b9fa61 100644 --- a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java @@ -98,7 +98,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase String[] fqs = req.getParams().getParams(CommonParams.FQ); if (fqs!=null && fqs.length!=0) { - filters = new ArrayList(); + filters = new ArrayList<>(); for (String fq : fqs) { if (fq != null && fq.trim().length()!=0) { QParser fqp = QParser.getParser(fq, null, req); @@ -186,14 +186,14 @@ public class MoreLikeThisHandler extends RequestHandlerBase if( interesting != null ) { if( termStyle == TermStyle.DETAILS ) { - NamedList it = new NamedList(); + NamedList it = new NamedList<>(); for( InterestingTerm t : interesting ) { it.add( t.term.toString(), t.boost ); } rsp.add( "interestingTerms", it ); } else { - List it = new ArrayList( interesting.size() ); + List it = new ArrayList<>( interesting.size() ); for( InterestingTerm t : interesting ) { it.add( t.term.text()); } @@ -236,7 +236,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase if (null != dbgInfo) { if (null != filters) { dbgInfo.add("filter_queries",req.getParams().getParams(CommonParams.FQ)); - List fqs = new ArrayList(filters.size()); + List fqs = new ArrayList<>(filters.size()); for (Query fq : filters) { fqs.add(QueryParsing.toString(fq, req.getSchema())); } @@ -388,7 +388,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase public NamedList getMoreLikeThese( DocList docs, int rows, int flags ) throws IOException { IndexSchema schema = searcher.getSchema(); - NamedList mlt = new SimpleOrderedMap(); + NamedList mlt = new SimpleOrderedMap<>(); DocIterator iterator = docs.iterator(); while( iterator.hasNext() ) { int id = iterator.nextDoc(); @@ -404,7 +404,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase public NamedList getMoreLikeTheseQuery(DocList docs) throws IOException { IndexSchema schema = searcher.getSchema(); - NamedList result = new NamedList(); + NamedList result = new NamedList<>(); DocIterator iterator = docs.iterator(); while (iterator.hasNext()) { int id = iterator.nextDoc(); diff --git a/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java b/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java index 03ff2919f79..76fc29ac199 100644 --- a/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/RealTimeGetHandler.java @@ -28,7 +28,7 @@ public class RealTimeGetHandler extends SearchHandler { @Override protected List getDefaultComponents() { - List names = new ArrayList(1); + List names = new ArrayList<>(1); names.add(RealTimeGetComponent.COMPONENT_NAME); return names; } diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java index fbd8a389247..a1238c5deb2 100644 --- 
a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java @@ -140,7 +140,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw private String includeConfFiles; - private NamedList confFileNameAlias = new NamedList(); + private NamedList confFileNameAlias = new NamedList<>(); private boolean isMaster = false; @@ -156,7 +156,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw private int numTimesReplicated = 0; - private final Map confFileInfoCache = new HashMap(); + private final Map confFileInfoCache = new HashMap<>(); private Integer reserveCommitDuration = SnapPuller.readInterval("00:00:10"); @@ -270,14 +270,14 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw private List> getCommits() { Map commits = core.getDeletionPolicy().getCommits(); - List> l = new ArrayList>(); + List> l = new ArrayList<>(); for (IndexCommit c : commits.values()) { try { - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); nl.add("indexVersion", IndexDeletionPolicyWrapper.getCommitTimestamp(c)); nl.add(GENERATION, c.getGeneration()); - List commitList = new ArrayList(c.getFileNames().size()); + List commitList = new ArrayList<>(c.getFileNames().size()); commitList.addAll(c.getFileNames()); Collections.sort(commitList); nl.add(CMD_GET_FILE_LIST, commitList); @@ -404,19 +404,19 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw } // reserve the indexcommit for sometime core.getDeletionPolicy().setReserveDuration(gen, reserveCommitDuration); - List> result = new ArrayList>(); + List> result = new ArrayList<>(); Directory dir = null; try { // get all the files in the commit // use a set to workaround possible Lucene bug which returns same file // name multiple times - Collection files = new HashSet(commit.getFileNames()); + Collection files = new HashSet<>(commit.getFileNames()); dir = core.getDirectoryFactory().get(core.getNewIndexDir(), DirContext.DEFAULT, core.getSolrConfig().indexConfig.lockType); try { for (String fileName : files) { if (fileName.endsWith(".lock")) continue; - Map fileMeta = new HashMap(); + Map fileMeta = new HashMap<>(); fileMeta.put(NAME, fileName); fileMeta.put(SIZE, dir.fileLength(fileName)); result.add(fileMeta); @@ -446,7 +446,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw */ List> getConfFileInfoFromCache(NamedList nameAndAlias, final Map confFileInfoCache) { - List> confFiles = new ArrayList>(); + List> confFiles = new ArrayList<>(); synchronized (confFileInfoCache) { File confDir = new File(core.getResourceLoader().getConfigDir()); Checksum checksum = null; @@ -482,7 +482,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw } Map getAsMap() { - Map map = new HashMap(); + Map map = new HashMap<>(); map.put(NAME, name); map.put(SIZE, size); map.put(CHECKSUM, checksum); @@ -608,9 +608,9 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw * Used for showing statistics and progress information. 
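The getFileList code above builds its response as a list of per-file metadata maps. This is a minimal sketch of that shape, assuming the NAME and SIZE keys seen in ReplicationHandler; the real code reads sizes with dir.fileLength(fileName), which is replaced by a placeholder here so the example stays self-contained.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class FileListSketch {
    static final String NAME = "name";
    static final String SIZE = "size";

    // Nested generics are where the diamond pays off most: the pre-Java-7
    // form was "new ArrayList<Map<String, Object>>()", repeating the whole type.
    static List<Map<String, Object>> describe(Iterable<String> fileNames) {
        List<Map<String, Object>> result = new ArrayList<>();
        for (String fileName : fileNames) {
            if (fileName.endsWith(".lock")) continue; // skip lock files, as the handler does
            Map<String, Object> fileMeta = new HashMap<>();
            fileMeta.put(NAME, fileName);
            fileMeta.put(SIZE, 0L); // placeholder; the real code uses dir.fileLength(fileName)
            result.add(fileMeta);
        }
        return result;
    }
}
```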
*/ private NamedList getReplicationDetails(boolean showSlaveDetails) { - NamedList details = new SimpleOrderedMap(); - NamedList master = new SimpleOrderedMap(); - NamedList slave = new SimpleOrderedMap(); + NamedList details = new SimpleOrderedMap<>(); + NamedList master = new SimpleOrderedMap<>(); + NamedList slave = new SimpleOrderedMap<>(); details.add("indexSize", NumberUtils.readableSize(getIndexSize())); details.add("indexPath", core.getIndexDir()); @@ -677,7 +677,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw if (isReplicating) { try { long bytesToDownload = 0; - List filesToDownload = new ArrayList(); + List filesToDownload = new ArrayList<>(); for (Map file : snapPuller.getFilesToDownload()) { filesToDownload.add((String) file.get(NAME)); bytesToDownload += (Long) file.get(SIZE); @@ -694,7 +694,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw slave.add("bytesToDownload", NumberUtils.readableSize(bytesToDownload)); long bytesDownloaded = 0; - List filesDownloaded = new ArrayList(); + List filesDownloaded = new ArrayList<>(); for (Map file : snapPuller.getFilesDownloaded()) { filesDownloaded.add((String) file.get(NAME)); bytesDownloaded += (Long) file.get(SIZE); @@ -776,7 +776,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw } catch (NumberFormatException e) {/*no op*/ } } else if (clzz == List.class) { String ss[] = s.split(","); - List l = new ArrayList(); + List l = new ArrayList<>(); for (int i = 0; i < ss.length; i++) { l.add(new Date(Long.valueOf(ss[i])).toString()); } @@ -788,7 +788,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw } private List getReplicateAfterStrings() { - List replicateAfter = new ArrayList(); + List replicateAfter = new ArrayList<>(); if (replicateOnCommit) replicateAfter.add("commit"); if (replicateOnOptimize) @@ -873,7 +873,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw if (!enableSlave && !enableMaster) { enableMaster = true; - master = new NamedList(); + master = new NamedList<>(); } if (enableMaster) { diff --git a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java index 774ed18aaf9..dfe22a670d3 100644 --- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java +++ b/solr/core/src/java/org/apache/solr/handler/RequestHandlerBase.java @@ -196,7 +196,7 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo @Override public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap(); + NamedList lst = new SimpleOrderedMap<>(); Snapshot snapshot = requestTimes.getSnapshot(); lst.add("handlerStart",handlerStart); lst.add("requests", numRequests.longValue()); diff --git a/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java b/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java index fb5e8e44c0b..2266b71b9e1 100644 --- a/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java +++ b/solr/core/src/java/org/apache/solr/handler/RequestHandlerUtils.java @@ -75,7 +75,7 @@ public class RequestHandlerUtils } - private static Set commitParams = new HashSet(Arrays.asList(new String[]{UpdateParams.OPEN_SEARCHER, UpdateParams.WAIT_SEARCHER, UpdateParams.SOFT_COMMIT, UpdateParams.EXPUNGE_DELETES, UpdateParams.MAX_OPTIMIZE_SEGMENTS, UpdateParams.PREPARE_COMMIT})); + private static Set commitParams = new 
HashSet<>(Arrays.asList(new String[]{UpdateParams.OPEN_SEARCHER, UpdateParams.WAIT_SEARCHER, UpdateParams.SOFT_COMMIT, UpdateParams.EXPUNGE_DELETES, UpdateParams.MAX_OPTIMIZE_SEGMENTS, UpdateParams.PREPARE_COMMIT})); public static void validateCommitParams(SolrParams params) { Iterator i = params.getParameterNamesIterator(); diff --git a/solr/core/src/java/org/apache/solr/handler/SnapPuller.java b/solr/core/src/java/org/apache/solr/handler/SnapPuller.java index e9ee9ae24e1..b4d080c6857 100644 --- a/solr/core/src/java/org/apache/solr/handler/SnapPuller.java +++ b/solr/core/src/java/org/apache/solr/handler/SnapPuller.java @@ -558,7 +558,7 @@ public class SnapPuller { * @throws IOException on IO error */ private void logReplicationTimeAndConfFiles(Collection> modifiedConfFiles, boolean successfulInstall) throws IOException { - List confFiles = new ArrayList(); + List confFiles = new ArrayList<>(); if (modifiedConfFiles != null && !modifiedConfFiles.isEmpty()) for (Map map1 : modifiedConfFiles) confFiles.add((String) map1.get(NAME)); @@ -641,7 +641,7 @@ public class SnapPuller { private StringBuilder readToStringBuilder(long replicationTime, String str) { StringBuilder sb = new StringBuilder(); - List l = new ArrayList(); + List l = new ArrayList<>(); if (str != null && str.length() != 0) { String[] ss = str.split(","); for (int i = 0; i < ss.length; i++) { @@ -737,7 +737,7 @@ public class SnapPuller { localFileFetcher = new LocalFsFileFetcher(tmpconfDir, file, saveAs, true, latestGeneration); currentFile = file; localFileFetcher.fetchFile(); - confFilesDownloaded.add(new HashMap(file)); + confFilesDownloaded.add(new HashMap<>(file)); } // this is called before copying the files to the original conf dir // so that if there is an exception avoid corrupting the original files. @@ -769,7 +769,7 @@ public class SnapPuller { (String) file.get(NAME), false, latestGeneration); currentFile = file; dirFileFetcher.fetchFile(); - filesDownloaded.add(new HashMap(file)); + filesDownloaded.add(new HashMap<>(file)); } else { LOG.info("Skipping download for " + file.get(NAME) + " because it already exists"); @@ -836,7 +836,7 @@ public class SnapPuller { } } String segmentsFile = null; - List movedfiles = new ArrayList(); + List movedfiles = new ArrayList<>(); for (Map f : filesDownloaded) { String fname = (String) f.get(NAME); // the segments file must be copied last @@ -973,7 +973,7 @@ public class SnapPuller { } - private final Map confFileInfoCache = new HashMap(); + private final Map confFileInfoCache = new HashMap<>(); /** * The local conf files are compared with the conf files in the master. If they are same (by checksum) do not copy. @@ -986,7 +986,7 @@ public class SnapPuller { if (confFilesToDownload == null || confFilesToDownload.isEmpty()) return Collections.EMPTY_LIST; //build a map with alias/name as the key - Map> nameVsFile = new HashMap>(); + Map> nameVsFile = new HashMap<>(); NamedList names = new NamedList(); for (Map map : confFilesToDownload) { //if alias is present that is the name the file may have in the slave @@ -1063,25 +1063,25 @@ public class SnapPuller { //make a copy first because it can be null later List> tmp = confFilesToDownload; //create a new instance. or else iterator may fail - return tmp == null ? Collections.EMPTY_LIST : new ArrayList>(tmp); + return tmp == null ? 
Collections.EMPTY_LIST : new ArrayList<>(tmp); } List> getConfFilesDownloaded() { //make a copy first because it can be null later List> tmp = confFilesDownloaded; // NOTE: it's safe to make a copy of a SynchronizedCollection(ArrayList) - return tmp == null ? Collections.EMPTY_LIST : new ArrayList>(tmp); + return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp); } List> getFilesToDownload() { //make a copy first because it can be null later List> tmp = filesToDownload; - return tmp == null ? Collections.EMPTY_LIST : new ArrayList>(tmp); + return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp); } List> getFilesDownloaded() { List> tmp = filesDownloaded; - return tmp == null ? Collections.EMPTY_LIST : new ArrayList>(tmp); + return tmp == null ? Collections.EMPTY_LIST : new ArrayList<>(tmp); } // TODO: currently does not reflect conf files @@ -1090,7 +1090,7 @@ public class SnapPuller { DirectoryFileFetcher tmpFileFetcher = dirFileFetcher; if (tmp == null) return null; - tmp = new HashMap(tmp); + tmp = new HashMap<>(tmp); if (tmpFileFetcher != null) tmp.put("bytesDownloaded", tmpFileFetcher.bytesDownloaded); return tmp; diff --git a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java index ba88a3fe13c..14df3d39c18 100644 --- a/solr/core/src/java/org/apache/solr/handler/SnapShooter.java +++ b/solr/core/src/java/org/apache/solr/handler/SnapShooter.java @@ -82,7 +82,7 @@ public class SnapShooter { void createSnapshot(final IndexCommit indexCommit, int numberToKeep, ReplicationHandler replicationHandler) { LOG.info("Creating backup snapshot..."); - NamedList details = new NamedList(); + NamedList details = new NamedList<>(); details.add("startTime", new Date().toString()); File snapShotDir = null; String directoryName = null; @@ -131,7 +131,7 @@ public class SnapShooter { } private void deleteOldBackups(int numberToKeep) { File[] files = new File(snapDir).listFiles(); - List dirs = new ArrayList(); + List dirs = new ArrayList<>(); for(File f : files) { OldBackupDirectory obd = new OldBackupDirectory(f); if(obd.dir != null) { diff --git a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java index aa46b2ef0b8..ed193bf44d7 100644 --- a/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/UpdateRequestHandler.java @@ -98,7 +98,7 @@ public class UpdateRequestHandler extends ContentStreamHandlerBase { String wt = loader.getDefaultWT(); // Make sure it is a valid writer if(req.getCore().getQueryResponseWriter(wt)!=null) { - Map map = new HashMap(1); + Map map = new HashMap<>(1); map.put(CommonParams.WT, wt); req.setParams(SolrParams.wrapDefaults(params, new MapSolrParams(map))); @@ -117,7 +117,7 @@ public class UpdateRequestHandler extends ContentStreamHandlerBase { protected void setAssumeContentType(String ct) { if(invariants==null) { - Map map = new HashMap(); + Map map = new HashMap<>(); map.put(UpdateParams.ASSUME_CONTENT_TYPE,ct); invariants = new MapSolrParams(map); } @@ -133,7 +133,7 @@ public class UpdateRequestHandler extends ContentStreamHandlerBase { if(args!=null) { p = SolrParams.toSolrParams(args); } - Map registry = new HashMap(); + Map registry = new HashMap<>(); registry.put("application/xml", new XMLLoader().init(p) ); registry.put("application/json", new JsonLoader().init(p) ); registry.put("application/csv", new CSVLoader().init(p) ); diff 
--git a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java index e387a714e2c..6326a1c4acf 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CollectionsHandler.java @@ -401,7 +401,7 @@ public class CollectionsHandler extends RequestHandlerBase { } private static void copyIfNotNull(SolrParams params, Map props, String... keys) { - ArrayList prefixes = new ArrayList(1); + ArrayList prefixes = new ArrayList<>(1); if(keys !=null){ for (String key : keys) { if(key.endsWith(".")) { @@ -444,7 +444,7 @@ public class CollectionsHandler extends RequestHandlerBase { String name = req.getParams().required().get(ZkStateReader.COLLECTION_PROP); String shard = req.getParams().required().get(ZkStateReader.SHARD_ID_PROP); - Map props = new HashMap(); + Map props = new HashMap<>(); props.put(ZkStateReader.COLLECTION_PROP, name); props.put(Overseer.QUEUE_OPERATION, OverseerCollectionProcessor.DELETESHARD); props.put(ZkStateReader.SHARD_ID_PROP, shard); @@ -473,7 +473,7 @@ public class CollectionsHandler extends RequestHandlerBase { "Only one of 'ranges' or 'split.key' should be specified"); } - Map props = new HashMap(); + Map props = new HashMap<>(); props.put(Overseer.QUEUE_OPERATION, OverseerCollectionProcessor.SPLITSHARD); props.put("collection", name); if (shard != null) { @@ -495,7 +495,7 @@ public class CollectionsHandler extends RequestHandlerBase { private void handleMigrate(SolrQueryRequest req, SolrQueryResponse rsp) throws KeeperException, InterruptedException { log.info("Migrate action invoked: " + req.getParamString()); req.getParams().required().check("collection", "split.key", "target.collection"); - Map props = new HashMap(); + Map props = new HashMap<>(); props.put(Overseer.QUEUE_OPERATION, OverseerCollectionProcessor.MIGRATE); copyIfNotNull(req.getParams(), props, "collection", "split.key", "target.collection", "forward.timeout"); ZkNodeProps m = new ZkNodeProps(props); @@ -504,7 +504,7 @@ public class CollectionsHandler extends RequestHandlerBase { private void handleAddReplica(SolrQueryRequest req, SolrQueryResponse rsp) throws KeeperException, InterruptedException { log.info("Add replica action invoked: " + req.getParamString()); - Map props = new HashMap(); + Map props = new HashMap<>(); props.put(Overseer.QUEUE_OPERATION, CollectionAction.ADDREPLICA.toString()); copyIfNotNull(req.getParams(), props, COLLECTION_PROP, "node", SHARD_ID_PROP, ShardParams._ROUTE_, CoreAdminParams.NAME, CoreAdminParams.INSTANCE_DIR, CoreAdminParams.DATA_DIR); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java index 456dbb9bfba..50165d2a59a 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java @@ -255,7 +255,7 @@ public class CoreAdminHandler extends RequestHandlerBase { if (rangesArr.length == 0) { throw new SolrException(ErrorCode.BAD_REQUEST, "There must be at least one range specified to split an index"); } else { - ranges = new ArrayList(rangesArr.length); + ranges = new ArrayList<>(rangesArr.length); for (String r : rangesArr) { try { ranges.add(DocRouter.DEFAULT.fromString(r)); @@ -304,7 +304,7 @@ public class CoreAdminHandler extends RequestHandlerBase { } if (pathsArr == null) { - newCores = new 
ArrayList(partitions); + newCores = new ArrayList<>(partitions); for (String newCoreName : newCoreNames) { SolrCore newcore = coreContainer.getCore(newCoreName); if (newcore != null) { @@ -705,7 +705,7 @@ public class CoreAdminHandler extends RequestHandlerBase { String indexInfo = params.get(CoreAdminParams.INDEX_INFO); boolean isIndexInfoNeeded = Boolean.parseBoolean(null == indexInfo ? "true" : indexInfo); boolean doPersist = false; - NamedList status = new SimpleOrderedMap(); + NamedList status = new SimpleOrderedMap<>(); Map allFailures = coreContainer.getCoreInitFailures(); try { if (cname == null) { @@ -832,7 +832,7 @@ public class CoreAdminHandler extends RequestHandlerBase { if (core != null) { syncStrategy = new SyncStrategy(core.getCoreDescriptor().getCoreContainer()); - Map props = new HashMap(); + Map props = new HashMap<>(); props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl()); props.put(ZkStateReader.CORE_NAME_PROP, cname); props.put(ZkStateReader.NODE_NAME_PROP, zkController.getNodeName()); @@ -1077,7 +1077,7 @@ public class CoreAdminHandler extends RequestHandlerBase { * @throws IOException - LukeRequestHandler can throw an I/O exception */ protected NamedList getCoreStatus(CoreContainer cores, String cname, boolean isIndexInfoNeeded) throws IOException { - NamedList info = new SimpleOrderedMap(); + NamedList info = new SimpleOrderedMap<>(); if (!cores.isLoaded(cname)) { // Lazily-loaded core, fill in what we can. // It would be a real mistake to load the cores just to get the status diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java index 6e26e869e34..94983229086 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/LoggingHandler.java @@ -118,7 +118,7 @@ public class LoggingHandler extends RequestHandlerBase implements SolrCoreAware return; } else { - SimpleOrderedMap info = new SimpleOrderedMap(); + SimpleOrderedMap info = new SimpleOrderedMap<>(); if(time>0) { info.add("since", time); info.add("found", found); @@ -137,10 +137,10 @@ public class LoggingHandler extends RequestHandlerBase implements SolrCoreAware else { rsp.add("levels", watcher.getAllLevels()); - List loggers = new ArrayList(watcher.getAllLoggers()); + List loggers = new ArrayList<>(watcher.getAllLoggers()); Collections.sort(loggers); - List> info = new ArrayList>(); + List> info = new ArrayList<>(); for(LoggerInfo wrap:loggers) { info.add(wrap.getInfo()); } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java index d521cb2fd44..af4ebe6d1af 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java @@ -147,7 +147,7 @@ public class LukeRequestHandler extends RequestHandlerBase SimpleOrderedMap info = getDocumentFieldsInfo( doc, docId, reader, schema ); - SimpleOrderedMap docinfo = new SimpleOrderedMap(); + SimpleOrderedMap docinfo = new SimpleOrderedMap<>(); docinfo.add( "docId", docId ); docinfo.add( "lucene", info ); docinfo.add( "solr", doc ); @@ -161,7 +161,7 @@ public class LukeRequestHandler extends RequestHandlerBase } // Add some generally helpful information - NamedList info = new SimpleOrderedMap(); + NamedList info = new SimpleOrderedMap<>(); info.add( "key", getFieldFlagsKey() ); info.add( 
"NOTE", "Document Frequency (df) is not updated when a document is marked for deletion. df values include deleted documents." ); rsp.add( "info", info ); @@ -241,7 +241,7 @@ public class LukeRequestHandler extends RequestHandlerBase * @return a key to what each character means */ public static SimpleOrderedMap getFieldFlagsKey() { - SimpleOrderedMap key = new SimpleOrderedMap(); + SimpleOrderedMap key = new SimpleOrderedMap<>(); for (FieldFlag f : FieldFlag.values()) { key.add(String.valueOf(f.getAbbreviation()), f.getDisplay() ); } @@ -252,10 +252,10 @@ public class LukeRequestHandler extends RequestHandlerBase IndexSchema schema ) throws IOException { final CharsRef spare = new CharsRef(); - SimpleOrderedMap finfo = new SimpleOrderedMap(); + SimpleOrderedMap finfo = new SimpleOrderedMap<>(); for( Object o : doc.getFields() ) { Field field = (Field)o; - SimpleOrderedMap f = new SimpleOrderedMap(); + SimpleOrderedMap f = new SimpleOrderedMap<>(); SchemaField sfield = schema.getFieldOrNull( field.name() ); FieldType ftype = (sfield==null)?null:sfield.getType(); @@ -283,7 +283,7 @@ public class LukeRequestHandler extends RequestHandlerBase try { Terms v = reader.getTermVector( docId, field.name() ); if( v != null ) { - SimpleOrderedMap tfv = new SimpleOrderedMap(); + SimpleOrderedMap tfv = new SimpleOrderedMap<>(); final TermsEnum termsEnum = v.iterator(null); BytesRef text; while((text = termsEnum.next()) != null) { @@ -313,27 +313,27 @@ public class LukeRequestHandler extends RequestHandlerBase Set fields = null; String fl = params.get(CommonParams.FL); if (fl != null) { - fields = new TreeSet(Arrays.asList(fl.split( "[,\\s]+" ))); + fields = new TreeSet<>(Arrays.asList(fl.split( "[,\\s]+" ))); } AtomicReader reader = searcher.getAtomicReader(); IndexSchema schema = searcher.getSchema(); // Don't be tempted to put this in the loop below, the whole point here is to alphabetize the fields! - Set fieldNames = new TreeSet(); + Set fieldNames = new TreeSet<>(); for(FieldInfo fieldInfo : reader.getFieldInfos()) { fieldNames.add(fieldInfo.name); } // Walk the term enum and keep a priority queue for each map in our set - SimpleOrderedMap finfo = new SimpleOrderedMap(); + SimpleOrderedMap finfo = new SimpleOrderedMap<>(); for (String fieldName : fieldNames) { if (fields != null && ! fields.contains(fieldName) && ! 
fields.contains("*")) { continue; //we're not interested in this field Still an issue here } - SimpleOrderedMap fieldMap = new SimpleOrderedMap(); + SimpleOrderedMap fieldMap = new SimpleOrderedMap<>(); SchemaField sfield = schema.getFieldOrNull( fieldName ); FieldType ftype = (sfield==null)?null:sfield.getType(); @@ -408,21 +408,21 @@ public class LukeRequestHandler extends RequestHandlerBase * Return info from the index */ private static SimpleOrderedMap getSchemaInfo( IndexSchema schema ) { - Map> typeusemap = new TreeMap>(); - Map fields = new TreeMap(); + Map> typeusemap = new TreeMap<>(); + Map fields = new TreeMap<>(); SchemaField uniqueField = schema.getUniqueKeyField(); for( SchemaField f : schema.getFields().values() ) { populateFieldInfo(schema, typeusemap, fields, uniqueField, f); } - Map dynamicFields = new TreeMap(); + Map dynamicFields = new TreeMap<>(); for (SchemaField f : schema.getDynamicFieldPrototypes()) { populateFieldInfo(schema, typeusemap, dynamicFields, uniqueField, f); } - SimpleOrderedMap types = new SimpleOrderedMap(); - Map sortedTypes = new TreeMap(schema.getFieldTypes()); + SimpleOrderedMap types = new SimpleOrderedMap<>(); + Map sortedTypes = new TreeMap<>(schema.getFieldTypes()); for( FieldType ft : sortedTypes.values() ) { - SimpleOrderedMap field = new SimpleOrderedMap(); + SimpleOrderedMap field = new SimpleOrderedMap<>(); field.add("fields", typeusemap.get( ft.getTypeName() ) ); field.add("tokenized", ft.isTokenized() ); field.add("className", ft.getClass().getName()); @@ -433,15 +433,15 @@ public class LukeRequestHandler extends RequestHandlerBase } // Must go through this to maintain binary compatbility. Putting a TreeMap into a resp leads to casting errors - SimpleOrderedMap finfo = new SimpleOrderedMap(); + SimpleOrderedMap finfo = new SimpleOrderedMap<>(); - SimpleOrderedMap fieldsSimple = new SimpleOrderedMap(); + SimpleOrderedMap fieldsSimple = new SimpleOrderedMap<>(); for (Map.Entry ent : fields.entrySet()) { fieldsSimple.add(ent.getKey(), ent.getValue()); } finfo.add("fields", fieldsSimple); - SimpleOrderedMap dynamicSimple = new SimpleOrderedMap(); + SimpleOrderedMap dynamicSimple = new SimpleOrderedMap<>(); for (Map.Entry ent : dynamicFields.entrySet()) { dynamicSimple.add(ent.getKey(), ent.getValue()); } @@ -455,7 +455,7 @@ public class LukeRequestHandler extends RequestHandlerBase } private static SimpleOrderedMap getSimilarityInfo(Similarity similarity) { - SimpleOrderedMap toReturn = new SimpleOrderedMap(); + SimpleOrderedMap toReturn = new SimpleOrderedMap<>(); if (similarity != null) { toReturn.add("className", similarity.getClass().getName()); toReturn.add("details", similarity.toString()); @@ -464,16 +464,16 @@ public class LukeRequestHandler extends RequestHandlerBase } private static SimpleOrderedMap getAnalyzerInfo(Analyzer analyzer) { - SimpleOrderedMap aninfo = new SimpleOrderedMap(); + SimpleOrderedMap aninfo = new SimpleOrderedMap<>(); aninfo.add("className", analyzer.getClass().getName()); if (analyzer instanceof TokenizerChain) { TokenizerChain tchain = (TokenizerChain)analyzer; CharFilterFactory[] cfiltfacs = tchain.getCharFilterFactories(); - SimpleOrderedMap> cfilters = new SimpleOrderedMap>(); + SimpleOrderedMap> cfilters = new SimpleOrderedMap<>(); for (CharFilterFactory cfiltfac : cfiltfacs) { - Map tok = new HashMap(); + Map tok = new HashMap<>(); String className = cfiltfac.getClass().getName(); tok.put("className", className); tok.put("args", cfiltfac.getOriginalArgs()); @@ -483,16 +483,16 @@ public class 
LukeRequestHandler extends RequestHandlerBase aninfo.add("charFilters", cfilters); } - SimpleOrderedMap tokenizer = new SimpleOrderedMap(); + SimpleOrderedMap tokenizer = new SimpleOrderedMap<>(); TokenizerFactory tfac = tchain.getTokenizerFactory(); tokenizer.add("className", tfac.getClass().getName()); tokenizer.add("args", tfac.getOriginalArgs()); aninfo.add("tokenizer", tokenizer); TokenFilterFactory[] filtfacs = tchain.getTokenFilterFactories(); - SimpleOrderedMap> filters = new SimpleOrderedMap>(); + SimpleOrderedMap> filters = new SimpleOrderedMap<>(); for (TokenFilterFactory filtfac : filtfacs) { - Map tok = new HashMap(); + Map tok = new HashMap<>(); String className = filtfac.getClass().getName(); tok.put("className", className); tok.put("args", filtfac.getOriginalArgs()); @@ -509,7 +509,7 @@ public class LukeRequestHandler extends RequestHandlerBase Map> typeusemap, Map fields, SchemaField uniqueField, SchemaField f) { FieldType ft = f.getType(); - SimpleOrderedMap field = new SimpleOrderedMap(); + SimpleOrderedMap field = new SimpleOrderedMap<>(); field.add( "type", ft.getTypeName() ); field.add( "flags", getFieldFlags(f) ); if( f.isRequired() ) { @@ -532,7 +532,7 @@ public class LukeRequestHandler extends RequestHandlerBase List v = typeusemap.get( ft.getTypeName() ); if( v == null ) { - v = new ArrayList(); + v = new ArrayList<>(); } v.add( f.getName() ); typeusemap.put( ft.getTypeName(), v ); @@ -550,7 +550,7 @@ public class LukeRequestHandler extends RequestHandlerBase public static SimpleOrderedMap getIndexInfo(DirectoryReader reader) throws IOException { Directory dir = reader.directory(); - SimpleOrderedMap indexInfo = new SimpleOrderedMap(); + SimpleOrderedMap indexInfo = new SimpleOrderedMap<>(); indexInfo.add("numDocs", reader.numDocs()); indexInfo.add("maxDoc", reader.maxDoc()); @@ -638,14 +638,14 @@ public class LukeRequestHandler extends RequestHandlerBase } private static List toListOfStrings(SchemaField[] raw) { - List result = new ArrayList(raw.length); + List result = new ArrayList<>(raw.length); for (SchemaField f : raw) { result.add(f.getName()); } return result; } private static List toListOfStringDests(List raw) { - List result = new ArrayList(raw.size()); + List result = new ArrayList<>(raw.size()); for (CopyField f : raw) { result.add(f.getDestination().getName()); } @@ -689,7 +689,7 @@ public class LukeRequestHandler extends RequestHandlerBase // TODO? should this be a list or a map? public NamedList toNamedList() { - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); for( int bucket = 0; bucket <= _maxBucket; bucket++ ) { nl.add( ""+ (1 << bucket), _buckets[bucket] ); } @@ -733,12 +733,12 @@ public class LukeRequestHandler extends RequestHandlerBase public NamedList toNamedList( IndexSchema schema ) { // reverse the list.. 
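The toNamedList code above drains LukeRequestHandler's term priority queue by popping each element into position 0 of a LinkedList, turning smallest-first pops into a largest-first list. The sketch below shows the same idiom, with java.util.PriorityQueue standing in for Lucene's PriorityQueue and Integer standing in for TermInfo.

```java
import java.util.LinkedList;
import java.util.List;
import java.util.PriorityQueue;

class TopTermsSketch {
    // Popping a priority queue yields elements smallest-first; inserting each
    // at index 0 of a LinkedList reverses that, producing a largest-first
    // list, which is what a "top terms" response wants.
    static List<Integer> drainDescending(PriorityQueue<Integer> pq) {
        List<Integer> aslist = new LinkedList<>(); // diamond: infers <Integer>
        while (!pq.isEmpty()) {
            aslist.add(0, pq.poll()); // O(1) insertion at the head of a LinkedList
        }
        return aslist;
    }
}
```

Using a LinkedList rather than an ArrayList here keeps each head insertion constant-time instead of shifting the whole array.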
- List aslist = new LinkedList(); + List aslist = new LinkedList<>(); while( size() > 0 ) { aslist.add( 0, (TermInfo)pop() ); } - NamedList list = new NamedList(); + NamedList list = new NamedList<>(); for (TermInfo i : aslist) { String txt = i.term.text(); SchemaField ft = schema.getFieldOrNull( i.term.field() ); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java index c8a64f9cb74..3a1f3e657f8 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/PluginInfoHandler.java @@ -46,10 +46,10 @@ public class PluginInfoHandler extends RequestHandlerBase private static SimpleOrderedMap getSolrInfoBeans( SolrCore core, boolean stats ) { - SimpleOrderedMap list = new SimpleOrderedMap(); + SimpleOrderedMap list = new SimpleOrderedMap<>(); for (SolrInfoMBean.Category cat : SolrInfoMBean.Category.values()) { - SimpleOrderedMap category = new SimpleOrderedMap(); + SimpleOrderedMap category = new SimpleOrderedMap<>(); list.add( cat.name(), category ); Map reg = core.getInfoRegistry(); for (Map.Entry entry : reg.entrySet()) { @@ -57,7 +57,7 @@ public class PluginInfoHandler extends RequestHandlerBase if (m.getCategory() != cat) continue; String na = "Not Declared"; - SimpleOrderedMap info = new SimpleOrderedMap(); + SimpleOrderedMap info = new SimpleOrderedMap<>(); category.add( entry.getKey(), info ); info.add( "name", (m.getName() !=null ? m.getName() : na) ); @@ -67,7 +67,7 @@ public class PluginInfoHandler extends RequestHandlerBase URL[] urls = m.getDocs(); if ((urls != null) && (urls.length > 0)) { - ArrayList docs = new ArrayList(urls.length); + ArrayList docs = new ArrayList<>(urls.length); for( URL u : urls ) { docs.add( u.toExternalForm() ); } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java index b8a7890f99d..b6eb2b1566c 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java @@ -36,7 +36,7 @@ public class PropertiesRequestHandler extends RequestHandlerBase Object props = null; String name = req.getParams().get( "name" ); if( name != null ) { - NamedList p = new SimpleOrderedMap(); + NamedList p = new SimpleOrderedMap<>(); p.add( name, System.getProperty(name) ); props = p; } diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java index d8314258be2..74d965fc0cf 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java @@ -113,7 +113,7 @@ public class ShowFileRequestHandler extends RequestHandlerBase public static Set initHidden(SolrParams invariants) { - Set hiddenRet = new HashSet(); + Set hiddenRet = new HashSet<>(); // Build a list of hidden files if (invariants != null) { String[] hidden = invariants.getParams(HIDDEN); @@ -155,13 +155,13 @@ public class ShowFileRequestHandler extends RequestHandlerBase List children = zkClient.getChildren(adminFile, null, true); if (children.size() > 0) { - NamedList> files = new SimpleOrderedMap>(); + NamedList> files = new SimpleOrderedMap<>(); for (String f : children) { if (isHiddenFile(req, rsp, f, 
false, hiddenFiles)) { continue; } - SimpleOrderedMap fileInfo = new SimpleOrderedMap(); + SimpleOrderedMap fileInfo = new SimpleOrderedMap<>(); files.add(f, fileInfo); List fchildren = zkClient.getChildren(adminFile + "/" + f, null, true); if (fchildren.size() > 0) { @@ -216,7 +216,7 @@ public class ShowFileRequestHandler extends RequestHandlerBase if( adminFile.isDirectory() ) { // it's really a directory, just go for it. int basePath = adminFile.getAbsolutePath().length() + 1; - NamedList> files = new SimpleOrderedMap>(); + NamedList> files = new SimpleOrderedMap<>(); for( File f : adminFile.listFiles() ) { String path = f.getAbsolutePath().substring( basePath ); path = path.replace( '\\', '/' ); // normalize slashes @@ -225,7 +225,7 @@ public class ShowFileRequestHandler extends RequestHandlerBase continue; } - SimpleOrderedMap fileInfo = new SimpleOrderedMap(); + SimpleOrderedMap fileInfo = new SimpleOrderedMap<>(); files.add( path, fileInfo ); if( f.isDirectory() ) { fileInfo.add( "directory", true ); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java index 33fc21c84ab..36984401dfb 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/SolrInfoMBeanHandler.java @@ -52,7 +52,7 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase { * Set is guaranteed to never be null (but may be empty) */ private Set arrayToSet(Object[] arr) { - HashSet r = new HashSet(); + HashSet r = new HashSet<>(); if (null == arr) return r; for (Object o : arr) { if (null != o) r.add(o.toString()); @@ -114,7 +114,7 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase { protected NamedList>> getMBeanInfo(SolrQueryRequest req) { - NamedList>> cats = new NamedList>>(); + NamedList>> cats = new NamedList<>(); String[] requestedCats = req.getParams().getParams("cat"); if (null == requestedCats || 0 == requestedCats.length) { @@ -139,7 +139,7 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase { NamedList> catInfo = cats.get(m.getCategory().name()); if ( null == catInfo ) continue; - NamedList mBeanInfo = new SimpleOrderedMap(); + NamedList mBeanInfo = new SimpleOrderedMap<>(); mBeanInfo.add("class", m.getName()); mBeanInfo.add("version", m.getVersion()); mBeanInfo.add("description", m.getDescription()); @@ -148,7 +148,7 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase { // Use an external form URL[] urls = m.getDocs(); if(urls!=null) { - List docs = new ArrayList(urls.length); + List docs = new ArrayList<>(urls.length); for(URL url : urls) { docs.add(url.toExternalForm()); } @@ -168,7 +168,7 @@ public class SolrInfoMBeanHandler extends RequestHandlerBase { NamedList>> now, boolean includeAll ) { - NamedList>> changed = new NamedList>>(); + NamedList>> changed = new NamedList<>(); // Cycle through each category for(int i=0;i> cat = new SimpleOrderedMap>(); + NamedList> cat = new SimpleOrderedMap<>(); for(int j=0;j ref_bean = ref_cat.get(name); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java index 929b3079005..b7367b2f1e6 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/SystemInfoHandler.java @@ -112,7 +112,7 @@ public class SystemInfoHandler extends RequestHandlerBase * Get
system info */ private SimpleOrderedMap getCoreInfo( SolrCore core, IndexSchema schema ) { - SimpleOrderedMap info = new SimpleOrderedMap(); + SimpleOrderedMap info = new SimpleOrderedMap<>(); info.add( "schema", schema != null ? schema.getSchemaName():"no schema!" ); @@ -126,7 +126,7 @@ public class SystemInfoHandler extends RequestHandlerBase info.add( "start", new Date(core.getStartTime()) ); // Solr Home - SimpleOrderedMap dirs = new SimpleOrderedMap(); + SimpleOrderedMap dirs = new SimpleOrderedMap<>(); dirs.add( "cwd" , new File( System.getProperty("user.dir")).getAbsolutePath() ); dirs.add( "instance", new File( core.getResourceLoader().getInstanceDir() ).getAbsolutePath() ); try { @@ -150,7 +150,7 @@ public class SystemInfoHandler extends RequestHandlerBase * Get system info */ public static SimpleOrderedMap getSystemInfo() { - SimpleOrderedMap info = new SimpleOrderedMap(); + SimpleOrderedMap info = new SimpleOrderedMap<>(); OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean(); info.add( "name", os.getName() ); @@ -240,7 +240,7 @@ public class SystemInfoHandler extends RequestHandlerBase */ public static SimpleOrderedMap getJvmInfo() { - SimpleOrderedMap jvm = new SimpleOrderedMap(); + SimpleOrderedMap jvm = new SimpleOrderedMap<>(); final String javaVersion = System.getProperty("java.specification.version", "unknown"); final String javaVendor = System.getProperty("java.specification.vendor", "unknown"); @@ -256,16 +256,16 @@ public class SystemInfoHandler extends RequestHandlerBase jvm.add( "name", jreVendor + " " + vmName ); // details - SimpleOrderedMap java = new SimpleOrderedMap(); + SimpleOrderedMap java = new SimpleOrderedMap<>(); java.add( "vendor", javaVendor ); java.add( "name", javaName ); java.add( "version", javaVersion ); jvm.add( "spec", java ); - SimpleOrderedMap jre = new SimpleOrderedMap(); + SimpleOrderedMap jre = new SimpleOrderedMap<>(); jre.add( "vendor", jreVendor ); jre.add( "version", jreVersion ); jvm.add( "jre", jre ); - SimpleOrderedMap vm = new SimpleOrderedMap(); + SimpleOrderedMap vm = new SimpleOrderedMap<>(); vm.add( "vendor", vmVendor ); vm.add( "name", vmName ); vm.add( "version", vmVersion ); @@ -278,8 +278,8 @@ public class SystemInfoHandler extends RequestHandlerBase // not thread safe, but could be thread local DecimalFormat df = new DecimalFormat("#.#", DecimalFormatSymbols.getInstance(Locale.ROOT)); - SimpleOrderedMap mem = new SimpleOrderedMap(); - SimpleOrderedMap raw = new SimpleOrderedMap(); + SimpleOrderedMap mem = new SimpleOrderedMap<>(); + SimpleOrderedMap raw = new SimpleOrderedMap<>(); long free = runtime.freeMemory(); long max = runtime.maxMemory(); long total = runtime.totalMemory(); @@ -300,7 +300,7 @@ public class SystemInfoHandler extends RequestHandlerBase jvm.add("memory", mem); // JMX properties -- probably should be moved to a different handler - SimpleOrderedMap jmx = new SimpleOrderedMap(); + SimpleOrderedMap jmx = new SimpleOrderedMap<>(); try{ RuntimeMXBean mx = ManagementFactory.getRuntimeMXBean(); jmx.add( "bootclasspath", mx.getBootClassPath()); @@ -322,7 +322,7 @@ public class SystemInfoHandler extends RequestHandlerBase } private static SimpleOrderedMap getLuceneInfo() { - SimpleOrderedMap info = new SimpleOrderedMap(); + SimpleOrderedMap info = new SimpleOrderedMap<>(); Package p = SolrCore.class.getPackage(); diff --git a/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java index 
1af19bfb05c..997ac53d13e 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java @@ -38,13 +38,13 @@ public class ThreadDumpHandler extends RequestHandlerBase @Override public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException { - SimpleOrderedMap system = new SimpleOrderedMap(); + SimpleOrderedMap system = new SimpleOrderedMap<>(); rsp.add( "system", system ); ThreadMXBean tmbean = ManagementFactory.getThreadMXBean(); // Thread Count - SimpleOrderedMap nl = new SimpleOrderedMap(); + SimpleOrderedMap nl = new SimpleOrderedMap<>(); nl.add( "current",tmbean.getThreadCount() ); nl.add( "peak", tmbean.getPeakThreadCount() ); nl.add( "daemon", tmbean.getDaemonThreadCount() ); @@ -55,7 +55,7 @@ public class ThreadDumpHandler extends RequestHandlerBase long[] tids = tmbean.findMonitorDeadlockedThreads(); if (tids != null) { tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE); - NamedList> lst = new NamedList>(); + NamedList> lst = new NamedList<>(); for (ThreadInfo ti : tinfos) { if (ti != null) { lst.add( "thread", getThreadInfo( ti, tmbean ) ); @@ -67,7 +67,7 @@ public class ThreadDumpHandler extends RequestHandlerBase // Now show all the threads.... tids = tmbean.getAllThreadIds(); tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE); - NamedList> lst = new NamedList>(); + NamedList> lst = new NamedList<>(); for (ThreadInfo ti : tinfos) { if (ti != null) { lst.add( "thread", getThreadInfo( ti, tmbean ) ); @@ -81,7 +81,7 @@ public class ThreadDumpHandler extends RequestHandlerBase //-------------------------------------------------------------------------------- private static SimpleOrderedMap getThreadInfo( ThreadInfo ti, ThreadMXBean tmbean ) { - SimpleOrderedMap info = new SimpleOrderedMap(); + SimpleOrderedMap info = new SimpleOrderedMap<>(); long tid = ti.getThreadId(); info.add( "id", tid ); @@ -104,7 +104,7 @@ public class ThreadDumpHandler extends RequestHandlerBase } if (ti.getLockOwnerName() != null) { - SimpleOrderedMap owner = new SimpleOrderedMap(); + SimpleOrderedMap owner = new SimpleOrderedMap<>(); owner.add( "name", ti.getLockOwnerName() ); owner.add( "id", ti.getLockOwnerId() ); } diff --git a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java b/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java index 5a35a0356ee..b755e8ed572 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/DebugComponent.java @@ -106,7 +106,7 @@ public class DebugComponent extends SearchComponent if (null != rb.getDebugInfo() ) { if (rb.isDebugQuery() && null != rb.getFilters() ) { info.add("filter_queries",rb.req.getParams().getParams(FQ)); - List fqs = new ArrayList(rb.getFilters().size()); + List fqs = new ArrayList<>(rb.getFilters().size()); for (Query fq : rb.getFilters()) { fqs.add(QueryParsing.toString(fq, rb.req.getSchema())); } @@ -175,7 +175,7 @@ public class DebugComponent extends SearchComponent @SuppressWarnings("unchecked") NamedList stageList = (NamedList) ((NamedList)rb.getDebugInfo().get("track")).get(stages.get(rb.stage)); if(stageList == null) { - stageList = new NamedList(); + stageList = new NamedList<>(); rb.addDebug(stageList, "track", stages.get(rb.stage)); } for(ShardResponse response: sreq.responses) { @@ -184,7 +184,7 @@ public class DebugComponent extends SearchComponent } } - private Set excludeSet 
= new HashSet(Arrays.asList("explain")); + private Set excludeSet = new HashSet<>(Arrays.asList("explain")); @Override public void finishStage(ResponseBuilder rb) { @@ -207,19 +207,19 @@ public class DebugComponent extends SearchComponent // TODO: lookup won't work for non-string ids... String vs Float ShardDoc sdoc = rb.resultIds.get(id); int idx = sdoc.positionInResponse; - arr[idx] = new NamedList.NamedListEntry(id, sexplain.getVal(i)); + arr[idx] = new NamedList.NamedListEntry<>(id, sexplain.getVal(i)); } } } } if (rb.isDebugResults()) { - explain = SolrPluginUtils.removeNulls(new SimpleOrderedMap(arr)); + explain = SolrPluginUtils.removeNulls(new SimpleOrderedMap<>(arr)); } if (info == null) { // No responses were received from shards. Show local query info. - info = new SimpleOrderedMap(); + info = new SimpleOrderedMap<>(); SolrPluginUtils.doStandardQueryDebug( rb.req, rb.getQueryString(), rb.getQuery(), rb.isDebugQuery(), info); if (rb.isDebugQuery() && rb.getQparser() != null) { @@ -243,7 +243,7 @@ public class DebugComponent extends SearchComponent private NamedList getTrackResponse(ShardResponse shardResponse) { - NamedList namedList = new NamedList(); + NamedList namedList = new NamedList<>(); NamedList responseNL = shardResponse.getSolrResponse().getResponse(); @SuppressWarnings("unchecked") NamedList responseHeader = (NamedList)responseNL.get("responseHeader"); @@ -295,7 +295,7 @@ public class DebugComponent extends SearchComponent if (source instanceof NamedList && dest instanceof NamedList) { - NamedList tmp = new NamedList(); + NamedList tmp = new NamedList<>(); @SuppressWarnings("unchecked") NamedList sl = (NamedList)source; @SuppressWarnings("unchecked") @@ -329,7 +329,7 @@ public class DebugComponent extends SearchComponent } // merge unlike elements in a list - List t = new ArrayList(); + List t = new ArrayList<>(); t.add(dest); t.add(source); return t; diff --git a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java index cb952f6f405..841540a6952 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/FacetComponent.java @@ -141,7 +141,7 @@ public class FacetComponent extends SearchComponent } if (refinements == null) { - refinements = new ArrayList(); + refinements = new ArrayList<>(); } refinements.add(facetCommand); @@ -470,7 +470,7 @@ public class FacetComponent extends SearchComponent dff.needRefinements = true; List lst = dff._toRefine[shardNum]; if (lst == null) { - lst = dff._toRefine[shardNum] = new ArrayList(); + lst = dff._toRefine[shardNum] = new ArrayList<>(); } lst.add(sfc.name); } @@ -526,19 +526,19 @@ public class FacetComponent extends SearchComponent FacetInfo fi = rb._facetInfo; - NamedList facet_counts = new SimpleOrderedMap(); + NamedList facet_counts = new SimpleOrderedMap<>(); - NamedList facet_queries = new SimpleOrderedMap(); + NamedList facet_queries = new SimpleOrderedMap<>(); facet_counts.add("facet_queries",facet_queries); for (QueryFacet qf : fi.queryFacets.values()) { facet_queries.add(qf.getKey(), num(qf.count)); } - NamedList facet_fields = new SimpleOrderedMap(); + NamedList facet_fields = new SimpleOrderedMap<>(); facet_counts.add("facet_fields", facet_fields); for (DistribFieldFacet dff : fi.facets.values()) { - NamedList fieldCounts = new NamedList(); // order is more important for facets + NamedList fieldCounts = new NamedList<>(); // order is more 
important for facets facet_fields.add(dff.getKey(), fieldCounts); ShardFacetCount[] counts; @@ -634,13 +634,13 @@ public class FacetComponent extends SearchComponent public LinkedHashMap queryFacets; public LinkedHashMap facets; public SimpleOrderedMap> dateFacets - = new SimpleOrderedMap>(); + = new SimpleOrderedMap<>(); public SimpleOrderedMap> rangeFacets - = new SimpleOrderedMap>(); + = new SimpleOrderedMap<>(); void parse(SolrParams params, ResponseBuilder rb) { - queryFacets = new LinkedHashMap(); - facets = new LinkedHashMap(); + queryFacets = new LinkedHashMap<>(); + facets = new LinkedHashMap<>(); String[] facetQs = params.getParams(FacetParams.FACET_QUERY); if (facetQs != null) { @@ -766,7 +766,7 @@ public class FacetComponent extends SearchComponent // the max possible count for a missing term for each shard (indexed by shardNum) public long[] missingMax; public FixedBitSet[] counted; // a bitset for each shard, keeping track of which terms seen - public HashMap counts = new HashMap(128); + public HashMap counts = new HashMap<>(128); public int termNum; public int initialLimit; // how many terms requested in first phase diff --git a/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java b/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java index 0a8703ebfae..70d0f382082 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java +++ b/solr/core/src/java/org/apache/solr/handler/component/FieldFacetStats.java @@ -73,8 +73,8 @@ public class FieldFacetStats { topLevelReader = searcher.getAtomicReader(); valueSource = facet_sf.getType().getValueSource(facet_sf, null); - facetStatsValues = new HashMap(); - facetStatsTerms = new ArrayList>(); + facetStatsValues = new HashMap<>(); + facetStatsTerms = new ArrayList<>(); } private StatsValues getStatsValues(String key) throws IOException { diff --git a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java index 9e17ab55bd8..fea94e62bb2 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java @@ -182,7 +182,7 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni String id = hl.getName(i); ShardDoc sdoc = rb.resultIds.get(id); int idx = sdoc.positionInResponse; - arr[idx] = new NamedList.NamedListEntry(id, hl.getVal(i)); + arr[idx] = new NamedList.NamedListEntry<>(id, hl.getVal(i)); } } } diff --git a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java index 7362daa5b8d..509b2d88581 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/component/HttpShardHandler.java @@ -68,12 +68,12 @@ public class HttpShardHandler extends ShardHandler { this.httpClient = httpClient; this.httpShardHandlerFactory = httpShardHandlerFactory; completionService = httpShardHandlerFactory.newCompletionService(); - pending = new HashSet>(); + pending = new HashSet<>(); // maps "localhost:8983|localhost:7574" to a shuffled List("http://localhost:8983","http://localhost:7574") // This is primarily to keep track of what order we should use to query the replicas of a shard // so that we use the same replica for all phases of a distributed request. 
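Aside: the shardToURLs hunk that follows is typical of the whole commit: only the constructor call changes, never the declared type, so behavior is untouched. A minimal compilable sketch (my own, not from the patch) of the before/after pattern, assuming nothing beyond the JDK:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondSketch {
    public static void main(String[] args) {
        // Pre-Java-7 style: type arguments repeated on the right-hand side.
        Map<String, List<String>> before = new HashMap<String, List<String>>();
        // Java 7 diamond: javac infers <String, List<String>> from the declared type.
        Map<String, List<String>> after = new HashMap<>();
        List<String> urls = new ArrayList<>();   // inferred as ArrayList<String>
        urls.add("http://localhost:8983");
        before.put("shard1", urls);
        after.put("shard1", urls);
        System.out.println(before.equals(after)); // true: the two forms are identical at runtime
    }
}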
- shardToURLs = new HashMap>(); + shardToURLs = new HashMap<>(); } @@ -285,7 +285,7 @@ public class HttpShardHandler extends ShardHandler { if(shardKeys == null) shardKeys = params.get(ShardParams.SHARD_KEYS);//deprecated // This will be the complete list of slices we need to query for this request. - slices = new HashMap(); + slices = new HashMap<>(); // we need to find out what collections this request is for. diff --git a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java index 4764cff85c4..c53b5424a28 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java @@ -100,7 +100,7 @@ public class MoreLikeThisComponent extends SearchComponent { NamedList bQuery = mlt.getMoreLikeTheseQuery(rb .getResults().docList); - NamedList temp = new NamedList(); + NamedList temp = new NamedList<>(); Iterator> idToQueryIt = bQuery.iterator(); @@ -164,7 +164,7 @@ public class MoreLikeThisComponent extends SearchComponent { // segment ahead of result/response. if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS && rb.req.getParams().getBool(COMPONENT_NAME, false)) { - Map tempResults = new LinkedHashMap(); + Map tempResults = new LinkedHashMap<>(); int mltcount = rb.req.getParams().getInt(MoreLikeThisParams.DOC_COUNT, 5); String keyName = rb.req.getSchema().getUniqueKeyField().getName(); @@ -221,8 +221,8 @@ public class MoreLikeThisComponent extends SearchComponent { */ NamedList buildMoreLikeThisNamed( Map allMlt, Map resultIds) { - NamedList result = new NamedList(); - TreeMap sortingMap = new TreeMap(); + NamedList result = new NamedList<>(); + TreeMap sortingMap = new TreeMap<>(); for (Entry next : resultIds.entrySet()) { sortingMap.put(next.getValue().positionInResponse, next.getKey()); } @@ -241,10 +241,10 @@ public class MoreLikeThisComponent extends SearchComponent { public SolrDocumentList mergeSolrDocumentList(SolrDocumentList one, SolrDocumentList two, int maxSize, String idField) { - List l = new ArrayList(); + List l = new ArrayList<>(); // De-dup record sets. Shouldn't happen if indexed correctly. - Map map = new HashMap(); + Map map = new HashMap<>(); for (SolrDocument doc : one) { Object id = doc.getFieldValue(idField); assert id != null : doc.toString(); @@ -254,7 +254,7 @@ public class MoreLikeThisComponent extends SearchComponent { map.put(doc.getFieldValue(idField).toString(), doc); } - l = new ArrayList(map.values()); + l = new ArrayList<>(map.values()); // Comparator to sort docs based on score. null scores/docs are set to 0.
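Aside: a self-contained sketch of the de-dup-then-sort idea behind mergeSolrDocumentList above, using a hypothetical Doc stand-in rather than Solr's SolrDocument API. It is written in the Java 7 style this patch targets: the collections take the diamond, while the anonymous Comparator keeps its explicit type argument, because the diamond was not legal on anonymous classes until Java 9.

import java.util.*;

public class MergeSketch {
    static class Doc {
        final String id; final Float score;
        Doc(String id, Float score) { this.id = id; this.score = score; }
        @Override public String toString() { return id + ":" + score; }
    }

    // De-dup by id (the second list wins on collisions), then sort by score
    // descending, treating a null score as 0 - roughly the shape of the merge above.
    static List<Doc> merge(List<Doc> one, List<Doc> two, int maxSize) {
        Map<String, Doc> map = new HashMap<>();          // diamond, as in the patch
        for (Doc d : one) map.put(d.id, d);
        for (Doc d : two) map.put(d.id, d);
        List<Doc> l = new ArrayList<>(map.values());
        Collections.sort(l, new Comparator<Doc>() {      // no diamond: anonymous class (Java 7)
            @Override public int compare(Doc a, Doc b) {
                float fa = a.score == null ? 0f : a.score;
                float fb = b.score == null ? 0f : b.score;
                return Float.compare(fb, fa);            // descending
            }
        });
        return l.size() > maxSize ? l.subList(0, maxSize) : l;
    }

    public static void main(String[] args) {
        List<Doc> a = Arrays.asList(new Doc("1", 0.9f), new Doc("2", null));
        List<Doc> b = Arrays.asList(new Doc("2", 0.5f), new Doc("3", 0.7f));
        System.out.println(merge(a, b, 3)); // [1:0.9, 3:0.7, 2:0.5]
    }
}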
@@ -352,12 +352,12 @@ public class MoreLikeThisComponent extends SearchComponent { IndexSchema schema = searcher.getSchema(); MoreLikeThisHandler.MoreLikeThisHelper mltHelper = new MoreLikeThisHandler.MoreLikeThisHelper( p, searcher); - NamedList mlt = new SimpleOrderedMap(); + NamedList mlt = new SimpleOrderedMap<>(); DocIterator iterator = docs.iterator(); SimpleOrderedMap dbg = null; if (rb.isDebug()) { - dbg = new SimpleOrderedMap(); + dbg = new SimpleOrderedMap<>(); } while (iterator.hasNext()) { @@ -369,12 +369,12 @@ public class MoreLikeThisComponent extends SearchComponent { mlt.add(name, sim.docList); if (dbg != null) { - SimpleOrderedMap docDbg = new SimpleOrderedMap(); + SimpleOrderedMap docDbg = new SimpleOrderedMap<>(); docDbg.add("rawMLTQuery", mltHelper.getRawMLTQuery().toString()); docDbg .add("boostedMLTQuery", mltHelper.getBoostedMLTQuery().toString()); docDbg.add("realMLTQuery", mltHelper.getRealMLTQuery().toString()); - SimpleOrderedMap explains = new SimpleOrderedMap(); + SimpleOrderedMap explains = new SimpleOrderedMap<>(); DocIterator mltIte = sim.docList.iterator(); while (mltIte.hasNext()) { int mltid = mltIte.nextDoc(); diff --git a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java index 7606adc332e..d13eefc1da1 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java +++ b/solr/core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java @@ -60,7 +60,7 @@ public class PivotFacetHelper extends SimpleFacets if (!rb.doFacets || pivots == null) return null; - SimpleOrderedMap>> pivotResponse = new SimpleOrderedMap>>(); + SimpleOrderedMap>> pivotResponse = new SimpleOrderedMap<>(); for (String pivot : pivots) { //ex: pivot == "features,cat" or even "{!ex=mytag}features,cat" try { @@ -79,7 +79,7 @@ public class PivotFacetHelper extends SimpleFacets String field = fields[0]; String subField = fields[1]; - Deque fnames = new LinkedList(); + Deque fnames = new LinkedList<>(); for( int i=fields.length-1; i>1; i-- ) { fnames.push( fields[i] ); } @@ -106,7 +106,7 @@ public class PivotFacetHelper extends SimpleFacets String nextField = fnames.poll(); - List> values = new ArrayList>( superFacets.size() ); + List> values = new ArrayList<>( superFacets.size() ); for (Map.Entry kv : superFacets) { // Only sub-facet if parent facet has positive count - still may not be any values for the sub-field though if (kv.getValue() >= minMatch) { @@ -118,7 +118,7 @@ public class PivotFacetHelper extends SimpleFacets // constructing Term objects used in TermQueries that may be cached. BytesRef termval = null; - SimpleOrderedMap pivot = new SimpleOrderedMap(); + SimpleOrderedMap pivot = new SimpleOrderedMap<>(); pivot.add( "field", field ); if (null == fieldValue) { pivot.add( "value", null ); diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java index 71f4e4538d2..3515c6f4b02 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java @@ -163,7 +163,7 @@ public class QueryComponent extends SearchComponent if (fqs!=null && fqs.length!=0) { List filters = rb.getFilters(); // if filters already exists, make a copy instead of modifying the original - filters = filters == null ? 
new ArrayList(fqs.length) : new ArrayList(filters); + filters = filters == null ? new ArrayList<>(fqs.length) : new ArrayList<>(filters); for (String fq : fqs) { if (fq != null && fq.trim().length()!=0) { QParser fqp = QParser.getParser(fq, null, req); @@ -291,7 +291,7 @@ public class QueryComponent extends SearchComponent res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0); if (rb.isNeedDocSet()) { // TODO: create a cache for this! - List queries = new ArrayList(); + List queries = new ArrayList<>(); queries.add(rb.getQuery()); List filters = rb.getFilters(); if (filters != null) queries.addAll(filters); @@ -353,9 +353,9 @@ public class QueryComponent extends SearchComponent topGroupsParam = new String[0]; } - List> topGroups = new ArrayList>(topGroupsParam.length); + List> topGroups = new ArrayList<>(topGroupsParam.length); for (String topGroup : topGroupsParam) { - SearchGroup searchGroup = new SearchGroup(); + SearchGroup searchGroup = new SearchGroup<>(); if (!topGroup.equals(TopGroupsShardRequestFactory.GROUP_NULL_VALUE)) { searchGroup.groupValue = new BytesRef(searcher.getSchema().getField(field).getType().readableToIndexed(topGroup)); } @@ -488,7 +488,7 @@ public class QueryComponent extends SearchComponent // TODO: See SOLR-5595 boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES,false); if(fsv){ - NamedList sortVals = new NamedList(); // order is important for the sort fields + NamedList sortVals = new NamedList<>(); // order is important for the sort fields IndexReaderContext topReaderContext = searcher.getTopReaderContext(); List leaves = topReaderContext.leaves(); AtomicReaderContext currentLeaf = null; @@ -714,7 +714,7 @@ public class QueryComponent extends SearchComponent for (String field : groupSpec.getFields()) { rb.mergedTopGroups.put(field, new TopGroups(null, null, 0, 0, new GroupDocs[]{}, Float.NaN)); } - rb.resultIds = new HashMap(); + rb.resultIds = new HashMap<>(); } EndResultTransformer.SolrDocumentSource solrDocumentSource = new EndResultTransformer.SolrDocumentSource() { @@ -736,7 +736,7 @@ public class QueryComponent extends SearchComponent } else { return; } - Map combinedMap = new LinkedHashMap(); + Map combinedMap = new LinkedHashMap<>(); combinedMap.putAll(rb.mergedTopGroups); combinedMap.putAll(rb.mergedQueryCommandResults); endResultTransformer.transform(combinedMap, rb, solrDocumentSource); @@ -835,7 +835,7 @@ public class QueryComponent extends SearchComponent // id to shard mapping, to eliminate any accidental dups - HashMap uniqueDoc = new HashMap(); + HashMap uniqueDoc = new HashMap<>(); // Merge the docs via a priority queue so we don't have to sort *all* of the // documents...
we only need to order the top (rows+start) @@ -844,7 +844,7 @@ public class QueryComponent extends SearchComponent NamedList shardInfo = null; if(rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) { - shardInfo = new SimpleOrderedMap(); + shardInfo = new SimpleOrderedMap<>(); rb.rsp.getValues().add(ShardParams.SHARDS_INFO,shardInfo); } @@ -855,7 +855,7 @@ public class QueryComponent extends SearchComponent SolrDocumentList docs = null; if(shardInfo!=null) { - SimpleOrderedMap nl = new SimpleOrderedMap(); + SimpleOrderedMap nl = new SimpleOrderedMap<>(); if (srsp.getException() != null) { Throwable t = srsp.getException(); @@ -952,7 +952,7 @@ public class QueryComponent extends SearchComponent int resultSize = queue.size() - ss.getOffset(); resultSize = Math.max(0, resultSize); // there may not be any docs in range - Map resultIds = new HashMap(); + Map resultIds = new HashMap<>(); for (int i=resultSize-1; i>=0; i--) { ShardDoc shardDoc = queue.pop(); shardDoc.positionInResponse = i; @@ -1021,7 +1021,7 @@ public class QueryComponent extends SearchComponent } } SortField[] sortFields = lastCursorMark.getSortSpec().getSort().getSort(); - List nextCursorMarkValues = new ArrayList(sortFields.length); + List nextCursorMarkValues = new ArrayList<>(sortFields.length); for (SortField sf : sortFields) { if (sf.getType().equals(SortField.Type.SCORE)) { assert null != lastDoc.score : "lastDoc has null score"; @@ -1084,11 +1084,11 @@ public class QueryComponent extends SearchComponent // unless those requests always go to the final destination shard // for each shard, collect the documents for that shard. - HashMap> shardMap = new HashMap>(); + HashMap> shardMap = new HashMap<>(); for (ShardDoc sdoc : rb.resultIds.values()) { Collection shardDocs = shardMap.get(sdoc.shard); if (shardDocs == null) { - shardDocs = new ArrayList(); + shardDocs = new ArrayList<>(); shardMap.put(sdoc.shard, shardDocs); } shardDocs.add(sdoc); @@ -1119,7 +1119,7 @@ public class QueryComponent extends SearchComponent sreq.params.add(CommonParams.FL, uniqueField.getName()); } - ArrayList ids = new ArrayList(shardDocs.size()); + ArrayList ids = new ArrayList<>(shardDocs.size()); for (ShardDoc shardDoc : shardDocs) { // TODO: depending on the type, we may need more than a simple toString()? ids.add(shardDoc.id.toString()); diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java index 42543688441..3d82d491d48 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java @@ -113,7 +113,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore // The key is null if loaded from the config directory, and // is never re-loaded.
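Aside: the elevationCache field just below shows where the diamond pays off most, deeply nested type arguments. A compilable sketch with stand-in types (plain JDK collections, not QueryElevationComponent's ElevationObj):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;

public class NestedDiamondSketch {
    // Before: new WeakHashMap<Object, Map<String, List<Integer>>>() - full repetition.
    // After: the empty diamond, inferred from the field's declared type.
    static final Map<Object, Map<String, List<Integer>>> CACHE = new WeakHashMap<>();

    public static void main(String[] args) {
        Map<String, List<Integer>> perKey = new HashMap<>(); // nested level also inferred
        List<Integer> hits = new ArrayList<>();
        hits.add(42);
        perKey.put("query", hits);
        CACHE.put(null, perKey); // WeakHashMap permits a null key, as the comment above relies on
        System.out.println(CACHE.get(null)); // {query=[42]}
    }
}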
final Map> elevationCache = - new WeakHashMap>(); + new WeakHashMap<>(); class ElevationObj { final String text; @@ -127,12 +127,12 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore ElevationObj(String qstr, List elevate, List exclude) throws IOException { this.text = qstr; this.analyzed = getAnalyzedQuery(this.text); - this.ids = new HashSet(); - this.excludeIds = new HashSet(); + this.ids = new HashSet<>(); + this.excludeIds = new HashSet<>(); this.include = new BooleanQuery(); this.include.setBoost(0); - this.priority = new HashMap(); + this.priority = new HashMap<>(); int max = elevate.size() + 5; for (String id : elevate) { id = idSchemaFT.readableToIndexed(id); @@ -279,7 +279,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore //load up the elevation map private Map loadElevationMap(Config cfg) throws IOException { XPath xpath = XPathFactory.newInstance().newXPath(); - Map map = new HashMap(); + Map map = new HashMap<>(); NodeList nodes = (NodeList) cfg.evaluate("elevate/query", XPathConstants.NODESET); for (int i = 0; i < nodes.getLength(); i++) { Node node = nodes.item(i); @@ -293,8 +293,8 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore "query requires '' child"); } - ArrayList include = new ArrayList(); - ArrayList exclude = new ArrayList(); + ArrayList include = new ArrayList<>(); + ArrayList exclude = new ArrayList<>(); for (int j = 0; j < children.getLength(); j++) { Node child = children.item(j); String id = DOMUtil.getAttr(child, "id", "missing 'id'"); @@ -333,7 +333,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore Map elev = elevationCache.get(reader); if (elev == null) { - elev = new HashMap(); + elev = new HashMap<>(); elevationCache.put(reader, elev); } ElevationObj obj = new ElevationObj(query, Arrays.asList(ids), Arrays.asList(ex)); @@ -463,14 +463,14 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore List match = null; if (booster != null) { // Extract the elevated terms into a list - match = new ArrayList(booster.priority.size()); + match = new ArrayList<>(booster.priority.size()); for (Object o : booster.include.clauses()) { TermQuery tq = (TermQuery) ((BooleanClause) o).getQuery(); match.add(tq.getTerm().text()); } } - SimpleOrderedMap dbg = new SimpleOrderedMap(); + SimpleOrderedMap dbg = new SimpleOrderedMap<>(); dbg.add("q", qstr); dbg.add("match", match); if (rb.isDebugQuery()) { @@ -490,8 +490,8 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore SortField[] currentSorts = current.getSort().getSort(); List currentFields = current.getSchemaFields(); - ArrayList sorts = new ArrayList(currentSorts.length + 1); - List fields = new ArrayList(currentFields.size() + 1); + ArrayList sorts = new ArrayList<>(currentSorts.length + 1); + List fields = new ArrayList<>(currentFields.size() + 1); // Perhaps force it to always sort by score if (force && currentSorts[0].getType() != SortField.Type.SCORE) { @@ -568,7 +568,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore private int topVal; private TermsEnum termsEnum; private DocsEnum docsEnum; - Set seen = new HashSet(elevations.ids.size()); + Set seen = new HashSet<>(elevations.ids.size()); @Override public int compare(int slot1, int slot2) { diff --git a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java 
b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java index 84093dac688..4ca1c05432d 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java @@ -111,7 +111,7 @@ public class RealTimeGetComponent extends SearchComponent String[] allIds = id==null ? new String[0] : id; if (ids != null) { - List lst = new ArrayList(); + List lst = new ArrayList<>(); for (String s : allIds) { lst.add(s); } @@ -305,7 +305,7 @@ public class RealTimeGetComponent extends SearchComponent if (sf != null && schema.isCopyFieldTarget(sf)) continue; if (sf != null && sf.multiValued()) { - List vals = new ArrayList(); + List vals = new ArrayList<>(); vals.add( f ); out.setField( f.name(), vals ); } @@ -354,7 +354,7 @@ public class RealTimeGetComponent extends SearchComponent return ResponseBuilder.STAGE_DONE; } - List allIds = new ArrayList(); + List allIds = new ArrayList<>(); if (id1 != null) { for (String s : id1) { allIds.add(s); @@ -379,13 +379,13 @@ public class RealTimeGetComponent extends SearchComponent DocCollection coll = clusterState.getCollection(collection); - Map> sliceToId = new HashMap>(); + Map> sliceToId = new HashMap<>(); for (String id : allIds) { Slice slice = coll.getRouter().getTargetSlice(id, null, params, coll); List idsForShard = sliceToId.get(slice.getName()); if (idsForShard == null) { - idsForShard = new ArrayList(2); + idsForShard = new ArrayList<>(2); sliceToId.put(slice.getName(), idsForShard); } idsForShard.add(id); @@ -582,7 +582,7 @@ public class RealTimeGetComponent extends SearchComponent List versions = StrUtils.splitSmart(versionsStr, ",", true); - List updates = new ArrayList(versions.size()); + List updates = new ArrayList<>(versions.size()); long minVersion = Long.MAX_VALUE; diff --git a/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java b/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java index ac9bf6f598d..3c5b5d666ef 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java +++ b/solr/core/src/java/org/apache/solr/handler/component/ResponseBuilder.java @@ -168,12 +168,12 @@ public class ResponseBuilder SimpleOrderedMap>> _pivots; // Context fields for grouping - public final Map>> mergedSearchGroups = new HashMap>>(); - public final Map mergedGroupCounts = new HashMap(); - public final Map, Set>> searchGroupToShards = new HashMap, Set>>(); - public final Map> mergedTopGroups = new HashMap>(); - public final Map mergedQueryCommandResults = new HashMap(); - public final Map retrievedDocuments = new HashMap(); + public final Map>> mergedSearchGroups = new HashMap<>(); + public final Map mergedGroupCounts = new HashMap<>(); + public final Map, Set>> searchGroupToShards = new HashMap<>(); + public final Map> mergedTopGroups = new HashMap<>(); + public final Map mergedQueryCommandResults = new HashMap<>(); + public final Map retrievedDocuments = new HashMap<>(); public int totalHitCount; // Hit count used when distributed grouping is performed. // Used for timeAllowed parameter. First phase elapsed time is subtracted from the time allowed for the second phase. 
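Aside: not every allocation in these files could take the diamond. Java 7 forbids it on anonymous inner classes (the restriction was lifted only in Java 9), which is one reason some allocations in this patch keep their full type arguments or stay raw. An illustration with a stand-in Comparator, my own example rather than code from the patch:

import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class AnonymousDiamondSketch {
    public static void main(String[] args) {
        List<String> shards = Arrays.asList("shard2", "shard10", "shard1");
        // new Comparator<>() { ... } would be a compile error on Java 7;
        // diamond with anonymous classes only became legal in Java 9.
        Collections.sort(shards, new Comparator<String>() {
            @Override public int compare(String a, String b) {
                return a.compareTo(b);
            }
        });
        System.out.println(shards); // [shard1, shard10, shard2]
    }
}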
public int firstPhaseElapsedTime; @@ -185,14 +185,14 @@ public class ResponseBuilder public void addDebugInfo( String name, Object val ) { if( debugInfo == null ) { - debugInfo = new SimpleOrderedMap(); + debugInfo = new SimpleOrderedMap<>(); } debugInfo.add( name, val ); } public void addDebug(Object val, String... path) { if( debugInfo == null ) { - debugInfo = new SimpleOrderedMap(); + debugInfo = new SimpleOrderedMap<>(); } NamedList target = debugInfo; @@ -200,7 +200,7 @@ public class ResponseBuilder String elem = path[i]; NamedList newTarget = (NamedList)debugInfo.get(elem); if (newTarget == null) { - newTarget = new SimpleOrderedMap(); + newTarget = new SimpleOrderedMap<>(); target.add(elem, newTarget); } target = newTarget; diff --git a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java b/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java index 67c55a117f8..8ce9ef98a5a 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java @@ -64,7 +64,7 @@ public class SearchHandler extends RequestHandlerBase implements SolrCoreAware , protected List getDefaultComponents() { - ArrayList names = new ArrayList(6); + ArrayList names = new ArrayList<>(6); names.add( QueryComponent.COMPONENT_NAME ); names.add( FacetComponent.COMPONENT_NAME ); names.add( MoreLikeThisComponent.COMPONENT_NAME ); @@ -126,7 +126,7 @@ public class SearchHandler extends RequestHandlerBase implements SolrCoreAware , } // Build the component list - components = new ArrayList( list.size() ); + components = new ArrayList<>( list.size() ); DebugComponent dbgCmp = null; for(String c : list){ SearchComponent comp = core.getSearchComponent( c ); @@ -237,9 +237,9 @@ public class SearchHandler extends RequestHandlerBase implements SolrCoreAware , // a distributed request if (rb.outgoing == null) { - rb.outgoing = new LinkedList(); + rb.outgoing = new LinkedList<>(); } - rb.finished = new ArrayList(); + rb.finished = new ArrayList<>(); int nextStage = 0; do { @@ -263,7 +263,7 @@ public class SearchHandler extends RequestHandlerBase implements SolrCoreAware , if (sreq.actualShards==ShardRequest.ALL_SHARDS) { sreq.actualShards = rb.shards; } - sreq.responses = new ArrayList(); + sreq.responses = new ArrayList<>(); // TODO: map from shard to address[] for (String shard : sreq.actualShards) { diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java b/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java index 603f262d925..aeae8d6354b 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java +++ b/solr/core/src/java/org/apache/solr/handler/component/ShardDoc.java @@ -110,7 +110,7 @@ class ShardFieldSortedHitQueue extends PriorityQueue { protected SortField[] fields; /** The order of these fieldNames should correspond to the order of sort field values retrieved from the shard */ - protected List fieldNames = new ArrayList(); + protected List fieldNames = new ArrayList<>(); public ShardFieldSortedHitQueue(SortField[] fields, int size, IndexSearcher searcher) { super(size); diff --git a/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java b/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java index 57ceb437dea..53e319aa7b6 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java +++ b/solr/core/src/java/org/apache/solr/handler/component/ShardRequest.java @@ -48,7 +48,7 @@ public 
class ShardRequest { /** list of responses... filled out by framework */ - public List responses = new ArrayList(); + public List responses = new ArrayList<>(); /** actual shards to send the request to, filled out by framework */ public String[] actualShards; diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java index a2c609e3a9f..3544630c134 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java @@ -100,7 +100,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar /** * Key is the dictionary, value is the SpellChecker for that dictionary name */ - protected Map spellCheckers = new ConcurrentHashMap(); + protected Map spellCheckers = new ConcurrentHashMap<>(); protected QueryConverter queryConverter; @@ -380,7 +380,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar mergeData.origVsSuggestion.put(suggestion.getToken(), suggestion); HashSet suggested = mergeData.origVsSuggested.get(suggestion.getToken()); if (suggested == null) { - suggested = new HashSet(); + suggested = new HashSet<>(); mergeData.origVsSuggested.put(suggestion.getToken(), suggested); } @@ -470,7 +470,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar } private Collection getTokens(String q, Analyzer analyzer) throws IOException { - Collection result = new ArrayList(); + Collection result = new ArrayList<>(); assert analyzer != null; try (TokenStream ts = analyzer.tokenStream("", q)) { ts.reset(); @@ -555,7 +555,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar Token inputToken = entry.getKey(); String tokenString = new String(inputToken.buffer(), 0, inputToken .length()); - Map theSuggestions = new LinkedHashMap( + Map theSuggestions = new LinkedHashMap<>( entry.getValue()); Iterator sugIter = theSuggestions.keySet().iterator(); while (sugIter.hasNext()) { @@ -585,7 +585,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar suggestionList.add("origFreq", spellingResult .getTokenFrequency(inputToken)); - ArrayList sugs = new ArrayList(); + ArrayList sugs = new ArrayList<>(); suggestionList.add("suggestion", sugs); for (Map.Entry suggEntry : theSuggestions.entrySet()) { SimpleOrderedMap sugEntry = new SimpleOrderedMap(); @@ -660,7 +660,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar } } - Map queryConverters = new HashMap(); + Map queryConverters = new HashMap<>(); core.initPlugins(queryConverters,QueryConverter.class); //ensure that there is at least one query converter defined diff --git a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckMergeData.java b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckMergeData.java index b41ab05f280..cfdab0d3c6c 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SpellCheckMergeData.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SpellCheckMergeData.java @@ -28,16 +28,16 @@ import org.apache.solr.spelling.SpellCheckCollation; public class SpellCheckMergeData { //original token -> corresponding Suggestion object (keep track of start,end) - public Map origVsSuggestion = new HashMap(); + public Map origVsSuggestion = new HashMap<>(); // original token string -> summed up frequency - public Map 
origVsFreq = new HashMap(); + public Map origVsFreq = new HashMap<>(); // original token string -> # of shards reporting it as misspelled - public Map origVsShards = new HashMap(); + public Map origVsShards = new HashMap<>(); // original token string -> set of alternatives // must preserve order because collation algorithm can only work in-order - public Map> origVsSuggested = new LinkedHashMap>(); + public Map> origVsSuggested = new LinkedHashMap<>(); // alternative string -> corresponding SuggestWord object - public Map suggestedVsWord = new HashMap(); - public Map collations = new HashMap(); + public Map suggestedVsWord = new HashMap<>(); + public Map collations = new HashMap<>(); public int totalNumberShardResponses = 0; } diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java index 34601804370..6eb83955555 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/StatsComponent.java @@ -133,8 +133,8 @@ public class StatsComponent extends SearchComponent { StatsInfo si = rb._statsInfo; - NamedList> stats = new SimpleOrderedMap>(); - NamedList stats_fields = new SimpleOrderedMap(); + NamedList> stats = new SimpleOrderedMap<>(); + NamedList stats_fields = new SimpleOrderedMap<>(); stats.add("stats_fields", stats_fields); for (String field : si.statsFields.keySet()) { NamedList stv = si.statsFields.get(field).getStatsValues(); @@ -171,7 +171,7 @@ class StatsInfo { Map statsFields; void parse(SolrParams params, ResponseBuilder rb) { - statsFields = new HashMap(); + statsFields = new HashMap<>(); String[] statsFs = params.getParams(StatsParams.STATS_FIELD); if (statsFs != null) { @@ -205,13 +205,13 @@ class SimpleStats { } public NamedList getStatsCounts() throws IOException { - NamedList res = new SimpleOrderedMap(); + NamedList res = new SimpleOrderedMap<>(); res.add("stats_fields", getStatsFields()); return res; } public NamedList getStatsFields() throws IOException { - NamedList res = new SimpleOrderedMap(); + NamedList res = new SimpleOrderedMap<>(); String[] statsFs = params.getParams(StatsParams.STATS_FIELD); boolean isShard = params.getBool(ShardParams.IS_SHARD, false); if (null != statsFs) { @@ -249,7 +249,7 @@ class SimpleStats { final StatsValues allstats = StatsValuesFactory.createStatsValues(sf, calcDistinct); - List facetStats = new ArrayList(); + List facetStats = new ArrayList<>(); for( String facetField : facet ) { SchemaField fsf = schema.getField(facetField); diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java index 706bcd344b4..f5f1b62d3ee 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java +++ b/solr/core/src/java/org/apache/solr/handler/component/StatsValuesFactory.java @@ -88,12 +88,12 @@ abstract class AbstractStatsValues implements StatsValues { protected boolean calcDistinct = false; // facetField facetValue - protected Map> facets = new HashMap>(); + protected Map> facets = new HashMap<>(); protected AbstractStatsValues(SchemaField sf, boolean calcDistinct) { this.sf = sf; this.ft = sf.getType(); - this.distinctValues = new TreeSet(); + this.distinctValues = new TreeSet<>(); this.calcDistinct = calcDistinct; } @@ -122,7 +122,7 @@ abstract class AbstractStatsValues implements StatsValues { NamedList vals = (NamedList) 
f.getVal(i); Map addTo = facets.get(field); if (addTo == null) { - addTo = new HashMap(); + addTo = new HashMap<>(); facets.put(field, addTo); } for (int j = 0; j < vals.size(); j++) { @@ -185,7 +185,7 @@ abstract class AbstractStatsValues implements StatsValues { */ @Override public NamedList getStatsValues() { - NamedList res = new SimpleOrderedMap(); + NamedList res = new SimpleOrderedMap<>(); res.add("min", min); res.add("max", max); @@ -199,9 +199,9 @@ abstract class AbstractStatsValues implements StatsValues { addTypeSpecificStats(res); // add the facet stats - NamedList> nl = new SimpleOrderedMap>(); + NamedList> nl = new SimpleOrderedMap<>(); for (Map.Entry> entry : facets.entrySet()) { - NamedList> nl2 = new SimpleOrderedMap>(); + NamedList> nl2 = new SimpleOrderedMap<>(); nl.add(entry.getKey(), nl2); for (Map.Entry e2 : entry.getValue().entrySet()) { nl2.add(e2.getKey(), e2.getValue().getStatsValues()); diff --git a/solr/core/src/java/org/apache/solr/handler/component/SuggestComponent.java b/solr/core/src/java/org/apache/solr/handler/component/SuggestComponent.java index 31d2925b29c..2024cb04959 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SuggestComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/SuggestComponent.java @@ -80,7 +80,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, /** * Key is the dictionary name used in SolrConfig, value is the corresponding {@link SolrSuggester} */ - protected Map suggesters = new ConcurrentHashMap(); + protected Map suggesters = new ConcurrentHashMap<>(); /** Container for various labels used in the responses generated by this component */ private static class SuggesterResultLabels { @@ -211,7 +211,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, if (!buildAll && !reloadAll) { throw ex; } else { - querySuggesters = new HashSet(); + querySuggesters = new HashSet<>(); } } @@ -227,7 +227,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, int count = params.getInt(SUGGEST_COUNT, 1); SuggesterOptions options = new SuggesterOptions(new CharsRef(query), count); Map>> namedListResults = - new HashMap>>(); + new HashMap<>(); for (SolrSuggester suggester : querySuggesters) { SuggesterResult suggesterResult = suggester.getSuggestions(options); toNamedList(suggesterResult, namedListResults); @@ -247,7 +247,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, return; int count = params.getInt(SUGGEST_COUNT, 1); - List suggesterResults = new ArrayList(); + List suggesterResults = new ArrayList<>(); // Collect Shard responses for (ShardRequest sreq : rb.finished) { @@ -266,7 +266,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, // Merge Shard responses SuggesterResult suggesterResult = merge(suggesterResults, count); Map>> namedListResults = - new HashMap>>(); + new HashMap<>(); toNamedList(suggesterResult, namedListResults); rb.rsp.add(SuggesterResultLabels.SUGGEST, namedListResults); @@ -280,8 +280,8 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, * */ private static SuggesterResult merge(List suggesterResults, int count) { SuggesterResult result = new SuggesterResult(); - Set allTokens = new HashSet(); - Set suggesterNames = new HashSet(); + Set allTokens = new HashSet<>(); + Set suggesterNames = new HashSet<>(); // collect all tokens for (SuggesterResult shardResult : suggesterResults) { @@ -305,7 +305,7 @@ 
public class SuggestComponent extends SearchComponent implements SolrCoreAware, resultQueue.insertWithOverflow(res); } } - List sortedSuggests = new LinkedList(); + List sortedSuggests = new LinkedList<>(); Collections.addAll(sortedSuggests, resultQueue.getResults()); result.add(suggesterName, token, sortedSuggests); } @@ -325,7 +325,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, @Override public NamedList getStatistics() { - NamedList stats = new SimpleOrderedMap(); + NamedList stats = new SimpleOrderedMap<>(); stats.add("totalSizeInBytes", String.valueOf(sizeInBytes())); for (Map.Entry entry : suggesters.entrySet()) { SolrSuggester suggester = entry.getValue(); @@ -344,7 +344,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, } private Set getSuggesters(SolrParams params) { - Set solrSuggesters = new HashSet(); + Set solrSuggesters = new HashSet<>(); for(String suggesterName : getSuggesterNames(params)) { SolrSuggester curSuggester = suggesters.get(suggesterName); if (curSuggester != null) { @@ -361,7 +361,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, } private Set getSuggesterNames(SolrParams params) { - Set suggesterNames = new HashSet(); + Set suggesterNames = new HashSet<>(); String[] suggesterNamesFromParams = params.getParams(SUGGEST_DICT); if (suggesterNamesFromParams == null) { suggesterNames.add(DEFAULT_DICT_NAME); @@ -376,12 +376,12 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, /** Convert {@link SuggesterResult} to NamedList for constructing responses */ private void toNamedList(SuggesterResult suggesterResult, Map>> resultObj) { for(String suggesterName : suggesterResult.getSuggesterNames()) { - SimpleOrderedMap> results = new SimpleOrderedMap>(); + SimpleOrderedMap> results = new SimpleOrderedMap<>(); for (String token : suggesterResult.getTokens(suggesterName)) { - SimpleOrderedMap suggestionBody = new SimpleOrderedMap(); + SimpleOrderedMap suggestionBody = new SimpleOrderedMap<>(); List lookupResults = suggesterResult.getLookupResult(suggesterName, token); suggestionBody.add(SuggesterResultLabels.SUGGESTION_NUM_FOUND, lookupResults.size()); - List> suggestEntriesNamedList = new ArrayList>(); + List> suggestEntriesNamedList = new ArrayList<>(); for (LookupResult lookupResult : lookupResults) { String suggestionString = lookupResult.key.toString(); long weight = lookupResult.value; @@ -389,7 +389,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, lookupResult.payload.utf8ToString() : ""; - SimpleOrderedMap suggestEntryNamedList = new SimpleOrderedMap(); + SimpleOrderedMap suggestEntryNamedList = new SimpleOrderedMap<>(); suggestEntryNamedList.add(SuggesterResultLabels.SUGGESTION_TERM, suggestionString); suggestEntryNamedList.add(SuggesterResultLabels.SUGGESTION_WEIGHT, weight); suggestEntryNamedList.add(SuggesterResultLabels.SUGGESTION_PAYLOAD, payload); @@ -415,7 +415,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware, for (Iterator>> suggestionsIter = entry.getValue().iterator(); suggestionsIter.hasNext();) { Map.Entry> suggestions = suggestionsIter.next(); String tokenString = suggestions.getKey(); - List lookupResults = new ArrayList(); + List lookupResults = new ArrayList<>(); NamedList suggestion = suggestions.getValue(); // for each suggestion for (int j = 0; j < suggestion.size(); j++) { diff --git 
a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java index 33788471c76..2b579bc2ebc 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java @@ -127,7 +127,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar } // otherwise use the raw fldList as is, no special parsing or globs - Set fieldNames = new LinkedHashSet(); + Set fieldNames = new LinkedHashSet<>(); for (String fl : fldLst) { fieldNames.addAll(Arrays.asList(SolrPluginUtils.split(fl))); } @@ -141,7 +141,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar return; } - NamedList termVectors = new NamedList(); + NamedList termVectors = new NamedList<>(); rb.rsp.add(TERM_VECTORS, termVectors); IndexSchema schema = rb.req.getSchema(); @@ -170,11 +170,11 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar } //Build up our per field mapping - Map fieldOptions = new HashMap(); - NamedList> warnings = new NamedList>(); - List noTV = new ArrayList(); - List noPos = new ArrayList(); - List noOff = new ArrayList(); + Map fieldOptions = new HashMap<>(); + NamedList> warnings = new NamedList<>(); + List noTV = new ArrayList<>(); + List noPos = new ArrayList<>(); + List noOff = new ArrayList<>(); Set fields = getFields(rb); if ( null != fields ) { @@ -261,7 +261,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar final String finalUniqFieldName = uniqFieldName; - final List uniqValues = new ArrayList(); + final List uniqValues = new ArrayList<>(); // TODO: is this required to be single-valued? if so, we should STOP // once we find it...
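Aside: most of the allocations in TermVectorComponent are NamedLists, Solr's insertion-ordered name/value sequence that permits duplicate names. A rough JDK-only stand-in (an assumption for illustration, not Solr's NamedList API) showing the diamond on nested entry types:

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class NamedListSketch {
    public static void main(String[] args) {
        // A List of Map.Entry as a crude stand-in for NamedList: it keeps
        // insertion order and allows repeated names, unlike a Map.
        List<Map.Entry<String, Object>> termInfo = new ArrayList<>();
        termInfo.add(new AbstractMap.SimpleEntry<>("position", (Object) 1));
        termInfo.add(new AbstractMap.SimpleEntry<>("position", (Object) 5));
        System.out.println(termInfo); // [position=1, position=5]
    }
}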
@@ -291,7 +291,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar while (iter.hasNext()) { Integer docId = iter.next(); - NamedList docNL = new NamedList(); + NamedList docNL = new NamedList<>(); if (keyField != null) { reader.document(docId, getUniqValue); @@ -331,14 +331,14 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar } private void mapOneVector(NamedList docNL, FieldOptions fieldOptions, IndexReader reader, int docID, TermsEnum termsEnum, String field) throws IOException { - NamedList fieldNL = new NamedList(); + NamedList fieldNL = new NamedList<>(); docNL.add(field, fieldNL); BytesRef text; DocsAndPositionsEnum dpEnum = null; while((text = termsEnum.next()) != null) { String term = text.utf8ToString(); - NamedList termInfo = new NamedList(); + NamedList termInfo = new NamedList<>(); fieldNL.add(term, termInfo); final int freq = (int) termsEnum.totalTermFreq(); if (fieldOptions.termFreq == true) { @@ -362,7 +362,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar final int pos = dpEnum.nextPosition(); if (usePositions && pos >= 0) { if (positionsNL == null) { - positionsNL = new NamedList(); + positionsNL = new NamedList<>(); termInfo.add("positions", positionsNL); } positionsNL.add("position", pos); @@ -372,7 +372,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar if (dpEnum.startOffset() == -1) { useOffsets = false; } else { - theOffsets = new NamedList(); + theOffsets = new NamedList<>(); termInfo.add("offsets", theOffsets); } } @@ -404,7 +404,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar private List getInts(String[] vals) { List result = null; if (vals != null && vals.length > 0) { - result = new ArrayList(vals.length); + result = new ArrayList<>(vals.length); for (int i = 0; i < vals.length; i++) { try { result.add(new Integer(vals[i])); @@ -425,7 +425,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar public void finishStage(ResponseBuilder rb) { if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) { - NamedList termVectors = new NamedList(); + NamedList termVectors = new NamedList<>(); Map.Entry[] arr = new NamedList.NamedListEntry[rb.resultIds.size()]; for (ShardRequest sreq : rb.finished) { @@ -444,13 +444,13 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar } } else { int idx = sdoc.positionInResponse; - arr[idx] = new NamedList.NamedListEntry(key, nl.getVal(i)); + arr[idx] = new NamedList.NamedListEntry<>(key, nl.getVal(i)); } } } } // remove nulls in case not all docs were able to be retrieved - termVectors.addAll(SolrPluginUtils.removeNulls(new NamedList(arr))); + termVectors.addAll(SolrPluginUtils.removeNulls(new NamedList<>(arr))); rb.rsp.add(TERM_VECTORS, termVectors); } } diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java index 1fe142df96d..3e6ba0173b2 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java +++ b/solr/core/src/java/org/apache/solr/handler/component/TermsComponent.java @@ -89,7 +89,7 @@ public class TermsComponent extends SearchComponent { String[] fields = params.getParams(TermsParams.TERMS_FIELD); - NamedList termsResult = new SimpleOrderedMap(); + NamedList termsResult = new SimpleOrderedMap<>(); rb.rsp.add("terms", termsResult); if (fields == null || 
fields.length==0) return; @@ -121,7 +121,7 @@ public class TermsComponent extends SearchComponent { Fields lfields = indexReader.fields(); for (String field : fields) { - NamedList fieldTerms = new NamedList(); + NamedList fieldTerms = new NamedList<>(); termsResult.add(field, fieldTerms); Terms terms = lfields == null ? null : lfields.terms(field); @@ -208,7 +208,7 @@ public class TermsComponent extends SearchComponent { if (docFreq >= freqmin && docFreq <= freqmax) { // add the term to the list if (sort) { - queue.add(new CountPair(BytesRef.deepCopyOf(term), docFreq)); + queue.add(new CountPair<>(BytesRef.deepCopyOf(term), docFreq)); } else { // TODO: handle raw somehow @@ -326,7 +326,7 @@ public class TermsComponent extends SearchComponent { private SolrParams params; public TermsHelper() { - fieldmap = new HashMap>(5); + fieldmap = new HashMap<>(5); } public void init(SolrParams params) { @@ -374,7 +374,7 @@ public class TermsComponent extends SearchComponent { } public NamedList buildResponse() { - NamedList response = new SimpleOrderedMap(); + NamedList response = new SimpleOrderedMap<>(); // determine if we are going index or count sort boolean sort = !TermsParams.TERMS_SORT_INDEX.equals(params.get( @@ -403,7 +403,7 @@ public class TermsComponent extends SearchComponent { // loop though each field we want terms from for (String key : fieldmap.keySet()) { - NamedList fieldterms = new SimpleOrderedMap(); + NamedList fieldterms = new SimpleOrderedMap<>(); TermsResponse.Term[] data = null; if (sort) { data = getCountSorted(fieldmap.get(key)); diff --git a/solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java b/solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java index f525a1225db..3a50431d877 100644 --- a/solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java +++ b/solr/core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java @@ -164,7 +164,7 @@ abstract class CSVLoaderBase extends ContentStreamLoader { CSVLoaderBase(SolrQueryRequest req, UpdateRequestProcessor processor) { this.processor = processor; this.params = req.getParams(); - this.literals = new HashMap(); + this.literals = new HashMap<>(); templateAdd = new AddUpdateCommand(req); templateAdd.overwrite=params.getBool(OVERWRITE,true); diff --git a/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java index 948708b5155..8c9bd188abf 100644 --- a/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java +++ b/solr/core/src/java/org/apache/solr/handler/loader/JsonLoader.java @@ -480,7 +480,7 @@ public class JsonLoader extends ContentStreamLoader { } else { // If we encounter other unknown map keys, then use a map if (extendedInfo == null) { - extendedInfo = new HashMap(2); + extendedInfo = new HashMap<>(2); } // for now, the only extended info will be field values // we could either store this as an Object or a SolrInputField diff --git a/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java b/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java index a9374be0766..d2798fb09f7 100644 --- a/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java +++ b/solr/core/src/java/org/apache/solr/handler/loader/XMLLoader.java @@ -409,10 +409,10 @@ public class XMLLoader extends ContentStreamLoader { // should I warn in some text has been found too Object v = isNull ? 
null : text.toString(); if (update != null) { - if (updateMap == null) updateMap = new HashMap>(); + if (updateMap == null) updateMap = new HashMap<>(); Map extendedValues = updateMap.get(name); if (extendedValues == null) { - extendedValues = new HashMap(1); + extendedValues = new HashMap<>(1); updateMap.put(name, extendedValues); } Object val = extendedValues.get(update); @@ -424,7 +424,7 @@ public class XMLLoader extends ContentStreamLoader { List list = (List) val; list.add(v); } else { - List values = new ArrayList(); + List values = new ArrayList<>(); values.add(val); values.add(v); extendedValues.put(update, values); diff --git a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java index 7bd9a07f0e8..ae9809e57ce 100644 --- a/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java +++ b/solr/core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java @@ -70,27 +70,27 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf // Thread safe registry protected final Map formatters = - new HashMap(); + new HashMap<>(); // Thread safe registry protected final Map encoders = - new HashMap(); + new HashMap<>(); // Thread safe registry protected final Map fragmenters = - new HashMap() ; + new HashMap<>() ; // Thread safe registry protected final Map fragListBuilders = - new HashMap() ; + new HashMap<>() ; // Thread safe registry protected final Map fragmentsBuilders = - new HashMap() ; + new HashMap<>() ; // Thread safe registry protected final Map boundaryScanners = - new HashMap() ; + new HashMap<>() ; @Override public void init(PluginInfo info) { @@ -382,7 +382,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf IndexSchema schema = searcher.getSchema(); NamedList fragments = new SimpleOrderedMap(); String[] fieldNames = getHighlightFields(query, req, defaultFields); - Set fset = new HashSet(); + Set fset = new HashSet<>(); { // pre-fetch documents using the Searcher's doc cache @@ -466,7 +466,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf boolean mergeContiguousFragments = isMergeContiguousFragments(fieldName, params); String[] summaries = null; - List frags = new ArrayList(); + List frags = new ArrayList<>(); TermOffsetsTokenStream tots = null; // to be non-null iff we're using TermOffsets optimization TokenStream tvStream = TokenSources.getTokenStreamWithOffsets(searcher.getIndexReader(), docId, fieldName); @@ -555,7 +555,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf // convert fragments back into text // TODO: we can include score and position information in output as snippet attributes if (frags.size() > 0) { - ArrayList fragTexts = new ArrayList(); + ArrayList fragTexts = new ArrayList<>(); for (TextFragment fragment: frags) { if (preserveMulti) { if (fragment != null) { @@ -606,7 +606,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf // The alternate field did not exist, treat the original field as fallback instead docFields = doc.getFields(fieldName); } - List listFields = new ArrayList(); + List listFields = new ArrayList<>(); for (StorableField field : docFields) { if (field.binaryValue() == null) listFields.add(field.stringValue()); @@ -617,7 +617,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf if (altTexts != null && altTexts.length > 0){ Encoder 
      encoder = getEncoder(fieldName, params);
      int alternateFieldLen = params.getFieldInt(fieldName, HighlightParams.ALTERNATE_FIELD_LENGTH,0);
-     List altList = new ArrayList();
+     List altList = new ArrayList<>();
      int len = 0;
      for( String altText: altTexts ){
        if( alternateFieldLen <= 0 ){
@@ -653,7 +653,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf
  */
 final class TokenOrderingFilter extends TokenFilter {
   private final int windowSize;
-  private final LinkedList queue = new LinkedList();
+  private final LinkedList queue = new LinkedList<>();
   private boolean done=false;
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
diff --git a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
index 8f6717f5270..9ac35e62209 100644
--- a/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
+++ b/solr/core/src/java/org/apache/solr/highlight/HighlightingPluginBase.java
@@ -72,7 +72,7 @@ public abstract class HighlightingPluginBase implements SolrInfoMBean
   @Override
   public NamedList getStatistics() {
-    NamedList lst = new SimpleOrderedMap();
+    NamedList lst = new SimpleOrderedMap<>();
     lst.add("requests", numRequests);
     return lst;
   }
diff --git a/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java
index a11e1a048f1..0a02444352d 100644
--- a/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/PostingsSolrHighlighter.java
@@ -209,9 +209,9 @@ public class PostingsSolrHighlighter extends SolrHighlighter implements PluginIn
    * @return encoded namedlist of summaries
    */
   protected NamedList encodeSnippets(String[] keys, String[] fieldNames, Map snippets) {
-    NamedList list = new SimpleOrderedMap();
+    NamedList list = new SimpleOrderedMap<>();
     for (int i = 0; i < keys.length; i++) {
-      NamedList summary = new SimpleOrderedMap();
+      NamedList summary = new SimpleOrderedMap<>();
       for (String field : fieldNames) {
         String snippet = snippets.get(field)[i];
         // box in an array to match the format of existing highlighters,
diff --git a/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java b/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
index 01b7f0cfdc7..cc5bd8d89fd 100644
--- a/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/RegexFragmenter.java
@@ -203,7 +203,7 @@ class LuceneRegexFragmenter implements Fragmenter
   protected void addHotSpots(String text) {
     //System.out.println("hot spotting");
-    ArrayList temphs = new ArrayList(
+    ArrayList temphs = new ArrayList<>(
         text.length() / targetFragChars);
     Matcher match = textRE.matcher(text);
     int cur = 0;
diff --git a/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java b/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java
index 1a41af94626..ac4e3d3a43c 100644
--- a/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java
+++ b/solr/core/src/java/org/apache/solr/highlight/SolrHighlighter.java
@@ -77,7 +77,7 @@ public abstract class SolrHighlighter
       // create a Java regular expression from the wildcard string
       String fieldRegex = fields[0].replaceAll("\\*", ".*");
       Collection storedHighlightFieldNames = request.getSearcher().getStoredHighlightFieldNames();
-      List storedFieldsToHighlight = new ArrayList();
+      List storedFieldsToHighlight = new ArrayList<>();
       for (String storedFieldName: storedHighlightFieldNames) {
         if (storedFieldName.matches(fieldRegex)) {
           storedFieldsToHighlight.add(storedFieldName);
diff --git a/solr/core/src/java/org/apache/solr/logging/CircularList.java b/solr/core/src/java/org/apache/solr/logging/CircularList.java
index 6e4641e03e6..e9dc5618f4e 100644
--- a/solr/core/src/java/org/apache/solr/logging/CircularList.java
+++ b/solr/core/src/java/org/apache/solr/logging/CircularList.java
@@ -108,7 +108,7 @@ public class CircularList implements Iterable
   public List toList()
   {
-    ArrayList list = new ArrayList( size );
+    ArrayList list = new ArrayList<>( size );
    for( int i=0; i {
   public abstract boolean isSet();

   public SimpleOrderedMap getInfo() {
-    SimpleOrderedMap info = new SimpleOrderedMap();
+    SimpleOrderedMap info = new SimpleOrderedMap<>();
     info.add("name", getName());
     info.add("level", getLevel());
     info.add("set", isSet());
diff --git a/solr/core/src/java/org/apache/solr/logging/jul/JulWatcher.java b/solr/core/src/java/org/apache/solr/logging/jul/JulWatcher.java
index 6d20d1c9ecc..ccf31b340cb 100644
--- a/solr/core/src/java/org/apache/solr/logging/jul/JulWatcher.java
+++ b/solr/core/src/java/org/apache/solr/logging/jul/JulWatcher.java
@@ -88,7 +88,7 @@ public class JulWatcher extends LogWatcher {
     LogManager manager = LogManager.getLogManager();
     Logger root = manager.getLogger("");
-    Map map = new HashMap();
+    Map map = new HashMap<>();
     Enumeration names = manager.getLoggerNames();
     while (names.hasMoreElements()) {
       String name = names.nextElement();
@@ -133,7 +133,7 @@ public class JulWatcher extends LogWatcher {
     if(history!=null) {
       throw new IllegalStateException("History already registered");
     }
-    history = new CircularList(cfg.size);
+    history = new CircularList<>(cfg.size);
     handler = new RecordHandler(this);
     if(cfg.threshold != null) {
       handler.setLevel(Level.parse(cfg.threshold));
diff --git a/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java b/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java
index b906127f328..c137b659fc7 100644
--- a/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java
+++ b/solr/core/src/java/org/apache/solr/logging/log4j/Log4jWatcher.java
@@ -83,7 +83,7 @@ public class Log4jWatcher extends LogWatcher {
   @Override
   public Collection getAllLoggers() {
     org.apache.log4j.Logger root = org.apache.log4j.LogManager.getRootLogger();
-    Map map = new HashMap();
+    Map map = new HashMap<>();
     Enumeration loggers = org.apache.log4j.LogManager.getCurrentLoggers();
     while (loggers.hasMoreElements()) {
       org.apache.log4j.Logger logger = (org.apache.log4j.Logger)loggers.nextElement();
@@ -128,7 +128,7 @@ public class Log4jWatcher extends LogWatcher {
     if(history!=null) {
       throw new IllegalStateException("History already registered");
     }
-    history = new CircularList(cfg.size);
+    history = new CircularList<>(cfg.size);
     appender = new EventAppender(this);

     if(cfg.threshold != null) {
diff --git a/solr/core/src/java/org/apache/solr/parser/QueryParser.java b/solr/core/src/java/org/apache/solr/parser/QueryParser.java
index 709ad00dd38..7624462ef83 100644
--- a/solr/core/src/java/org/apache/solr/parser/QueryParser.java
+++ b/solr/core/src/java/org/apache/solr/parser/QueryParser.java
@@ -100,7 +100,7 @@ public class QueryParser extends SolrQueryParserBase implements QueryParserConst
   }

   final public Query Query(String field) throws ParseException, SyntaxError {
-    List clauses = new ArrayList();
+    List clauses = new ArrayList<>();
     Query q, firstQuery=null;
     int conj, mods;
     mods = Modifiers();
@@ -581,7 +581,7 @@ public class QueryParser extends SolrQueryParserBase implements QueryParserConst
     return (jj_ntk = jj_nt.kind);
   }

-  private java.util.List jj_expentries = new java.util.ArrayList();
+  private java.util.List jj_expentries = new java.util.ArrayList<>();
   private int[] jj_expentry;
   private int jj_kind = -1;
   private int[] jj_lasttokens = new int[100];
diff --git a/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java b/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java
index 7239e670048..7726de2e903 100644
--- a/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java
+++ b/solr/core/src/java/org/apache/solr/parser/SolrQueryParserBase.java
@@ -114,7 +114,7 @@ public abstract class SolrQueryParserBase extends QueryBuilder {
       return field;
     }
     private final static Map lookup
-      = new HashMap();
+      = new HashMap<>();
     static {
       for(MagicFieldName s : EnumSet.allOf(MagicFieldName.class))
         lookup.put(s.toString(), s);
@@ -671,7 +671,7 @@ public abstract class SolrQueryParserBase extends QueryBuilder {

   protected ReversedWildcardFilterFactory getReversedWildcardFilterFactory(FieldType fieldType) {
-    if (leadingWildcards == null) leadingWildcards = new HashMap();
+    if (leadingWildcards == null) leadingWildcards = new HashMap<>();
     ReversedWildcardFilterFactory fac = leadingWildcards.get(fieldType);
     if (fac != null || leadingWildcards.containsKey(fac)) {
       return fac;
diff --git a/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java b/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java
index 81c04f87cde..9b3db15e849 100644
--- a/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/DocValuesFacets.java
@@ -59,7 +59,7 @@ public class DocValuesFacets {
   public static NamedList getCounts(SolrIndexSearcher searcher, DocSet docs, String fieldName, int offset, int limit, int mincount, boolean missing, String sort, String prefix) throws IOException {
     SchemaField schemaField = searcher.getSchema().getField(fieldName);
     FieldType ft = schemaField.getType();
-    NamedList res = new NamedList();
+    NamedList res = new NamedList<>();

     final SortedSetDocValues si;  // for term lookups only
     OrdinalMap ordinalMap = null; // for mapping per-segment ords to global ones
diff --git a/solr/core/src/java/org/apache/solr/request/LocalSolrQueryRequest.java b/solr/core/src/java/org/apache/solr/request/LocalSolrQueryRequest.java
index fd601ef86a8..e5730e69f38 100644
--- a/solr/core/src/java/org/apache/solr/request/LocalSolrQueryRequest.java
+++ b/solr/core/src/java/org/apache/solr/request/LocalSolrQueryRequest.java
@@ -37,7 +37,7 @@ public class LocalSolrQueryRequest extends SolrQueryRequestBase {
   public final static Map emptyArgs = new HashMap(0,1);

   protected static SolrParams makeParams(String query, String qtype, int start, int limit, Map args) {
-    Map map = new HashMap();
+    Map map = new HashMap<>();
     for (Iterator iter = args.entrySet().iterator(); iter.hasNext();) {
       Map.Entry e = (Map.Entry)iter.next();
       String k = e.getKey().toString();
diff --git a/solr/core/src/java/org/apache/solr/request/NumericFacets.java b/solr/core/src/java/org/apache/solr/request/NumericFacets.java
index 62950e2e7ca..d88fecf1df7 100644
--- a/solr/core/src/java/org/apache/solr/request/NumericFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/NumericFacets.java
@@ -235,13 +235,13 @@ final class NumericFacets {
     // 4. build the NamedList
     final ValueSource vs = ft.getValueSource(sf, null);
-    final NamedList result = new NamedList();
+    final NamedList result = new NamedList<>();

     // This stuff is complicated because if facet.mincount=0, the counts needs
     // to be merged with terms from the terms dict
     if (!zeros || FacetParams.FACET_SORT_COUNT.equals(sort) || FacetParams.FACET_SORT_COUNT_LEGACY.equals(sort)) {
       // Only keep items we're interested in
-      final Deque counts = new ArrayDeque();
+      final Deque counts = new ArrayDeque<>();
       while (pq.size() > offset) {
         counts.addFirst(pq.pop());
       }
@@ -258,7 +258,7 @@ final class NumericFacets {
           throw new IllegalStateException("Cannot use " + FacetParams.FACET_MINCOUNT + "=0 on field " + sf.getName() + " which is not indexed");
         }
         // Add zeros until there are limit results
-        final Set alreadySeen = new HashSet();
+        final Set alreadySeen = new HashSet<>();
         while (pq.size() > 0) {
           Entry entry = pq.pop();
           final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
@@ -314,7 +314,7 @@ final class NumericFacets {
       if (!sf.indexed()) {
         throw new IllegalStateException("Cannot use " + FacetParams.FACET_SORT + "=" + FacetParams.FACET_SORT_INDEX + " on a field which is not indexed");
       }
-      final Map counts = new HashMap();
+      final Map counts = new HashMap<>();
       while (pq.size() > 0) {
         final Entry entry = pq.pop();
         final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
diff --git a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
index 7573a1b6d09..6ad399c514a 100644
--- a/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
+++ b/solr/core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
@@ -78,7 +78,7 @@ class PerSegmentSingleValuedFaceting {

   NamedList getFacetCounts(Executor executor) throws IOException {
-    CompletionService completionService = new ExecutorCompletionService(executor);
+    CompletionService completionService = new ExecutorCompletionService<>(executor);

     // reuse the translation logic to go from top level set to per-segment set
     baseSet = docs.getTopFilter();
@@ -87,7 +87,7 @@ class PerSegmentSingleValuedFaceting {
     // The list of pending tasks that aren't immediately submitted
     // TODO: Is there a completion service, or a delegating executor that can
     // limit the number of concurrent tasks submitted to a bigger executor?
-    LinkedList> pending = new LinkedList>();
+    LinkedList> pending = new LinkedList<>();

     int threads = nThreads <= 0 ? Integer.MAX_VALUE : nThreads;
@@ -308,7 +308,7 @@ class CountSortedFacetCollector extends FacetCollector {
     this.offset = offset;
     this.limit = limit;
     maxsize = limit>0 ? offset+limit : Integer.MAX_VALUE-1;
-    queue = new BoundedTreeSet>(maxsize);
+    queue = new BoundedTreeSet<>(maxsize);
     min=mincount-1;  // the smallest value in the top 'N' values
   }
@@ -319,7 +319,7 @@ class CountSortedFacetCollector extends FacetCollector {
       // index order, so we already know that the keys are ordered.  This can be very
      // important if a lot of the counts are repeated (like zero counts would be).
      UnicodeUtil.UTF8toUTF16(term, spare);
-      queue.add(new SimpleFacets.CountPair(spare.toString(), count));
+      queue.add(new SimpleFacets.CountPair<>(spare.toString(), count));
       if (queue.size()>=maxsize) min=queue.last().val;
     }
     return false;
@@ -327,7 +327,7 @@ class CountSortedFacetCollector extends FacetCollector {

   @Override
   public NamedList getFacetCounts() {
-    NamedList res = new NamedList();
+    NamedList res = new NamedList<>();
     int off=offset;
     int lim=limit>=0 ? limit : Integer.MAX_VALUE;
     // now select the right page from the results
@@ -347,7 +347,7 @@ class IndexSortedFacetCollector extends FacetCollector {
   int offset;
   int limit;
   final int mincount;
-  final NamedList res = new NamedList();
+  final NamedList res = new NamedList<>();

   public IndexSortedFacetCollector(int offset, int limit, int mincount) {
     this.offset = offset;
diff --git a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
index 9325c5b7a4f..e247ff4061e 100644
--- a/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/core/src/java/org/apache/solr/request/SimpleFacets.java
@@ -184,7 +184,7 @@ public class SimpleFacets {

     if (tagMap != null && rb != null) {
       List excludeTagList = StrUtils.splitSmart(excludeStr,',');
-      IdentityHashMap excludeSet = new IdentityHashMap();
+      IdentityHashMap excludeSet = new IdentityHashMap<>();
       for (String excludeTag : excludeTagList) {
         Object olst = tagMap.get(excludeTag);
         // tagMap has entries of List>, but subject to change in the future
@@ -197,7 +197,7 @@ public class SimpleFacets {
     }
     if (excludeSet.size() == 0) return;

-    List qlist = new ArrayList();
+    List qlist = new ArrayList<>();

     // add the base query
     if (!excludeSet.containsKey(rb.getQuery())) {
@@ -254,7 +254,7 @@ public class SimpleFacets {
     if (!params.getBool(FacetParams.FACET,true))
       return null;

-    facetResponse = new SimpleOrderedMap();
+    facetResponse = new SimpleOrderedMap<>();
     try {
       facetResponse.add("facet_queries", getFacetQueryCounts());
       facetResponse.add("facet_fields", getFacetFieldCounts());
@@ -277,7 +277,7 @@ public class SimpleFacets {
    */
   public NamedList getFacetQueryCounts() throws IOException,SyntaxError {

-    NamedList res = new SimpleOrderedMap();
+    NamedList res = new SimpleOrderedMap<>();

     /* Ignore CommonParams.DF - could have init param facet.query assuming
      * the schema default with query param DF intented to only affect Q.
@@ -341,7 +341,7 @@ public class SimpleFacets {
   public NamedList getTermCounts(String field, DocSet base) throws IOException {
     int offset = params.getFieldInt(field, FacetParams.FACET_OFFSET, 0);
     int limit = params.getFieldInt(field, FacetParams.FACET_LIMIT, 100);
-    if (limit == 0) return new NamedList();
+    if (limit == 0) return new NamedList<>();
     Integer mincount = params.getFieldInt(field, FacetParams.FACET_MINCOUNT);
     if (mincount==null) {
       Boolean zeros = params.getFieldBool(field, FacetParams.FACET_ZEROS);
@@ -481,7 +481,7 @@ public class SimpleFacets {
     CharsRef charsRef = new CharsRef();
     FieldType facetFieldType = searcher.getSchema().getFieldType(field);
-    NamedList facetCounts = new NamedList();
+    NamedList facetCounts = new NamedList<>();
     List scopedEntries = result.getFacetEntries(offset, limit < 0 ? Integer.MAX_VALUE : limit);
     for (TermGroupFacetCollector.FacetEntry facetEntry : scopedEntries) {
@@ -524,7 +524,7 @@ public class SimpleFacets {
   public NamedList getFacetFieldCounts()
       throws IOException, SyntaxError {

-    NamedList res = new SimpleOrderedMap();
+    NamedList res = new SimpleOrderedMap<>();
     String[] facetFs = params.getParams(FacetParams.FACET_FIELD);
     if (null == facetFs) {
       return res;
@@ -536,7 +536,7 @@ public class SimpleFacets {
     int maxThreads = req.getParams().getInt(FacetParams.FACET_THREADS, 0);
     Executor executor = maxThreads == 0 ? directExecutor : facetExecutor;
     final Semaphore semaphore = new Semaphore((maxThreads <= 0) ? Integer.MAX_VALUE : maxThreads);
-    List> futures = new ArrayList>(facetFs.length);
+    List> futures = new ArrayList<>(facetFs.length);

     try {
       //Loop over fields; submit to executor, keeping the future
@@ -550,7 +550,7 @@ public class SimpleFacets {
         @Override
         public NamedList call() throws Exception {
           try {
-            NamedList result = new SimpleOrderedMap();
+            NamedList result = new SimpleOrderedMap<>();
             if(termList != null) {
               result.add(workerKey, getListedTermCounts(workerFacetValue, termList, workerBase));
             } else {
@@ -568,7 +568,7 @@ public class SimpleFacets {
         }
       };

-      RunnableFuture runnableFuture = new FutureTask(callable);
+      RunnableFuture runnableFuture = new FutureTask<>(callable);
       semaphore.acquire();//may block and/or interrupt
       executor.execute(runnableFuture);//releases semaphore when done
       futures.add(runnableFuture);
@@ -602,7 +602,7 @@ public class SimpleFacets {
   private NamedList getListedTermCounts(String field, String termList, DocSet base) throws IOException {
     FieldType ft = searcher.getSchema().getFieldType(field);
     List terms = StrUtils.splitSmart(termList, ",", true);
-    NamedList res = new NamedList();
+    NamedList res = new NamedList<>();
     for (String term : terms) {
       String internal = ft.toInternal(term);
       int count = searcher.numDocs(new TermQuery(new Term(field, internal)), base);
@@ -646,7 +646,7 @@ public class SimpleFacets {
     // trying to pass all the various params around.

     FieldType ft = searcher.getSchema().getFieldType(fieldName);
-    NamedList res = new NamedList();
+    NamedList res = new NamedList<>();

     SortedDocValues si = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), fieldName);
@@ -808,7 +808,7 @@ public class SimpleFacets {
     boolean sortByCount = sort.equals("count") || sort.equals("true");
     final int maxsize = limit>=0 ? offset+limit : Integer.MAX_VALUE-1;
     final BoundedTreeSet> queue = sortByCount ? new BoundedTreeSet>(maxsize) : null;
-    final NamedList res = new NamedList();
+    final NamedList res = new NamedList<>();

     int min=mincount-1;  // the smallest value in the top 'N' values
     int off=offset;
@@ -908,7 +908,7 @@ public class SimpleFacets {
       if (sortByCount) {
         if (c>min) {
           BytesRef termCopy = BytesRef.deepCopyOf(term);
-          queue.add(new CountPair(termCopy, c));
+          queue.add(new CountPair<>(termCopy, c));
           if (queue.size()>=maxsize) min=queue.last().val;
         }
       } else {
@@ -952,7 +952,7 @@ public class SimpleFacets {
   public NamedList getFacetDateCounts()
       throws IOException, SyntaxError {

-    final NamedList resOuter = new SimpleOrderedMap();
+    final NamedList resOuter = new SimpleOrderedMap<>();
     final String[] fields = params.getParams(FacetParams.FACET_DATE);

     if (null == fields || 0 == fields.length) return resOuter;
@@ -977,7 +977,7 @@ public class SimpleFacets {
     String f = facetValue;

-    final NamedList resInner = new SimpleOrderedMap();
+    final NamedList resInner = new SimpleOrderedMap<>();
     resOuter.add(key, resInner);
     final SchemaField sf = schema.getField(f);
     if (! (sf.getType() instanceof DateField)) {
@@ -1133,7 +1133,7 @@ public class SimpleFacets {
    */
   public NamedList getFacetRangeCounts() throws IOException, SyntaxError {
-    final NamedList resOuter = new SimpleOrderedMap();
+    final NamedList resOuter = new SimpleOrderedMap<>();
     final String[] fields = params.getParams(FacetParams.FACET_RANGE);

     if (null == fields || 0 == fields.length) return resOuter;
@@ -1203,8 +1203,8 @@ public class SimpleFacets {
                                           final RangeEndpointCalculator calc) throws IOException {

     final String f = sf.getName();
-    final NamedList res = new SimpleOrderedMap();
-    final NamedList counts = new NamedList();
+    final NamedList res = new SimpleOrderedMap<>();
+    final NamedList counts = new NamedList<>();
     res.add("counts", counts);

     final T start = calc.getValue(required.getFieldParam(f,FacetParams.FACET_RANGE_START));
diff --git a/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java b/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java
index 1ad75bb20b5..b243ccf0a9a 100644
--- a/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java
+++ b/solr/core/src/java/org/apache/solr/request/SolrQueryRequestBase.java
@@ -57,7 +57,7 @@ public abstract class SolrQueryRequestBase implements SolrQueryRequest {
   @Override
   public Map getContext() {
     // SolrQueryRequest as a whole isn't thread safe, and this isn't either.
-    if (context==null) context = new HashMap();
+    if (context==null) context = new HashMap<>();
     return context;
   }
diff --git a/solr/core/src/java/org/apache/solr/request/SolrRequestInfo.java b/solr/core/src/java/org/apache/solr/request/SolrRequestInfo.java
index bbea9b2f0ff..eaef45accbd 100644
--- a/solr/core/src/java/org/apache/solr/request/SolrRequestInfo.java
+++ b/solr/core/src/java/org/apache/solr/request/SolrRequestInfo.java
@@ -32,7 +32,7 @@ import java.util.List;

 public class SolrRequestInfo {
-  protected final static ThreadLocal threadLocal = new ThreadLocal();
+  protected final static ThreadLocal threadLocal = new ThreadLocal<>();

   protected SolrQueryRequest req;
   protected SolrQueryResponse rsp;
@@ -132,7 +132,7 @@ public class SolrRequestInfo {
     // is this better here, or on SolrQueryRequest?
     synchronized (this) {
       if (closeHooks == null) {
-        closeHooks = new LinkedList();
+        closeHooks = new LinkedList<>();
       }
       closeHooks.add(hook);
     }
diff --git a/solr/core/src/java/org/apache/solr/request/UnInvertedField.java b/solr/core/src/java/org/apache/solr/request/UnInvertedField.java
index a7d62308fa7..4e591bac78d 100644
--- a/solr/core/src/java/org/apache/solr/request/UnInvertedField.java
+++ b/solr/core/src/java/org/apache/solr/request/UnInvertedField.java
@@ -100,7 +100,7 @@ public class UnInvertedField extends DocTermOrds {

   int[] maxTermCounts = new int[1024];

-  final Map bigTerms = new LinkedHashMap();
+  final Map bigTerms = new LinkedHashMap<>();

   private SolrIndexSearcher.DocsEnumState deState;
   private final SolrIndexSearcher searcher;
@@ -220,7 +220,7 @@ public class UnInvertedField extends DocTermOrds {

     FieldType ft = searcher.getSchema().getFieldType(field);

-    NamedList res = new NamedList();  // order is important
+    NamedList res = new NamedList<>();  // order is important

     DocSet docs = baseDocs;
     int baseSize = docs.size();
diff --git a/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java b/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java
index 3ef01126ff6..3ec8ddee2ea 100644
--- a/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/BinaryResponseWriter.java
@@ -40,7 +40,7 @@ import org.slf4j.LoggerFactory;
 public class BinaryResponseWriter implements BinaryQueryResponseWriter {
   private static final Logger LOG = LoggerFactory.getLogger(BinaryResponseWriter.class);
-  public static final Set KNOWN_TYPES = new HashSet();
+  public static final Set KNOWN_TYPES = new HashSet<>();

   @Override
   public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse response) throws IOException {
diff --git a/solr/core/src/java/org/apache/solr/response/CSVResponseWriter.java b/solr/core/src/java/org/apache/solr/response/CSVResponseWriter.java
index e49b078a60e..3ef75c9fef1 100644
--- a/solr/core/src/java/org/apache/solr/response/CSVResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/CSVResponseWriter.java
@@ -146,12 +146,12 @@ class CSVWriter extends TextResponseWriter {
     CSVSharedBufPrinter mvPrinter;  // printer used to encode multiple values in a single CSV value

     // used to collect values
-    List values = new ArrayList(1);  // low starting amount in case there are many fields
+    List values = new ArrayList<>(1);  // low starting amount in case there are many fields
     int tmp;
   }

   int pass;
-  Map csvFields = new LinkedHashMap();
+  Map csvFields = new LinkedHashMap<>();

   Calendar cal;  // for formatting date objects
@@ -242,7 +242,7 @@ class CSVWriter extends TextResponseWriter {
     if (responseObj instanceof SolrDocumentList) {
       // get the list of fields from the SolrDocumentList
       if(fields==null) {
-        fields = new LinkedHashSet();
+        fields = new LinkedHashSet<>();
       }
       for (SolrDocument sdoc: (SolrDocumentList)responseObj) {
         fields.addAll(sdoc.getFieldNames());
diff --git a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
index 301ad3f25b5..d48d31e8f68 100644
--- a/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/JSONResponseWriter.java
@@ -124,7 +124,7 @@ class JSONWriter extends TextResponseWriter {
     // Disad: this is ambiguous with a real single value that happens to be an array
     //
    // Both of these mappings have ambiguities.
-    HashMap repeats = new HashMap(4);
+    HashMap repeats = new HashMap<>(4);
     boolean first=true;
     for (int i=0; i fields;
     MultiValueField(SchemaField sfield, IndexableField firstVal) {
       this.sfield = sfield;
-      this.fields = new ArrayList(4);
+      this.fields = new ArrayList<>(4);
       this.fields.add(firstVal);
     }
   }
diff --git a/solr/core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java b/solr/core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
index 5e0f3f40a4b..ba1933a3c5b 100644
--- a/solr/core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@@ -121,8 +121,8 @@ class PHPSerializedWriter extends JSONWriter {
   {
     writeKey(idx, false);
-    LinkedHashMap single = new LinkedHashMap();
-    LinkedHashMap multi = new LinkedHashMap();
+    LinkedHashMap single = new LinkedHashMap<>();
+    LinkedHashMap multi = new LinkedHashMap<>();

     for (String fname : doc.getFieldNames()) {
       if(!returnFields.wantsField(fname)){
diff --git a/solr/core/src/java/org/apache/solr/response/SolrQueryResponse.java b/solr/core/src/java/org/apache/solr/response/SolrQueryResponse.java
index 88955c9ea88..5d51ebdf7b7 100644
--- a/solr/core/src/java/org/apache/solr/response/SolrQueryResponse.java
+++ b/solr/core/src/java/org/apache/solr/response/SolrQueryResponse.java
@@ -68,13 +68,13 @@ public class SolrQueryResponse {
    * @see #setAllValues
    * @see Note on Returnable Data
    */
-  protected NamedList values = new SimpleOrderedMap();
+  protected NamedList values = new SimpleOrderedMap<>();

   /**
    * Container for storing information that should be logged by Solr before returning.
    */
-  protected NamedList toLog = new SimpleOrderedMap();
+  protected NamedList toLog = new SimpleOrderedMap<>();

   protected ReturnFields returnFields;
diff --git a/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java b/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java
index 08630c6a135..513ffd649be 100644
--- a/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java
@@ -235,7 +235,7 @@ public abstract class TextResponseWriter {
     if (existing == null) {
       SchemaField sf = schema.getFieldOrNull(f.name());
       if (sf != null && sf.multiValued()) {
-        List vals = new ArrayList();
+        List vals = new ArrayList<>();
         vals.add( f );
         out.setField( f.name(), vals );
       }
diff --git a/solr/core/src/java/org/apache/solr/response/transform/DocTransformers.java b/solr/core/src/java/org/apache/solr/response/transform/DocTransformers.java
index 950b1ccfe8f..579e7fee5e1 100644
--- a/solr/core/src/java/org/apache/solr/response/transform/DocTransformers.java
+++ b/solr/core/src/java/org/apache/solr/response/transform/DocTransformers.java
@@ -32,7 +32,7 @@ import org.apache.solr.request.SolrQueryRequest;
  */
 public class DocTransformers extends DocTransformer
 {
-  final List children = new ArrayList();
+  final List children = new ArrayList<>();

   @Override
   public String getName()
diff --git a/solr/core/src/java/org/apache/solr/response/transform/TransformerFactory.java b/solr/core/src/java/org/apache/solr/response/transform/TransformerFactory.java
index 0b389b13b5d..44d46bc1015 100644
--- a/solr/core/src/java/org/apache/solr/response/transform/TransformerFactory.java
+++ b/solr/core/src/java/org/apache/solr/response/transform/TransformerFactory.java
@@ -41,7 +41,7 @@ public abstract class TransformerFactory implements NamedListInitializedPlugin
   public abstract DocTransformer create(String field, SolrParams params, SolrQueryRequest req);

-  public static final Map defaultFactories = new HashMap();
+  public static final Map defaultFactories = new HashMap<>();
   static {
     defaultFactories.put( "explain", new ExplainAugmenterFactory() );
     defaultFactories.put( "value", new ValueAugmenterFactory() );
diff --git a/solr/core/src/java/org/apache/solr/rest/schema/BaseFieldResource.java b/solr/core/src/java/org/apache/solr/rest/schema/BaseFieldResource.java
index b8a093d1375..337f50d7643 100644
--- a/solr/core/src/java/org/apache/solr/rest/schema/BaseFieldResource.java
+++ b/solr/core/src/java/org/apache/solr/rest/schema/BaseFieldResource.java
@@ -64,7 +64,7 @@ abstract class BaseFieldResource extends BaseSchemaResource {
     if (null != flParam) {
       String[] fields = flParam.trim().split("[,\\s]+");
       if (fields.length > 0) {
-        requestedFields = new LinkedHashSet();
+        requestedFields = new LinkedHashSet<>();
         for (String field : fields) {
           if ( ! field.trim().isEmpty()) {
             requestedFields.add(field.trim());
diff --git a/solr/core/src/java/org/apache/solr/rest/schema/CopyFieldCollectionResource.java b/solr/core/src/java/org/apache/solr/rest/schema/CopyFieldCollectionResource.java
index 8c8f8e96fd9..a90391ddb5d 100644
--- a/solr/core/src/java/org/apache/solr/rest/schema/CopyFieldCollectionResource.java
+++ b/solr/core/src/java/org/apache/solr/rest/schema/CopyFieldCollectionResource.java
@@ -77,7 +77,7 @@ public class CopyFieldCollectionResource extends BaseFieldResource implements GE
     if (null != sourceFieldListParam) {
       String[] fields = sourceFieldListParam.trim().split("[,\\s]+");
       if (fields.length > 0) {
-        requestedSourceFields = new HashSet(Arrays.asList(fields));
+        requestedSourceFields = new HashSet<>(Arrays.asList(fields));
         requestedSourceFields.remove(""); // Remove empty values, if any
       }
     }
@@ -85,7 +85,7 @@
     if (null != destinationFieldListParam) {
       String[] fields = destinationFieldListParam.trim().split("[,\\s]+");
       if (fields.length > 0) {
-        requestedDestinationFields = new HashSet(Arrays.asList(fields));
+        requestedDestinationFields = new HashSet<>(Arrays.asList(fields));
         requestedDestinationFields.remove(""); // Remove empty values, if any
       }
     }
diff --git a/solr/core/src/java/org/apache/solr/rest/schema/DynamicFieldCollectionResource.java b/solr/core/src/java/org/apache/solr/rest/schema/DynamicFieldCollectionResource.java
index 107831123e8..cc89d118175 100644
--- a/solr/core/src/java/org/apache/solr/rest/schema/DynamicFieldCollectionResource.java
+++ b/solr/core/src/java/org/apache/solr/rest/schema/DynamicFieldCollectionResource.java
@@ -56,7 +56,7 @@ public class DynamicFieldCollectionResource extends BaseFieldResource implements
   public Representation get() {
     try {
-      List> props = new ArrayList>();
+      List> props = new ArrayList<>();
       if (null == getRequestedFields()) {
         for (IndexSchema.DynamicField dynamicField : getSchema().getDynamicFields()) {
           if ( ! dynamicField.getRegex().startsWith(IndexSchema.INTERNAL_POLY_FIELD_PREFIX)) { // omit internal polyfields
@@ -68,7 +68,7 @@ public class DynamicFieldCollectionResource extends BaseFieldResource implements
           String message = "Empty " + CommonParams.FL + " parameter value";
           throw new SolrException(ErrorCode.BAD_REQUEST, message);
         }
-        Map dynamicFieldsByName = new HashMap();
+        Map dynamicFieldsByName = new HashMap<>();
         for (IndexSchema.DynamicField dynamicField : getSchema().getDynamicFields()) {
           dynamicFieldsByName.put(dynamicField.getRegex(), dynamicField.getPrototype());
         }
diff --git a/solr/core/src/java/org/apache/solr/rest/schema/FieldCollectionResource.java b/solr/core/src/java/org/apache/solr/rest/schema/FieldCollectionResource.java
index e9a4254de76..cb83e702837 100644
--- a/solr/core/src/java/org/apache/solr/rest/schema/FieldCollectionResource.java
+++ b/solr/core/src/java/org/apache/solr/rest/schema/FieldCollectionResource.java
@@ -79,9 +79,9 @@ public class FieldCollectionResource extends BaseFieldResource implements GETabl
   @Override
   public Representation get() {
     try {
-      final List> props = new ArrayList>();
+      final List> props = new ArrayList<>();
       if (null == getRequestedFields()) {
-        SortedSet fieldNames = new TreeSet(getSchema().getFields().keySet());
+        SortedSet fieldNames = new TreeSet<>(getSchema().getFields().keySet());
         for (String fieldName : fieldNames) {
           props.add(getFieldProperties(getSchema().getFields().get(fieldName)));
         }
@@ -138,7 +138,7 @@ public class FieldCollectionResource extends BaseFieldResource implements GETabl
         throw new SolrException(ErrorCode.BAD_REQUEST, message);
       } else {
         List> list = (List>) object;
-        List newFields = new ArrayList();
+        List newFields = new ArrayList<>();
         IndexSchema oldSchema = getSchema();
         Map> copyFields = new HashMap<>();
         Set malformed = new HashSet<>();
diff --git a/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeCollectionResource.java b/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeCollectionResource.java
index 01aa815cc6b..5693e80d326 100644
--- a/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeCollectionResource.java
+++ b/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeCollectionResource.java
@@ -60,8 +60,8 @@ public class FieldTypeCollectionResource extends BaseFieldTypeResource implement
   @Override
   public Representation get() {
     try {
-      List> props = new ArrayList>();
-      Map sortedFieldTypes = new TreeMap(getSchema().getFieldTypes());
+      List> props = new ArrayList<>();
+      Map sortedFieldTypes = new TreeMap<>(getSchema().getFieldTypes());
       for (FieldType fieldType : sortedFieldTypes.values()) {
         props.add(getFieldTypeProperties(fieldType));
       }
@@ -99,12 +99,12 @@ public class FieldTypeCollectionResource extends BaseFieldTypeResource implement
    * The map only includes field types that are used by at least one field.
    */
   private Map> getFieldsByFieldType() {
-    Map> fieldsByFieldType = new HashMap>();
+    Map> fieldsByFieldType = new HashMap<>();
     for (SchemaField schemaField : getSchema().getFields().values()) {
       final String fieldType = schemaField.getType().getTypeName();
       List fields = fieldsByFieldType.get(fieldType);
       if (null == fields) {
-        fields = new ArrayList();
+        fields = new ArrayList<>();
         fieldsByFieldType.put(fieldType, fields);
       }
       fields.add(schemaField.getName());
@@ -120,12 +120,12 @@ public class FieldTypeCollectionResource extends BaseFieldTypeResource implement
    * The map only includes field types that are used by at least one dynamic field.
    */
   private Map> getDynamicFieldsByFieldType() {
-    Map> dynamicFieldsByFieldType = new HashMap>();
+    Map> dynamicFieldsByFieldType = new HashMap<>();
     for (SchemaField schemaField : getSchema().getDynamicFieldPrototypes()) {
       final String fieldType = schemaField.getType().getTypeName();
       List dynamicFields = dynamicFieldsByFieldType.get(fieldType);
       if (null == dynamicFields) {
-        dynamicFields = new ArrayList();
+        dynamicFields = new ArrayList<>();
         dynamicFieldsByFieldType.put(fieldType, dynamicFields);
       }
       dynamicFields.add(schemaField.getName());
diff --git a/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeResource.java b/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeResource.java
index 353aa12a73a..aab85321970 100644
--- a/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeResource.java
+++ b/solr/core/src/java/org/apache/solr/rest/schema/FieldTypeResource.java
@@ -88,7 +88,7 @@ public class FieldTypeResource extends BaseFieldTypeResource implements GETable
    */
   @Override
   protected List getFieldsWithFieldType(FieldType fieldType) {
-    List fields = new ArrayList();
+    List fields = new ArrayList<>();
     for (SchemaField schemaField : getSchema().getFields().values()) {
       if (schemaField.getType().getTypeName().equals(fieldType.getTypeName())) {
         fields.add(schemaField.getName());
@@ -104,7 +104,7 @@ public class FieldTypeResource extends BaseFieldTypeResource implements GETable
    */
   @Override
   protected List getDynamicFieldsWithFieldType(FieldType fieldType) {
-    List dynamicFields = new ArrayList();
+    List dynamicFields = new ArrayList<>();
     for (SchemaField prototype : getSchema().getDynamicFieldPrototypes()) {
       if (prototype.getType().getTypeName().equals(fieldType.getTypeName())) {
         dynamicFields.add(prototype.getName());
diff --git a/solr/core/src/java/org/apache/solr/rest/schema/SolrQueryParserResource.java b/solr/core/src/java/org/apache/solr/rest/schema/SolrQueryParserResource.java
index 967759ce77d..5103d11d770 100644
--- a/solr/core/src/java/org/apache/solr/rest/schema/SolrQueryParserResource.java
+++ b/solr/core/src/java/org/apache/solr/rest/schema/SolrQueryParserResource.java
@@ -43,7 +43,7 @@ public class SolrQueryParserResource extends BaseSchemaResource implements GETab
   @Override
   public Representation get() {
     try {
-      SimpleOrderedMap props = new SimpleOrderedMap();
+      SimpleOrderedMap props = new SimpleOrderedMap<>();
       props.add(IndexSchema.DEFAULT_OPERATOR, getSchema().getQueryParserDefaultOperator());
       getSolrResponse().add(IndexSchema.SOLR_QUERY_PARSER, props);
     } catch (Exception e) {
diff --git a/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java b/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java
index f913fbbfecc..3b1ddd19850 100644
--- a/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/AbstractSpatialFieldType.java
@@ -103,7 +103,7 @@ public abstract class AbstractSpatialFieldType extend
     }

    //Solr expects us to remove the parameters we've used.
-    MapListener argsWrap = new MapListener(args);
+    MapListener argsWrap = new MapListener<>(args);
     ctx = SpatialContextFactory.makeSpatialContext(argsWrap, schema.getResourceLoader().getClassLoader());
     args.keySet().removeAll(argsWrap.getSeenKeys());
@@ -143,7 +143,7 @@ public abstract class AbstractSpatialFieldType extend
       return Collections.emptyList();
     }

-    List result = new ArrayList();
+    List result = new ArrayList<>();
     if (field.indexed()) {
       T strategy = getStrategy(field.getName());
       result.addAll(Arrays.asList(strategy.createIndexableFields(shape)));
diff --git a/solr/core/src/java/org/apache/solr/schema/AbstractSpatialPrefixTreeFieldType.java b/solr/core/src/java/org/apache/solr/schema/AbstractSpatialPrefixTreeFieldType.java
index 59efa75a323..8af7e460b0d 100644
--- a/solr/core/src/java/org/apache/solr/schema/AbstractSpatialPrefixTreeFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/AbstractSpatialPrefixTreeFieldType.java
@@ -43,7 +43,7 @@ public abstract class AbstractSpatialPrefixTreeFieldType argsWrap = new MapListener(args);
+    MapListener argsWrap = new MapListener<>(args);
     grid = SpatialPrefixTreeFactory.makeSPT(argsWrap, schema.getResourceLoader().getClassLoader(), ctx);
     args.keySet().removeAll(argsWrap.getSeenKeys());
diff --git a/solr/core/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java b/solr/core/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java
index 36b8af9bc39..bdac4066e52 100644
--- a/solr/core/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/AbstractSubTypeFieldType.java
@@ -83,7 +83,7 @@ public abstract class AbstractSubTypeFieldType extends FieldType implements Sche
   static SchemaField registerPolyFieldDynamicPrototype(IndexSchema schema, FieldType type) {
     String name = "*" + FieldType.POLY_FIELD_SEPARATOR + type.typeName;
-    Map props = new HashMap();
+    Map props = new HashMap<>();
     //Just set these, delegate everything else to the field type
     props.put("indexed", "true");
     props.put("stored", "false");
diff --git a/solr/core/src/java/org/apache/solr/schema/CollationField.java b/solr/core/src/java/org/apache/solr/schema/CollationField.java
index 891e6736139..13f35a8c069 100644
--- a/solr/core/src/java/org/apache/solr/schema/CollationField.java
+++ b/solr/core/src/java/org/apache/solr/schema/CollationField.java
@@ -262,7 +262,7 @@ public class CollationField extends FieldType {
   @Override
   public List createFields(SchemaField field, Object value, float boost) {
     if (field.hasDocValues()) {
-      List fields = new ArrayList();
+      List fields = new ArrayList<>();
       fields.add(createField(field, value, boost));
       final BytesRef bytes = getCollationKey(field.getName(), value.toString());
       if (field.multiValued()) {
diff --git a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
index ab4e839f786..7379e0fa31b 100644
--- a/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
+++ b/solr/core/src/java/org/apache/solr/schema/CurrencyField.java
@@ -128,7 +128,7 @@ public class CurrencyField extends FieldType implements SchemaAware, ResourceLoa
     // Initialize field type for amount
     fieldTypeAmountRaw = new TrieLongField();
     fieldTypeAmountRaw.setTypeName("amount_raw_type_tlong");
-    Map map = new HashMap(1);
+    Map map = new HashMap<>(1);
     map.put("precisionStep", precisionStepString);
     fieldTypeAmountRaw.init(schema, map);
@@ -169,7 +169,7 @@ public class CurrencyField extends FieldType implements SchemaAware, ResourceLoa
   public List createFields(SchemaField field, Object externalVal, float boost) {
     CurrencyValue value = CurrencyValue.parse(externalVal.toString(), defaultCurrency);
-    List f = new ArrayList();
+    List f = new ArrayList<>();
     SchemaField amountField = getAmountField(field);
     f.add(amountField.createField(String.valueOf(value.getAmount()), amountField.indexed() && !amountField.omitNorms() ? boost : 1F));
     SchemaField currencyField = getCurrencyField(field);
@@ -199,7 +199,7 @@
   private void createDynamicCurrencyField(String suffix, FieldType type) {
     String name = "*" + POLY_FIELD_SEPARATOR + suffix;
-    Map props = new HashMap();
+    Map props = new HashMap<>();
     props.put("indexed", "true");
     props.put("stored", "false");
     props.put("multiValued", "false");
@@ -665,7 +665,7 @@ class FileExchangeRateProvider implements ExchangeRateProvider {
   protected static final String PARAM_CURRENCY_CONFIG = "currencyConfig";

   // Exchange rate map, maps Currency Code -> Currency Code -> Rate
-  private Map> rates = new HashMap>();
+  private Map> rates = new HashMap<>();

   private String currencyConfigFile;
   private ResourceLoader loader;
@@ -734,7 +734,7 @@ class FileExchangeRateProvider implements ExchangeRateProvider {
     Map rhs = ratesMap.get(sourceCurrencyCode);

     if (rhs == null) {
-      rhs = new HashMap();
+      rhs = new HashMap<>();
       ratesMap.put(sourceCurrencyCode, rhs);
     }
@@ -763,7 +763,7 @@ class FileExchangeRateProvider implements ExchangeRateProvider {
   @Override
   public Set listAvailableCurrencies() {
-    Set currencies = new HashSet();
+    Set currencies = new HashSet<>();
     for(String from : rates.keySet()) {
       currencies.add(from);
       for(String to : rates.get(from).keySet()) {
@@ -776,7 +776,7 @@ class FileExchangeRateProvider implements ExchangeRateProvider {
   @Override
   public boolean reload() throws SolrException {
     InputStream is = null;
-    Map> tmpRates = new HashMap>();
+    Map> tmpRates = new HashMap<>();
     try {
       log.info("Reloading exchange rates from file "+this.currencyConfigFile);
diff --git a/solr/core/src/java/org/apache/solr/schema/EnumField.java b/solr/core/src/java/org/apache/solr/schema/EnumField.java
index 845b75f85a6..3a9e121cfe1 100644
--- a/solr/core/src/java/org/apache/solr/schema/EnumField.java
+++ b/solr/core/src/java/org/apache/solr/schema/EnumField.java
@@ -61,8 +61,8 @@ public class EnumField extends PrimitiveFieldType {
   protected static final Integer DEFAULT_VALUE = -1;
   protected static final int DEFAULT_PRECISION_STEP = Integer.MAX_VALUE;

-  protected Map enumStringToIntMap = new HashMap();
-  protected Map enumIntToStringMap = new HashMap();
+  protected Map enumStringToIntMap = new HashMap<>();
+  protected Map enumIntToStringMap = new HashMap<>();

   protected String enumsConfigFile;
   protected String enumName;
diff --git a/solr/core/src/java/org/apache/solr/schema/ExternalFileFieldReloader.java b/solr/core/src/java/org/apache/solr/schema/ExternalFileFieldReloader.java
index 6fcd6d562ad..a2d6d4e31b3 100644
--- a/solr/core/src/java/org/apache/solr/schema/ExternalFileFieldReloader.java
+++ b/solr/core/src/java/org/apache/solr/schema/ExternalFileFieldReloader.java
@@ -51,7 +51,7 @@ import java.util.List;
 public class ExternalFileFieldReloader extends AbstractSolrEventListener {

   private String datadir;
-  private List fieldSources = new ArrayList();
+  private List fieldSources = new ArrayList<>();

   private static final Logger log = LoggerFactory.getLogger(ExternalFileFieldReloader.class);
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldProperties.java b/solr/core/src/java/org/apache/solr/schema/FieldProperties.java
index 3dab1d7121d..a560fac738f 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldProperties.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldProperties.java
@@ -61,7 +61,7 @@ public abstract class FieldProperties {
     "storeOffsetsWithPositions", "docValues"
   };

-  static final Map propertyMap = new HashMap();
+  static final Map propertyMap = new HashMap<>();
   static {
     for (String prop : propertyNames) {
       propertyMap.put(prop, propertyNameToInt(prop, true));
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldType.java b/solr/core/src/java/org/apache/solr/schema/FieldType.java
index e7e518cb0a0..4d4ae5a225d 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldType.java
@@ -151,7 +151,7 @@ public abstract class FieldType extends FieldProperties {
     }
     this.args = Collections.unmodifiableMap(args);
-    Map initArgs = new HashMap(args);
+    Map initArgs = new HashMap<>(args);
     initArgs.remove(CLASS_NAME); // consume the class arg

     trueProperties = FieldProperties.parseProperties(initArgs,true,false);
@@ -785,7 +785,7 @@ public abstract class FieldType extends FieldProperties {
    * @param showDefaults if true, include default properties.
    */
   public SimpleOrderedMap getNamedPropertyValues(boolean showDefaults) {
-    SimpleOrderedMap namedPropertyValues = new SimpleOrderedMap();
+    SimpleOrderedMap namedPropertyValues = new SimpleOrderedMap<>();
     namedPropertyValues.add(TYPE_NAME, getTypeName());
     namedPropertyValues.add(CLASS_NAME, getClassArg());
     if (showDefaults) {
@@ -829,7 +829,7 @@ public abstract class FieldType extends FieldProperties {
         namedPropertyValues.add(DOC_VALUES_FORMAT, getDocValuesFormat());
       }
     } else { // Don't show defaults
-      Set fieldProperties = new HashSet();
+      Set fieldProperties = new HashSet<>();
       for (String propertyName : FieldProperties.propertyNames) {
         fieldProperties.add(propertyName);
       }
@@ -861,7 +861,7 @@ public abstract class FieldType extends FieldProperties {

   /** Returns args to this field type that aren't standard field properties */
   protected Map getNonFieldPropertyArgs() {
-    Map initArgs = new HashMap(args);
+    Map initArgs = new HashMap<>(args);
     for (String prop : FieldProperties.propertyNames) {
       initArgs.remove(prop);
     }
@@ -874,16 +874,16 @@ public abstract class FieldType extends FieldProperties {
    * name and args.
    */
   protected static SimpleOrderedMap getAnalyzerProperties(Analyzer analyzer) {
-    SimpleOrderedMap analyzerProps = new SimpleOrderedMap();
+    SimpleOrderedMap analyzerProps = new SimpleOrderedMap<>();

     if (analyzer instanceof TokenizerChain) {
       Map factoryArgs;
       TokenizerChain tokenizerChain = (TokenizerChain)analyzer;
       CharFilterFactory[] charFilterFactories = tokenizerChain.getCharFilterFactories();
       if (null != charFilterFactories && charFilterFactories.length > 0) {
-        List> charFilterProps = new ArrayList>();
+        List> charFilterProps = new ArrayList<>();
         for (CharFilterFactory charFilterFactory : charFilterFactories) {
-          SimpleOrderedMap props = new SimpleOrderedMap();
+          SimpleOrderedMap props = new SimpleOrderedMap<>();
           props.add(CLASS_NAME, charFilterFactory.getClassArg());
           factoryArgs = charFilterFactory.getOriginalArgs();
           if (null != factoryArgs) {
@@ -904,7 +904,7 @@ public abstract class FieldType extends FieldProperties {
         analyzerProps.add(CHAR_FILTERS, charFilterProps);
       }

-      SimpleOrderedMap tokenizerProps = new SimpleOrderedMap();
+      SimpleOrderedMap tokenizerProps = new SimpleOrderedMap<>();
       TokenizerFactory tokenizerFactory = tokenizerChain.getTokenizerFactory();
       tokenizerProps.add(CLASS_NAME, tokenizerFactory.getClassArg());
       factoryArgs = tokenizerFactory.getOriginalArgs();
@@ -925,9 +925,9 @@ public abstract class FieldType extends FieldProperties {

       TokenFilterFactory[] filterFactories = tokenizerChain.getTokenFilterFactories();
       if (null != filterFactories && filterFactories.length > 0) {
-        List> filterProps = new ArrayList>();
+        List> filterProps = new ArrayList<>();
         for (TokenFilterFactory filterFactory : filterFactories) {
-          SimpleOrderedMap props = new SimpleOrderedMap();
+          SimpleOrderedMap props = new SimpleOrderedMap<>();
           props.add(CLASS_NAME, filterFactory.getClassArg());
           factoryArgs = filterFactory.getOriginalArgs();
           if (null != factoryArgs) {
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java b/solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java
index 95b9555ee40..7f618d542ef 100644
--- a/solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java
+++ b/solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java
@@ -185,7 +185,7 @@ public final class FieldTypePluginLoader
     static final KeywordTokenizerFactory keyFactory = new KeywordTokenizerFactory(new HashMap());

     ArrayList charFilters = null;
-    ArrayList filters = new ArrayList(2);
+    ArrayList filters = new ArrayList<>(2);
     TokenizerFactory tokenizer = keyFactory;

     public void add(Object current) {
@@ -193,14 +193,14 @@ public final class FieldTypePluginLoader
       AbstractAnalysisFactory newComponent = ((MultiTermAwareComponent)current).getMultiTermComponent();
       if (newComponent instanceof TokenFilterFactory) {
         if (filters == null) {
-          filters = new ArrayList(2);
+          filters = new ArrayList<>(2);
         }
         filters.add((TokenFilterFactory)newComponent);
       } else if (newComponent instanceof TokenizerFactory) {
         tokenizer = (TokenizerFactory)newComponent;
       } else if (newComponent instanceof CharFilterFactory) {
         if (charFilters == null) {
-          charFilters = new ArrayList(1);
+          charFilters = new ArrayList<>(1);
         }
         charFilters.add( (CharFilterFactory)newComponent);
@@ -293,7 +293,7 @@ public final class FieldTypePluginLoader

     // Load the CharFilters
     final ArrayList charFilters
-      = new ArrayList();
+      = new ArrayList<>();
     AbstractPluginLoader charFilterLoader =
       new AbstractPluginLoader
       ("[schema.xml] analyzer/charFilter", CharFilterFactory.class, false, false) {
@@ -329,7 +329,7 @@ public final class FieldTypePluginLoader
     // the configuration is ok
     final ArrayList tokenizers
-      = new ArrayList(1);
+      = new ArrayList<>(1);
     AbstractPluginLoader tokenizerLoader =
       new AbstractPluginLoader
       ("[schema.xml] analyzer/tokenizer", TokenizerFactory.class, false, false) {
@@ -369,7 +369,7 @@ public final class FieldTypePluginLoader

     // Load the Filters
     final ArrayList filters
-      = new ArrayList();
+      = new ArrayList<>();

     AbstractPluginLoader filterLoader =
       new AbstractPluginLoader("[schema.xml] analyzer/filter", TokenFilterFactory.class, false, false)
diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
index 4a9e4540eb6..0263d7aad9f 100644
--- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
@@ -120,25 +120,25 @@ public class IndexSchema {
   protected float version;
   protected final SolrResourceLoader loader;

-  protected Map fields = new HashMap();
-  protected Map fieldTypes = new HashMap();
+  protected Map fields = new HashMap<>();
+  protected Map fieldTypes = new HashMap<>();

-  protected List fieldsWithDefaultValue = new ArrayList();
-  protected Collection requiredFields = new HashSet();
+  protected List fieldsWithDefaultValue = new ArrayList<>();
+  protected Collection requiredFields = new HashSet<>();
   protected volatile DynamicField[] dynamicFields;
   public DynamicField[] getDynamicFields() { return dynamicFields; }

   private Analyzer analyzer;
   private Analyzer queryAnalyzer;

-  protected List schemaAware = new ArrayList();
+  protected List schemaAware = new ArrayList<>();

   protected String defaultSearchFieldName=null;
   protected String queryParserDefaultOperator = "OR";
   protected boolean isExplicitQueryParserDefaultOperator = false;

-  protected Map> copyFieldsMap = new HashMap>();
+  protected Map> copyFieldsMap = new HashMap<>();
   public Map> getCopyFieldsMap() { return Collections.unmodifiableMap(copyFieldsMap); }

   protected DynamicCopy[] dynamicCopyFields;
@@ -148,7 +148,7 @@ public class IndexSchema {
    * keys are all fields copied to, count is num of copyField
    * directives that target them.
    */
-  protected Map copyFieldTargetCounts = new HashMap();
+  protected Map copyFieldTargetCounts = new HashMap<>();

   /**
   * Constructs a schema using the specified resource name and stream.
@@ -386,7 +386,7 @@ public class IndexSchema {
   }

   protected HashMap analyzerCache() {
-    HashMap cache = new HashMap();
+    HashMap cache = new HashMap<>();
     for (SchemaField f : getFields().values()) {
       Analyzer analyzer = f.getType().getAnalyzer();
       cache.put(f.getName(), analyzer);
@@ -407,7 +407,7 @@ public class IndexSchema {

     @Override
     protected HashMap analyzerCache() {
-      HashMap cache = new HashMap();
+      HashMap cache = new HashMap<>();
       for (SchemaField f : getFields().values()) {
         Analyzer analyzer = f.getType().getQueryAnalyzer();
         cache.put(f.getName(), analyzer);
@@ -633,9 +633,9 @@ public class IndexSchema {
    */
   protected synchronized Map loadFields(Document document, XPath xpath) throws XPathExpressionException {
     // Hang on to the fields that say if they are required -- this lets us set a reasonable default for the unique key
-    Map explicitRequiredProp = new HashMap();
+    Map explicitRequiredProp = new HashMap<>();

-    ArrayList dFields = new ArrayList();
+    ArrayList dFields = new ArrayList<>();

     //  /schema/fields/field | /schema/fields/dynamicField
     String expression = stepsToPath(SCHEMA, FIELDS, FIELD)
@@ -756,7 +756,7 @@ public class IndexSchema {
    * @param fields The sequence of {@link org.apache.solr.schema.SchemaField}
    */
   public void registerDynamicFields(SchemaField... fields) {
-    List dynFields = new ArrayList(Arrays.asList(dynamicFields));
+    List dynFields = new ArrayList<>(Arrays.asList(dynamicFields));
     for (SchemaField field : fields) {
       if (isDuplicateDynField(dynFields, field)) {
         log.debug("dynamic field already exists: dynamic field: [" + field.getName() + "]");
@@ -889,7 +889,7 @@ public class IndexSchema {
     } else {                        // source & dest: explicit fields
       List copyFieldList = copyFieldsMap.get(source);
       if (copyFieldList == null) {
-        copyFieldList = new ArrayList();
+        copyFieldList = new ArrayList<>();
         copyFieldsMap.put(source, copyFieldList);
       }
       copyFieldList.add(new CopyField(sourceSchemaField, destSchemaField, maxChars));
@@ -1261,7 +1261,7 @@ public class IndexSchema {
     if (!isCopyFieldTarget(f)) {
       return Collections.emptyList();
     }
-    List fieldNames = new ArrayList();
+    List fieldNames = new ArrayList<>();
     for (Map.Entry> cfs : copyFieldsMap.entrySet()) {
       for (CopyField copyField : cfs.getValue()) {
         if (copyField.getDestination().getName().equals(destField)) {
@@ -1285,7 +1285,7 @@ public class IndexSchema {
    */
   // This is useful when we need the maxSize param of each CopyField
   public List getCopyFieldsList(final String sourceField){
-    final List result = new ArrayList();
+    final List result = new ArrayList<>();
     for (DynamicCopy dynamicCopy : dynamicCopyFields) {
       if (dynamicCopy.matches(sourceField)) {
         result.add(new CopyField(getField(sourceField), dynamicCopy.getTargetField(sourceField), dynamicCopy.maxChars));
@@ -1312,7 +1312,7 @@ public class IndexSchema {
    * Get a map of property name -> value for the whole schema.
    */
   public SimpleOrderedMap getNamedPropertyValues() {
-    SimpleOrderedMap topLevel = new SimpleOrderedMap();
+    SimpleOrderedMap topLevel = new SimpleOrderedMap<>();
     topLevel.add(NAME, getSchemaName());
     topLevel.add(VERSION, getVersion());
     if (null != uniqueKeyFieldName) {
@@ -1322,26 +1322,26 @@ public class IndexSchema {
       topLevel.add(DEFAULT_SEARCH_FIELD, defaultSearchFieldName);
     }
     if (isExplicitQueryParserDefaultOperator) {
-      SimpleOrderedMap solrQueryParserProperties = new SimpleOrderedMap();
+      SimpleOrderedMap solrQueryParserProperties = new SimpleOrderedMap<>();
       solrQueryParserProperties.add(DEFAULT_OPERATOR, queryParserDefaultOperator);
       topLevel.add(SOLR_QUERY_PARSER, solrQueryParserProperties);
     }
     if (isExplicitSimilarity) {
       topLevel.add(SIMILARITY, similarityFactory.getNamedPropertyValues());
     }
-    List> fieldTypeProperties = new ArrayList>();
-    SortedMap sortedFieldTypes = new TreeMap(fieldTypes);
+    List> fieldTypeProperties = new ArrayList<>();
+    SortedMap sortedFieldTypes = new TreeMap<>(fieldTypes);
     for (FieldType fieldType : sortedFieldTypes.values()) {
       fieldTypeProperties.add(fieldType.getNamedPropertyValues(false));
     }
     topLevel.add(FIELD_TYPES, fieldTypeProperties);
-    List> fieldProperties = new ArrayList>();
-    SortedSet fieldNames = new TreeSet(fields.keySet());
+    List> fieldProperties = new ArrayList<>();
+    SortedSet fieldNames = new TreeSet<>(fields.keySet());
     for (String fieldName : fieldNames) {
       fieldProperties.add(fields.get(fieldName).getNamedPropertyValues(false));
     }
     topLevel.add(FIELDS, fieldProperties);
-    List> dynamicFieldProperties = new ArrayList>();
+    List> dynamicFieldProperties = new ArrayList<>();
     for (IndexSchema.DynamicField dynamicField : dynamicFields) {
       if ( ! dynamicField.getRegex().startsWith(INTERNAL_POLY_FIELD_PREFIX)) { // omit internal polyfields
         dynamicFieldProperties.add(dynamicField.getPrototype().getNamedPropertyValues(false));
@@ -1366,8 +1366,8 @@ public class IndexSchema {
    */
   public List> getCopyFieldProperties
       (boolean showDetails, Set requestedSourceFields, Set requestedDestinationFields) {
-    List> copyFieldProperties = new ArrayList>();
-    SortedMap> sortedCopyFields = new TreeMap>(copyFieldsMap);
+    List> copyFieldProperties = new ArrayList<>();
+    SortedMap> sortedCopyFields = new TreeMap<>(copyFieldsMap);
     for (List copyFields : sortedCopyFields.values()) {
       Collections.sort(copyFields, new Comparator() {
         @Override
@@ -1381,7 +1381,7 @@ public class IndexSchema {
         final String destination = copyField.getDestination().getName();
         if ( (null == requestedSourceFields || requestedSourceFields.contains(source))
             && (null == requestedDestinationFields || requestedDestinationFields.contains(destination))) {
-          SimpleOrderedMap props = new SimpleOrderedMap();
+          SimpleOrderedMap props = new SimpleOrderedMap<>();
           props.add(SOURCE, source);
           props.add(DESTINATION, destination);
           if (0 != copyField.getMaxChars()) {
@@ -1396,7 +1396,7 @@ public class IndexSchema {
       final String destination = dynamicCopy.getDestFieldName();
       if ( (null == requestedSourceFields || requestedSourceFields.contains(source))
           && (null == requestedDestinationFields || requestedDestinationFields.contains(destination))) {
-        SimpleOrderedMap dynamicCopyProps = new SimpleOrderedMap();
+        SimpleOrderedMap dynamicCopyProps = new SimpleOrderedMap<>();
         dynamicCopyProps.add(SOURCE, dynamicCopy.getRegex());

         if (showDetails) {
@@ -1404,7 +1404,7 @@ public class IndexSchema {
           if (null != sourceDynamicBase) {
             dynamicCopyProps.add(SOURCE_DYNAMIC_BASE, sourceDynamicBase.getRegex());
           } else if (source.contains("*")) {
-            List sourceExplicitFields = new ArrayList();
+            List sourceExplicitFields = new ArrayList<>();
             Pattern pattern = Pattern.compile(source.replace("*", ".*")); // glob->regex
             for (String field : fields.keySet()) {
               if (pattern.matcher(field).matches()) {
diff --git a/solr/core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java b/solr/core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java
index 77b6c04d607..accce1d8d7a 100644
--- a/solr/core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java
+++ b/solr/core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java
@@ -208,7 +208,7 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {

   @Override
   public String toFormattedString(Field f) throws IOException {
-    Map map = new LinkedHashMap();
+    Map map = new LinkedHashMap<>();
     map.put(VERSION_KEY, VERSION);
     if (f.fieldType().stored()) {
       String stringValue = f.stringValue();
@@ -222,12 +222,12 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
     }
     TokenStream ts = f.tokenStreamValue();
     if (ts != null) {
-      List> tokens = new LinkedList>();
+      List> tokens = new LinkedList<>();
       while (ts.incrementToken()) {
         Iterator> it = ts.getAttributeClassesIterator();
         String cTerm = null;
         String tTerm = null;
-        Map tok = new TreeMap();
+        Map tok = new TreeMap<>();
         while (it.hasNext()) {
           Class cl = it.next();
           if (!ts.hasAttribute(cl)) {
diff --git a/solr/core/src/java/org/apache/solr/schema/LatLonType.java b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
index bfc9b482c36..27157d27ce0 100644
--- a/solr/core/src/java/org/apache/solr/schema/LatLonType.java
+++ b/solr/core/src/java/org/apache/solr/schema/LatLonType.java
@@ -72,7 +72,7 @@ public class LatLonType extends AbstractSubTypeFieldType implements SpatialQuery
   public List createFields(SchemaField field, Object value, float boost) {
     String externalVal = value.toString();
     //we could have 3 fields (two for the lat & lon, one for storage)
-    List f = new ArrayList(3);
+    List f = new ArrayList<>(3);
     if (field.indexed()) {
       Point point = SpatialUtils.parsePointSolrException(externalVal, SpatialContext.GEO);
       //latitude
@@ -216,7 +216,7 @@ public class LatLonType extends AbstractSubTypeFieldType implements SpatialQuery
   @Override
   public ValueSource getValueSource(SchemaField field, QParser parser) {
-    ArrayList vs = new ArrayList(2);
+    ArrayList vs = new ArrayList<>(2);
     for (int i = 0; i < 2; i++) {
       SchemaField sub = subField(field, i, parser.getReq().getSchema());
       vs.add(sub.getType().getValueSource(sub, parser));
diff --git a/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java b/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java
index 0945d4101c5..c82266c391b 100644
--- a/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java
+++ b/solr/core/src/java/org/apache/solr/schema/OpenExchangeRatesOrgProvider.java
@@ -203,7 +203,7 @@ public class OpenExchangeRatesOrgProvider implements ExchangeRateProvider {

     public OpenExchangeRates(InputStream ratesStream) throws IOException {
       parser = new JSONParser(new InputStreamReader(ratesStream, IOUtils.CHARSET_UTF_8));
-      rates = new HashMap();
+      rates = new HashMap<>();

       int ev;
       do {
diff --git a/solr/core/src/java/org/apache/solr/schema/PointType.java b/solr/core/src/java/org/apache/solr/schema/PointType.java
index 9bc9c6154ff..3f6ffa52e6c 100644
--- a/solr/core/src/java/org/apache/solr/schema/PointType.java
+++ b/solr/core/src/java/org/apache/solr/schema/PointType.java
@@ -70,7 +70,7 @@ public class PointType extends CoordinateFieldType implements SpatialQueryable {
     String[] point = parseCommaSeparatedList(externalVal, dimension);

     // TODO: this doesn't currently support polyFields as sub-field types
-    List f = new ArrayList(dimension+1);
+    List f = new ArrayList<>(dimension+1);

     if (field.indexed()) {
       for (int i=0; i vs = new ArrayList(dimension);
+    ArrayList vs = new ArrayList<>(dimension);
     for (int i=0; i states = new LinkedList();
+    public List states = new LinkedList<>();
   }

   /**
@@ -247,7 +247,7 @@ public class PreAnalyzedField extends FieldType {
    * Token stream that works from a list of saved states.
    */
   private static class PreAnalyzedTokenizer extends Tokenizer {
-    private final List cachedStates = new LinkedList();
+    private final List cachedStates = new LinkedList<>();
     private Iterator it = null;
     private String stringValue = null;
     private byte[] binaryValue = null;
diff --git a/solr/core/src/java/org/apache/solr/schema/SchemaField.java b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
index ca6bd90d1b5..47a8f30c810 100644
--- a/solr/core/src/java/org/apache/solr/schema/SchemaField.java
+++ b/solr/core/src/java/org/apache/solr/schema/SchemaField.java
@@ -201,7 +201,7 @@ public final class SchemaField extends FieldProperties {
       defaultValue = (String)props.get(DEFAULT_VALUE);
     }
     SchemaField field = new SchemaField(name, ft, calcProps(name, ft, props), defaultValue);
-    field.args = new HashMap(props);
+    field.args = new HashMap<>(props);
     return field;
   }
@@ -313,7 +313,7 @@ public final class SchemaField extends FieldProperties {
    * not overridden in the field declaration).
    */
   public SimpleOrderedMap getNamedPropertyValues(boolean showDefaults) {
-    SimpleOrderedMap properties = new SimpleOrderedMap();
+    SimpleOrderedMap properties = new SimpleOrderedMap<>();
     properties.add(FIELD_NAME, getName());
     properties.add(TYPE_NAME, getType().getTypeName());
     if (showDefaults) {
diff --git a/solr/core/src/java/org/apache/solr/schema/SimilarityFactory.java b/solr/core/src/java/org/apache/solr/schema/SimilarityFactory.java
index 0f1b89d49c8..7e32e714e4c 100644
--- a/solr/core/src/java/org/apache/solr/schema/SimilarityFactory.java
+++ b/solr/core/src/java/org/apache/solr/schema/SimilarityFactory.java
@@ -51,7 +51,7 @@ public abstract class SimilarityFactory {

   /** Returns a serializable description of this similarity(factory) */
   public SimpleOrderedMap getNamedPropertyValues() {
-    SimpleOrderedMap props = new SimpleOrderedMap();
+    SimpleOrderedMap props = new SimpleOrderedMap<>();
     props.add(CLASS_NAME, getClassArg());
     if (null != params) {
       Iterator iter = params.getParameterNamesIterator();
diff --git a/solr/core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java b/solr/core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java
index 0ab959a3d5a..1e588245a51 100644
--- a/solr/core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java
+++ b/solr/core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java
@@ -141,7 +141,7 @@ public final class SimplePreAnalyzedParser implements PreAnalyzedParser {
   private static class Tok {
     StringBuilder token = new StringBuilder();
-    Map attr = new HashMap();
+    Map attr = new HashMap<>();

     public boolean isEmpty() {
       return token.length() == 0 && attr.size() == 0;
diff --git a/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java b/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
index 6c008dca5a7..2f93dc1d8af 100644
--- a/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
+++ b/solr/core/src/java/org/apache/solr/schema/SpatialPointVectorFieldType.java
@@ -65,7 +65,7 @@ public class SpatialPointVectorFieldType extends AbstractSpatialFieldType newFields = new ArrayList();
+      List newFields = new ArrayList<>();
       for( SchemaField sf : schema.getFields().values() ) {
         if( sf.getType() == this ) {
           String name = sf.getName();
diff --git a/solr/core/src/java/org/apache/solr/schema/StrField.java b/solr/core/src/java/org/apache/solr/schema/StrField.java
index 15060b9b74d..e39d7e6ad64 100644
--- a/solr/core/src/java/org/apache/solr/schema/StrField.java
+++ b/solr/core/src/java/org/apache/solr/schema/StrField.java
@@ -45,7 +45,7 @@ public class StrField extends PrimitiveFieldType {
   public List createFields(SchemaField field, Object value, float boost) {
     if (field.hasDocValues()) {
-      List fields = new ArrayList();
+      List fields = new ArrayList<>();
       fields.add(createField(field, value, boost));
       final BytesRef bytes = new BytesRef(value.toString());
       if (field.multiValued()) {
diff --git a/solr/core/src/java/org/apache/solr/schema/TrieField.java b/solr/core/src/java/org/apache/solr/schema/TrieField.java
index 4859e12856c..a0aca9884ee 100644
--- a/solr/core/src/java/org/apache/solr/schema/TrieField.java
+++ b/solr/core/src/java/org/apache/solr/schema/TrieField.java
@@ -634,7 +634,7 @@ public class TrieField extends PrimitiveFieldType {
   @Override
   public List createFields(SchemaField sf, Object value, float boost) {
     if (sf.hasDocValues()) {
-      List fields = new ArrayList();
+      List fields = new ArrayList<>();
       final StorableField field = createField(sf, value, boost);
       fields.add(field);
diff --git a/solr/core/src/java/org/apache/solr/search/CursorMark.java b/solr/core/src/java/org/apache/solr/search/CursorMark.java
index 2c645149fcd..13bdc937ee5 100644
--- a/solr/core/src/java/org/apache/solr/search/CursorMark.java
+++ b/solr/core/src/java/org/apache/solr/search/CursorMark.java
@@ -160,7 +160,7 @@ public final class CursorMark {
     } else {
       assert input.size() == sortSpec.getSort().getSort().length;
       // defensive copy
-      this.values = new ArrayList(input);
+      this.values = new ArrayList<>(input);
     }
   }
@@ -170,7 +170,7 @@ public final class CursorMark {
    */
   public List getSortValues() {
     // defensive copy
-    return null == this.values ? null : new ArrayList(this.values);
+    return null == this.values ?
null : new ArrayList<>(this.values); } /** @@ -218,7 +218,7 @@ public final class CursorMark { } - this.values = new ArrayList(sortFields.length); + this.values = new ArrayList<>(sortFields.length); final BytesRef tmpBytes = new BytesRef(); for (int i = 0; i < sortFields.length; i++) { @@ -248,7 +248,7 @@ public final class CursorMark { } final List schemaFields = sortSpec.getSchemaFields(); - final ArrayList marshalledValues = new ArrayList(values.size()+1); + final ArrayList marshalledValues = new ArrayList<>(values.size()+1); for (int i = 0; i < schemaFields.size(); i++) { SchemaField fld = schemaFields.get(i); Object safeValue = values.get(i); diff --git a/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java b/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java index 844484930b5..748b17622e4 100644 --- a/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java +++ b/solr/core/src/java/org/apache/solr/search/DisMaxQParser.java @@ -137,7 +137,7 @@ public class DisMaxQParser extends QParser { //List boostQueries = SolrPluginUtils.parseQueryStrings(req, boostParams); boostQueries = null; if (boostParams != null && boostParams.length > 0) { - boostQueries = new ArrayList(); + boostQueries = new ArrayList<>(); for (String qs : boostParams) { if (qs.trim().length() == 0) continue; Query q = subQuery(qs, null).getQuery(); diff --git a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java index aecfabea3dd..df106cc1f7a 100644 --- a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java +++ b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParser.java @@ -204,7 +204,7 @@ public class ExtendedDismaxQParser extends QParser { if (allPhraseFields.size() > 0) { // find non-field clauses - List normalClauses = new ArrayList(clauses.size()); + List normalClauses = new ArrayList<>(clauses.size()); for (Clause clause : clauses) { if (clause.field != null || clause.isPhrase) continue; // check for keywords "AND,OR,TO" @@ -218,7 +218,7 @@ public class ExtendedDismaxQParser extends QParser { // full phrase and shingles for (FieldParams phraseField: allPhraseFields) { - Map pf = new HashMap(1); + Map pf = new HashMap<>(1); pf.put(phraseField.getField(),phraseField.getBoost()); addShingledPhraseQueries(query, normalClauses, pf, phraseField.getWordGrams(),config.tiebreaker, phraseField.getSlop()); @@ -407,7 +407,7 @@ public class ExtendedDismaxQParser extends QParser { * Parses all multiplicative boosts */ protected List getMultiplicativeBoosts() throws SyntaxError { - List boosts = new ArrayList(); + List boosts = new ArrayList<>(); if (config.hasMultiplicativeBoosts()) { for (String boostStr : config.multBoosts) { if (boostStr==null || boostStr.length()==0) continue; @@ -428,7 +428,7 @@ public class ExtendedDismaxQParser extends QParser { * Parses all function queries */ protected List getBoostFunctions() throws SyntaxError { - List boostFunctions = new LinkedList(); + List boostFunctions = new LinkedList<>(); if (config.hasBoostFunctions()) { for (String boostFunc : config.boostFuncs) { if(null == boostFunc || "".equals(boostFunc)) continue; @@ -450,7 +450,7 @@ public class ExtendedDismaxQParser extends QParser { * Parses all boost queries */ protected List getBoostQueries() throws SyntaxError { - List boostQueries = new LinkedList(); + List boostQueries = new LinkedList<>(); if (config.hasBoostParams()) { for (String qs : config.boostParams) { if (qs.trim().length()==0) 
continue; @@ -676,7 +676,7 @@ public class ExtendedDismaxQParser extends QParser { } public List splitIntoClauses(String s, boolean ignoreQuote) { - ArrayList lst = new ArrayList(4); + ArrayList lst = new ArrayList<>(4); Clause clause; int pos=0; @@ -859,7 +859,7 @@ public class ExtendedDismaxQParser extends QParser { } public static List split(String s, boolean ignoreQuote) { - ArrayList lst = new ArrayList(4); + ArrayList lst = new ArrayList<>(4); int pos=0, start=0, end=s.length(); char inString=0; char ch=0; @@ -937,7 +937,7 @@ public class ExtendedDismaxQParser extends QParser { * string, to Alias object containing the fields to use in our * DisjunctionMaxQuery and the tiebreaker to use. */ - protected Map aliases = new HashMap(3); + protected Map aliases = new HashMap<>(3); private QType type; private String field; @@ -1029,7 +1029,7 @@ public class ExtendedDismaxQParser extends QParser { Analyzer actualAnalyzer; if (removeStopFilter) { if (nonStopFilterAnalyzerPerField == null) { - nonStopFilterAnalyzerPerField = new HashMap(); + nonStopFilterAnalyzerPerField = new HashMap<>(); } actualAnalyzer = nonStopFilterAnalyzerPerField.get(field); if (actualAnalyzer == null) { @@ -1127,7 +1127,7 @@ public class ExtendedDismaxQParser extends QParser { * Validate there is no cyclic referencing in the aliasing */ private void validateCyclicAliasing(String field) throws SyntaxError { - Set set = new HashSet(); + Set set = new HashSet<>(); set.add(field); if(validateField(field, set)) { throw new SyntaxError("Field aliases lead to a cycle"); @@ -1155,7 +1155,7 @@ public class ExtendedDismaxQParser extends QParser { protected List getQueries(Alias a) throws SyntaxError { if (a == null) return null; if (a.fields.size()==0) return null; - List lst= new ArrayList(4); + List lst= new ArrayList<>(4); for (String f : a.fields.keySet()) { this.field = f; @@ -1289,8 +1289,8 @@ public class ExtendedDismaxQParser extends QParser { } // Process dynamic patterns in userFields - ArrayList dynUserFields = new ArrayList(); - ArrayList negDynUserFields = new ArrayList(); + ArrayList dynUserFields = new ArrayList<>(); + ArrayList negDynUserFields = new ArrayList<>(); for(String f : userFieldsMap.keySet()) { if(f.contains("*")) { if(f.startsWith("-")) @@ -1459,7 +1459,7 @@ public class ExtendedDismaxQParser extends QParser { List phraseFields2 = U.parseFieldBoostsAndSlop(solrParams.getParams(DMP.PF2),2,pslop[2]); List phraseFields3 = U.parseFieldBoostsAndSlop(solrParams.getParams(DMP.PF3),3,pslop[3]); - allPhraseFields = new ArrayList(phraseFields.size() + phraseFields2.size() + phraseFields3.size()); + allPhraseFields = new ArrayList<>(phraseFields.size() + phraseFields2.size() + phraseFields3.size()); allPhraseFields.addAll(phraseFields); allPhraseFields.addAll(phraseFields2); allPhraseFields.addAll(phraseFields3); diff --git a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java index dfd7000e911..a9abf14111f 100644 --- a/solr/core/src/java/org/apache/solr/search/FastLRUCache.java +++ b/solr/core/src/java/org/apache/solr/search/FastLRUCache.java @@ -85,7 +85,7 @@ public class FastLRUCache extends SolrCacheBase implements SolrCache { str = (String) args.get("showItems"); showItems = str == null ? 
0 : Integer.parseInt(str); description = generateDescription(limit, initialSize, minLimit, acceptableLimit, newThread); - cache = new ConcurrentLRUCache(limit, minLimit, acceptableLimit, initialSize, newThread, false, null); + cache = new ConcurrentLRUCache<>(limit, minLimit, acceptableLimit, initialSize, newThread, false, null); cache.setAlive(false); statsList = (List) persistence; @@ -93,7 +93,7 @@ public class FastLRUCache extends SolrCacheBase implements SolrCache { // must be the first time a cache of this type is being created // Use a CopyOnWriteArrayList since puts are very rare and iteration may be a frequent operation // because it is used in getStatistics() - statsList = new CopyOnWriteArrayList(); + statsList = new CopyOnWriteArrayList<>(); // the first entry will be for cumulative stats of caches that have been closed. statsList.add(new ConcurrentLRUCache.Stats()); @@ -197,7 +197,7 @@ public class FastLRUCache extends SolrCacheBase implements SolrCache { @Override public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap(); + NamedList lst = new SimpleOrderedMap<>(); if (cache == null) return lst; ConcurrentLRUCache.Stats stats = cache.getStats(); long lookups = stats.getCumulativeLookups(); diff --git a/solr/core/src/java/org/apache/solr/search/FunctionQParser.java b/solr/core/src/java/org/apache/solr/search/FunctionQParser.java index 53744eaa27d..140399927a1 100644 --- a/solr/core/src/java/org/apache/solr/search/FunctionQParser.java +++ b/solr/core/src/java/org/apache/solr/search/FunctionQParser.java @@ -84,7 +84,7 @@ public class FunctionQParser extends QParser { consumeArgumentDelimiter(); if (lst == null) { - lst = new ArrayList(2); + lst = new ArrayList<>(2); lst.add(valsource); } } @@ -208,7 +208,7 @@ public class FunctionQParser extends QParser { * @return List<ValueSource> */ public List parseValueSourceList() throws SyntaxError { - List sources = new ArrayList(3); + List sources = new ArrayList<>(3); while (hasMoreArguments()) { sources.add(parseValueSource(true)); } diff --git a/solr/core/src/java/org/apache/solr/search/Grouping.java b/solr/core/src/java/org/apache/solr/search/Grouping.java index 024eb3f42d6..88066de69c8 100644 --- a/solr/core/src/java/org/apache/solr/search/Grouping.java +++ b/solr/core/src/java/org/apache/solr/search/Grouping.java @@ -83,7 +83,7 @@ public class Grouping { private final SolrIndexSearcher searcher; private final SolrIndexSearcher.QueryResult qr; private final SolrIndexSearcher.QueryCommand cmd; - private final List commands = new ArrayList(); + private final List commands = new ArrayList<>(); private final boolean main; private final boolean cacheSecondPassSearch; private final int maxDocsPercentageToCache; @@ -105,7 +105,7 @@ public class Grouping { private DocSet filter; private Filter luceneFilter; private NamedList grouped = new SimpleOrderedMap(); - private Set idSet = new LinkedHashSet(); // used for tracking unique docs when we need a doclist + private Set idSet = new LinkedHashSet<>(); // used for tracking unique docs when we need a doclist private int maxMatches; // max number of matches from any grouping command private float maxScore = Float.NEGATIVE_INFINITY; // max score seen in any doclist private boolean signalCacheWarning = false; @@ -331,7 +331,7 @@ public class Grouping { } AbstractAllGroupHeadsCollector allGroupHeadsCollector = null; - List collectors = new ArrayList(commands.size()); + List collectors = new ArrayList<>(commands.size()); for (Command cmd : commands) { Collector collector = 
cmd.createFirstPassCollector(); if (collector != null) { @@ -649,8 +649,8 @@ public class Grouping { protected DocList createSimpleResponse() { GroupDocs[] groups = result != null ? result.groups : new GroupDocs[0]; - List ids = new ArrayList(); - List scores = new ArrayList(); + List ids = new ArrayList<>(); + List scores = new ArrayList<>(); int docsToGather = getMax(offset, numGroups, maxDoc); int docsGathered = 0; float maxScore = Float.NEGATIVE_INFINITY; @@ -888,7 +888,7 @@ public class Grouping { protected void finish() throws IOException { TopDocsCollector topDocsCollector = (TopDocsCollector) collector.getDelegate(); TopDocs topDocs = topDocsCollector.topDocs(); - GroupDocs groupDocs = new GroupDocs(Float.NaN, topDocs.getMaxScore(), topDocs.totalHits, topDocs.scoreDocs, query.toString(), null); + GroupDocs groupDocs = new GroupDocs<>(Float.NaN, topDocs.getMaxScore(), topDocs.totalHits, topDocs.scoreDocs, query.toString(), null); if (main) { mainResult = getDocList(groupDocs); } else { diff --git a/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java index f410fc156fa..a2b4958bb51 100644 --- a/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java +++ b/solr/core/src/java/org/apache/solr/search/JoinQParserPlugin.java @@ -240,7 +240,7 @@ class JoinQuery extends Query { long end = debug ? System.currentTimeMillis() : 0; if (debug) { - SimpleOrderedMap dbg = new SimpleOrderedMap(); + SimpleOrderedMap dbg = new SimpleOrderedMap<>(); dbg.add("time", (end-start)); dbg.add("fromSetSize", fromSetSize); // the input dbg.add("toSetSize", resultSet.size()); // the output @@ -295,7 +295,7 @@ class JoinQuery extends Query { DocSet fromSet = fromSearcher.getDocSet(q); fromSetSize = fromSet.size(); - List resultList = new ArrayList(10); + List resultList = new ArrayList<>(10); // make sure we have a set that is fast for random access, if we will use it for that DocSet fastForRandomSet = fromSet; diff --git a/solr/core/src/java/org/apache/solr/search/LFUCache.java b/solr/core/src/java/org/apache/solr/search/LFUCache.java index 0093d479a0d..aadae664ff1 100644 --- a/solr/core/src/java/org/apache/solr/search/LFUCache.java +++ b/solr/core/src/java/org/apache/solr/search/LFUCache.java @@ -109,7 +109,7 @@ public class LFUCache implements SolrCache { } description += ')'; - cache = new ConcurrentLFUCache(limit, minLimit, acceptableSize, initialSize, newThread, false, null, timeDecay); + cache = new ConcurrentLFUCache<>(limit, minLimit, acceptableSize, initialSize, newThread, false, null, timeDecay); cache.setAlive(false); statsList = (List) persistence; @@ -117,7 +117,7 @@ public class LFUCache implements SolrCache { // must be the first time a cache of this type is being created // Use a CopyOnWriteArrayList since puts are very rare and iteration may be a frequent operation // because it is used in getStatistics() - statsList = new CopyOnWriteArrayList(); + statsList = new CopyOnWriteArrayList<>(); // the first entry will be for cumulative stats of caches that have been closed. 
statsList.add(new ConcurrentLFUCache.Stats()); @@ -242,7 +242,7 @@ public class LFUCache implements SolrCache { @Override public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap(); + NamedList lst = new SimpleOrderedMap<>(); if (cache == null) return lst; ConcurrentLFUCache.Stats stats = cache.getStats(); long lookups = stats.getCumulativeLookups(); diff --git a/solr/core/src/java/org/apache/solr/search/MaxScoreQParser.java b/solr/core/src/java/org/apache/solr/search/MaxScoreQParser.java index d105ab11c28..e58c793855f 100644 --- a/solr/core/src/java/org/apache/solr/search/MaxScoreQParser.java +++ b/solr/core/src/java/org/apache/solr/search/MaxScoreQParser.java @@ -58,8 +58,8 @@ public class MaxScoreQParser extends LuceneQParser { return q; } BooleanQuery obq = (BooleanQuery)q; - Collection should = new ArrayList(); - Collection prohibOrReq = new ArrayList(); + Collection should = new ArrayList<>(); + Collection prohibOrReq = new ArrayList<>(); BooleanQuery newq = new BooleanQuery(); for (BooleanClause clause : obq.getClauses()) { diff --git a/solr/core/src/java/org/apache/solr/search/QParser.java b/solr/core/src/java/org/apache/solr/search/QParser.java index 0b415dd145c..0054dba079a 100644 --- a/solr/core/src/java/org/apache/solr/search/QParser.java +++ b/solr/core/src/java/org/apache/solr/search/QParser.java @@ -66,7 +66,7 @@ public abstract class QParser { @SuppressWarnings("unchecked") Map> tagMap = (Map>)req.getContext().get("tags"); if (tagMap == null) { - tagMap = new HashMap>(); + tagMap = new HashMap<>(); context.put("tags", tagMap); } if (tagStr.indexOf(',') >= 0) { @@ -88,7 +88,7 @@ public abstract class QParser { private static void addTag(Map> tagMap, Object key, Object val) { Collection lst = tagMap.get(key); if (lst == null) { - lst = new ArrayList(2); + lst = new ArrayList<>(2); tagMap.put(key, lst); } lst.add(val); @@ -283,7 +283,7 @@ public abstract class QParser { int localParamsEnd = -1; if (qstr != null && qstr.startsWith(QueryParsing.LOCALPARAM_START)) { - Map localMap = new HashMap(); + Map localMap = new HashMap<>(); localParamsEnd = QueryParsing.parseLocalParams(qstr, 0, localMap, globalParams); String val = localMap.get(QueryParsing.V); diff --git a/solr/core/src/java/org/apache/solr/search/QueryParsing.java b/solr/core/src/java/org/apache/solr/search/QueryParsing.java index 75d5b527d06..03389b4d14f 100644 --- a/solr/core/src/java/org/apache/solr/search/QueryParsing.java +++ b/solr/core/src/java/org/apache/solr/search/QueryParsing.java @@ -207,7 +207,7 @@ public class QueryParsing { if (txt == null || !txt.startsWith(LOCALPARAM_START)) { return null; } - Map localParams = new HashMap(); + Map localParams = new HashMap<>(); int start = QueryParsing.parseLocalParams(txt, 0, localParams, params); String val = localParams.get(V); @@ -255,8 +255,8 @@ public class QueryParsing { public static SortSpec parseSortSpec(String sortSpec, SolrQueryRequest req) { if (sortSpec == null || sortSpec.length() == 0) return newEmptySortSpec(); - List sorts = new ArrayList(4); - List fields = new ArrayList(4); + List sorts = new ArrayList<>(4); + List fields = new ArrayList<>(4); try { @@ -920,7 +920,7 @@ public class QueryParsing { * Builds a list of String which are stringified versions of a list of Queries */ public static List toString(List queries, IndexSchema schema) { - List out = new ArrayList(queries.size()); + List out = new ArrayList<>(queries.size()); for (Query q : queries) { out.add(QueryParsing.toString(q, schema)); } diff --git 
a/solr/core/src/java/org/apache/solr/search/QueryResultKey.java b/solr/core/src/java/org/apache/solr/search/QueryResultKey.java index e0ac6d913a6..893ead73327 100644 --- a/solr/core/src/java/org/apache/solr/search/QueryResultKey.java +++ b/solr/core/src/java/org/apache/solr/search/QueryResultKey.java @@ -144,7 +144,7 @@ public final class QueryResultKey { // (And of course: if the SolrIndexSearcher / QueryCommmand was ever changed to // sort the filter query list, then this whole method could be eliminated). - final ArrayList set2 = new ArrayList(fqList2.subList(start, sz)); + final ArrayList set2 = new ArrayList<>(fqList2.subList(start, sz)); for (int i = start; i < sz; i++) { Query q1 = fqList1.get(i); if ( ! set2.remove(q1) ) { diff --git a/solr/core/src/java/org/apache/solr/search/SimpleQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/SimpleQParserPlugin.java index 54e5ee4c0b4..feaa24a01ec 100644 --- a/solr/core/src/java/org/apache/solr/search/SimpleQParserPlugin.java +++ b/solr/core/src/java/org/apache/solr/search/SimpleQParserPlugin.java @@ -72,7 +72,7 @@ public class SimpleQParserPlugin extends QParserPlugin { public static final String NAME = "simple"; /** Map of string operators to their int counterparts in SimpleQueryParser. */ - private static final Map OPERATORS = new HashMap(); + private static final Map OPERATORS = new HashMap<>(); /* Setup the map of possible operators. */ static { diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java index e7007ef2005..9c970ac990b 100644 --- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java +++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java @@ -153,7 +153,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn // map of generic caches - not synchronized since it's read-only after the constructor. private final HashMap cacheMap; - private static final HashMap noGenericCaches=new HashMap(0); + private static final HashMap noGenericCaches=new HashMap<>(0); // list of all caches associated with this searcher. private final SolrCache[] cacheList; @@ -225,7 +225,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn cachingEnabled=enableCache; if (cachingEnabled) { - ArrayList clist = new ArrayList(); + ArrayList clist = new ArrayList<>(); fieldValueCache = solrConfig.fieldValueCacheConfig==null ? null : solrConfig.fieldValueCacheConfig.newInstance(); if (fieldValueCache!=null) clist.add(fieldValueCache); filterCache= solrConfig.filterCacheConfig==null ? null : solrConfig.filterCacheConfig.newInstance(); @@ -238,7 +238,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn if (solrConfig.userCacheConfigs == null) { cacheMap = noGenericCaches; } else { - cacheMap = new HashMap(solrConfig.userCacheConfigs.length); + cacheMap = new HashMap<>(solrConfig.userCacheConfigs.length); for (CacheConfig userCacheConfig : solrConfig.userCacheConfigs) { SolrCache cache = null; if (userCacheConfig != null) cache = userCacheConfig.newInstance(); @@ -263,7 +263,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn // optimizer = solrConfig.filtOptEnabled ? 
new LuceneQueryOptimizer(solrConfig.filtOptCacheSize,solrConfig.filtOptThreshold) : null; optimizer = null; - fieldNames = new HashSet(); + fieldNames = new HashSet<>(); fieldInfos = atomicReader.getFieldInfos(); for(FieldInfo fieldInfo : fieldInfos) { fieldNames.add(fieldInfo.name); @@ -388,7 +388,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn public Collection getStoredHighlightFieldNames() { synchronized (this) { if (storedHighlightFieldNames == null) { - storedHighlightFieldNames = new LinkedList(); + storedHighlightFieldNames = new LinkedList<>(); for (String fieldName : fieldNames) { try { SchemaField field = schema.getField(fieldName); @@ -980,10 +980,10 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn ExtendedQuery eq = (ExtendedQuery)q; if (!eq.getCache()) { if (eq.getCost() >= 100 && eq instanceof PostFilter) { - if (postFilters == null) postFilters = new ArrayList(sets.length-end); + if (postFilters == null) postFilters = new ArrayList<>(sets.length-end); postFilters.add(q); } else { - if (notCached == null) notCached = new ArrayList(sets.length-end); + if (notCached == null) notCached = new ArrayList<>(sets.length-end); notCached.add(q); } continue; @@ -992,7 +992,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn if (filterCache == null) { // there is no cache: don't pull bitsets - if (notCached == null) notCached = new ArrayList(sets.length-end); + if (notCached == null) notCached = new ArrayList<>(sets.length-end); WrappedQuery uncached = new WrappedQuery(q); uncached.setCache(false); notCached.add(uncached); @@ -1036,7 +1036,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn if (notCached != null) { Collections.sort(notCached, sortByCost); - List weights = new ArrayList(notCached.size()); + List weights = new ArrayList<>(notCached.size()); for (Query q : notCached) { Query qq = QueryUtils.makeQueryable(q); weights.add(createNormalizedWeight(qq)); @@ -1323,7 +1323,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn if (cmd.getFilterList()==null) { out.docSet = getDocSet(cmd.getQuery()); } else { - List newList = new ArrayList(cmd.getFilterList().size()+1); + List newList = new ArrayList<>(cmd.getFilterList().size()+1); newList.add(cmd.getQuery()); newList.addAll(cmd.getFilterList()); out.docSet = getDocSet(newList); @@ -2241,7 +2241,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn @Override public NamedList getStatistics() { - NamedList lst = new SimpleOrderedMap(); + NamedList lst = new SimpleOrderedMap<>(); lst.add("searcherName", name); lst.add("caching", cachingEnabled); lst.add("numDocs", reader.numDocs()); @@ -2319,7 +2319,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable,SolrIn } filterList = null; if (f != null) { - filterList = new ArrayList(2); + filterList = new ArrayList<>(2); filterList.add(f); } return this; @@ -2465,7 +2465,7 @@ class FilterImpl extends Filter { @Override public DocIdSetIterator iterator() throws IOException { - List iterators = new ArrayList(weights.size()+1); + List iterators = new ArrayList<>(weights.size()+1); if (docIdSet != null) { DocIdSetIterator iter = docIdSet.iterator(); if (iter == null) return null; diff --git a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java index 03289bac8e7..d7c6964f6ce 100644 --- 
a/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java +++ b/solr/core/src/java/org/apache/solr/search/SolrReturnFields.java @@ -49,14 +49,14 @@ public class SolrReturnFields extends ReturnFields { // Special Field Keys public static final String SCORE = "score"; - private final List globs = new ArrayList(1); + private final List globs = new ArrayList<>(1); // The lucene field names to request from the SolrIndexSearcher - private final Set fields = new HashSet(); + private final Set fields = new HashSet<>(); // Field names that are OK to include in the response. // This will include pseudo fields, lucene fields, and matching globs - private Set okFieldNames = new HashSet(); + private Set okFieldNames = new HashSet<>(); // The list of explicitly requested fields // Order is important for CSVResponseWriter @@ -106,7 +106,7 @@ public class SolrReturnFields extends ReturnFields { return; } - NamedList rename = new NamedList(); + NamedList rename = new NamedList<>(); DocTransformers augmenters = new DocTransformers(); for (String fieldList : fl) { add(fieldList,rename,augmenters,req); @@ -122,7 +122,7 @@ public class SolrReturnFields extends ReturnFields { if(from.equals(rename.getName(j))) { rename.setName(j, to); // copy from the current target if(reqFieldNames==null) { - reqFieldNames = new LinkedHashSet(); + reqFieldNames = new LinkedHashSet<>(); } reqFieldNames.add(to); // don't rename our current target } @@ -247,7 +247,7 @@ public class SolrReturnFields extends ReturnFields { // This is identical to localParams syntax except it uses [] instead of {!} if (funcStr.startsWith("[")) { - Map augmenterArgs = new HashMap(); + Map augmenterArgs = new HashMap<>(); int end = QueryParsing.parseLocalParams(funcStr, 0, augmenterArgs, req.getParams(), "[", ']'); sp.pos += end; @@ -356,7 +356,7 @@ public class SolrReturnFields extends ReturnFields { private void addField(String field, String key, DocTransformers augmenters, boolean isPseudoField) { if(reqFieldNames==null) { - reqFieldNames = new LinkedHashSet(); + reqFieldNames = new LinkedHashSet<>(); } if(key==null) { diff --git a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java index 6386968f5f5..a2b567efe11 100644 --- a/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java +++ b/solr/core/src/java/org/apache/solr/search/ValueSourceParser.java @@ -65,7 +65,7 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { public abstract ValueSource parse(FunctionQParser fp) throws SyntaxError; /* standard functions */ - public static Map standardValueSourceParsers = new HashMap(); + public static Map standardValueSourceParsers = new HashMap<>(); /** Adds a new parser for the name and returns any existing one that was overriden. * This is not thread safe. 
@@ -849,8 +849,8 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin { } } else { int dim = sources.size() / 2; - List sources1 = new ArrayList(dim); - List sources2 = new ArrayList(dim); + List sources1 = new ArrayList<>(dim); + List sources2 = new ArrayList<>(dim); //Get dim value sources for the first vector splitSources(dim, sources, sources1, sources2); mvr.mv1 = new VectorValueSource(sources1); diff --git a/solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java b/solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java index b6bb66bb148..a2c1acc2e75 100644 --- a/solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java +++ b/solr/core/src/java/org/apache/solr/search/function/FileFloatSource.java @@ -261,7 +261,7 @@ public class FileFloatSource extends ValueSource { // because of this, simply ask the reader for a new termEnum rather than // trying to use skipTo() - List notFound = new ArrayList(); + List notFound = new ArrayList<>(); int notFoundCount=0; int otherErrors=0; diff --git a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java index 96594729f14..4d10c931337 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/CommandHandler.java @@ -53,7 +53,7 @@ public class CommandHandler { public static class Builder { private SolrIndexSearcher.QueryCommand queryCommand; - private List commands = new ArrayList(); + private List commands = new ArrayList<>(); private SolrIndexSearcher searcher; private boolean needDocSet = false; private boolean truncateGroups = false; @@ -137,7 +137,7 @@ public class CommandHandler { @SuppressWarnings("unchecked") public void execute() throws IOException { final int nrOfCommands = commands.size(); - List collectors = new ArrayList(nrOfCommands); + List collectors = new ArrayList<>(nrOfCommands); for (Command command : commands) { collectors.addAll(command.create()); } diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java index 37b66a8088a..b0798470c98 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java @@ -88,7 +88,7 @@ public class SearchGroupsFieldCommand implements Command create() throws IOException { - List collectors = new ArrayList(); + List collectors = new ArrayList<>(); if (topNGroups > 0) { firstPassGroupingCollector = new TermFirstPassGroupingCollector(field.getName(), groupSort, topNGroups); collectors.add(firstPassGroupingCollector); @@ -114,7 +114,7 @@ public class SearchGroupsFieldCommand implements Command>>(groupCount, topGroups); + return new Pair<>(groupCount, topGroups); } @Override diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java index 256f42717bd..35b43befc05 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java @@ -125,7 +125,7 @@ public 
class TopGroupsFieldCommand implements Command> { return Collections.emptyList(); } - List collectors = new ArrayList(); + List collectors = new ArrayList<>(); secondPassCollector = new TermSecondPassGroupingCollector( field.getName(), firstPhaseGroups, groupSort, sortWithinGroup, maxDocPerGroup, needScores, needMaxScore, true ); @@ -137,7 +137,7 @@ public class TopGroupsFieldCommand implements Command> { @SuppressWarnings("unchecked") public TopGroups result() { if (firstPhaseGroups.isEmpty()) { - return new TopGroups(groupSort.getSort(), sortWithinGroup.getSort(), 0, 0, new GroupDocs[0], Float.NaN); + return new TopGroups<>(groupSort.getSort(), sortWithinGroup.getSort(), 0, 0, new GroupDocs[0], Float.NaN); } return secondPassCollector.getTopGroups(0); diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/StoredFieldsShardRequestFactory.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/StoredFieldsShardRequestFactory.java index f2032f53d2d..ba09068048f 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/StoredFieldsShardRequestFactory.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/StoredFieldsShardRequestFactory.java @@ -42,7 +42,7 @@ public class StoredFieldsShardRequestFactory implements ShardRequestFactory { @Override public ShardRequest[] constructRequest(ResponseBuilder rb) { - HashMap> shardMap = new HashMap>(); + HashMap> shardMap = new HashMap<>(); for (TopGroups topGroups : rb.mergedTopGroups.values()) { for (GroupDocs group : topGroups.groups) { mapShardToDocs(shardMap, group.scoreDocs); @@ -75,7 +75,7 @@ public class StoredFieldsShardRequestFactory implements ShardRequestFactory { } } - List ids = new ArrayList(shardDocs.size()); + List ids = new ArrayList<>(shardDocs.size()); for (ShardDoc shardDoc : shardDocs) { ids.add(shardDoc.id.toString()); } @@ -91,7 +91,7 @@ public class StoredFieldsShardRequestFactory implements ShardRequestFactory { ShardDoc solrDoc = (ShardDoc) scoreDoc; Set shardDocs = shardMap.get(solrDoc.shard); if (shardDocs == null) { - shardMap.put(solrDoc.shard, shardDocs = new HashSet()); + shardMap.put(solrDoc.shard, shardDocs = new HashSet<>()); } shardDocs.add(solrDoc); } diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/TopGroupsShardRequestFactory.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/TopGroupsShardRequestFactory.java index a356455c6ad..d7eba2e8eff 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/TopGroupsShardRequestFactory.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/TopGroupsShardRequestFactory.java @@ -67,7 +67,7 @@ public class TopGroupsShardRequestFactory implements ShardRequestFactory { private ShardRequest[] createRequestForSpecificShards(ResponseBuilder rb) { // Determine all unique shards to query for TopGroups - Set uniqueShards = new HashSet(); + Set uniqueShards = new HashSet<>(); for (String command : rb.searchGroupToShards.keySet()) { Map, Set> groupsToShard = rb.searchGroupToShards.get(command); for (Set shards : groupsToShard.values()) { diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java index 
af75df77bda..d5cefc1959e 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/SearchGroupShardResponseProcessor.java @@ -52,8 +52,8 @@ public class SearchGroupShardResponseProcessor implements ShardResponseProcessor Sort groupSort = rb.getGroupingSpec().getGroupSort(); String[] fields = rb.getGroupingSpec().getFields(); - Map>>> commandSearchGroups = new HashMap>>>(); - Map, Set>> tempSearchGroupToShards = new HashMap, Set>>(); + Map>>> commandSearchGroups = new HashMap<>(); + Map, Set>> tempSearchGroupToShards = new HashMap<>(); for (String field : fields) { commandSearchGroups.put(field, new ArrayList>>(shardRequest.responses.size())); tempSearchGroupToShards.put(field, new HashMap, Set>()); @@ -69,13 +69,13 @@ public class SearchGroupShardResponseProcessor implements ShardResponseProcessor NamedList shardInfo = null; if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) { - shardInfo = new SimpleOrderedMap(); + shardInfo = new SimpleOrderedMap<>(); rb.rsp.getValues().add(ShardParams.SHARDS_INFO + ".firstPhase", shardInfo); } for (ShardResponse srsp : shardRequest.responses) { if (shardInfo != null) { - SimpleOrderedMap nl = new SimpleOrderedMap(); + SimpleOrderedMap nl = new SimpleOrderedMap<>(); if (srsp.getException() != null) { Throwable t = srsp.getException(); @@ -126,7 +126,7 @@ public class SearchGroupShardResponseProcessor implements ShardResponseProcessor Map, java.util.Set> map = tempSearchGroupToShards.get(field); Set shards = map.get(searchGroup); if (shards == null) { - shards = new HashSet(); + shards = new HashSet<>(); map.put(searchGroup, shards); } shards.add(srsp.getShard()); diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java index f7a22f09841..2d2c1465bf0 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java @@ -70,12 +70,12 @@ public class TopGroupsShardResponseProcessor implements ShardResponseProcessor { } int docsPerGroupDefault = rb.getGroupingSpec().getGroupLimit(); - Map>> commandTopGroups = new HashMap>>(); + Map>> commandTopGroups = new HashMap<>(); for (String field : fields) { commandTopGroups.put(field, new ArrayList>()); } - Map> commandTopDocs = new HashMap>(); + Map> commandTopDocs = new HashMap<>(); for (String query : queries) { commandTopDocs.put(query, new ArrayList()); } @@ -84,14 +84,14 @@ public class TopGroupsShardResponseProcessor implements ShardResponseProcessor { NamedList shardInfo = null; if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) { - shardInfo = new SimpleOrderedMap(); + shardInfo = new SimpleOrderedMap<>(); rb.rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo); } for (ShardResponse srsp : shardRequest.responses) { SimpleOrderedMap individualShardInfo = null; if (shardInfo != null) { - individualShardInfo = new SimpleOrderedMap(); + individualShardInfo = new SimpleOrderedMap<>(); if (srsp.getException() != null) { Throwable t = srsp.getException(); @@ -161,7 +161,7 @@ public class TopGroupsShardResponseProcessor implements ShardResponseProcessor 
{ for (String query : commandTopDocs.keySet()) { List queryCommandResults = commandTopDocs.get(query); - List topDocs = new ArrayList(queryCommandResults.size()); + List topDocs = new ArrayList<>(queryCommandResults.size()); int mergedMatches = 0; for (QueryCommandResult queryCommandResult : queryCommandResults) { topDocs.add(queryCommandResult.getTopDocs()); @@ -173,7 +173,7 @@ public class TopGroupsShardResponseProcessor implements ShardResponseProcessor { rb.mergedQueryCommandResults.put(query, new QueryCommandResult(mergedTopDocs, mergedMatches)); } - Map resultIds = new HashMap(); + Map resultIds = new HashMap<>(); int i = 0; for (TopGroups topGroups : rb.mergedTopGroups.values()) { for (GroupDocs group : topGroups.groups) { diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java index 33d3c6b57be..efa7824a51a 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java @@ -49,9 +49,9 @@ public class SearchGroupsResultTransformer implements ShardResultTransformer data) throws IOException { - NamedList result = new NamedList(); + NamedList result = new NamedList<>(); for (Command command : data) { - final NamedList commandResult = new NamedList(); + final NamedList commandResult = new NamedList<>(); if (SearchGroupsFieldCommand.class.isInstance(command)) { SearchGroupsFieldCommand fieldCommand = (SearchGroupsFieldCommand) command; Pair>> pair = fieldCommand.result(); @@ -77,15 +77,15 @@ public class SearchGroupsResultTransformer implements ShardResultTransformer>>> transformToNative(NamedList shardResponse, Sort groupSort, Sort sortWithinGroup, String shard) { - Map>>> result = new HashMap>>>(); + Map>>> result = new HashMap<>(); for (Map.Entry command : shardResponse) { - List> searchGroups = new ArrayList>(); + List> searchGroups = new ArrayList<>(); NamedList topGroupsAndGroupCount = command.getValue(); @SuppressWarnings("unchecked") NamedList> rawSearchGroups = (NamedList>) topGroupsAndGroupCount.get("topGroups"); if (rawSearchGroups != null) { for (Map.Entry> rawSearchGroup : rawSearchGroups){ - SearchGroup searchGroup = new SearchGroup(); + SearchGroup searchGroup = new SearchGroup<>(); searchGroup.groupValue = rawSearchGroup.getKey() != null ? 
new BytesRef(rawSearchGroup.getKey()) : null; searchGroup.sortValues = rawSearchGroup.getValue().toArray(new Comparable[rawSearchGroup.getValue().size()]); for (int i = 0; i < searchGroup.sortValues.length; i++) { @@ -108,7 +108,7 @@ public class SearchGroupsResultTransformer implements ShardResultTransformer> data, Sort groupSort) { - NamedList result = new NamedList(); + NamedList result = new NamedList<>(); for (SearchGroup searchGroup : data) { Object[] convertedSortValues = new Object[searchGroup.sortValues.length]; diff --git a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java index 1513d09e66d..23bca602d91 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java @@ -66,7 +66,7 @@ public class TopGroupsResultTransformer implements ShardResultTransformer data) throws IOException { - NamedList result = new NamedList(); + NamedList result = new NamedList<>(); final IndexSchema schema = rb.req.getSearcher().getSchema(); for (Command command : data) { NamedList commandResult; @@ -91,7 +91,7 @@ public class TopGroupsResultTransformer implements ShardResultTransformer transformToNative(NamedList shardResponse, Sort groupSort, Sort sortWithinGroup, String shard) { - Map result = new HashMap(); + Map result = new HashMap<>(); final IndexSchema schema = rb.req.getSearcher().getSchema(); @@ -147,7 +147,7 @@ public class TopGroupsResultTransformer implements ShardResultTransformer> groupDocs = new ArrayList>(); + List> groupDocs = new ArrayList<>(); for (int i = 2; i < commandResult.size(); i++) { String groupValue = commandResult.getName(i); @SuppressWarnings("unchecked") @@ -182,12 +182,12 @@ public class TopGroupsResultTransformer implements ShardResultTransformer(Float.NaN, maxScore, totalGroupHits, scoreDocs, groupValueRef, null)); + groupDocs.add(new GroupDocs<>(Float.NaN, maxScore, totalGroupHits, scoreDocs, groupValueRef, null)); } @SuppressWarnings("unchecked") GroupDocs[] groupDocsArr = groupDocs.toArray(new GroupDocs[groupDocs.size()]); - TopGroups topGroups = new TopGroups( + TopGroups topGroups = new TopGroups<>( groupSort.getSort(), sortWithinGroup.getSort(), totalHitCount, totalGroupedHitCount, groupDocsArr, Float.NaN ); @@ -198,7 +198,7 @@ public class TopGroupsResultTransformer implements ShardResultTransformer data, SchemaField groupField) throws IOException { - NamedList result = new NamedList(); + NamedList result = new NamedList<>(); result.add("totalGroupedHitCount", data.totalGroupedHitCount); result.add("totalHitCount", data.totalHitCount); if (data.totalGroupCount != null) { @@ -209,15 +209,15 @@ public class TopGroupsResultTransformer implements ShardResultTransformer searchGroup : data.groups) { - NamedList groupResult = new NamedList(); + NamedList groupResult = new NamedList<>(); groupResult.add("totalHits", searchGroup.totalHits); if (!Float.isNaN(searchGroup.maxScore)) { groupResult.add("maxScore", searchGroup.maxScore); } - List> documents = new ArrayList>(); + List> documents = new ArrayList<>(); for (int i = 0; i < searchGroup.scoreDocs.length; i++) { - NamedList document = new NamedList(); + NamedList document = new NamedList<>(); documents.add(document); StoredDocument doc = 
retrieveDocument(uniqueField, searchGroup.scoreDocs[i].doc); @@ -254,20 +254,20 @@ public class TopGroupsResultTransformer implements ShardResultTransformer queryResult = new NamedList(); + NamedList queryResult = new NamedList<>(); queryResult.add("matches", result.getMatches()); queryResult.add("totalHits", result.getTopDocs().totalHits); if (rb.getGroupingSpec().isNeedScore()) { queryResult.add("maxScore", result.getTopDocs().getMaxScore()); } - List documents = new ArrayList(); + List documents = new ArrayList<>(); queryResult.add("documents", documents); final IndexSchema schema = rb.req.getSearcher().getSchema(); SchemaField uniqueField = schema.getUniqueKeyField(); CharsRef spare = new CharsRef(); for (ScoreDoc scoreDoc : result.getTopDocs().scoreDocs) { - NamedList document = new NamedList(); + NamedList document = new NamedList<>(); documents.add(document); StoredDocument doc = retrieveDocument(uniqueField, scoreDoc.doc); diff --git a/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/GroupedEndResultTransformer.java b/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/GroupedEndResultTransformer.java index 0414affc82b..a1707d0a6c5 100644 --- a/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/GroupedEndResultTransformer.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/GroupedEndResultTransformer.java @@ -50,24 +50,24 @@ public class GroupedEndResultTransformer implements EndResultTransformer { */ @Override public void transform(Map result, ResponseBuilder rb, SolrDocumentSource solrDocumentSource) { - NamedList commands = new SimpleOrderedMap(); + NamedList commands = new SimpleOrderedMap<>(); for (Map.Entry entry : result.entrySet()) { Object value = entry.getValue(); if (TopGroups.class.isInstance(value)) { @SuppressWarnings("unchecked") TopGroups topGroups = (TopGroups) value; - NamedList command = new SimpleOrderedMap(); + NamedList command = new SimpleOrderedMap<>(); command.add("matches", rb.totalHitCount); Integer totalGroupCount = rb.mergedGroupCounts.get(entry.getKey()); if (totalGroupCount != null) { command.add("ngroups", totalGroupCount); } - List groups = new ArrayList(); + List groups = new ArrayList<>(); SchemaField groupField = searcher.getSchema().getField(entry.getKey()); FieldType groupFieldType = groupField.getType(); for (GroupDocs group : topGroups.groups) { - SimpleOrderedMap groupResult = new SimpleOrderedMap(); + SimpleOrderedMap groupResult = new SimpleOrderedMap<>(); if (group.groupValue != null) { groupResult.add( "groupValue", groupFieldType.toObject(groupField.createField(group.groupValue.utf8ToString(), 1.0f)) @@ -91,7 +91,7 @@ public class GroupedEndResultTransformer implements EndResultTransformer { commands.add(entry.getKey(), command); } else if (QueryCommandResult.class.isInstance(value)) { QueryCommandResult queryCommandResult = (QueryCommandResult) value; - NamedList command = new SimpleOrderedMap(); + NamedList command = new SimpleOrderedMap<>(); command.add("matches", queryCommandResult.getMatches()); SolrDocumentList docList = new SolrDocumentList(); docList.setNumFound(queryCommandResult.getTopDocs().totalHits); diff --git a/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/SimpleEndResultTransformer.java b/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/SimpleEndResultTransformer.java index ff866aea56c..5667181d6c9 100644 --- 
a/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/SimpleEndResultTransformer.java +++ b/solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/SimpleEndResultTransformer.java @@ -38,13 +38,13 @@ public class SimpleEndResultTransformer implements EndResultTransformer { */ @Override public void transform(Map result, ResponseBuilder rb, SolrDocumentSource solrDocumentSource) { - NamedList commands = new SimpleOrderedMap(); + NamedList commands = new SimpleOrderedMap<>(); for (Map.Entry entry : result.entrySet()) { Object value = entry.getValue(); if (TopGroups.class.isInstance(value)) { @SuppressWarnings("unchecked") TopGroups topGroups = (TopGroups) value; - NamedList command = new SimpleOrderedMap(); + NamedList command = new SimpleOrderedMap<>(); command.add("matches", rb.totalHitCount); if (topGroups.totalGroupCount != null) { command.add("ngroups", topGroups.totalGroupCount); diff --git a/solr/core/src/java/org/apache/solr/servlet/DirectSolrConnection.java b/solr/core/src/java/org/apache/solr/servlet/DirectSolrConnection.java index c97b91d951c..f2073dafc4a 100644 --- a/solr/core/src/java/org/apache/solr/servlet/DirectSolrConnection.java +++ b/solr/core/src/java/org/apache/solr/servlet/DirectSolrConnection.java @@ -118,7 +118,7 @@ public class DirectSolrConnection params = new MapSolrParams( new HashMap() ); // Make a stream for the 'body' content - List streams = new ArrayList( 1 ); + List streams = new ArrayList<>( 1 ); if( body != null && body.length() > 0 ) { streams.add( new ContentStreamBase.StringStream( body ) ); } diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java index a7c4a0ff4b5..a8a2ae2584e 100644 --- a/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java +++ b/solr/core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java @@ -479,7 +479,7 @@ public class SolrDispatchFilter implements Filter collectionsList = StrUtils.splitSmart(collection, ",", true); } if (collectionsList != null) { - Set newCollectionsList = new HashSet( + Set newCollectionsList = new HashSet<>( collectionsList.size()); for (String col : collectionsList) { String al = aliases.getCollectionAlias(col); @@ -601,7 +601,7 @@ public class SolrDispatchFilter implements Filter boolean byCoreName = false; if (slices == null) { - slices = new ArrayList(); + slices = new ArrayList<>(); // look by core name byCoreName = true; slices = getSlicesForCollections(clusterState, slices, true); diff --git a/solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java b/solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java index f8bba33ce74..5518ac8d4ea 100644 --- a/solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java +++ b/solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java @@ -76,7 +76,7 @@ public class SolrRequestParsers private static final byte[] INPUT_ENCODING_BYTES = INPUT_ENCODING_KEY.getBytes(CHARSET_US_ASCII); private final HashMap parsers = - new HashMap(); + new HashMap<>(); private final boolean enableRemoteStreams; private StandardRequestParser standard; private boolean handleSelect = true; @@ -141,7 +141,7 @@ public class SolrRequestParsers // TODO -- in the future, we could pick a different parser based on the request // Pick the parser from the request... 
- ArrayList streams = new ArrayList(1); + ArrayList streams = new ArrayList<>(1); SolrParams params = parser.parseParamsAndFillStreams( req, streams ); SolrQueryRequest sreq = buildRequestFrom( core, params, streams ); @@ -213,7 +213,7 @@ public class SolrRequestParsers * Given a url-encoded query string (UTF-8), map it into solr params */ public static MultiMapSolrParams parseQueryString(String queryString) { - Map map = new HashMap(); + Map map = new HashMap<>(); parseQueryString(queryString, map); return new MultiMapSolrParams(map); } @@ -261,7 +261,7 @@ public class SolrRequestParsers @SuppressWarnings({"fallthrough", "resource"}) static long parseFormDataContent(final InputStream postContent, final long maxLen, Charset charset, final Map map, boolean supportCharsetParam) throws IOException { CharsetDecoder charsetDecoder = supportCharsetParam ? null : getCharsetDecoder(charset); - final LinkedList buffer = supportCharsetParam ? new LinkedList() : null; + final LinkedList buffer = supportCharsetParam ? new LinkedList<>() : null; long len = 0L, keyPos = 0L, valuePos = 0L; final ByteArrayOutputStream keyStream = new ByteArrayOutputStream(), valueStream = new ByteArrayOutputStream(); @@ -580,7 +580,7 @@ public class SolrRequestParsers throw new SolrException( ErrorCode.BAD_REQUEST, "Not application/x-www-form-urlencoded content: "+req.getContentType() ); } - final Map map = new HashMap(); + final Map map = new HashMap<>(); // also add possible URL parameters and include into the map (parsed using UTF-8): final String qs = req.getQueryString(); diff --git a/solr/core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java b/solr/core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java index 1e6b6b853f8..dc976b2545d 100644 --- a/solr/core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java +++ b/solr/core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java @@ -56,7 +56,7 @@ public final class HttpCacheHeaderUtil { * @see #calcEtag */ private static Map etagCoreCache - = new WeakHashMap(); + = new WeakHashMap<>(); /** @see #etagCoreCache */ private static class EtagCacheVal { diff --git a/solr/core/src/java/org/apache/solr/spelling/ConjunctionSolrSpellChecker.java b/solr/core/src/java/org/apache/solr/spelling/ConjunctionSolrSpellChecker.java index d6efcfbeabb..e1e55f024ed 100644 --- a/solr/core/src/java/org/apache/solr/spelling/ConjunctionSolrSpellChecker.java +++ b/solr/core/src/java/org/apache/solr/spelling/ConjunctionSolrSpellChecker.java @@ -45,7 +45,7 @@ public class ConjunctionSolrSpellChecker extends SolrSpellChecker { private Float accuracy = null; private String dictionaryName = null; private Analyzer queryAnalyzer = null; - private List checkers = new ArrayList(); + private List checkers = new ArrayList<>(); private boolean initalized = false; public void addChecker(SolrSpellChecker checker) { @@ -136,8 +136,8 @@ public class ConjunctionSolrSpellChecker extends SolrSpellChecker { //TODO: This just interleaves the results. In the future, we might want to let users give each checker its // own weight and use that in combination to score & frequency to sort the results ? 
private SpellingResult mergeCheckers(SpellingResult[] results, int numSug) { - Map combinedTokenFrequency = new HashMap(); - Map>> allSuggestions = new LinkedHashMap>>(); + Map combinedTokenFrequency = new HashMap<>(); + Map>> allSuggestions = new LinkedHashMap<>(); for(SpellingResult result : results) { if(result.getTokenFrequency()!=null) { combinedTokenFrequency.putAll(result.getTokenFrequency()); @@ -145,7 +145,7 @@ public class ConjunctionSolrSpellChecker extends SolrSpellChecker { for(Map.Entry> entry : result.getSuggestions().entrySet()) { List> allForThisToken = allSuggestions.get(entry.getKey()); if(allForThisToken==null) { - allForThisToken = new ArrayList>(); + allForThisToken = new ArrayList<>(); allSuggestions.put(entry.getKey(), allForThisToken); } allForThisToken.add(entry.getValue()); @@ -154,7 +154,7 @@ public class ConjunctionSolrSpellChecker extends SolrSpellChecker { SpellingResult combinedResult = new SpellingResult(); for(Map.Entry>> entry : allSuggestions.entrySet()) { Token original = entry.getKey(); - List>> corrIters = new ArrayList>>(entry.getValue().size()); + List>> corrIters = new ArrayList<>(entry.getValue().size()); for(LinkedHashMap corrections : entry.getValue()) { corrIters.add(corrections.entrySet().iterator()); } diff --git a/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java b/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java index a2f8d8b165e..0ea275b6cf6 100644 --- a/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java +++ b/solr/core/src/java/org/apache/solr/spelling/PossibilityIterator.java @@ -44,7 +44,7 @@ import org.apache.lucene.analysis.Token; */ public class PossibilityIterator implements Iterator { - private List> possibilityList = new ArrayList>(); + private List> possibilityList = new ArrayList<>(); private Iterator rankedPossibilityIterator = null; private int correctionIndex[]; private boolean done = false; @@ -74,7 +74,7 @@ public class PossibilityIterator implements if (entry.getValue().size() == 0) { continue; } - List possibleCorrections = new ArrayList(); + List possibleCorrections = new ArrayList<>(); for (Map.Entry entry1 : entry.getValue().entrySet()) { SpellCheckCorrection correction = new SpellCheckCorrection(); correction.setOriginal(token); @@ -99,11 +99,11 @@ public class PossibilityIterator implements correctionIndex[i] = 0; } } - PriorityQueue rankedPossibilities = new PriorityQueue( + PriorityQueue rankedPossibilities = new PriorityQueue<>( 11, new RankComparator()); Set removeDuplicates = null; if (suggestionsMayOverlap) { - removeDuplicates = new HashSet(); + removeDuplicates = new HashSet<>(); } long numEvaluations = 0; while (numEvaluations < maxEvaluations && internalHasNext()) { @@ -206,7 +206,7 @@ public class PossibilityIterator implements if (done) { throw new NoSuchElementException(); } - possibleCorrection = new ArrayList(); + possibleCorrection = new ArrayList<>(); List> possibleCorrections = null; int rank = 0; while (!done @@ -244,7 +244,7 @@ public class PossibilityIterator implements if (suggestionsMayOverlap) { possibleCorrections = separateOverlappingTokens(possibleCorrection); } else { - possibleCorrections = new ArrayList>(1); + possibleCorrections = new ArrayList<>(1); possibleCorrections.add(possibleCorrection); } } @@ -258,11 +258,11 @@ public class PossibilityIterator implements List possibleCorrection) { List> ret = null; if (possibleCorrection.size() == 1) { - ret = new ArrayList>(1); + ret = new ArrayList<>(1); ret.add(possibleCorrection); 
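The mergeCheckers hunks show where the diamond pays off most: deeply nested value types, where the pre-Java-7 form repeated a multi-level generic signature on both sides of the assignment. A sketch of the pattern with illustrative type parameters (the real code keys on Lucene's Token rather than String):

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    class NestedGenericsDemo {
        // One declaration, three levels of nesting; with the diamond the
        // signature is written once instead of twice.
        Map<String, List<LinkedHashMap<String, Integer>>> allSuggestions =
            new LinkedHashMap<>();

        void add(String token, LinkedHashMap<String, Integer> corrections) {
            List<LinkedHashMap<String, Integer>> forToken = allSuggestions.get(token);
            if (forToken == null) {
                forToken = new ArrayList<>();   // element type inferred from 'forToken'
                allSuggestions.put(token, forToken);
            }
            forToken.add(corrections);
        }
    }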
return ret; } - ret = new ArrayList>(); + ret = new ArrayList<>(); for (int i = 0; i < possibleCorrection.size(); i++) { List c = compatible(possibleCorrection, i); ret.add(c); @@ -274,7 +274,7 @@ public class PossibilityIterator implements int pos) { List priorPassCompatibles = null; { - List firstPassCompatibles = new ArrayList( + List firstPassCompatibles = new ArrayList<>( all.size()); SpellCheckCorrection sacred = all.get(pos); firstPassCompatibles.add(sacred); @@ -303,7 +303,7 @@ public class PossibilityIterator implements if (pos == priorPassCompatibles.size() - 1) { return priorPassCompatibles; } - List subsequentPassCompatibles = new ArrayList( + List subsequentPassCompatibles = new ArrayList<>( priorPassCompatibles.size()); SpellCheckCorrection sacred = null; for (int i = 0; i <= pos; i++) { diff --git a/solr/core/src/java/org/apache/solr/spelling/SolrSpellChecker.java b/solr/core/src/java/org/apache/solr/spelling/SolrSpellChecker.java index 38e2543154b..01f01a39de9 100644 --- a/solr/core/src/java/org/apache/solr/spelling/SolrSpellChecker.java +++ b/solr/core/src/java/org/apache/solr/spelling/SolrSpellChecker.java @@ -138,7 +138,7 @@ public abstract class SolrSpellChecker { for (SuggestWord word : suggestions) result.add(token, word.string, word.freq); } else { - List words = new ArrayList(sugQueue.size()); + List words = new ArrayList<>(sugQueue.size()); for (SuggestWord word : suggestions) words.add(word.string); result.add(token, words); } diff --git a/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java b/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java index 16955d6c712..e422d6cab0f 100644 --- a/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java +++ b/solr/core/src/java/org/apache/solr/spelling/SpellCheckCollator.java @@ -50,7 +50,7 @@ public class SpellCheckCollator { public List collate(SpellingResult result, String originalQuery, ResponseBuilder ultimateResponse) { - List collations = new ArrayList(); + List collations = new ArrayList<>(); QueryComponent queryComponent = null; if (ultimateResponse.components != null) { @@ -169,7 +169,7 @@ public class SpellCheckCollator { collation.setHits(hits); collation.setInternalRank(suggestionsMayOverlap ? 
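The PossibilityIterator hunk above combines the diamond with a comparator argument: new PriorityQueue<>(11, new RankComparator()). One limit worth noting for this Java 7 codebase: the diamond cannot appear on an anonymous-class creation itself; that only became legal in Java 9. A sketch (Ranked and the comparator body are invented for illustration):

    import java.util.Comparator;
    import java.util.PriorityQueue;

    class RankDemo {
        static class Ranked {
            final int rank;
            Ranked(int rank) { this.rank = rank; }
        }

        // Diamond is fine here: the element type comes from the declaration,
        // and the comparator is an ordinary constructor argument.
        PriorityQueue<Ranked> queue = new PriorityQueue<>(11, new Comparator<Ranked>() {
            // Under Java 7 an anonymous class must spell out <Ranked>;
            // "new Comparator<>() { ... }" is a Java 9 feature.
            @Override
            public int compare(Ranked a, Ranked b) {
                return Integer.compare(a.rank, b.rank);
            }
        });
    }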
((possibility.rank * 1000) + possibility.index) : possibility.rank); - NamedList misspellingsAndCorrections = new NamedList(); + NamedList misspellingsAndCorrections = new NamedList<>(); for (SpellCheckCorrection corr : possibility.corrections) { misspellingsAndCorrections.add(corr.getOriginal().toString(), corr.getCorrection()); } diff --git a/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java b/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java index 359cc69d63e..8b19e8365a6 100644 --- a/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java +++ b/solr/core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java @@ -104,7 +104,7 @@ public class SpellingQueryConverter extends QueryConverter { if (original == null) { // this can happen with q.alt = and no query return Collections.emptyList(); } - Collection result = new ArrayList(); + Collection result = new ArrayList<>(); Matcher matcher = QUERY_REGEX.matcher(original); String nextWord = null; int nextStartIndex = 0; diff --git a/solr/core/src/java/org/apache/solr/spelling/SpellingResult.java b/solr/core/src/java/org/apache/solr/spelling/SpellingResult.java index 06816ba1774..f69fb696de9 100644 --- a/solr/core/src/java/org/apache/solr/spelling/SpellingResult.java +++ b/solr/core/src/java/org/apache/solr/spelling/SpellingResult.java @@ -37,7 +37,7 @@ public class SpellingResult { * Key == token * Value = Map -> key is the suggestion, value is the frequency of the token in the collection */ - private Map> suggestions = new LinkedHashMap>(); + private Map> suggestions = new LinkedHashMap<>(); private Map tokenFrequency; public static final int NO_FREQUENCY_INFO = -1; @@ -58,7 +58,7 @@ public class SpellingResult { public void add(Token token, List suggestions) { LinkedHashMap map = this.suggestions.get(token); if (map == null ) { - map = new LinkedHashMap(); + map = new LinkedHashMap<>(); this.suggestions.put(token, map); } for (String suggestion : suggestions) { @@ -74,7 +74,7 @@ public class SpellingResult { */ public void addFrequency(Token token, int docFreq) { if (tokenFrequency == null) { - tokenFrequency = new LinkedHashMap(); + tokenFrequency = new LinkedHashMap<>(); } tokenFrequency.put(token, docFreq); } @@ -89,7 +89,7 @@ public class SpellingResult { LinkedHashMap map = this.suggestions.get(token); //Don't bother adding if we already have this token if (map == null) { - map = new LinkedHashMap(); + map = new LinkedHashMap<>(); this.suggestions.put(token, map); } map.put(suggestion, docFreq); diff --git a/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java b/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java index c16f6c65a0d..a1014c09c68 100644 --- a/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java +++ b/solr/core/src/java/org/apache/solr/spelling/SuggestQueryConverter.java @@ -35,7 +35,7 @@ public class SuggestQueryConverter extends SpellingQueryConverter { return Collections.emptyList(); } - Collection result = new ArrayList(); + Collection result = new ArrayList<>(); try { analyze(result, original, 0, 0); } catch (IOException e) { diff --git a/solr/core/src/java/org/apache/solr/spelling/WordBreakSolrSpellChecker.java b/solr/core/src/java/org/apache/solr/spelling/WordBreakSolrSpellChecker.java index 243ff4f34d8..ad0100576a4 100644 --- a/solr/core/src/java/org/apache/solr/spelling/WordBreakSolrSpellChecker.java +++ b/solr/core/src/java/org/apache/solr/spelling/WordBreakSolrSpellChecker.java @@ -199,9 
+199,9 @@ public class WordBreakSolrSpellChecker extends SolrSpellChecker { StringBuilder sb = new StringBuilder(); Token[] tokenArr = options.tokens.toArray(new Token[options.tokens.size()]); - List termArr = new ArrayList(options.tokens.size() + 2); + List termArr = new ArrayList<>(options.tokens.size() + 2); - List breakSuggestionList = new ArrayList(); + List breakSuggestionList = new ArrayList<>(); boolean lastOneProhibited = false; boolean lastOneRequired = false; boolean lastOneprocedesNewBooleanOp = false; @@ -253,7 +253,7 @@ public class WordBreakSolrSpellChecker extends SolrSpellChecker { CombineSuggestion[] combineSuggestions = wbsp.suggestWordCombinations( termArr.toArray(new Term[termArr.size()]), numSuggestions, ir, options.suggestMode); if (combineWords) { - combineSuggestionList = new ArrayList( + combineSuggestionList = new ArrayList<>( combineSuggestions.length); for (CombineSuggestion cs : combineSuggestions) { int firstTermIndex = cs.originalTermIndexes[0]; diff --git a/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java b/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java index a511d905050..cb44d633863 100644 --- a/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java +++ b/solr/core/src/java/org/apache/solr/spelling/suggest/DocumentExpressionDictionaryFactory.java @@ -68,7 +68,7 @@ public class DocumentExpressionDictionaryFactory extends DictionaryFactory { String field = (String) params.get(FIELD); String payloadField = (String) params.get(PAYLOAD_FIELD); String weightExpression = (String) params.get(WEIGHT_EXPRESSION); - Set sortFields = new HashSet(); + Set sortFields = new HashSet<>(); if (field == null) { throw new IllegalArgumentException(FIELD + " is a mandatory parameter"); diff --git a/solr/core/src/java/org/apache/solr/spelling/suggest/SuggesterResult.java b/solr/core/src/java/org/apache/solr/spelling/suggest/SuggesterResult.java index 0ab357c837f..3017349a970 100644 --- a/solr/core/src/java/org/apache/solr/spelling/suggest/SuggesterResult.java +++ b/solr/core/src/java/org/apache/solr/spelling/suggest/SuggesterResult.java @@ -35,7 +35,7 @@ public class SuggesterResult { /** token -> lookup results mapping*/ private Map>> suggestionsMap = - new HashMap>>(); + new HashMap<>(); /** Add suggestion results for token */ public void add(String suggesterName, String token, List results) { diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java index 592831b0dad..b1b827bb673 100644 --- a/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java +++ b/solr/core/src/java/org/apache/solr/store/blockcache/BlockDirectoryCache.java @@ -27,7 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger; public class BlockDirectoryCache implements Cache { private final BlockCache blockCache; private AtomicInteger counter = new AtomicInteger(); - private Map names = new ConcurrentHashMap(); + private Map names = new ConcurrentHashMap<>(); private String path; private Metrics metrics; diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java b/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java index f54b2757041..ad10357fec5 100644 --- a/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java +++ b/solr/core/src/java/org/apache/solr/store/blockcache/BufferStore.java @@ -39,7 
+39,7 @@ public class BufferStore implements Store { } }; - private final static ConcurrentMap bufferStores = new ConcurrentHashMap(); + private final static ConcurrentMap bufferStores = new ConcurrentHashMap<>(); private final BlockingQueue buffers; @@ -66,7 +66,7 @@ public class BufferStore implements Store { } private static BlockingQueue setupBuffers(int bufferSize, int count) { - BlockingQueue queue = new ArrayBlockingQueue(count); + BlockingQueue queue = new ArrayBlockingQueue<>(count); for (int i = 0; i < count; i++) { queue.add(new byte[bufferSize]); } diff --git a/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java b/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java index 052e70442f8..04e9867ee69 100644 --- a/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java +++ b/solr/core/src/java/org/apache/solr/store/blockcache/Metrics.java @@ -53,7 +53,7 @@ public class Metrics implements Updater { public AtomicLong shardBuffercacheAllocate8192 = new AtomicLong(0); public AtomicLong shardBuffercacheAllocateOther = new AtomicLong(0); public AtomicLong shardBuffercacheLost = new AtomicLong(0); - public Map methodCalls = new ConcurrentHashMap(); + public Map methodCalls = new ConcurrentHashMap<>(); public AtomicLong tableCount = new AtomicLong(0); public AtomicLong rowCount = new AtomicLong(0); diff --git a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java index 3e5d7711d52..468f56ceb45 100644 --- a/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java +++ b/solr/core/src/java/org/apache/solr/store/hdfs/HdfsDirectory.java @@ -163,7 +163,7 @@ public class HdfsDirectory extends BaseDirectory { @Override public String[] listAll() throws IOException { FileStatus[] listStatus = getFileSystem().listStatus(hdfsDirPath); - List files = new ArrayList(); + List files = new ArrayList<>(); if (listStatus == null) { return new String[] {}; } diff --git a/solr/core/src/java/org/apache/solr/update/AddUpdateCommand.java b/solr/core/src/java/org/apache/solr/update/AddUpdateCommand.java index 6def045784f..607ed34d94f 100644 --- a/solr/core/src/java/org/apache/solr/update/AddUpdateCommand.java +++ b/solr/core/src/java/org/apache/solr/update/AddUpdateCommand.java @@ -193,7 +193,7 @@ public class AddUpdateCommand extends UpdateCommand implements Iterable flatten(SolrInputDocument root) { - List unwrappedDocs = new ArrayList(); + List unwrappedDocs = new ArrayList<>(); recUnwrapp(unwrappedDocs, root); return unwrappedDocs; } diff --git a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java index 864dc83b65c..ec03bcaa6ad 100644 --- a/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java +++ b/solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java @@ -201,7 +201,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState if (deletesAfter != null) { log.info("Reordered DBQs detected. 
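The BlockDirectoryCache, BufferStore and Metrics hunks show that the conversion applies to java.util.concurrent types exactly as it does to plain collections. The BufferStore pattern, a bounded queue pre-filled with reusable buffers, in sketch form (sizes and names are illustrative):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    class BufferPoolDemo {
        // A fixed-capacity pool of byte[] buffers; the element type of the
        // ArrayBlockingQueue is inferred from the declared queue type.
        static BlockingQueue<byte[]> setupBuffers(int bufferSize, int count) {
            BlockingQueue<byte[]> queue = new ArrayBlockingQueue<>(count);
            for (int i = 0; i < count; i++) {
                queue.add(new byte[bufferSize]);
            }
            return queue;
        }
    }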
Update=" + cmd + " DBQs=" + deletesAfter); - List dbqList = new ArrayList(deletesAfter.size()); + List dbqList = new ArrayList<>(deletesAfter.size()); for (UpdateLog.DBQ dbq : deletesAfter) { try { DeleteUpdateCommand tmpDel = new DeleteUpdateCommand(cmd.req); @@ -493,7 +493,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState log.info("start "+cmd); RefCounted iw = solrCoreState.getIndexWriter(core); try { - final Map commitData = new HashMap(); + final Map commitData = new HashMap<>(); commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY, String.valueOf(System.currentTimeMillis())); iw.get().setCommitData(commitData); @@ -571,7 +571,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState // SolrCore.verbose("writer.commit() start writer=",writer); if (writer.hasUncommittedChanges()) { - final Map commitData = new HashMap(); + final Map commitData = new HashMap<>(); commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY, String.valueOf(System.currentTimeMillis())); writer.setCommitData(commitData); @@ -759,7 +759,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState } // todo: refactor this shared code (or figure out why a real CommitUpdateCommand can't be used) - final Map commitData = new HashMap(); + final Map commitData = new HashMap<>(); commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY, String.valueOf(System.currentTimeMillis())); writer.setCommitData(commitData); writer.commit(); diff --git a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java index 935774d766a..4b0a7cecef3 100644 --- a/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java +++ b/solr/core/src/java/org/apache/solr/update/HdfsTransactionLog.java @@ -191,7 +191,7 @@ public class HdfsTransactionLog extends TransactionLog { synchronized (this) { globalStringList = (List)header.get("strings"); - globalStringMap = new HashMap(globalStringList.size()); + globalStringMap = new HashMap<>(globalStringList.size()); for (int i=0; i fileList = new ArrayList(files.length); + List fileList = new ArrayList<>(files.length); for (FileStatus file : files) { fileList.add(file.getPath().getName()); } diff --git a/solr/core/src/java/org/apache/solr/update/MemOutputStream.java b/solr/core/src/java/org/apache/solr/update/MemOutputStream.java index 32b459e3cfe..fbc3d455260 100644 --- a/solr/core/src/java/org/apache/solr/update/MemOutputStream.java +++ b/solr/core/src/java/org/apache/solr/update/MemOutputStream.java @@ -25,7 +25,7 @@ import java.util.List; /** @lucene.internal */ public class MemOutputStream extends FastOutputStream { - public List buffers = new LinkedList(); + public List buffers = new LinkedList<>(); public MemOutputStream(byte[] tempBuffer) { super(null, tempBuffer, 0); } diff --git a/solr/core/src/java/org/apache/solr/update/PeerSync.java b/solr/core/src/java/org/apache/solr/update/PeerSync.java index f94a4a3220d..4f7b5d25786 100644 --- a/solr/core/src/java/org/apache/solr/update/PeerSync.java +++ b/solr/core/src/java/org/apache/solr/update/PeerSync.java @@ -227,7 +227,7 @@ public class PeerSync { } // let's merge the lists - List newList = new ArrayList(ourUpdates); + List newList = new ArrayList<>(ourUpdates); for (Long ver : startingVersions) { if (Math.abs(ver) < smallestNewUpdate) { newList.add(ver); @@ -248,8 +248,8 @@ public class PeerSync { } } - ourUpdateSet = new HashSet(ourUpdates); - requestedUpdateSet = new 
HashSet(ourUpdates); + ourUpdateSet = new HashSet<>(ourUpdates); + requestedUpdateSet = new HashSet<>(ourUpdates); for(;;) { ShardResponse srsp = shardHandler.takeCompletedOrError(); @@ -390,7 +390,7 @@ public class PeerSync { return true; } - List toRequest = new ArrayList(); + List toRequest = new ArrayList<>(); for (Long otherVersion : otherVersions) { // stop when the entries get old enough that reorders may lead us to see updates we don't need if (!completeList && Math.abs(otherVersion) < ourLowThreshold) break; diff --git a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java index 88d6e19d58b..52435398c98 100644 --- a/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java +++ b/solr/core/src/java/org/apache/solr/update/SolrCmdDistributor.java @@ -47,8 +47,8 @@ public class SolrCmdDistributor { private int retryPause = 500; private int maxRetriesOnForward = MAX_RETRIES_ON_FORWARD; - private List allErrors = new ArrayList(); - private List errors = new ArrayList(); + private List allErrors = new ArrayList<>(); + private List errors = new ArrayList<>(); public static interface AbortCheck { public boolean abortCheck(); @@ -76,9 +76,9 @@ public class SolrCmdDistributor { private void doRetriesIfNeeded() { // NOTE: retries will be forwards to a single url - List errors = new ArrayList(this.errors); + List errors = new ArrayList<>(this.errors); errors.addAll(servers.getErrors()); - List resubmitList = new ArrayList(); + List resubmitList = new ArrayList<>(); for (Error err : errors) { try { @@ -266,7 +266,7 @@ public class SolrCmdDistributor { public static class Response { - public List errors = new ArrayList(); + public List errors = new ArrayList<>(); } public static class Error { diff --git a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java index 50ce0da41e8..543e956b8a8 100644 --- a/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java +++ b/solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java @@ -90,7 +90,7 @@ public class SolrIndexSplitter { public void split() throws IOException { List leaves = searcher.getTopReaderContext().leaves(); - List segmentDocSets = new ArrayList(leaves.size()); + List segmentDocSets = new ArrayList<>(leaves.size()); log.info("SolrIndexSplitter: partitions=" + numPieces + " segments="+leaves.size()); diff --git a/solr/core/src/java/org/apache/solr/update/StreamingSolrServers.java b/solr/core/src/java/org/apache/solr/update/StreamingSolrServers.java index 4bd8e1a2efc..261dd427162 100644 --- a/solr/core/src/java/org/apache/solr/update/StreamingSolrServers.java +++ b/solr/core/src/java/org/apache/solr/update/StreamingSolrServers.java @@ -43,7 +43,7 @@ public class StreamingSolrServers { private HttpClient httpClient; - private Map solrServers = new HashMap(); + private Map solrServers = new HashMap<>(); private List errors = Collections.synchronizedList(new ArrayList()); private ExecutorService updateExecutor; @@ -82,7 +82,7 @@ public class StreamingSolrServers { server.setParser(new BinaryResponseParser()); server.setRequestWriter(new BinaryRequestWriter()); server.setPollQueueTime(0); - Set queryParams = new HashSet(2); + Set queryParams = new HashSet<>(2); queryParams.add(DistributedUpdateProcessor.DISTRIB_FROM); queryParams.add(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM); server.setQueryParams(queryParams); diff --git 
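The PeerSync and SolrCmdDistributor hunks use the copy-constructor form, new HashSet<>(ourUpdates) and new ArrayList<>(this.errors): inference still produces the right element type, so a defensive snapshot of shared state needs no restated parameters. Sketch (Long stands in for Solr's update version numbers):

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class DefensiveCopyDemo {
        void merge(List<Long> ourUpdates) {
            Set<Long> ourUpdateSet = new HashSet<>(ourUpdates);   // HashSet<Long>
            List<Long> newList = new ArrayList<>(ourUpdates);     // ArrayList<Long>
            newList.add(-1L);   // mutating the copies leaves the source list intact
        }
    }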
a/solr/core/src/java/org/apache/solr/update/TransactionLog.java b/solr/core/src/java/org/apache/solr/update/TransactionLog.java index a9cefc657aa..08ba6c46913 100644 --- a/solr/core/src/java/org/apache/solr/update/TransactionLog.java +++ b/solr/core/src/java/org/apache/solr/update/TransactionLog.java @@ -77,8 +77,8 @@ public class TransactionLog { protected volatile boolean deleteOnClose = true; // we can delete old tlogs since they are currently only used for real-time-get (and in the future, recovery) AtomicInteger refcount = new AtomicInteger(1); - Map globalStringMap = new HashMap(); - List globalStringList = new ArrayList(); + Map globalStringMap = new HashMap<>(); + List globalStringList = new ArrayList<>(); long snapshot_size; int snapshot_numRecords; @@ -271,7 +271,7 @@ public class TransactionLog { synchronized (this) { globalStringList = (List)header.get("strings"); - globalStringMap = new HashMap(globalStringList.size()); + globalStringMap = new HashMap<>(globalStringList.size()); for (int i=0; i getGlobalStrings() { synchronized (this) { - return new ArrayList(globalStringList); + return new ArrayList<>(globalStringList); } } diff --git a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java index 03c5a48d698..b9af14a6c05 100644 --- a/solr/core/src/java/org/apache/solr/update/UpdateHandler.java +++ b/solr/core/src/java/org/apache/solr/update/UpdateHandler.java @@ -49,9 +49,9 @@ public abstract class UpdateHandler implements SolrInfoMBean { protected final SchemaField idField; protected final FieldType idFieldType; - protected Vector commitCallbacks = new Vector(); - protected Vector softCommitCallbacks = new Vector(); - protected Vector optimizeCallbacks = new Vector(); + protected Vector commitCallbacks = new Vector<>(); + protected Vector softCommitCallbacks = new Vector<>(); + protected Vector optimizeCallbacks = new Vector<>(); protected final UpdateLog ulog; diff --git a/solr/core/src/java/org/apache/solr/update/UpdateLog.java b/solr/core/src/java/org/apache/solr/update/UpdateLog.java index 7353bfa3e74..a65012cda26 100644 --- a/solr/core/src/java/org/apache/solr/update/UpdateLog.java +++ b/solr/core/src/java/org/apache/solr/update/UpdateLog.java @@ -128,11 +128,11 @@ public class UpdateLog implements PluginInfoInitialized { protected TransactionLog tlog; protected TransactionLog prevTlog; - protected Deque logs = new LinkedList(); // list of recent logs, newest first - protected LinkedList newestLogsOnStartup = new LinkedList(); + protected Deque logs = new LinkedList<>(); // list of recent logs, newest first + protected LinkedList newestLogsOnStartup = new LinkedList<>(); protected int numOldRecords; // number of records in the recent logs - protected Map map = new HashMap(); + protected Map map = new HashMap<>(); protected Map prevMap; // used while committing/reopening is happening protected Map prevMap2; // used while committing/reopening is happening protected TransactionLog prevMapLog; // the transaction log used to look up entries found in prevMap @@ -160,7 +160,7 @@ public class UpdateLog implements PluginInfoInitialized { } } - protected LinkedList deleteByQueries = new LinkedList(); + protected LinkedList deleteByQueries = new LinkedList<>(); protected String[] tlogFiles; protected File tlogDir; @@ -566,7 +566,7 @@ public class UpdateLog implements PluginInfoInitialized { return null; } - List dbqList = new ArrayList(); + List dbqList = new ArrayList<>(); for (DBQ dbq : deleteByQueries) { if 
(dbq.version <= version) break; dbqList.add(dbq); @@ -582,7 +582,7 @@ public class UpdateLog implements PluginInfoInitialized { prevMap = map; prevMapLog = tlog; - map = new HashMap(); + map = new HashMap<>(); } private void clearOldMaps() { @@ -656,7 +656,7 @@ public class UpdateLog implements PluginInfoInitialized { // any added documents will make it into this commit or not. // But we do know that any updates already added will definitely // show up in the latest reader after the commit succeeds. - map = new HashMap(); + map = new HashMap<>(); if (debug) { log.debug("TLOG: preSoftCommit: prevMap="+ System.identityHashCode(prevMap) + " new map=" + System.identityHashCode(map)); @@ -792,7 +792,7 @@ public class UpdateLog implements PluginInfoInitialized { public Future recoverFromLog() { recoveryInfo = new RecoveryInfo(); - List recoverLogs = new ArrayList(1); + List recoverLogs = new ArrayList<>(1); for (TransactionLog ll : newestLogsOnStartup) { if (!ll.try_incref()) continue; @@ -812,7 +812,7 @@ public class UpdateLog implements PluginInfoInitialized { if (recoverLogs.isEmpty()) return null; - ExecutorCompletionService cs = new ExecutorCompletionService(recoveryExecutor); + ExecutorCompletionService cs = new ExecutorCompletionService<>(recoveryExecutor); LogReplayer replayer = new LogReplayer(recoverLogs, false); versionInfo.blockUpdates(); @@ -925,7 +925,7 @@ public class UpdateLog implements PluginInfoInitialized { /** Returns the list of deleteByQueries that happened after the given version */ public List getDeleteByQuery(long afterVersion) { - List result = new ArrayList(deleteByQueryList.size()); + List result = new ArrayList<>(deleteByQueryList.size()); for (Update update : deleteByQueryList) { if (Math.abs(update.version) > afterVersion) { Object dbq = update.log.lookup(update.pointer); @@ -942,13 +942,13 @@ public class UpdateLog implements PluginInfoInitialized { private void update() { int numUpdates = 0; - updateList = new ArrayList>(logList.size()); - deleteByQueryList = new ArrayList(); - deleteList = new ArrayList(); - updates = new HashMap(numRecordsToKeep); + updateList = new ArrayList<>(logList.size()); + deleteByQueryList = new ArrayList<>(); + deleteList = new ArrayList<>(); + updates = new HashMap<>(numRecordsToKeep); for (TransactionLog oldLog : logList) { - List updatesForLog = new ArrayList(); + List updatesForLog = new ArrayList<>(); TransactionLog.ReverseReader reader = null; try { @@ -1030,7 +1030,7 @@ public class UpdateLog implements PluginInfoInitialized { public RecentUpdates getRecentUpdates() { Deque logList; synchronized (this) { - logList = new LinkedList(logs); + logList = new LinkedList<>(logs); for (TransactionLog log : logList) { log.incref(); } @@ -1156,7 +1156,7 @@ public class UpdateLog implements PluginInfoInitialized { tlog.decref(); throw new RuntimeException("executor is not running..."); } - ExecutorCompletionService cs = new ExecutorCompletionService(recoveryExecutor); + ExecutorCompletionService cs = new ExecutorCompletionService<>(recoveryExecutor); LogReplayer replayer = new LogReplayer(Arrays.asList(new TransactionLog[]{tlog}), true); return cs.submit(replayer, recoveryInfo); } @@ -1188,7 +1188,7 @@ public class UpdateLog implements PluginInfoInitialized { boolean debug = loglog.isDebugEnabled(); public LogReplayer(List translogs, boolean activeLog) { - this.translogs = new LinkedList(); + this.translogs = new LinkedList<>(); this.translogs.addAll(translogs); this.activeLog = activeLog; } diff --git 
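In the UpdateLog hunks the pre-patch new ExecutorCompletionService(recoveryExecutor) was a raw construction, so its submit() handed back a bare Future; the diamond form recovers a typed Future with no extra keystrokes. A self-contained sketch; RecoveryInfo here is a hypothetical stand-in for Solr's class of the same name:

    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    class ReplayDemo {
        static class RecoveryInfo {}

        static Future<RecoveryInfo> replay(Runnable replayer, RecoveryInfo info) {
            ExecutorService recoveryExecutor = Executors.newSingleThreadExecutor();
            // Inferred as ExecutorCompletionService<RecoveryInfo>, so submit()
            // returns Future<RecoveryInfo> rather than a raw Future.
            ExecutorCompletionService<RecoveryInfo> cs =
                new ExecutorCompletionService<>(recoveryExecutor);
            try {
                return cs.submit(replayer, info);
            } finally {
                recoveryExecutor.shutdown();   // queued task still runs to completion
            }
        }
    }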
a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java index d9012497814..9d49cd8edcf 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java @@ -126,7 +126,7 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso private List typeMappings = Collections.emptyList(); private SelectorParams inclusions = new SelectorParams(); - private Collection exclusions = new ArrayList(); + private Collection exclusions = new ArrayList<>(); private FieldNameSelector selector = null; private String defaultFieldType; @@ -191,7 +191,7 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso } private static List parseTypeMappings(NamedList args) { - List typeMappings = new ArrayList(); + List typeMappings = new ArrayList<>(); List typeMappingsParams = args.getAll(TYPE_MAPPING_PARAM); for (Object typeMappingObj : typeMappingsParams) { if (null == typeMappingObj) { @@ -262,7 +262,7 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso if (null == schema.getFieldTypeByName(fieldTypeName)) { throw new SolrException(SERVER_ERROR, "fieldType '" + fieldTypeName + "' not found in the schema"); } - valueClasses = new HashSet>(); + valueClasses = new HashSet<>(); for (String valueClassName : valueClassNames) { try { valueClasses.add(loader.loadClass(valueClassName)); @@ -289,7 +289,7 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso final SolrCore core = cmd.getReq().getCore(); for (;;) { final IndexSchema oldSchema = core.getLatestSchema(); - List newFields = new ArrayList(); + List newFields = new ArrayList<>(); for (final String fieldName : doc.getFieldNames()) { if (selector.shouldMutate(fieldName)) { String fieldTypeName = mapValueClassesToFieldType(doc.getField(fieldName)); diff --git a/solr/core/src/java/org/apache/solr/update/processor/AllValuesOrNoneFieldMutatingUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/AllValuesOrNoneFieldMutatingUpdateProcessor.java index 58969b079ea..a548244ecc9 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/AllValuesOrNoneFieldMutatingUpdateProcessor.java +++ b/solr/core/src/java/org/apache/solr/update/processor/AllValuesOrNoneFieldMutatingUpdateProcessor.java @@ -87,7 +87,7 @@ public abstract class AllValuesOrNoneFieldMutatingUpdateProcessor extends FieldM if (DELETE_VALUE_SINGLETON == destVal) { if (log.isDebugEnabled()) { if (null == messages) { - messages = new ArrayList(); + messages = new ArrayList<>(); } messages.add(String.format(Locale.ROOT, "removing value from field '%s': %s '%s'", srcField.getName(), srcVal.getClass().getSimpleName(), srcVal)); @@ -95,7 +95,7 @@ public abstract class AllValuesOrNoneFieldMutatingUpdateProcessor extends FieldM } else { if (log.isDebugEnabled()) { if (null == messages) { - messages = new ArrayList(); + messages = new ArrayList<>(); } messages.add(String.format(Locale.ROOT, "replace value from field '%s': %s '%s' with %s '%s'", srcField.getName(), srcVal.getClass().getSimpleName(), srcVal, diff --git a/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java index 
2f77947efa1..29ad7d29234 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/CloneFieldUpdateProcessorFactory.java @@ -108,7 +108,7 @@ public class CloneFieldUpdateProcessorFactory private SelectorParams srcInclusions = new SelectorParams(); private Collection srcExclusions - = new ArrayList(); + = new ArrayList<>(); private FieldNameSelector srcSelector = null; private String dest = null; @@ -175,8 +175,7 @@ public class CloneFieldUpdateProcessorFactory } } else { // source better be one or more strings - srcInclusions.fieldName = new HashSet - (args.removeConfigArgs("source")); + srcInclusions.fieldName = new HashSet<>(args.removeConfigArgs("source")); } diff --git a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java index 765e892597d..37c935e9aa1 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java +++ b/solr/core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java @@ -283,13 +283,13 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { if (replicaProps != null) { if (nodes == null) { - nodes = new ArrayList(replicaProps.size()); + nodes = new ArrayList<>(replicaProps.size()); } // check for test param that lets us miss replicas String[] skipList = req.getParams().getParams(TEST_DISTRIB_SKIP_SERVERS); Set skipListSet = null; if (skipList != null) { - skipListSet = new HashSet(skipList.length); + skipListSet = new HashSet<>(skipList.length); skipListSet.addAll(Arrays.asList(skipList)); log.info("test.distrib.skip.servers was found and contains:" + skipListSet); } @@ -309,7 +309,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { } else { // I need to forward onto the leader... 
- nodes = new ArrayList(1); + nodes = new ArrayList<>(1); nodes.add(new RetryNode(new ZkCoreNodeProps(leaderReplica), zkController.getZkStateReader(), collection, shardId)); forwardToLeader = true; } @@ -373,7 +373,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { Replica sliceLeader = aslice.getLeader(); // slice leader can be null because node/shard is created zk before leader election if (sliceLeader != null && zkController.getClusterState().liveNodesContain(sliceLeader.getNodeName())) { - if (nodes == null) nodes = new ArrayList(); + if (nodes == null) nodes = new ArrayList<>(); ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(sliceLeader); nodes.add(new StdNode(nodeProps)); } @@ -400,7 +400,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { Collection activeSlices = cstate.getActiveSlices(targetCollectionName); if (activeSlices != null && !activeSlices.isEmpty()) { Slice any = activeSlices.iterator().next(); - if (nodes == null) nodes = new ArrayList(); + if (nodes == null) nodes = new ArrayList<>(); nodes.add(new StdNode(new ZkCoreNodeProps(any.getLeader()))); } } @@ -418,7 +418,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { int hash = compositeIdRouter.sliceHash(id, doc, null, coll); for (DocRouter.Range range : ranges) { if (range.includes(hash)) { - if (nodes == null) nodes = new ArrayList(); + if (nodes == null) nodes = new ArrayList<>(); DocCollection targetColl = cstate.getCollection(rule.getTargetCollectionName()); Collection activeSlices = targetColl.getRouter().getSearchSlicesSingle(id, null, targetColl); if (activeSlices == null || activeSlices.isEmpty()) { @@ -526,7 +526,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { .getReplicaProps(collection, shardId, leaderReplica.getName(), req.getCore().getName()); if (replicaProps != null) { - nodes = new ArrayList(replicaProps.size()); + nodes = new ArrayList<>(replicaProps.size()); for (ZkCoreNodeProps props : replicaProps) { nodes.add(new StdNode(props)); } @@ -1100,7 +1100,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { if(route == null) route = params.get(ShardParams.SHARD_KEYS);// deprecated . 
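The DistributedUpdateProcessor hunks show the diamond in plain assignments, not just declarations, e.g. if (nodes == null) nodes = new ArrayList<>(); — the target type is simply the variable's declared type. In sketch form (Node is an invented placeholder for Solr's routing node classes):

    import java.util.ArrayList;
    import java.util.List;

    class LazyInitDemo {
        static class Node {}

        private List<Node> nodes;   // left null until the first replica shows up

        void addNode(Node n) {
            if (nodes == null) {
                nodes = new ArrayList<>();   // ArrayList<Node>, from the field's type
            }
            nodes.add(n);
        }
    }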
kept for backcompat Collection slices = coll.getRouter().getSearchSlices(route, params, coll); - List leaders = new ArrayList(slices.size()); + List leaders = new ArrayList<>(slices.size()); for (Slice slice : slices) { String sliceName = slice.getName(); Replica leader; @@ -1224,7 +1224,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { .getReplicaProps(collection, myShardId, leaderReplica.getName(), req.getCore().getName(), null, ZkStateReader.DOWN); if (replicaProps != null) { - List myReplicas = new ArrayList(); + List myReplicas = new ArrayList<>(); for (ZkCoreNodeProps replicaProp : replicaProps) { myReplicas.add(new StdNode(replicaProp)); } @@ -1479,7 +1479,7 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor { private List getCollectionUrls(SolrQueryRequest req, String collection) { ClusterState clusterState = req.getCore().getCoreDescriptor() .getCoreContainer().getZkController().getClusterState(); - List urls = new ArrayList(); + List urls = new ArrayList<>(); Map slices = clusterState.getSlicesMap(collection); if (slices == null) { throw new ZooKeeperException(ErrorCode.BAD_REQUEST, diff --git a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java index 92fc82b216f..042d38765e0 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java +++ b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java @@ -84,7 +84,7 @@ public abstract class FieldMutatingUpdateProcessor // make a copy we can iterate over while mutating the doc final Collection fieldNames - = new ArrayList(doc.getFieldNames()); + = new ArrayList<>(doc.getFieldNames()); for (final String fname : fieldNames) { @@ -217,7 +217,7 @@ public abstract class FieldMutatingUpdateProcessor this.core = core; this.params = params; - final Collection classes = new ArrayList(params.typeClass.size()); + final Collection classes = new ArrayList<>(params.typeClass.size()); for (String t : params.typeClass) { try { diff --git a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java index 9375885f484..5df67ba03ba 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessorFactory.java @@ -125,7 +125,7 @@ public abstract class FieldMutatingUpdateProcessorFactory private SelectorParams inclusions = new SelectorParams(); private Collection exclusions - = new ArrayList(); + = new ArrayList<>(); private FieldMutatingUpdateProcessor.FieldNameSelector selector = null; @@ -139,13 +139,13 @@ public abstract class FieldMutatingUpdateProcessorFactory public static SelectorParams parseSelectorParams(NamedList args) { SelectorParams params = new SelectorParams(); - params.fieldName = new HashSet(args.removeConfigArgs("fieldName")); - params.typeName = new HashSet(args.removeConfigArgs("typeName")); + params.fieldName = new HashSet<>(args.removeConfigArgs("fieldName")); + params.typeName = new HashSet<>(args.removeConfigArgs("typeName")); // we can compile the patterns now Collection patterns = args.removeConfigArgs("fieldRegex"); if (! 
patterns.isEmpty()) { - params.fieldRegex = new ArrayList(patterns.size()); + params.fieldRegex = new ArrayList<>(patterns.size()); for (String s : patterns) { try { params.fieldRegex.add(Pattern.compile(s)); @@ -167,7 +167,7 @@ public abstract class FieldMutatingUpdateProcessorFactory } public static Collection parseSelectorExclusionParams(NamedList args) { - Collection exclusions = new ArrayList(); + Collection exclusions = new ArrayList<>(); List excList = args.getAll("exclude"); for (Object excObj : excList) { if (null == excObj) { diff --git a/solr/core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java index c9d9737a973..38e70feec53 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java @@ -89,7 +89,7 @@ class LogUpdateProcessor extends UpdateRequestProcessor { // TODO: make log level configurable as well, or is that overkill? // (ryan) maybe? I added it mostly to show that it *can* be configurable - this.toLog = new SimpleOrderedMap(); + this.toLog = new SimpleOrderedMap<>(); } @Override @@ -101,7 +101,7 @@ class LogUpdateProcessor extends UpdateRequestProcessor { // Add a list of added id's to the response if (adds == null) { - adds = new ArrayList(); + adds = new ArrayList<>(); toLog.add("add",adds); } @@ -122,7 +122,7 @@ class LogUpdateProcessor extends UpdateRequestProcessor { if (cmd.isDeleteById()) { if (deletes == null) { - deletes = new ArrayList(); + deletes = new ArrayList<>(); toLog.add("delete",deletes); } if (deletes.size() < maxNumToLog) { diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseBooleanFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseBooleanFieldUpdateProcessorFactory.java index 7be8e7f0ae8..e0d2444be14 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/ParseBooleanFieldUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/ParseBooleanFieldUpdateProcessorFactory.java @@ -72,8 +72,8 @@ public class ParseBooleanFieldUpdateProcessorFactory extends FieldMutatingUpdate private static final String FALSE_VALUES_PARAM = "falseValue"; private static final String CASE_SENSITIVE_PARAM = "caseSensitive"; - private Set trueValues = new HashSet(Arrays.asList(new String[] { "true" })); - private Set falseValues = new HashSet(Arrays.asList(new String[] { "false" })); + private Set trueValues = new HashSet<>(Arrays.asList(new String[] { "true" })); + private Set falseValues = new HashSet<>(Arrays.asList(new String[] { "false" })); private boolean caseSensitive = false; @Override diff --git a/solr/core/src/java/org/apache/solr/update/processor/ParseDateFieldUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/ParseDateFieldUpdateProcessorFactory.java index 5fa9bc2561e..231495293a0 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/ParseDateFieldUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/ParseDateFieldUpdateProcessorFactory.java @@ -102,7 +102,7 @@ public class ParseDateFieldUpdateProcessorFactory extends FieldMutatingUpdatePro private static final String DEFAULT_TIME_ZONE_PARAM = "defaultTimeZone"; private static final String LOCALE_PARAM = "locale"; - private Map formats = new LinkedHashMap(); + private Map formats = new LinkedHashMap<>(); @Override 
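The ParseBooleanFieldUpdateProcessorFactory hunk converts only the set construction; the explicit new String[] { ... } wrapper it keeps is redundant with varargs but harmless. With the diamond the defaults read as:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    class DefaultsDemo {
        // The patch writes Arrays.asList(new String[] { "true" }); the
        // varargs call shown here is equivalent.
        private Set<String> trueValues  = new HashSet<>(Arrays.asList("true"));
        private Set<String> falseValues = new HashSet<>(Arrays.asList("false"));
    }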
public UpdateRequestProcessor getInstance(SolrQueryRequest req, diff --git a/solr/core/src/java/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.java index 56d9af73565..f27620689d0 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/PreAnalyzedUpdateProcessorFactory.java @@ -121,7 +121,7 @@ public class PreAnalyzedUpdateProcessorFactory extends FieldMutatingUpdateProces public void inform(SolrCore core) { super.inform(core); parser = new PreAnalyzedField(); - Map args = new HashMap(); + Map args = new HashMap<>(); if (parserImpl != null) { args.put(PreAnalyzedField.PARSER_IMPL, parserImpl); } diff --git a/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessor.java b/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessor.java index 62aae608d18..29a7acba426 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessor.java +++ b/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessor.java @@ -66,7 +66,7 @@ public class RegexpBoostProcessor extends UpdateRequestProcessor { private String inputFieldname = DEFAULT_INPUT_FIELDNAME; private String boostFieldname = DEFAULT_BOOST_FIELDNAME; private String boostFilename; - private List boostEntries = new ArrayList(); + private List boostEntries = new ArrayList<>(); private static final String BOOST_ENTRIES_CACHE_KEY = "boost-entries"; RegexpBoostProcessor(SolrParams parameters, @@ -119,7 +119,7 @@ public class RegexpBoostProcessor extends UpdateRequestProcessor { } private List initBoostEntries(InputStream is) throws IOException { - List newBoostEntries = new ArrayList(); + List newBoostEntries = new ArrayList<>(); BufferedReader reader = new BufferedReader(new InputStreamReader(is, Charset.forName("UTF-8"))); try { diff --git a/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessorFactory.java index 5bb22ba07ba..a2660c1b70b 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/RegexpBoostProcessorFactory.java @@ -33,7 +33,7 @@ import org.apache.solr.response.SolrQueryResponse; public class RegexpBoostProcessorFactory extends UpdateRequestProcessorFactory { private SolrParams params; - private final Map sharedObjectCache = new HashMap(); + private final Map sharedObjectCache = new HashMap<>(); @Override public void init(@SuppressWarnings("rawtypes") final NamedList args) { diff --git a/solr/core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java index a6c38edc1e4..4618aad7712 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java @@ -142,7 +142,7 @@ public class SignatureUpdateProcessorFactory "Can't use SignatureUpdateProcessor with partial updates on signature fields"); } Collection docFields = doc.getFieldNames(); - currDocSigFields = new ArrayList(docFields.size()); + currDocSigFields = new ArrayList<>(docFields.size()); currDocSigFields.addAll(docFields); Collections.sort(currDocSigFields); } 
else { diff --git a/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java index 8d14dc7ef1d..f330e63c34b 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactory.java @@ -179,7 +179,7 @@ public class StatelessScriptUpdateProcessorFactory extends UpdateRequestProcesso "StatelessScriptUpdateProcessorFactory must be " + "initialized with at least one " + SCRIPT_ARG); } - scriptFiles = new ArrayList(); + scriptFiles = new ArrayList<>(); for (String script : scripts) { scriptFiles.add(new ScriptFile(script)); } @@ -251,7 +251,7 @@ public class StatelessScriptUpdateProcessorFactory extends UpdateRequestProcesso SolrQueryResponse rsp) throws SolrException { - List scriptEngines = new ArrayList(); + List scriptEngines = new ArrayList<>(); ScriptEngineManager scriptEngineManager = new ScriptEngineManager(resourceLoader.getClassLoader()); @@ -338,7 +338,7 @@ public class StatelessScriptUpdateProcessorFactory extends UpdateRequestProcesso List factories = mgr.getEngineFactories(); if (null == factories) return result; - Set engines = new LinkedHashSet(factories.size()); + Set engines = new LinkedHashSet<>(factories.size()); for (ScriptEngineFactory f : factories) { if (ext) { engines.addAll(f.getExtensions()); diff --git a/solr/core/src/java/org/apache/solr/update/processor/TextProfileSignature.java b/solr/core/src/java/org/apache/solr/update/processor/TextProfileSignature.java index 7a24e8df48f..8a4bd2a7b31 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/TextProfileSignature.java +++ b/solr/core/src/java/org/apache/solr/update/processor/TextProfileSignature.java @@ -65,7 +65,7 @@ public class TextProfileSignature extends MD5Signature { @Override public void add(String content) { - HashMap tokens = new HashMap(); + HashMap tokens = new HashMap<>(); StringBuilder curToken = new StringBuilder(); int maxFreq = 0; @@ -105,7 +105,7 @@ public class TextProfileSignature extends MD5Signature { maxFreq = tok.cnt; } Iterator it = tokens.values().iterator(); - ArrayList profile = new ArrayList(); + ArrayList profile = new ArrayList<>(); // calculate the QUANT value int quant = Math.round(maxFreq * quantRate); if (quant < 2) { diff --git a/solr/core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java index 13d497b5bcc..17aaf783730 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java @@ -70,8 +70,8 @@ public class UniqFieldsUpdateProcessorFactory extends FieldValueSubsetUpdateProc @Override @SuppressWarnings("unchecked") public Collection pickSubset(Collection values) { - Set uniqs = new HashSet(); - List result = new ArrayList(values.size()); + Set uniqs = new HashSet<>(); + List result = new ArrayList<>(values.size()); for (Object o : values) { if (!uniqs.contains(o)) { uniqs.add(o); diff --git a/solr/core/src/java/org/apache/solr/util/ConcurrentLFUCache.java b/solr/core/src/java/org/apache/solr/util/ConcurrentLFUCache.java index 6d6f84cc0ad..f0b031f9d3e 100644 --- a/solr/core/src/java/org/apache/solr/util/ConcurrentLFUCache.java +++ 
b/solr/core/src/java/org/apache/solr/util/ConcurrentLFUCache.java @@ -62,7 +62,7 @@ public class ConcurrentLFUCache { if (upperWaterMark < 1) throw new IllegalArgumentException("upperWaterMark must be > 0"); if (lowerWaterMark >= upperWaterMark) throw new IllegalArgumentException("lowerWaterMark must be < upperWaterMark"); - map = new ConcurrentHashMap>(initialSize); + map = new ConcurrentHashMap<>(initialSize); newThreadForCleanup = runNewThreadForCleanup; this.upperWaterMark = upperWaterMark; this.lowerWaterMark = lowerWaterMark; @@ -108,7 +108,7 @@ public class ConcurrentLFUCache { public V put(K key, V val) { if (val == null) return null; - CacheEntry e = new CacheEntry(key, val, stats.accessCounter.incrementAndGet()); + CacheEntry e = new CacheEntry<>(key, val, stats.accessCounter.incrementAndGet()); CacheEntry oldCacheEntry = map.put(key, e); int currentSize; if (oldCacheEntry == null) { @@ -171,7 +171,7 @@ public class ConcurrentLFUCache { int wantToRemove = sz - lowerWaterMark; - TreeSet tree = new TreeSet(); + TreeSet tree = new TreeSet<>(); for (CacheEntry ce : map.values()) { // set hitsCopy to avoid later Atomic reads @@ -223,10 +223,10 @@ public class ConcurrentLFUCache { * @return a LinkedHashMap containing 'n' or less than 'n' entries */ public Map getLeastUsedItems(int n) { - Map result = new LinkedHashMap(); + Map result = new LinkedHashMap<>(); if (n <= 0) return result; - TreeSet tree = new TreeSet(); + TreeSet tree = new TreeSet<>(); // we need to grab the lock since we are changing the copy variables markAndSweepLock.lock(); try { @@ -267,10 +267,10 @@ public class ConcurrentLFUCache { * @return a LinkedHashMap containing 'n' or less than 'n' entries */ public Map getMostUsedItems(int n) { - Map result = new LinkedHashMap(); + Map result = new LinkedHashMap<>(); if (n <= 0) return result; - TreeSet tree = new TreeSet(); + TreeSet tree = new TreeSet<>(); // we need to grab the lock since we are changing the copy variables markAndSweepLock.lock(); try { @@ -427,7 +427,7 @@ public class ConcurrentLFUCache { private boolean stop = false; public CleanupThread(ConcurrentLFUCache c) { - cache = new WeakReference(c); + cache = new WeakReference<>(c); } @Override diff --git a/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java b/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java index a97741966c1..5b5f7df9422 100644 --- a/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java +++ b/solr/core/src/java/org/apache/solr/util/ConcurrentLRUCache.java @@ -64,7 +64,7 @@ public class ConcurrentLRUCache { if (upperWaterMark < 1) throw new IllegalArgumentException("upperWaterMark must be > 0"); if (lowerWaterMark >= upperWaterMark) throw new IllegalArgumentException("lowerWaterMark must be < upperWaterMark"); - map = new ConcurrentHashMap>(initialSize); + map = new ConcurrentHashMap<>(initialSize); newThreadForCleanup = runNewThreadForCleanup; this.upperWaterMark = upperWaterMark; this.lowerWaterMark = lowerWaterMark; @@ -106,7 +106,7 @@ public class ConcurrentLRUCache { public V put(K key, V val) { if (val == null) return null; - CacheEntry e = new CacheEntry(key, val, stats.accessCounter.incrementAndGet()); + CacheEntry e = new CacheEntry<>(key, val, stats.accessCounter.incrementAndGet()); CacheEntry oldCacheEntry = map.put(key, e); int currentSize; if (oldCacheEntry == null) { @@ -284,7 +284,7 @@ public class ConcurrentLRUCache { wantToKeep = lowerWaterMark - numKept; wantToRemove = sz - lowerWaterMark - numRemoved; - PQueue queue = new 
PQueue(wantToRemove); + PQueue queue = new PQueue<>(wantToRemove); for (int i=eSize-1; i>=0; i--) { CacheEntry ce = eset[i]; @@ -408,10 +408,10 @@ public class ConcurrentLRUCache { * @return a LinkedHashMap containing 'n' or less than 'n' entries */ public Map getOldestAccessedItems(int n) { - Map result = new LinkedHashMap(); + Map result = new LinkedHashMap<>(); if (n <= 0) return result; - TreeSet> tree = new TreeSet>(); + TreeSet> tree = new TreeSet<>(); markAndSweepLock.lock(); try { for (Map.Entry> entry : map.entrySet()) { @@ -436,10 +436,10 @@ public class ConcurrentLRUCache { } public Map getLatestAccessedItems(int n) { - Map result = new LinkedHashMap(); + Map result = new LinkedHashMap<>(); if (n <= 0) return result; - TreeSet> tree = new TreeSet>(); + TreeSet> tree = new TreeSet<>(); // we need to grab the lock since we are changing lastAccessedCopy markAndSweepLock.lock(); try { @@ -587,7 +587,7 @@ public class ConcurrentLRUCache { private boolean stop = false; public CleanupThread(ConcurrentLRUCache c) { - cache = new WeakReference(c); + cache = new WeakReference<>(c); } @Override diff --git a/solr/core/src/java/org/apache/solr/util/DOMUtil.java b/solr/core/src/java/org/apache/solr/util/DOMUtil.java index 0c784473355..5c82a523f48 100644 --- a/solr/core/src/java/org/apache/solr/util/DOMUtil.java +++ b/solr/core/src/java/org/apache/solr/util/DOMUtil.java @@ -38,7 +38,7 @@ public class DOMUtil { } public static Map toMapExcept(NamedNodeMap attrs, String... exclusions) { - Map args = new HashMap(); + Map args = new HashMap<>(); outer: for (int j=0; j nodesToNamedList(NodeList nlst) { - NamedList clst = new NamedList(); + NamedList clst = new NamedList<>(); for (int i=0; i fragments = new ArrayList(); - List propertyRefs = new ArrayList(); + List fragments = new ArrayList<>(); + List propertyRefs = new ArrayList<>(); parsePropertyString(value, fragments, propertyRefs); StringBuilder sb = new StringBuilder(); diff --git a/solr/core/src/java/org/apache/solr/util/DateMathParser.java b/solr/core/src/java/org/apache/solr/util/DateMathParser.java index 53d7c55e379..fbeb61fb915 100644 --- a/solr/core/src/java/org/apache/solr/util/DateMathParser.java +++ b/solr/core/src/java/org/apache/solr/util/DateMathParser.java @@ -128,7 +128,7 @@ public class DateMathParser { // we probably need to change "Locale loc" to default to something // from a param via SolrRequestInfo as well. 
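Throughout ConcurrentLFUCache and ConcurrentLRUCache the inferred arguments are themselves type variables of the enclosing class (new CacheEntry<>(key, val, ...), new WeakReference<>(c)), which the diamond handles the same way as concrete types. A compressed sketch of that shape (MiniCache and Entry are invented for illustration):

    import java.lang.ref.WeakReference;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class MiniCache<K, V> {
        static class Entry<K, V> {
            final K key;
            final V value;
            Entry(K key, V value) { this.key = key; this.value = value; }
        }

        // <K, Entry<K, V>> is inferred; before the patch this restated the
        // field's own signature on the right-hand side.
        private final ConcurrentMap<K, Entry<K, V>> map = new ConcurrentHashMap<>();

        void put(K key, V value) {
            map.put(key, new Entry<>(key, value));   // Entry<K, V> inferred
        }

        // Same inference for wrapper types, as in the CleanupThread hunks.
        WeakReference<MiniCache<K, V>> weakSelf() {
            return new WeakReference<>(this);
        }
    }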
- Map units = new HashMap(13); + Map units = new HashMap<>(13); units.put("YEAR", Calendar.YEAR); units.put("YEARS", Calendar.YEAR); units.put("MONTH", Calendar.MONTH); diff --git a/solr/core/src/java/org/apache/solr/util/MapListener.java b/solr/core/src/java/org/apache/solr/util/MapListener.java index c6b4b375cb8..c74ccada91d 100644 --- a/solr/core/src/java/org/apache/solr/util/MapListener.java +++ b/solr/core/src/java/org/apache/solr/util/MapListener.java @@ -33,7 +33,7 @@ public class MapListener extends ForwardingMap { public MapListener(Map target) { this.target = target; - seenKeys = new HashSet(target.size()); + seenKeys = new HashSet<>(target.size()); } public Set getSeenKeys() { diff --git a/solr/core/src/java/org/apache/solr/util/PropertiesUtil.java b/solr/core/src/java/org/apache/solr/util/PropertiesUtil.java index 38da33b4e6b..20f594d0ddf 100644 --- a/solr/core/src/java/org/apache/solr/util/PropertiesUtil.java +++ b/solr/core/src/java/org/apache/solr/util/PropertiesUtil.java @@ -38,8 +38,8 @@ public class PropertiesUtil { return value; } - List fragments = new ArrayList(); - List propertyRefs = new ArrayList(); + List fragments = new ArrayList<>(); + List propertyRefs = new ArrayList<>(); parsePropertyString(value, fragments, propertyRefs); StringBuilder sb = new StringBuilder(); diff --git a/solr/core/src/java/org/apache/solr/util/RTimer.java b/solr/core/src/java/org/apache/solr/util/RTimer.java index 2cbd0441dc3..a85f9be6a0e 100644 --- a/solr/core/src/java/org/apache/solr/util/RTimer.java +++ b/solr/core/src/java/org/apache/solr/util/RTimer.java @@ -47,7 +47,7 @@ public class RTimer { public RTimer() { time = 0; culmTime = 0; - children = new SimpleOrderedMap(); + children = new SimpleOrderedMap<>(); startTime = now(); state = STARTED; } @@ -116,7 +116,7 @@ public class RTimer { } public NamedList asNamedList() { - NamedList m = new SimpleOrderedMap(); + NamedList m = new SimpleOrderedMap<>(); m.add( "time", time ); if( children.size() > 0 ) { for( Map.Entry entry : children ) { diff --git a/solr/core/src/java/org/apache/solr/util/SimplePostTool.java b/solr/core/src/java/org/apache/solr/util/SimplePostTool.java index edfe9c73213..80e54eed158 100644 --- a/solr/core/src/java/org/apache/solr/util/SimplePostTool.java +++ b/solr/core/src/java/org/apache/solr/util/SimplePostTool.java @@ -108,10 +108,10 @@ public class SimplePostTool { static HashMap mimeMap; GlobFileFilter globFileFilter; // Backlog for crawling - List> backlog = new ArrayList>(); - Set visited = new HashSet(); + List> backlog = new ArrayList<>(); + Set visited = new HashSet<>(); - static final Set DATA_MODES = new HashSet(); + static final Set DATA_MODES = new HashSet<>(); static final String USAGE_STRING_SHORT = "Usage: java [SystemProperties] -jar post.jar [-h|-] [ [...]]"; @@ -125,7 +125,7 @@ public class SimplePostTool { DATA_MODES.add(DATA_MODE_STDIN); DATA_MODES.add(DATA_MODE_WEB); - mimeMap = new HashMap(); + mimeMap = new HashMap<>(); mimeMap.put("xml", "text/xml"); mimeMap.put("csv", "text/csv"); mimeMap.put("json", "application/json"); @@ -344,8 +344,8 @@ public class SimplePostTool { private void reset() { fileTypes = DEFAULT_FILE_TYPES; globFileFilter = this.getFileFilterFromFileTypes(fileTypes); - backlog = new ArrayList>(); - visited = new HashSet(); + backlog = new ArrayList<>(); + visited = new HashSet<>(); } @@ -512,7 +512,7 @@ public class SimplePostTool { */ public int postWebPages(String[] args, int startIndexInArgs, OutputStream out) { reset(); - LinkedHashSet s = new LinkedHashSet(); + 
LinkedHashSet s = new LinkedHashSet<>(); for (int j = startIndexInArgs; j < args.length; j++) { try { URL u = new URL(normalizeUrlEnding(args[j])); @@ -558,7 +558,7 @@ public class SimplePostTool { int rawStackSize = stack.size(); stack.removeAll(visited); int stackSize = stack.size(); - LinkedHashSet subStack = new LinkedHashSet(); + LinkedHashSet subStack = new LinkedHashSet<>(); info("Entering crawl at level "+level+" ("+rawStackSize+" links total, "+stackSize+" new)"); for(URL u : stack) { try { @@ -1016,7 +1016,7 @@ public class SimplePostTool { final String DISALLOW = "Disallow:"; public PageFetcher() { - robotsCache = new HashMap>(); + robotsCache = new HashMap<>(); } public PageFetcherResult readPageFromUrl(URL u) { @@ -1074,7 +1074,7 @@ public class SimplePostTool { String strRobot = url.getProtocol() + "://" + host + "/robots.txt"; List disallows = robotsCache.get(host); if(disallows == null) { - disallows = new ArrayList(); + disallows = new ArrayList<>(); URL urlRobot; try { urlRobot = new URL(strRobot); @@ -1104,7 +1104,7 @@ public class SimplePostTool { * @throws IOException if problems reading the stream */ protected List parseRobotsTxt(InputStream is) throws IOException { - List disallows = new ArrayList(); + List disallows = new ArrayList<>(); BufferedReader r = new BufferedReader(new InputStreamReader(is, "UTF-8")); String l; while((l = r.readLine()) != null) { @@ -1130,7 +1130,7 @@ public class SimplePostTool { * @return a set of URLs parsed from the page */ protected Set getLinksFromWebPage(URL u, InputStream is, String type, URL postUrl) { - Set l = new HashSet(); + Set l = new HashSet<>(); URL url = null; try { ByteArrayOutputStream os = new ByteArrayOutputStream(); diff --git a/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java b/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java index 6b6d011acf3..fe4d79ca396 100644 --- a/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java +++ b/solr/core/src/java/org/apache/solr/util/SolrLogLayout.java @@ -44,7 +44,7 @@ public class SolrLogLayout extends Layout { long startTime = System.currentTimeMillis(); long lastTime = startTime; - Map methodAlias = new HashMap(); + Map methodAlias = new HashMap<>(); public static class Method { public String className; @@ -81,9 +81,9 @@ public class SolrLogLayout extends Layout { Map coreProps; } - Map coreInfoMap = new WeakHashMap(); + Map coreInfoMap = new WeakHashMap<>(); - public Map classAliases = new HashMap(); + public Map classAliases = new HashMap<>(); public void appendThread(StringBuilder sb, LoggingEvent event) { Thread th = Thread.currentThread(); diff --git a/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java b/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java index af7a9633a76..75be77bada3 100644 --- a/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java +++ b/solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java @@ -194,7 +194,7 @@ public class SolrPluginUtils { if (rb.doHighlights) { // copy return fields list - fieldFilter = new HashSet(fieldFilter); + fieldFilter = new HashSet<>(fieldFilter); // add highlight fields SolrHighlighter highlighter = HighlightComponent.getHighlighter(req.getCore()); @@ -219,7 +219,7 @@ public class SolrPluginUtils { public static Set getDebugInterests(String[] params, ResponseBuilder rb){ - Set debugInterests = new HashSet(); + Set debugInterests = new HashSet<>(); if (params != null) { for (int i = 0; i < params.length; i++) { if (params[i].equalsIgnoreCase("all") || 
params[i].equalsIgnoreCase("true")){ @@ -346,7 +346,7 @@ public class SolrPluginUtils { } public static NamedList explanationToNamedList(Explanation e) { - NamedList out = new SimpleOrderedMap(); + NamedList out = new SimpleOrderedMap<>(); out.add("match", e.isMatch()); out.add("value", e.getValue()); @@ -358,7 +358,7 @@ public class SolrPluginUtils { if (null == details || 0 == details.length) return out; List> kids - = new ArrayList>(details.length); + = new ArrayList<>(details.length); for (Explanation d : details) { kids.add(explanationToNamedList(d)); } @@ -371,7 +371,7 @@ public class SolrPluginUtils { (NamedList explanations) { NamedList> out - = new SimpleOrderedMap>(); + = new SimpleOrderedMap<>(); for (Map.Entry entry : explanations) { out.add(entry.getKey(), explanationToNamedList(entry.getValue())); } @@ -390,7 +390,7 @@ public class SolrPluginUtils { SolrIndexSearcher searcher, IndexSchema schema) throws IOException { - NamedList explainList = new SimpleOrderedMap(); + NamedList explainList = new SimpleOrderedMap<>(); DocIterator iterator = docs.iterator(); for (int i=0; i explanationsToStrings (NamedList explanations) { - NamedList out = new SimpleOrderedMap(); + NamedList out = new SimpleOrderedMap<>(); for (Map.Entry entry : explanations) { out.add(entry.getKey(), "\n"+entry.getValue().toString()); } @@ -470,9 +470,9 @@ public class SolrPluginUtils { */ public static Map parseFieldBoosts(String[] fieldLists) { if (null == fieldLists || 0 == fieldLists.length) { - return new HashMap(); + return new HashMap<>(); } - Map out = new HashMap(7); + Map out = new HashMap<>(7); for (String in : fieldLists) { if (null == in) { continue; @@ -502,9 +502,9 @@ public class SolrPluginUtils { */ public static List parseFieldBoostsAndSlop(String[] fieldLists,int wordGrams,int defaultSlop) { if (null == fieldLists || 0 == fieldLists.length) { - return new ArrayList(); + return new ArrayList<>(); } - List out = new ArrayList(); + List out = new ArrayList<>(); for (String in : fieldLists) { if (null == in) { continue; @@ -738,7 +738,7 @@ public class SolrPluginUtils { * string, to Alias object containing the fields to use in our * DisjunctionMaxQuery and the tiebreaker to use. 
*/ - protected Map aliases = new HashMap(3); + protected Map aliases = new HashMap<>(3); public DisjunctionMaxQueryParser(QParser qp, String defaultField) { super(qp,defaultField); // don't trust that our parent class won't ever change it's default @@ -848,7 +848,7 @@ public class SolrPluginUtils { public static List parseQueryStrings(SolrQueryRequest req, String[] queries) throws SyntaxError { if (null == queries || 0 == queries.length) return null; - List out = new ArrayList(queries.length); + List out = new ArrayList<>(queries.length); for (String q : queries) { if (null != q && 0 != q.trim().length()) { out.add(QParser.getParser(q, null, req).getQuery()); diff --git a/solr/core/src/java/org/apache/solr/util/TimeZoneUtils.java b/solr/core/src/java/org/apache/solr/util/TimeZoneUtils.java index 0aa58757f1d..d57be19d2b8 100644 --- a/solr/core/src/java/org/apache/solr/util/TimeZoneUtils.java +++ b/solr/core/src/java/org/apache/solr/util/TimeZoneUtils.java @@ -43,7 +43,7 @@ public final class TimeZoneUtils { * @see TimeZone#getAvailableIDs */ public static final Set KNOWN_TIMEZONE_IDS - = Collections.unmodifiableSet(new HashSet + = Collections.unmodifiableSet(new HashSet<> (Arrays.asList(TimeZone.getAvailableIDs()))); /** diff --git a/solr/core/src/java/org/apache/solr/util/VersionedFile.java b/solr/core/src/java/org/apache/solr/util/VersionedFile.java index 19ab3fbc73a..fc0a13fa384 100644 --- a/solr/core/src/java/org/apache/solr/util/VersionedFile.java +++ b/solr/core/src/java/org/apache/solr/util/VersionedFile.java @@ -63,7 +63,7 @@ public class VersionedFile }); Arrays.sort(names); f = new File(dir, names[names.length-1]); - oldFiles = new ArrayList(); + oldFiles = new ArrayList<>(); for (int i=0; i deleteList = new HashSet(); + private static final Set deleteList = new HashSet<>(); private static synchronized void delete(Collection files) { synchronized (deleteList) { deleteList.addAll(files); - List deleted = new ArrayList(); + List deleted = new ArrayList<>(); for (File df : deleteList) { try { df.delete(); diff --git a/solr/core/src/java/org/apache/solr/util/plugin/AbstractPluginLoader.java b/solr/core/src/java/org/apache/solr/util/plugin/AbstractPluginLoader.java index 40f4f4eb52a..a9c5c4c3b8d 100644 --- a/solr/core/src/java/org/apache/solr/util/plugin/AbstractPluginLoader.java +++ b/solr/core/src/java/org/apache/solr/util/plugin/AbstractPluginLoader.java @@ -135,7 +135,7 @@ public abstract class AbstractPluginLoader */ public T load( SolrResourceLoader loader, NodeList nodes ) { - List info = new ArrayList(); + List info = new ArrayList<>(); T defaultPlugin = null; if (nodes !=null ) { @@ -218,7 +218,7 @@ public abstract class AbstractPluginLoader * */ public T loadSingle(SolrResourceLoader loader, Node node) { - List info = new ArrayList(); + List info = new ArrayList<>(); T plugin = null; try { diff --git a/solr/core/src/java/org/apache/solr/util/stats/ExponentiallyDecayingSample.java b/solr/core/src/java/org/apache/solr/util/stats/ExponentiallyDecayingSample.java index 5f7728a12a9..6a227ec8c38 100644 --- a/solr/core/src/java/org/apache/solr/util/stats/ExponentiallyDecayingSample.java +++ b/solr/core/src/java/org/apache/solr/util/stats/ExponentiallyDecayingSample.java @@ -72,7 +72,7 @@ public class ExponentiallyDecayingSample implements Sample { * sample will be towards newer values */ public ExponentiallyDecayingSample(int reservoirSize, double alpha, Clock clock) { - this.values = new ConcurrentSkipListMap(); + this.values = new ConcurrentSkipListMap<>(); this.lock = new 
ReentrantReadWriteLock(); this.alpha = alpha; this.reservoirSize = reservoirSize; @@ -187,7 +187,7 @@ public class ExponentiallyDecayingSample implements Sample { try { final long oldStartTime = startTime; this.startTime = currentTimeInSeconds(); - final ArrayList keys = new ArrayList(values.keySet()); + final ArrayList keys = new ArrayList<>(values.keySet()); for (Double key : keys) { final Long value = values.remove(key); values.put(key * exp(-alpha * (startTime - oldStartTime)), value); diff --git a/solr/core/src/java/org/apache/solr/util/stats/Histogram.java b/solr/core/src/java/org/apache/solr/util/stats/Histogram.java index 696884aa53a..07d3b1f37a0 100644 --- a/solr/core/src/java/org/apache/solr/util/stats/Histogram.java +++ b/solr/core/src/java/org/apache/solr/util/stats/Histogram.java @@ -74,7 +74,7 @@ public class Histogram { // These are for the Welford algorithm for calculating running variance // without floating-point doom. private final AtomicReference variance = - new AtomicReference(new double[]{-1, 0}); // M, S + new AtomicReference<>(new double[]{-1, 0}); // M, S private final AtomicLong count = new AtomicLong(); /** diff --git a/solr/core/src/java/org/apache/solr/util/stats/UniformSample.java b/solr/core/src/java/org/apache/solr/util/stats/UniformSample.java index 37f536695c9..0293d69cfa2 100644 --- a/solr/core/src/java/org/apache/solr/util/stats/UniformSample.java +++ b/solr/core/src/java/org/apache/solr/util/stats/UniformSample.java @@ -100,7 +100,7 @@ public class UniformSample implements Sample { @Override public Snapshot getSnapshot() { final int s = size(); - final List copy = new ArrayList(s); + final List copy = new ArrayList<>(s); for (int i = 0; i < s; i++) { copy.add(values.get(i)); } diff --git a/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java b/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java index cfeda9b9228..0b254e5a3a2 100644 --- a/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java +++ b/solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java @@ -351,7 +351,7 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 { final String BAD_VALUE = "NOT_A_NUMBER"; ignoreException(BAD_VALUE); - final List FIELDS = new LinkedList(); + final List FIELDS = new LinkedList<>(); for (String type : new String[] { "ti", "tf", "td", "tl" }) { FIELDS.add("malformed_" + type); } @@ -550,7 +550,7 @@ public class BasicFunctionalityTest extends SolrTestCaseJ4 { nl.add("bt","true"); nl.add("bf","false"); - Map m = new HashMap(); + Map m = new HashMap<>(); m.put("f.field1.i", "1000"); m.put("s", "BBB"); m.put("ss", "SSS"); diff --git a/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java b/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java index 09c5750f8eb..508631b32b6 100644 --- a/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java +++ b/solr/core/src/test/org/apache/solr/ConvertedLegacyTest.java @@ -42,7 +42,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { public void testABunchOfConvertedStuff() { // these may be reused by things that need a special query SolrQueryRequest req = null; - Map args = new HashMap(); + Map args = new HashMap<>(); lrf.args.put(CommonParams.VERSION,"2.2"); lrf.args.put("defType","lucenePlusSort"); @@ -133,7 +133,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"*[count(//doc)=3] " ,"//*[@start='0']" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 2, 5 , args); 
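The long run of args = new HashMap<>() replacements in ConvertedLegacyTest works because diamond inference is driven by the target type of the assignment, not only by an initialized declaration. A small sketch under that assumption (the parameter values are illustrative, not taken from the test):

    import java.util.HashMap;
    import java.util.Map;

    class DiamondReassignment {
      public static void main(String[] argv) {
        // Declaration: the type arguments appear once, on the left-hand side.
        Map<String, String> args = new HashMap<>();
        args.put("defType", "lucenePlusSort");
        // Reassignment: the declared type of 'args' still drives inference,
        // so each fresh map is simply 'new HashMap<>()'.
        args = new HashMap<>();
        System.out.println(args.isEmpty()); // true
      }
    }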
assertQ(req @@ -142,28 +142,28 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"*//doc[1]/str[.='pear'] " ,"//*[@start='2']" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 3, 5 , args); assertQ(req ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 4, 5 , args); assertQ(req ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 25, 5 , args); assertQ(req ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 0, 1 , args); assertQ(req @@ -171,7 +171,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"*[count(//doc)=1] " ,"*//doc[1]/str[.='apple']" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 0, 2 , args); assertQ(req @@ -179,7 +179,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"*[count(//doc)=2] " ,"*//doc[2]/str[.='banana']" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 1, 1 , args); assertQ(req @@ -187,35 +187,35 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"*[count(//doc)=1] " ,"*//doc[1]/str[.='banana']" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 3, 1 , args); assertQ(req ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 4, 1 , args); assertQ(req ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 1, 0 , args); assertQ(req ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z]", "standard", 0, 0 , args); assertQ(req ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); args.put("defType","lucenePlusSort"); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z];val_s1 asc", "standard", 0, 0 , args); @@ -223,7 +223,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"//*[@numFound='3'] " ,"*[count(//doc)=0]" ); - args = new HashMap(); + args = new HashMap<>(); args.put("defType","lucenePlusSort"); req = new LocalSolrQueryRequest(h.getCore(), "val_s:[a TO z];val_s1 desc", "standard", 0, 0 , args); @@ -1107,7 +1107,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { assertU(""); assertQ(req("id:44") ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","fname_s,arr_f "); req = new LocalSolrQueryRequest(h.getCore(), "id:44", "standard", 0, 10, args); @@ -1115,7 +1115,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"//str[.='Yonik'] " ,"//float[.='1.4142135']" ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","fname_s,score"); req = new LocalSolrQueryRequest(h.getCore(), "id:44", "standard", 0, 10, args); @@ -1126,7 +1126,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { // test addition of 
score field - args = new HashMap(); + args = new HashMap<>(); args.put("fl","score,* "); req = new LocalSolrQueryRequest(h.getCore(), "id:44", "standard", 0, 10, args); @@ -1136,7 +1136,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"//float[@name='score'] " ,"*[count(//doc/*)>=13]" ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","*,score "); req = new LocalSolrQueryRequest(h.getCore(), "id:44", "standard", 0, 10, args); @@ -1146,7 +1146,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { ,"//float[@name='score'] " ,"*[count(//doc/*)>=13]" ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","* "); req = new LocalSolrQueryRequest(h.getCore(), "id:44", "standard", 0, 10, args); @@ -1158,14 +1158,14 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { // test maxScore - args = new HashMap(); + args = new HashMap<>(); args.put("fl","score "); req = new LocalSolrQueryRequest(h.getCore(), "id:44", "standard", 0, 10, args); assertQ(req ,"//result[@maxScore>0]" ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","score "); args.put("defType","lucenePlusSort"); req = new LocalSolrQueryRequest(h.getCore(), "id:44;id desc;", @@ -1173,7 +1173,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { assertQ(req ,"//result[@maxScore>0]" ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","score "); args.put("defType","lucenePlusSort"); req = new LocalSolrQueryRequest(h.getCore(), "id:44;", @@ -1181,7 +1181,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { assertQ(req ,"//@maxScore = //doc/float[@name='score']" ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","score "); args.put("defType","lucenePlusSort"); req = new LocalSolrQueryRequest(h.getCore(), "id:44;id desc;", @@ -1189,7 +1189,7 @@ public class ConvertedLegacyTest extends SolrTestCaseJ4 { assertQ(req ,"//@maxScore = //doc/float[@name='score']" ); - args = new HashMap(); + args = new HashMap<>(); args.put("fl","*,score"); args.put("defType","lucenePlusSort"); req = new LocalSolrQueryRequest(h.getCore(), "id:44;id desc;", diff --git a/solr/core/src/test/org/apache/solr/CursorPagingTest.java b/solr/core/src/test/org/apache/solr/CursorPagingTest.java index 7afaa6f397c..e291d8c144d 100644 --- a/solr/core/src/test/org/apache/solr/CursorPagingTest.java +++ b/solr/core/src/test/org/apache/solr/CursorPagingTest.java @@ -645,7 +645,7 @@ public class CursorPagingTest extends SolrTestCaseJ4 { final boolean prune_dv = ! 
defaultCodecSupportsMissingDocValues(); - ArrayList names = new ArrayList(37); + ArrayList names = new ArrayList<>(37); for (String f : raw) { if (f.equals("_version_")) { continue; @@ -760,7 +760,7 @@ public class CursorPagingTest extends SolrTestCaseJ4 { assertNotNull("facet.field param not specified", facetField); assertFalse("facet.field param contains multiple values", facetField.contains(",")); assertEquals("facet.limit param not set to -1", "-1", params.get("facet.limit")); - final Map facetCounts = new HashMap(); + final Map facetCounts = new HashMap<>(); SentinelIntSet ids = new SentinelIntSet(maxSize, -1); String cursorMark = CURSOR_MARK_START; int docsOnThisPage = Integer.MAX_VALUE; @@ -945,7 +945,7 @@ public class CursorPagingTest extends SolrTestCaseJ4 { */ public static String buildRandomSort(final Collection fieldNames) { - ArrayList shuffledNames = new ArrayList(fieldNames); + ArrayList shuffledNames = new ArrayList<>(fieldNames); Collections.replaceAll(shuffledNames, "id", "score"); Collections.shuffle(shuffledNames, random()); diff --git a/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java b/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java index 40f4fb7a7e8..d77b41808e4 100644 --- a/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java +++ b/solr/core/src/test/org/apache/solr/SolrInfoMBeanTest.java @@ -46,7 +46,7 @@ public class SolrInfoMBeanTest extends SolrTestCaseJ4 * a name, description, etc... */ public void testCallMBeanInfo() throws Exception { - List classes = new ArrayList(); + List classes = new ArrayList<>(); classes.addAll(getClassesForPackage(StandardRequestHandler.class.getPackage().getName())); classes.addAll(getClassesForPackage(SearchHandler.class.getPackage().getName())); classes.addAll(getClassesForPackage(SearchComponent.class.getPackage().getName())); @@ -90,7 +90,7 @@ public class SolrInfoMBeanTest extends SolrTestCaseJ4 } private static List getClassesForPackage(String pckgname) throws Exception { - ArrayList directories = new ArrayList(); + ArrayList directories = new ArrayList<>(); ClassLoader cld = h.getCore().getResourceLoader().getClassLoader(); String path = pckgname.replace('.', '/'); Enumeration resources = cld.getResources(path); @@ -102,7 +102,7 @@ public class SolrInfoMBeanTest extends SolrTestCaseJ4 directories.add(f); } - ArrayList classes = new ArrayList(); + ArrayList classes = new ArrayList<>(); for (File directory : directories) { if (directory.exists()) { String[] files = directory.list(); diff --git a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java index 5b0a692ab4b..c5831029715 100644 --- a/solr/core/src/test/org/apache/solr/TestDistributedSearch.java +++ b/solr/core/src/test/org/apache/solr/TestDistributedSearch.java @@ -345,10 +345,10 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase { // test shards.tolerant=true for(int numDownServers = 0; numDownServers < jettys.size()-1; numDownServers++) { - List upJettys = new ArrayList(jettys); - List upClients = new ArrayList(clients); - List downJettys = new ArrayList(); - List upShards = new ArrayList(Arrays.asList(shardsArr)); + List upJettys = new ArrayList<>(jettys); + List upClients = new ArrayList<>(clients); + List downJettys = new ArrayList<>(); + List upShards = new ArrayList<>(Arrays.asList(shardsArr)); for(int i=0; i list = new ArrayList(); + List list = new ArrayList<>(); list.add(45); list.add(33); list.add(20); diff --git 
a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java index d1446a86569..e45259e7140 100644 --- a/solr/core/src/test/org/apache/solr/TestGroupingSearch.java +++ b/solr/core/src/test/org/apache/solr/TestGroupingSearch.java @@ -675,7 +675,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { int indexSize = random().nextInt(25 * RANDOM_MULTIPLIER); //indexSize=2; - List types = new ArrayList(); + List types = new ArrayList<>(); types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4))); types.add(new FldType("score_f",ONE_ONE, new FVal(1,100))); // field used to score types.add(new FldType("foo_i",ZERO_ONE, new IRange(0,indexSize))); @@ -774,14 +774,14 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { for (Grp grp : groups.values()) grp.setMaxDoc(sortComparator); } - List sortedGroups = new ArrayList(groups.values()); + List sortedGroups = new ArrayList<>(groups.values()); Collections.sort(sortedGroups, groupComparator==sortComparator ? createFirstDocComparator(sortComparator) : createMaxDocComparator(sortComparator)); boolean includeNGroups = random().nextBoolean(); Object modelResponse = buildGroupedResult(schema, sortedGroups, start, rows, group_offset, group_limit, includeNGroups); boolean truncateGroups = random().nextBoolean(); - Map facetCounts = new TreeMap(); + Map facetCounts = new TreeMap<>(); if (truncateGroups) { for (Grp grp : sortedGroups) { Doc doc = grp.docs.get(0); @@ -808,7 +808,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { } } } - List expectedFacetResponse = new ArrayList(); + List expectedFacetResponse = new ArrayList<>(); for (Map.Entry stringIntegerEntry : facetCounts.entrySet()) { expectedFacetResponse.add(stringIntegerEntry.getKey()); expectedFacetResponse.add(stringIntegerEntry.getValue()); @@ -862,7 +862,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { } public static Object buildGroupedResult(IndexSchema schema, List sortedGroups, int start, int rows, int group_offset, int group_limit, boolean includeNGroups) { - Map result = new LinkedHashMap(); + Map result = new LinkedHashMap<>(); long matches = 0; for (Grp grp : sortedGroups) { @@ -877,13 +877,13 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { for (int i=start; i= rows) break; // directly test rather than calculating, so we can catch any calc errors in the real code - Map group = new LinkedHashMap(); + Map group = new LinkedHashMap<>(); groupList.add(group); Grp grp = sortedGroups.get(i); group.put("groupValue", grp.groupValue); - Map resultSet = new LinkedHashMap(); + Map resultSet = new LinkedHashMap<>(); group.put("doclist", resultSet); resultSet.put("numFound", grp.docs.size()); resultSet.put("start", group_offset); @@ -924,7 +924,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { } public static Map groupBy(Collection docs, String field) { - Map groups = new HashMap(); + Map groups = new HashMap<>(); for (Doc doc : docs) { List vals = doc.getValues(field); if (vals == null) { @@ -932,7 +932,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { if (grp == null) { grp = new Grp(); grp.groupValue = null; - grp.docs = new ArrayList(); + grp.docs = new ArrayList<>(); groups.put(null, grp); } grp.docs.add(doc); @@ -943,7 +943,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { if (grp == null) { grp = new Grp(); grp.groupValue = val; - grp.docs = new ArrayList(); + grp.docs = new ArrayList<>(); groups.put(grp.groupValue, grp); } grp.docs.add(doc); diff 
--git a/solr/core/src/test/org/apache/solr/TestJoin.java b/solr/core/src/test/org/apache/solr/TestJoin.java index 03be32c11fa..f62ffe039d7 100644 --- a/solr/core/src/test/org/apache/solr/TestJoin.java +++ b/solr/core/src/test/org/apache/solr/TestJoin.java @@ -150,7 +150,7 @@ public class TestJoin extends SolrTestCaseJ4 { while (--indexIter >= 0) { int indexSize = random().nextInt(20 * RANDOM_MULTIPLIER); - List types = new ArrayList(); + List types = new ArrayList<>(); types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4))); types.add(new FldType("score_f",ONE_ONE, new FVal(1,100))); // field used to score types.add(new FldType("small_s",ZERO_ONE, new SVal('a',(char)('c'+indexSize/3),1,1))); @@ -164,7 +164,7 @@ public class TestJoin extends SolrTestCaseJ4 { clearIndex(); Map model = indexDocs(types, null, indexSize); - Map>> pivots = new HashMap>>(); + Map>> pivots = new HashMap<>(); for (int qiter=0; qiter fromDocs = model.values(); Set docs = join(fromDocs, pivot); - List docList = new ArrayList(docs.size()); + List docList = new ArrayList<>(docs.size()); for (Comparable id : docs) docList.add(model.get(id)); Collections.sort(docList, createComparator("_docid_",true,false,false,false)); List sortedDocs = new ArrayList(); @@ -198,7 +198,7 @@ public class TestJoin extends SolrTestCaseJ4 { sortedDocs.add(doc.toObject(h.getCore().getLatestSchema())); } - Map resultSet = new LinkedHashMap(); + Map resultSet = new LinkedHashMap<>(); resultSet.put("numFound", docList.size()); resultSet.put("start", 0); resultSet.put("docs", sortedDocs); @@ -235,7 +235,7 @@ public class TestJoin extends SolrTestCaseJ4 { Map> createJoinMap(Map model, String fromField, String toField) { - Map> id_to_id = new HashMap>(); + Map> id_to_id = new HashMap<>(); Map> value_to_id = invertField(model, toField); @@ -248,7 +248,7 @@ public class TestJoin extends SolrTestCaseJ4 { if (toIds == null) continue; Set ids = id_to_id.get(fromId); if (ids == null) { - ids = new HashSet(); + ids = new HashSet<>(); id_to_id.put(fromId, ids); } for (Comparable toId : toIds) @@ -261,7 +261,7 @@ public class TestJoin extends SolrTestCaseJ4 { Set join(Collection input, Map> joinMap) { - Set ids = new HashSet(); + Set ids = new HashSet<>(); for (Doc doc : input) { Collection output = joinMap.get(doc.id); if (output == null) continue; diff --git a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java index 9f1274456c0..53334b3f0df 100644 --- a/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java +++ b/solr/core/src/test/org/apache/solr/TestRandomDVFaceting.java @@ -58,7 +58,7 @@ public class TestRandomDVFaceting extends SolrTestCaseJ4 { model = null; indexSize = rand.nextBoolean() ? (rand.nextInt(10) + 1) : (rand.nextInt(100) + 10); - types = new ArrayList(); + types = new ArrayList<>(); types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4))); types.add(new FldType("score_f",ONE_ONE, new FVal(1,100))); types.add(new FldType("foo_i",ZERO_ONE, new IRange(0,indexSize))); @@ -88,7 +88,7 @@ public class TestRandomDVFaceting extends SolrTestCaseJ4 { Random rand = random(); int percent = rand.nextInt(100); if (model == null) return; - ArrayList ids = new ArrayList(model.size()); + ArrayList ids = new ArrayList<>(model.size()); for (Comparable id : model.keySet()) { if (rand.nextInt(100) < percent) { ids.add(id.toString()); @@ -216,7 +216,7 @@ public class TestRandomDVFaceting extends SolrTestCaseJ4 { String facet_field = ftype.fname; List methods = multiValued ? 
multiValuedMethods : singleValuedMethods; - List responses = new ArrayList(methods.size()); + List responses = new ArrayList<>(methods.size()); for (String method : methods) { if (method.equals("dv")) { params.set("facet.field", "{!key="+facet_field+"}"+facet_field+"_dv"); diff --git a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java index 6ee53a9e5ae..f2d4cd3231e 100644 --- a/solr/core/src/test/org/apache/solr/TestRandomFaceting.java +++ b/solr/core/src/test/org/apache/solr/TestRandomFaceting.java @@ -53,7 +53,7 @@ public class TestRandomFaceting extends SolrTestCaseJ4 { model = null; indexSize = rand.nextBoolean() ? (rand.nextInt(10) + 1) : (rand.nextInt(100) + 10); - types = new ArrayList(); + types = new ArrayList<>(); types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4))); types.add(new FldType("score_f",ONE_ONE, new FVal(1,100))); types.add(new FldType("small_f",ONE_ONE, new FVal(-4,5))); @@ -87,7 +87,7 @@ public class TestRandomFaceting extends SolrTestCaseJ4 { Random rand = random(); int percent = rand.nextInt(100); if (model == null) return; - ArrayList ids = new ArrayList(model.size()); + ArrayList ids = new ArrayList<>(model.size()); for (Comparable id : model.keySet()) { if (rand.nextInt(100) < percent) { ids.add(id.toString()); @@ -209,7 +209,7 @@ public class TestRandomFaceting extends SolrTestCaseJ4 { params.set("facet.field", facet_field); List methods = multiValued ? multiValuedMethods : singleValuedMethods; - List responses = new ArrayList(methods.size()); + List responses = new ArrayList<>(methods.size()); for (String method : methods) { // params.add("facet.field", "{!key="+method+"}" + ftype.fname); // TODO: allow method to be passed on local params? diff --git a/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java b/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java index 7579acc4cb6..10cf2472967 100644 --- a/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java +++ b/solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java @@ -80,7 +80,7 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testGamma() throws Exception { String test = "Γ"; String gold = "\u0393"; - Set set = new HashSet(); + Set set = new HashSet<>(); set.add("reserved"); Reader reader = new LegacyHTMLStripCharFilter(new StringReader(test), set); StringBuilder builder = new StringBuilder(); @@ -97,7 +97,7 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testEntities() throws Exception { String test = "  <foo> Übermensch = Γ bar Γ"; String gold = " \u00DCbermensch = \u0393 bar \u0393"; - Set set = new HashSet(); + Set set = new HashSet<>(); set.add("reserved"); Reader reader = new LegacyHTMLStripCharFilter(new StringReader(test), set); StringBuilder builder = new StringBuilder(); @@ -114,7 +114,7 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testMoreEntities() throws Exception { String test = "  <junk/>   ! @ and ’"; String gold = " ! 
@ and ’"; - Set set = new HashSet(); + Set set = new HashSet<>(); set.add("reserved"); Reader reader = new LegacyHTMLStripCharFilter(new StringReader(test), set); StringBuilder builder = new StringBuilder(); @@ -130,7 +130,7 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { public void testReserved() throws Exception { String test = "aaa bbb eeee ffff "; - Set set = new HashSet(); + Set set = new HashSet<>(); set.add("reserved"); Reader reader = new LegacyHTMLStripCharFilter(new StringReader(test), set); StringBuilder builder = new StringBuilder(); diff --git a/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java b/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java index bb0709a9aea..181dd68cbec 100644 --- a/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java +++ b/solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java @@ -44,7 +44,7 @@ import org.junit.Test; import static org.apache.lucene.analysis.BaseTokenStreamTestCase.*; public class TestReversedWildcardFilterFactory extends SolrTestCaseJ4 { - Map args = new HashMap(); + Map args = new HashMap<>(); IndexSchema schema; @BeforeClass diff --git a/solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java b/solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java index 6502eda190c..e71b8c8b7f7 100644 --- a/solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java +++ b/solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java @@ -199,7 +199,7 @@ public class TestWordDelimiterFilterFactory extends SolrTestCaseJ4 { public void testCustomTypes() throws Exception { String testText = "I borrowed $5,400.00 at 25% interest-rate"; ResourceLoader loader = new SolrResourceLoader("solr/collection1"); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put("generateWordParts", "1"); args.put("generateNumberParts", "1"); args.put("catenateWords", "1"); @@ -221,7 +221,7 @@ public class TestWordDelimiterFilterFactory extends SolrTestCaseJ4 { /* custom behavior */ - args = new HashMap(); + args = new HashMap<>(); // use a custom type mapping args.put("generateWordParts", "1"); args.put("generateNumberParts", "1"); diff --git a/solr/core/src/test/org/apache/solr/analytics/AbstractAnalyticsStatsTest.java b/solr/core/src/test/org/apache/solr/analytics/AbstractAnalyticsStatsTest.java index f68dbdb977d..7be2c339823 100644 --- a/solr/core/src/test/org/apache/solr/analytics/AbstractAnalyticsStatsTest.java +++ b/solr/core/src/test/org/apache/solr/analytics/AbstractAnalyticsStatsTest.java @@ -51,7 +51,7 @@ import com.google.common.collect.ObjectArrays; public class AbstractAnalyticsStatsTest extends SolrTestCaseJ4 { protected static final String[] BASEPARMS = new String[]{ "q", "*:*", "indent", "true", "olap", "true", "rows", "0" }; - protected static final HashMap defaults = new HashMap(); + protected static final HashMap defaults = new HashMap<>(); public static enum VAL_TYPE { INTEGER("int"), @@ -162,7 +162,7 @@ public class AbstractAnalyticsStatsTest extends SolrTestCaseJ4 { } else if (stat.equals("count")) { result = Long.valueOf(list.size()); } else if (stat.equals("unique")) { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); set.addAll(list); result = Long.valueOf((long)set.size()); } else if (stat.equals("max")) { @@ -198,7 +198,7 @@ public class AbstractAnalyticsStatsTest extends SolrTestCaseJ4 
{ if (in == null) throw new FileNotFoundException("Resource not found: " + fileName); Scanner file = new Scanner(in, "UTF-8"); try { - ArrayList strList = new ArrayList(); + ArrayList strList = new ArrayList<>(); while (file.hasNextLine()) { String line = file.nextLine(); line = line.trim(); diff --git a/solr/core/src/test/org/apache/solr/analytics/NoFacetTest.java b/solr/core/src/test/org/apache/solr/analytics/NoFacetTest.java index 9193cc542a3..a18e9d627d9 100644 --- a/solr/core/src/test/org/apache/solr/analytics/NoFacetTest.java +++ b/solr/core/src/test/org/apache/solr/analytics/NoFacetTest.java @@ -72,12 +72,12 @@ public class NoFacetTest extends AbstractAnalyticsStatsTest { defaults.put("date_dtd", "1800-12-31T23:59:59Z"); defaults.put("string_sd", "str0"); - intTestStart = new ArrayList(); - longTestStart = new ArrayList(); - floatTestStart = new ArrayList(); - doubleTestStart = new ArrayList(); - dateTestStart = new ArrayList(); - stringTestStart = new ArrayList(); + intTestStart = new ArrayList<>(); + longTestStart = new ArrayList<>(); + floatTestStart = new ArrayList<>(); + doubleTestStart = new ArrayList<>(); + dateTestStart = new ArrayList<>(); + stringTestStart = new ArrayList<>(); for (int j = 0; j < NUM_LOOPS; ++j) { int i = j%INT; @@ -86,7 +86,7 @@ public class NoFacetTest extends AbstractAnalyticsStatsTest { double d = j%DOUBLE; String dt = (1800+j%DATE) + "-12-31T23:59:59Z"; String s = "str" + (j%STRING); - List fields = new ArrayList(); + List fields = new ArrayList<>(); fields.add("id"); fields.add("1000"+j); if( i != 0 ){ diff --git a/solr/core/src/test/org/apache/solr/analytics/expression/ExpressionTest.java b/solr/core/src/test/org/apache/solr/analytics/expression/ExpressionTest.java index e0d49a7fa12..43c5f501c81 100644 --- a/solr/core/src/test/org/apache/solr/analytics/expression/ExpressionTest.java +++ b/solr/core/src/test/org/apache/solr/analytics/expression/ExpressionTest.java @@ -237,7 +237,7 @@ public class ExpressionTest extends AbstractAnalyticsStatsTest { if (in == null) throw new FileNotFoundException("Resource not found: " + fileName); Scanner file = new Scanner(in, "UTF-8"); try { - ArrayList strList = new ArrayList(); + ArrayList strList = new ArrayList<>(); while (file.hasNextLine()) { String line = file.nextLine(); if (line.length()<2) { diff --git a/solr/core/src/test/org/apache/solr/analytics/facet/AbstractAnalyticsFacetTest.java b/solr/core/src/test/org/apache/solr/analytics/facet/AbstractAnalyticsFacetTest.java index 51e72262f6b..820a1c55872 100644 --- a/solr/core/src/test/org/apache/solr/analytics/facet/AbstractAnalyticsFacetTest.java +++ b/solr/core/src/test/org/apache/solr/analytics/facet/AbstractAnalyticsFacetTest.java @@ -53,7 +53,7 @@ import javax.xml.xpath.XPathFactory; @SuppressCodecs({"Lucene3x","Lucene40","Lucene41","Lucene42","Appending","Asserting"}) public class AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { - protected static final HashMap defaults = new HashMap(); + protected static final HashMap defaults = new HashMap<>(); protected String latestType = ""; @@ -88,7 +88,7 @@ public class AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { } protected ArrayList getStringList(String n1, String n2, String n3, String element, String n4) throws XPathExpressionException { - ArrayList ret = new ArrayList(); + ArrayList ret = new ArrayList<>(); NodeList nodes = getNodes(n1, n2, n3, element, n4); for (int idx = 0; idx < nodes.getLength(); ++idx) { ret.add(nodes.item(idx).getTextContent()); @@ -98,7 +98,7 @@ public class 
AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { protected ArrayList getIntegerList(String n1, String n2, String n3, String element, String n4) throws XPathExpressionException { - ArrayList ret = new ArrayList(); + ArrayList ret = new ArrayList<>(); NodeList nodes = getNodes(n1, n2, n3, element, n4); for (int idx = 0; idx < nodes.getLength(); ++idx) { ret.add(Integer.parseInt(nodes.item(idx).getTextContent())); @@ -107,7 +107,7 @@ public class AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { } protected ArrayList getLongList(String n1, String n2, String n3, String element, String n4) throws XPathExpressionException { - ArrayList ret = new ArrayList(); + ArrayList ret = new ArrayList<>(); NodeList nodes = getNodes(n1, n2, n3, element, n4); for (int idx = 0; idx < nodes.getLength(); ++idx) { ret.add(Long.parseLong(nodes.item(idx).getTextContent())); @@ -116,7 +116,7 @@ public class AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { } protected ArrayList getFloatList(String n1, String n2, String n3, String element, String n4) throws XPathExpressionException { - ArrayList ret = new ArrayList(); + ArrayList ret = new ArrayList<>(); NodeList nodes = getNodes(n1, n2, n3, element, n4); for (int idx = 0; idx < nodes.getLength(); ++idx) { ret.add(Float.parseFloat(nodes.item(idx).getTextContent())); @@ -126,7 +126,7 @@ public class AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { protected ArrayList getDoubleList(String n1, String n2, String n3, String element, String n4) throws XPathExpressionException { - ArrayList ret = new ArrayList(); + ArrayList ret = new ArrayList<>(); NodeList nodes = getNodes(n1, n2, n3, element, n4); for (int idx = 0; idx < nodes.getLength(); ++idx) { ret.add(Double.parseDouble(nodes.item(idx).getTextContent())); @@ -141,7 +141,7 @@ public class AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { } public static String[] filter(String...args){ - List l = new ArrayList(); + List l = new ArrayList<>(); for( int i=0; i (); for (List list : lists) { - HashSet set = new HashSet(); + HashSet set = new HashSet<>(); set.addAll(list); result.add((long)set.size()); } @@ -285,7 +285,7 @@ public class AbstractAnalyticsFacetTest extends SolrTestCaseJ4 { if (in == null) throw new FileNotFoundException("Resource not found: " + fileName); Scanner file = new Scanner(in, "UTF-8"); try { - ArrayList strList = new ArrayList(); + ArrayList strList = new ArrayList<>(); while (file.hasNextLine()) { String line = file.nextLine(); if (line.length()<2) { diff --git a/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetExtrasTest.java b/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetExtrasTest.java index 998be47d91a..2e0b6206fba 100644 --- a/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetExtrasTest.java +++ b/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetExtrasTest.java @@ -51,10 +51,10 @@ public class FieldFacetExtrasTest extends AbstractAnalyticsFacetTest { h.update("*:*"); //INT - intLongTestStart = new ArrayList>(); - intFloatTestStart = new ArrayList>(); - intDoubleTestStart = new ArrayList>(); - intStringTestStart = new ArrayList>(); + intLongTestStart = new ArrayList<>(); + intFloatTestStart = new ArrayList<>(); + intDoubleTestStart = new ArrayList<>(); + intStringTestStart = new ArrayList<>(); for (int j = 0; j < NUM_LOOPS; ++j) { int i = j%INT; @@ -67,7 +67,7 @@ public class FieldFacetExtrasTest extends AbstractAnalyticsFacetTest { "double_dd", "" + d, "date_dtd", (1800+dt) + "-12-31T23:59:59.999Z", "string_sd", "abc" + s)); 
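The FieldFacetExtrasTest fixtures in this hunk are lists of lists, which is where the diamond pays off most: the full nested element type is spelled out once, on the declaration, instead of twice. A sketch (the field name echoes the hunk for illustration only):

    import java.util.ArrayList;

    class NestedDiamond {
      public static void main(String[] args) {
        // The nested type appears only on the declaration; the constructor
        // call collapses from 'new ArrayList<ArrayList<Integer>>()' to this:
        ArrayList<ArrayList<Integer>> intLongTestStart = new ArrayList<>();
        ArrayList<Integer> list1 = new ArrayList<>();
        list1.add(42);
        intLongTestStart.add(list1);
        System.out.println(intLongTestStart); // [[42]]
      }
    }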
//Long if (j-LONG<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); list1.add(i); intLongTestStart.add(list1); } else { @@ -75,7 +75,7 @@ public class FieldFacetExtrasTest extends AbstractAnalyticsFacetTest { } //String if (j-FLOAT<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); list1.add(i); intFloatTestStart.add(list1); } else { @@ -83,7 +83,7 @@ public class FieldFacetExtrasTest extends AbstractAnalyticsFacetTest { } //String if (j-DOUBLE<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); list1.add(i); intDoubleTestStart.add(list1); } else { @@ -91,7 +91,7 @@ public class FieldFacetExtrasTest extends AbstractAnalyticsFacetTest { } //String if (j-STRING<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); list1.add(i); intStringTestStart.add(list1); } else { @@ -127,7 +127,7 @@ public class FieldFacetExtrasTest extends AbstractAnalyticsFacetTest { Collection lon; - List all = new ArrayList(); + List all = new ArrayList<>(); lon = getDoubleList("off0", "fieldFacets", "long_ld", "double", "mean"); assertEquals(getRawResponse(), lon.size(),2); assertArrayEquals(new Double[]{ 1.5, 2.0 }, lon.toArray(new Double[0])); diff --git a/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetTest.java b/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetTest.java index 18f83014327..0c28fa5af4e 100644 --- a/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetTest.java +++ b/solr/core/src/test/org/apache/solr/analytics/facet/FieldFacetTest.java @@ -101,48 +101,48 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ defaults.put("string", "str0"); //INT - intDateTestStart = new ArrayList>(); - intDateTestMissing = new ArrayList(); - intStringTestStart = new ArrayList>(); - intStringTestMissing = new ArrayList(); + intDateTestStart = new ArrayList<>(); + intDateTestMissing = new ArrayList<>(); + intStringTestStart = new ArrayList<>(); + intStringTestMissing = new ArrayList<>(); //LONG - longDateTestStart = new ArrayList>(); - longDateTestMissing = new ArrayList(); - longStringTestStart = new ArrayList>(); - longStringTestMissing = new ArrayList(); + longDateTestStart = new ArrayList<>(); + longDateTestMissing = new ArrayList<>(); + longStringTestStart = new ArrayList<>(); + longStringTestMissing = new ArrayList<>(); //FLOAT - floatDateTestStart = new ArrayList>(); - floatDateTestMissing = new ArrayList(); - floatStringTestStart = new ArrayList>(); - floatStringTestMissing = new ArrayList(); + floatDateTestStart = new ArrayList<>(); + floatDateTestMissing = new ArrayList<>(); + floatStringTestStart = new ArrayList<>(); + floatStringTestMissing = new ArrayList<>(); //DOUBLE - doubleDateTestStart = new ArrayList>(); - doubleDateTestMissing = new ArrayList(); - doubleStringTestStart = new ArrayList>(); - doubleStringTestMissing = new ArrayList(); + doubleDateTestStart = new ArrayList<>(); + doubleDateTestMissing = new ArrayList<>(); + doubleStringTestStart = new ArrayList<>(); + doubleStringTestMissing = new ArrayList<>(); //DATE - dateIntTestStart = new ArrayList>(); - dateIntTestMissing = new ArrayList(); - dateLongTestStart = new ArrayList>(); - dateLongTestMissing = new ArrayList(); + dateIntTestStart = new ArrayList<>(); + dateIntTestMissing = new ArrayList<>(); + dateLongTestStart = new ArrayList<>(); + dateLongTestMissing = new ArrayList<>(); //String - stringIntTestStart = new ArrayList>(); - stringIntTestMissing = new ArrayList(); - 
stringLongTestStart = new ArrayList>(); - stringLongTestMissing = new ArrayList(); + stringIntTestStart = new ArrayList<>(); + stringIntTestMissing = new ArrayList<>(); + stringLongTestStart = new ArrayList<>(); + stringLongTestMissing = new ArrayList<>(); //Multi-Valued - multiLongTestStart = new ArrayList>(); - multiLongTestMissing = new ArrayList(); - multiStringTestStart = new ArrayList>(); - multiStringTestMissing = new ArrayList(); - multiDateTestStart = new ArrayList>(); - multiDateTestMissing = new ArrayList(); + multiLongTestStart = new ArrayList<>(); + multiLongTestMissing = new ArrayList<>(); + multiStringTestStart = new ArrayList<>(); + multiStringTestMissing = new ArrayList<>(); + multiDateTestStart = new ArrayList<>(); + multiDateTestMissing = new ArrayList<>(); for (int j = 0; j < NUM_LOOPS; ++j) { int i = j%INT; @@ -178,7 +178,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ if( dt != 0 ){ //Dates if (j-DATE<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); if( i != 0 ){ list1.add(i); intDateTestMissing.add(0l); @@ -186,7 +186,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ intDateTestMissing.add(1l); } intDateTestStart.add(list1); - ArrayList list2 = new ArrayList(); + ArrayList list2 = new ArrayList<>(); if( l != 0l ){ list2.add(l); longDateTestMissing.add(0l); @@ -194,7 +194,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ longDateTestMissing.add(1l); } longDateTestStart.add(list2); - ArrayList list3 = new ArrayList(); + ArrayList list3 = new ArrayList<>(); if ( f != 0.0f ){ list3.add(f); floatDateTestMissing.add(0l); @@ -203,7 +203,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ } floatDateTestStart.add(list3); - ArrayList list4 = new ArrayList(); + ArrayList list4 = new ArrayList<>(); if( d != 0.0d ){ list4.add(d); doubleDateTestMissing.add(0l); @@ -211,7 +211,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ doubleDateTestMissing.add(1l); } doubleDateTestStart.add(list4); - ArrayList list5 = new ArrayList(); + ArrayList list5 = new ArrayList<>(); if( i != 0 ){ list5.add(i); multiDateTestMissing.add(0l); @@ -230,7 +230,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ } if (j-DATEM<0 && dtm!=dt && dtm!=0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); if( i != 0 ){ list1.add(i); multiDateTestMissing.add(0l); @@ -245,7 +245,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ if( s != 0 ){ //Strings if (j-STRING<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); if( i != 0 ){ list1.add(i); intStringTestMissing.add(0l); @@ -253,7 +253,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ intStringTestMissing.add(1l); } intStringTestStart.add(list1); - ArrayList list2 = new ArrayList(); + ArrayList list2 = new ArrayList<>(); if( l != 0l ){ list2.add(l); longStringTestMissing.add(0l); @@ -261,7 +261,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ longStringTestMissing.add(1l); } longStringTestStart.add(list2); - ArrayList list3 = new ArrayList(); + ArrayList list3 = new ArrayList<>(); if( f != 0.0f ){ list3.add(f); floatStringTestMissing.add(0l); @@ -269,7 +269,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ floatStringTestMissing.add(1l); } floatStringTestStart.add(list3); - ArrayList list4 = new ArrayList(); + ArrayList list4 = new ArrayList<>(); if( d != 0.0d ){ list4.add(d); 
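One general language caveat a sweep like this has to respect (a Java 7/8 rule, not something taken from these hunks): the diamond cannot be combined with an anonymous class body until Java 9, so such constructors keep their explicit type arguments. A sketch:

    import java.util.Comparator;
    import java.util.TreeSet;

    class DiamondAnonymousCaveat {
      public static void main(String[] args) {
        // Rejected by javac 7/8, so a diamond sweep must leave it alone:
        //   Comparator<String> cmp = new Comparator<>() { ... };
        Comparator<String> cmp = new Comparator<String>() {
          @Override
          public int compare(String a, String b) { return b.compareTo(a); }
        };
        // The diamond itself is fine when the anonymous class is an argument:
        TreeSet<String> reversed = new TreeSet<>(cmp);
        reversed.add("apple");
        reversed.add("banana");
        System.out.println(reversed); // [banana, apple]
      }
    }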
doubleStringTestMissing.add(0l); @@ -277,7 +277,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ doubleStringTestMissing.add(1l); } doubleStringTestStart.add(list4); - ArrayList list5 = new ArrayList(); + ArrayList list5 = new ArrayList<>(); if( i != 0 ){ list5.add(i); multiStringTestMissing.add(0l); @@ -297,7 +297,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ //Strings if( sm != 0 ){ if (j-STRINGM<0&&sm!=s) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); if( i != 0 ){ list1.add(i); multiStringTestMissing.add(0l); @@ -313,7 +313,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ //Int if( i != 0 ){ if (j-INT<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); if( dt != 0 ){ list1.add((1800+dt) + "-12-31T23:59:59Z"); dateIntTestMissing.add(0l); @@ -321,7 +321,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ dateIntTestMissing.add(1l); } dateIntTestStart.add(list1); - ArrayList list2 = new ArrayList(); + ArrayList list2 = new ArrayList<>(); if( s != 0 ){ list2.add("str"+s); stringIntTestMissing.add(0l); @@ -338,7 +338,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ //Long if( l != 0 ){ if (j-LONG<0) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); if( dt != 0 ){ list1.add((1800+dt) + "-12-31T23:59:59Z"); dateLongTestMissing.add(0l); @@ -346,7 +346,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ dateLongTestMissing.add(1l); } dateLongTestStart.add(list1); - ArrayList list2 = new ArrayList(); + ArrayList list2 = new ArrayList<>(); if( s != 0 ){ list2.add("str"+s); stringLongTestMissing.add(0l); @@ -354,7 +354,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ stringLongTestMissing.add(1l); } stringLongTestStart.add(list2); - ArrayList list3 = new ArrayList(); + ArrayList list3 = new ArrayList<>(); if( i != 0 ){ list3.add(i); multiLongTestMissing.add(0l); @@ -372,7 +372,7 @@ public class FieldFacetTest extends AbstractAnalyticsFacetTest{ //Long if( lm != 0 ){ if (j-LONGM<0&&lm!=l) { - ArrayList list1 = new ArrayList(); + ArrayList list1 = new ArrayList<>(); if( i != 0 ){ list1.add(i); multiLongTestMissing.add(0l); diff --git a/solr/core/src/test/org/apache/solr/analytics/facet/QueryFacetTest.java b/solr/core/src/test/org/apache/solr/analytics/facet/QueryFacetTest.java index 583f2b198fa..4516c956b94 100644 --- a/solr/core/src/test/org/apache/solr/analytics/facet/QueryFacetTest.java +++ b/solr/core/src/test/org/apache/solr/analytics/facet/QueryFacetTest.java @@ -46,18 +46,18 @@ public class QueryFacetTest extends AbstractAnalyticsFacetTest { public void queryTest() throws Exception { h.update("*:*"); //INT - ArrayList> int1TestStart = new ArrayList>(); + ArrayList> int1TestStart = new ArrayList<>(); int1TestStart.add(new ArrayList()); - ArrayList> int2TestStart = new ArrayList>(); + ArrayList> int2TestStart = new ArrayList<>(); int2TestStart.add(new ArrayList()); //LONG - ArrayList> longTestStart = new ArrayList>(); + ArrayList> longTestStart = new ArrayList<>(); longTestStart.add(new ArrayList()); longTestStart.add(new ArrayList()); //FLOAT - ArrayList> floatTestStart = new ArrayList>(); + ArrayList> floatTestStart = new ArrayList<>(); floatTestStart.add(new ArrayList()); floatTestStart.add(new ArrayList()); floatTestStart.add(new ArrayList()); diff --git a/solr/core/src/test/org/apache/solr/analytics/facet/RangeFacetTest.java 
index 6f162f052d7..d7477df2aed 100644
--- a/solr/core/src/test/org/apache/solr/analytics/facet/RangeFacetTest.java
+++ b/solr/core/src/test/org/apache/solr/analytics/facet/RangeFacetTest.java
@@ -53,14 +53,14 @@ public class RangeFacetTest extends AbstractAnalyticsFacetTest {
 h.update("*:*");
 //INT
- intLongTestStart = new ArrayList>();
- intDoubleTestStart = new ArrayList>();
- intDateTestStart = new ArrayList>();
+ intLongTestStart = new ArrayList<>();
+ intDoubleTestStart = new ArrayList<>();
+ intDateTestStart = new ArrayList<>();
 //FLOAT
- floatLongTestStart = new ArrayList>();
- floatDoubleTestStart = new ArrayList>();
- floatDateTestStart = new ArrayList>();
+ floatLongTestStart = new ArrayList<>();
+ floatDoubleTestStart = new ArrayList<>();
+ floatDateTestStart = new ArrayList<>();
 for (int j = 0; j < NUM_LOOPS; ++j) {
 int i = j%INT;
@@ -73,10 +73,10 @@ public class RangeFacetTest extends AbstractAnalyticsFacetTest {
 "double_dd", "" + d, "date_dtd", (1000+dt) + "-01-01T23:59:59Z", "string_sd", "abc" + s));
 //Longs
 if (j-LONG<0) {
- ArrayList list1 = new ArrayList();
+ ArrayList list1 = new ArrayList<>();
 list1.add(i);
 intLongTestStart.add(list1);
- ArrayList list2 = new ArrayList();
+ ArrayList list2 = new ArrayList<>();
 list2.add(f);
 floatLongTestStart.add(list2);
 } else {
@@ -85,10 +85,10 @@ public class RangeFacetTest extends AbstractAnalyticsFacetTest {
 }
 //Doubles
 if (j-DOUBLE<0) {
- ArrayList list1 = new ArrayList();
+ ArrayList list1 = new ArrayList<>();
 list1.add(i);
 intDoubleTestStart.add(list1);
- ArrayList list2 = new ArrayList();
+ ArrayList list2 = new ArrayList<>();
 list2.add(f);
 floatDoubleTestStart.add(list2);
 } else {
@@ -97,10 +97,10 @@ public class RangeFacetTest extends AbstractAnalyticsFacetTest {
 }
 //Dates
 if (j-DATE<0) {
- ArrayList list1 = new ArrayList();
+ ArrayList list1 = new ArrayList<>();
 list1.add(i);
 intDateTestStart.add(list1);
- ArrayList list2 = new ArrayList();
+ ArrayList list2 = new ArrayList<>();
 list2.add(f);
 floatDateTestStart.add(list2);
 } else {
@@ -235,11 +235,11 @@ public class RangeFacetTest extends AbstractAnalyticsFacetTest {
 end+=gap-off;
 }
- ArrayList> lists = new ArrayList>();
- ArrayList between = new ArrayList();
+ ArrayList> lists = new ArrayList<>();
+ ArrayList between = new ArrayList<>();
 if (incLow && incUp) {
 for (int i = start; i list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i; j<=i+gap && j<=end && j list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i; j list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i+1; j<=i+gap && j<=end && j list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i+1; j before = new ArrayList();
- ArrayList after = new ArrayList();
+ ArrayList before = new ArrayList<>();
+ ArrayList after = new ArrayList<>();
 if (incOut || !(incLow||incEdge)) {
 for (int i = 0; i<=start; i++) {
 before.addAll(listsStart.get(i));
@@ -341,8 +341,8 @@ public class RangeFacetTest extends AbstractAnalyticsFacetTest {
 end+=last-off;
 }
- ArrayList> lists = new ArrayList>();
- ArrayList between = new ArrayList();
+ ArrayList> lists = new ArrayList<>();
+ ArrayList between = new ArrayList<>();
 int gap = 0;
 int gapCounter = 0;
 if (incLow && incUp) {
@@ -350,7 +350,7 @@ public class RangeFacetTest extends AbstractAnalyticsFacetTest {
 if (gapCounter list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i; j<=i+gap && j<=end && j list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i; j list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i+1; j<=i+gap && j<=end && j list = new ArrayList();
+ ArrayList list = new ArrayList<>();
 for (int j = i+1; j before = new ArrayList();
- ArrayList after = new ArrayList();
+ ArrayList before = new ArrayList<>();
+ ArrayList after = new ArrayList<>();
 if (incOut || !(incLow||incEdge)) {
 for (int i = 0; i<=start; i++) {
 before.addAll(listsStart.get(i));
diff --git a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
index efd6dc2ccbf..a090aef983c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AliasIntegrationTest.java
@@ -91,7 +91,7 @@ public class AliasIntegrationTest extends AbstractFullDistribZkTestBase {
 createCollection("collection2", 2, 1, 10);
- List numShardsNumReplicaList = new ArrayList(2);
+ List numShardsNumReplicaList = new ArrayList<>(2);
 numShardsNumReplicaList.add(2);
 numShardsNumReplicaList.add(1);
 checkForCollection("collection2", numShardsNumReplicaList, null);
diff --git a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java b/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
index 7d05c2631b8..6d22428b27c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/AssignTest.java
@@ -58,11 +58,11 @@ public class AssignTest extends SolrTestCaseJ4 {
 public void testAssignNode() throws Exception {
 String cname = "collection1";
- Map collectionStates = new HashMap();
+ Map collectionStates = new HashMap<>();
- Map slices = new HashMap();
+ Map slices = new HashMap<>();
- Map replicas = new HashMap();
+ Map replicas = new HashMap<>();
 ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
 ZkStateReader.STATE_PROP, "ACTIVE",
@@ -85,7 +85,7 @@ public class AssignTest extends SolrTestCaseJ4 {
 collectionStates.put(cname, docCollection);
- Set liveNodes = new HashSet();
+ Set liveNodes = new HashSet<>();
 ClusterState state = new ClusterState(-1,liveNodes, collectionStates,ClusterStateTest.getMockZkStateReader(collectionStates.keySet()));
 String nodeName = Assign.assignNode("collection1", state);
diff --git a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
index 7e8d32d2588..449721068ef 100644
--- a/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/BasicDistributedZkTest.java
@@ -99,7 +99,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 String missingField="ignore_exception__missing_but_valid_field_t";
 String invalidField="ignore_exception__invalid_field_not_in_schema";
- private Map> otherCollectionClients = new HashMap>();
+ private Map> otherCollectionClients = new HashMap<>();
 private String oneInstanceCollection = "oneInstanceCollection";
 private String oneInstanceCollection2 = "oneInstanceCollection2";
@@ -131,8 +131,8 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 sliceCount = 2;
 shardCount = 4;
- completionService = new ExecutorCompletionService(executor);
- pending = new HashSet>();
+ completionService = new ExecutorCompletionService<>(executor);
+ pending = new HashSet<>();
 }
@@ -418,12 +418,12 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 private void testShardParamVariations() throws Exception {
 SolrQuery query = new SolrQuery("*:*");
- Map shardCounts = new HashMap();
+ Map shardCounts = new HashMap<>();
 for (String shard : shardToJetty.keySet()) {
 // every client should give the same numDocs for this shard
 // shffle the clients in a diff order for each shard
- List solrclients = new ArrayList(this.clients);
+ List solrclients = new ArrayList<>(this.clients);
 Collections.shuffle(solrclients, random());
 for (SolrServer client : solrclients) {
 query.set("shards", shard);
@@ -437,11 +437,11 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 shardCounts.get(shard).longValue(), numDocs);
 List replicaJetties
- = new ArrayList(shardToJetty.get(shard));
+ = new ArrayList<>(shardToJetty.get(shard));
 Collections.shuffle(replicaJetties, random());
 // each replica should also give the same numDocs
- ArrayList replicaAlts = new ArrayList(replicaJetties.size() * 2);
+ ArrayList replicaAlts = new ArrayList<>(replicaJetties.size() * 2);
 for (CloudJettyRunner replicaJetty : shardToJetty.get(shard)) {
 String replica = replicaJetty.url;
 query.set("shards", replica);
@@ -474,7 +474,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 // sums of multiple shards should add up regardless of how we
 // query those shards or which client we use
 long randomShardCountsExpected = 0;
- ArrayList randomShards = new ArrayList(shardCounts.size());
+ ArrayList randomShards = new ArrayList<>(shardCounts.size());
 for (Map.Entry shardData : shardCounts.entrySet()) {
 if (random().nextBoolean() || randomShards.size() < 2) {
 String shard = shardData.getKey();
@@ -484,7 +484,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 randomShards.add(shard);
 } else {
 // use some set explicit replicas
- ArrayList replicas = new ArrayList(7);
+ ArrayList replicas = new ArrayList<>(7);
 for (CloudJettyRunner replicaJetty : shardToJetty.get(shard)) {
 if (0 == random().nextInt(3) || 0 == replicas.size()) {
 replicas.add(replicaJetty.url);
@@ -604,7 +604,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 if (createNodeSetStr != null) params.set(OverseerCollectionProcessor.CREATE_NODE_SET, createNodeSetStr);
 int clientIndex = clients.size() > 1 ? random().nextInt(2) : 0;
- List list = new ArrayList();
+ List list = new ArrayList<>();
 list.add(numShards);
 list.add(numReplicas);
 if (collectionInfos != null) {
@@ -709,7 +709,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 sd = sdoc("id", 1000, "foo_i",5);
 clients.get(0).add(sd);
- List expected = new ArrayList();
+ List expected = new ArrayList<>();
 int val = 0;
 for (SolrServer client : clients) {
 val += 10;
@@ -764,7 +764,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
 log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
 System.clearProperty("numShards");
- List collectionClients = new ArrayList();
+ List collectionClients = new ArrayList<>();
 SolrServer client = clients.get(0);
 final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
 0,
@@ -896,7 +896,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 private void testANewCollectionInOneInstance() throws Exception {
 log.info("### STARTING testANewCollectionInOneInstance");
- List collectionClients = new ArrayList();
+ List collectionClients = new ArrayList<>();
 SolrServer client = clients.get(0);
 final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
 0,
@@ -1085,7 +1085,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
 }
 private void createNewCollection(final String collection) throws InterruptedException {
- final List collectionClients = new ArrayList();
+ final List collectionClients = new ArrayList<>();
 otherCollectionClients.put(collection, collectionClients);
 int unique = 0;
 for (final SolrServer client : clients) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
index ba0f0817843..a738c2a03c0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java
@@ -127,7 +127,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
 // as it's not supported for recovery
 del("*:*");
- List threads = new ArrayList();
+ List threads = new ArrayList<>();
 int threadCount = 1;
 int i = 0;
 for (i = 0; i < threadCount; i++) {
@@ -247,7 +247,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
 } finally {
 client.shutdown();
 }
- List numShardsNumReplicas = new ArrayList(2);
+ List numShardsNumReplicas = new ArrayList<>(2);
 numShardsNumReplicas.add(1);
 numShardsNumReplicas.add(1);
 checkForCollection("testcollection",numShardsNumReplicas, null);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java
index 19e40bfd2db..19ae3ed13a7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeySafeLeaderTest.java
@@ -105,7 +105,7 @@ public class ChaosMonkeySafeLeaderTest extends AbstractFullDistribZkTestBase {
 del("*:*");
- List threads = new ArrayList();
+ List threads = new ArrayList<>();
 int threadCount = 2;
 for (int i = 0; i < threadCount; i++) {
 StopableIndexingThread indexThread = new StopableIndexingThread(controlClient, cloudClient, Integer.toString(i), true);
@@ -169,7 +169,7 @@ public class ChaosMonkeySafeLeaderTest extends AbstractFullDistribZkTestBase {
 } finally {
 client.shutdown();
 }
- List numShardsNumReplicas = new ArrayList(2);
+ List numShardsNumReplicas = new ArrayList<>(2);
 numShardsNumReplicas.add(1);
 numShardsNumReplicas.add(1);
 checkForCollection("testcollection",numShardsNumReplicas, null);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
index a438c319d59..811a4559329 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java
@@ -41,14 +41,14 @@ import static org.easymock.EasyMock.expectLastCall;
 public class ClusterStateTest extends SolrTestCaseJ4 {
 @Test
 public void testStoreAndRead() throws Exception {
- Map collectionStates = new HashMap();
- Set liveNodes = new HashSet();
+ Map collectionStates = new HashMap<>();
+ Set liveNodes = new HashSet<>();
 liveNodes.add("node1");
 liveNodes.add("node2");
- Map slices = new HashMap();
- Map sliceToProps = new HashMap();
- Map props = new HashMap();
+ Map slices = new HashMap<>();
+ Map sliceToProps = new HashMap<>();
+ Map props = new HashMap<>();
 props.put("prop1", "value");
 props.put("prop2", "value2");
diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
index 670ed26733e..35d49d76b2a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateUpdateTest.java
@@ -152,7 +152,7 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
 System.setProperty("solrcloud.update.delay", "1");
- Map props2 = new HashMap();
+ Map props2 = new HashMap<>();
 props2.put("configName", "conf1");
 ZkNodeProps zkProps2 = new ZkNodeProps(props2);
diff --git a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
index 5a4c45a04a3..c9ed9ff5526 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CollectionsAPIDistributedZkTest.java
@@ -173,8 +173,8 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
 sliceCount = 2;
 shardCount = 4;
- completionService = new ExecutorCompletionService(executor);
- pending = new HashSet>();
+ completionService = new ExecutorCompletionService<>(executor);
+ pending = new HashSet<>();
 checkCreatedVsState = false;
 }
@@ -223,7 +223,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
 String collectionName = "out_of_sync_collection";
- List numShardsNumReplicaList = new ArrayList();
+ List numShardsNumReplicaList = new ArrayList<>();
 numShardsNumReplicaList.add(2);
 numShardsNumReplicaList.add(1);
@@ -634,12 +634,12 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
 request.setPath("/admin/collections");
 createNewSolrServer("", baseUrl).request(request);
- List numShardsNumReplicaList = new ArrayList();
+ List numShardsNumReplicaList = new ArrayList<>();
 numShardsNumReplicaList.add(2);
 numShardsNumReplicaList.add(2);
 checkForCollection("nodes_used_collection", numShardsNumReplicaList , null);
- List createNodeList = new ArrayList();
+ List createNodeList = new ArrayList<>();
 Set liveNodes = cloudClient.getZkStateReader().getClusterState()
 .getLiveNodes();
@@ -677,7 +677,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
 // env make this pretty fragile
 // create new collections rapid fire
- Map> collectionInfos = new HashMap>();
+ Map> collectionInfos = new HashMap<>();
 int cnt = random().nextInt(TEST_NIGHTLY ? 6 : 3) + 1;
 for (int i = 0; i < cnt; i++) {
@@ -787,7 +787,7 @@ public class CollectionsAPIDistributedZkTestBa
 checkInstanceDirs(jettys.get(0));
- List collectionNameList = new ArrayList();
+ List collectionNameList = new ArrayList<>();
 collectionNameList.addAll(collectionInfos.keySet());
 String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
@@ -817,7 +817,7 @@ public class CollectionsAPIDistributedZkTestBa
 // lets try a collection reload
 // get core open times
- Map urlToTimeBefore = new HashMap();
+ Map urlToTimeBefore = new HashMap<>();
 collectStartTimes(collectionName, urlToTimeBefore);
 assertTrue(urlToTimeBefore.size() > 0);
 ModifiableSolrParams params = new ModifiableSolrParams();
@@ -883,7 +883,7 @@ public class CollectionsAPIDistributedZkTestBa
 request.setPath("/admin/collections");
 createNewSolrServer("", baseUrl).request(request);
- List list = new ArrayList (2);
+ List list = new ArrayList<>(2);
 list.add(1);
 list.add(2);
 checkForCollection(collectionName, list, null);
@@ -904,7 +904,7 @@ public class CollectionsAPIDistributedZkTestBa
 int numShards = (numLiveNodes/2) + 1;
 int replicationFactor = 2;
 int maxShardsPerNode = 1;
- collectionInfos = new HashMap>();
+ collectionInfos = new HashMap<>();
 CloudSolrServer client = createCloudClient("awholynewcollection_" + cnt);
 try {
 exp = false;
@@ -922,7 +922,7 @@ public class CollectionsAPIDistributedZkTestBa
 // Test createNodeSet
 numLiveNodes = getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
- List createNodeList = new ArrayList();
+ List createNodeList = new ArrayList<>();
 int numOfCreateNodes = numLiveNodes/2;
 assertFalse("createNodeSet test is pointless with only " + numLiveNodes + " nodes running", numOfCreateNodes == 0);
 int i = 0;
@@ -937,7 +937,7 @@ public class CollectionsAPIDistributedZkTestBa
 maxShardsPerNode = 2;
 numShards = createNodeList.size() * maxShardsPerNode;
 replicationFactor = 1;
- collectionInfos = new HashMap>();
+ collectionInfos = new HashMap<>();
 client = createCloudClient("awholynewcollection_" + (cnt+1));
 try {
 createCollection(collectionInfos, "awholynewcollection_" + (cnt+1), numShards, replicationFactor, maxShardsPerNode, client, StrUtils.join(createNodeList, ','), "conf1");
@@ -965,7 +965,7 @@ public class CollectionsAPIDistributedZkTestBa
 public void run() {
 // create new collections rapid fire
- Map> collectionInfos = new HashMap>();
+ Map> collectionInfos = new HashMap<>();
 int cnt = random().nextInt(TEST_NIGHTLY ? 13 : 3) + 1;
 for (int i = 0; i < cnt; i++) {
@@ -1012,7 +1012,7 @@ public class CollectionsAPIDistributedZkTestBa
 }
 }
 }
- List threads = new ArrayList();
+ List threads = new ArrayList<>();
 int numThreads = TEST_NIGHTLY ? 6 : 2;
 for (int i = 0; i < numThreads; i++) {
 CollectionThread thread = new CollectionThread("collection" + i);
@@ -1054,7 +1054,7 @@ public class CollectionsAPIDistributedZkTestBa
 boolean allTimesAreCorrect = false;
 while (System.currentTimeMillis() < timeoutAt) {
- Map urlToTimeAfter = new HashMap();
+ Map urlToTimeAfter = new HashMap<>();
 collectStartTimes(collectionName, urlToTimeAfter);
 boolean retry = false;
@@ -1182,13 +1182,13 @@ public class CollectionsAPIDistributedZkTestBa
 }
 private void checkNoTwoShardsUseTheSameIndexDir() throws Exception {
- Map> indexDirToShardNamesMap = new HashMap>();
+ Map> indexDirToShardNamesMap = new HashMap<>();
- List servers = new LinkedList();
+ List servers = new LinkedList<>();
 servers.add(ManagementFactory.getPlatformMBeanServer());
 servers.addAll(MBeanServerFactory.findMBeanServer(null));
 for (final MBeanServer server : servers) {
- Set mbeans = new HashSet();
+ Set mbeans = new HashSet<>();
 mbeans.addAll(server.queryNames(null, null));
 for (final ObjectName mbean : mbeans) {
 Object value;
@@ -1234,7 +1234,7 @@ public class CollectionsAPIDistributedZkTestBa
 try {
 createCollection(collectionName, client,2,2);
 String newReplicaName = Assign.assignNode(collectionName , client.getZkStateReader().getClusterState() );
- ArrayList nodeList = new ArrayList(client.getZkStateReader().getClusterState().getLiveNodes());
+ ArrayList nodeList = new ArrayList<>(client.getZkStateReader().getClusterState().getLiveNodes());
 Collections.shuffle(nodeList);
 Map m = makeMap( "action", CollectionAction.ADDREPLICA.toString(),
@@ -1311,7 +1311,7 @@ public class CollectionsAPIDistributedZkTestBa
 REPLICATION_FACTOR, replicationFactor,
 MAX_SHARDS_PER_NODE, maxShardsPerNode,
 NUM_SLICES, numShards);
- Map> collectionInfos = new HashMap>();
+ Map> collectionInfos = new HashMap<>();
 createCollection(collectionInfos, COLL_NAME, props, client,"conf1");
 waitForRecoveriesToFinish(COLL_NAME, false);
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
index 1a2ea9331ef..de318aa8521 100644
--- a/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/CustomCollectionTest.java
@@ -99,8 +99,8 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
 sliceCount = 2;
 shardCount = 4;
- completionService = new ExecutorCompletionService(executor);
- pending = new HashSet>();
+ completionService = new ExecutorCompletionService<>(executor);
+ pending = new HashSet<>();
 checkCreatedVsState = false;
 }
@@ -142,7 +142,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
 // env make this pretty fragile
 // create new collections rapid fire
- Map> collectionInfos = new HashMap>();
+ Map> collectionInfos = new HashMap<>();
 int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
 int cnt = random().nextInt(6) + 1;
@@ -214,7 +214,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
 assertNull("A shard of a Collection configured with implicit router must have null range",
 coll.getSlice("a").getRange());
- List collectionNameList = new ArrayList();
+ List collectionNameList = new ArrayList<>();
 collectionNameList.addAll(collectionInfos.keySet());
 log.info("Collections created : "+collectionNameList );
@@ -360,7 +360,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
 int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
 .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
- HashMap> collectionInfos = new HashMap>();
+ HashMap> collectionInfos = new HashMap<>();
 CloudSolrServer client = null;
 String shard_fld = "shard_s";
 try {
@@ -419,7 +419,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
 private void testCreateShardRepFactor() throws Exception {
 String collectionName = "testCreateShardRepFactor";
- HashMap> collectionInfos = new HashMap>();
+ HashMap> collectionInfos = new HashMap<>();
 CloudSolrServer client = null;
 try {
 client = createCloudClient(null);
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
index bcebb34bce4..e9fbca51e9f 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteReplicaTest.java
@@ -148,7 +148,7 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
 REPLICATION_FACTOR, replicationFactor,
 MAX_SHARDS_PER_NODE, maxShardsPerNode,
 NUM_SLICES, numShards);
- Map> collectionInfos = new HashMap>();
+ Map> collectionInfos = new HashMap<>();
 createCollection(collectionInfos, COLL_NAME, props, client);
 }
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
index 97f86ba48b3..a55bd6cd6f6 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DeleteShardTest.java
@@ -145,7 +145,7 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
 protected void setSliceAsInactive(String slice) throws SolrServerException, IOException,
 KeeperException, InterruptedException {
 DistributedQueue inQueue = Overseer.getInQueue(cloudClient.getZkStateReader().getZkClient());
- Map propMap = new HashMap();
+ Map propMap = new HashMap<>();
 propMap.put(Overseer.QUEUE_OPERATION, "updateshardstate");
 propMap.put(slice, Slice.INACTIVE);
 propMap.put(ZkStateReader.COLLECTION_PROP, "collection1");
diff --git a/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java b/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java
index 835bbd20742..8b8e3b5fb72 100644
--- a/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/DistribCursorPagingTest.java
@@ -523,7 +523,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase {
 // start with a smallish number of documents, and test that we can do a full walk using a
 // sort on *every* field in the schema...
- List initialDocs = new ArrayList();
+ List initialDocs = new ArrayList<>();
 for (int i = 1; i <= numInitialDocs; i++) {
 SolrInputDocument doc = CursorPagingTest.buildRandomDocument(i);
 initialDocs.add(doc);
@@ -606,7 +606,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase {
 req.setShowSchema(true);
 NamedList rsp = controlClient.request(req);
 NamedList fields = (NamedList) ((NamedList)rsp.get("schema")).get("fields");
- ArrayList names = new ArrayList(fields.size());
+ ArrayList names = new ArrayList<>(fields.size());
 for (Map.Entry item : fields) {
 names.add(item.getKey());
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
index 0e3f91eaad3..1963432e9dd 100644
--- a/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java
@@ -387,7 +387,7 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
 }
 }
 };
- List threads = new ArrayList();
+ List threads = new ArrayList<>();
 int nthreads = random().nextInt(TEST_NIGHTLY ? 4 : 2) + 1;
 for (int i = 0; i < nthreads; i++) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
index acaa0c7257e..54e83263703 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionIntegrationTest.java
@@ -57,9 +57,9 @@ public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
 protected String zkDir;
- private Map containerMap = new HashMap();
+ private Map containerMap = new HashMap<>();
- private Map> shardPorts = new HashMap>();
+ private Map> shardPorts = new HashMap<>();
 private SolrZkClient zkClient;
@@ -142,7 +142,7 @@ public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
 System.setProperty("solr.solr.home", TEST_HOME());
 Set ports = shardPorts.get(shard);
 if (ports == null) {
- ports = new HashSet();
+ ports = new HashSet<>();
 shardPorts.put(shard, ports);
 }
 ports.add(port);
diff --git a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
index 21772017b49..45302648156 100644
--- a/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java
@@ -228,7 +228,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
 @Test
 public void testElection() throws Exception {
- List threads = new ArrayList();
+ List threads = new ArrayList<>();
 for (int i = 0; i < 15; i++) {
 ClientThread thread = new ClientThread(i);
diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
index 71d48d25e7b..f0eab046e85 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MigrateRouteKeyTest.java
@@ -141,7 +141,7 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
 }
 private void createCollection(String targetCollection) throws Exception {
- HashMap> collectionInfos = new HashMap>();
+ HashMap> collectionInfos = new HashMap<>();
 CloudSolrServer client = null;
 try {
 client = createCloudClient(null);
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java
index 9b8e012668c..7b24d2a95bf 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionProcessorTest.java
@@ -81,7 +81,7 @@ public class OverseerCollectionProcessorTest extends SolrTestCaseJ4 {
 private OverseerCollectionProcessorToBeTested underTest;
 private Thread thread;
- private Queue queue = new BlockingArrayQueue();
+ private Queue queue = new BlockingArrayQueue<>();
 private class OverseerCollectionProcessorToBeTested extends
 OverseerCollectionProcessor {
@@ -202,7 +202,7 @@ public class OverseerCollectionProcessorTest extends SolrTestCaseJ4 {
 return collectionsSet;
 }
 }).anyTimes();
- final Set liveNodes = new HashSet();
+ final Set liveNodes = new HashSet<>();
 for (int i = 0; i < liveNodesCount; i++) {
 final String address = "localhost:" + (8963 + i) + "_solr";
 liveNodes.add(address);
@@ -316,14 +316,14 @@ public class OverseerCollectionProcessorTest extends SolrTestCaseJ4 {
 }
 private class SubmitCapture {
- public Capture shardRequestCapture = new Capture();
- public Capture nodeUrlsWithoutProtocolPartCapture = new Capture();
- public Capture params = new Capture();
+ public Capture shardRequestCapture = new Capture<>();
+ public Capture nodeUrlsWithoutProtocolPartCapture = new Capture<>();
+ public Capture params = new Capture<>();
 }
 protected List mockShardHandlerForCreateJob(
 Integer numberOfSlices, Integer numberOfReplica) {
- List submitCaptures = new ArrayList();
+ List submitCaptures = new ArrayList<>();
 for (int i = 0; i < (numberOfSlices * numberOfReplica); i++) {
 SubmitCapture submitCapture = new SubmitCapture();
 shardHandlerMock.submit(capture(submitCapture.shardRequestCapture),
@@ -370,9 +370,9 @@ public class OverseerCollectionProcessorTest extends SolrTestCaseJ4 {
 protected void verifySubmitCaptures(List submitCaptures,
 Integer numberOfSlices, Integer numberOfReplica, Collection createNodes) {
- List coreNames = new ArrayList();
- Map> sliceToNodeUrlsWithoutProtocolPartToNumberOfShardsRunningMapMap = new HashMap>();
- List nodeUrlWithoutProtocolPartForLiveNodes = new ArrayList(
+ List coreNames = new ArrayList<>();
+ Map> sliceToNodeUrlsWithoutProtocolPartToNumberOfShardsRunningMapMap = new HashMap<>();
+ List nodeUrlWithoutProtocolPartForLiveNodes = new ArrayList<>(
 createNodes.size());
 for (String nodeName : createNodes) {
 String nodeUrlWithoutProtocolPart = nodeName.replaceAll("_", "/");
@@ -510,7 +510,7 @@ public class OverseerCollectionProcessorTest extends SolrTestCaseJ4 {
 assertTrue("Wrong usage of testTemplage. createNodeListOption has to be " + CreateNodeListOptions.SEND + " when numberOfNodes and numberOfNodesToCreateOn are unequal", ((createNodeListOption == CreateNodeListOptions.SEND) || (numberOfNodes.intValue() == numberOfNodesToCreateOn.intValue())));
 Set liveNodes = commonMocks(numberOfNodes);
- List createNodeList = new ArrayList();
+ List createNodeList = new ArrayList<>();
 int i = 0;
 for (String node : liveNodes) {
 if (i++ < numberOfNodesToCreateOn) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
index 2f0eb4d1ae7..67ac49b8718 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerRolesTest.java
@@ -216,7 +216,7 @@ public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
 REPLICATION_FACTOR, replicationFactor,
 MAX_SHARDS_PER_NODE, maxShardsPerNode,
 NUM_SLICES, numShards);
- Map> collectionInfos = new HashMap>();
+ Map> collectionInfos = new HashMap<>();
 createCollection(collectionInfos, COLL_NAME, props, client);
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
index a67a8be527a..921315dcc90 100644
--- a/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/OverseerTest.java
@@ -61,8 +61,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
 static final int TIMEOUT = 10000;
 private static final boolean DEBUG = false;
- private List overseers = new ArrayList();
- private List readers = new ArrayList();
+ private List overseers = new ArrayList<>();
+ private List readers = new ArrayList<>();
 private String collection = "collection1";
@@ -435,7 +435,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
 assertEquals("Unable to verify all cores have been returned an id",
 coreCount, assignedCount);
- final HashMap counters = new HashMap();
+ final HashMap counters = new HashMap<>();
 for (int i = 1; i < sliceCount+1; i++) {
 counters.put("shard" + i, new AtomicInteger());
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java b/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
index fc7bb631942..46bd5980262 100644
--- a/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/RemoteQueryErrorTest.java
@@ -49,7 +49,7 @@ public class RemoteQueryErrorTest extends AbstractFullDistribZkTestBase {
 createCollection("collection2", 2, 1, 10);
- List numShardsNumReplicaList = new ArrayList(2);
+ List numShardsNumReplicaList = new ArrayList<>(2);
 numShardsNumReplicaList.add(2);
 numShardsNumReplicaList.add(1);
 checkForCollection("collection2", numShardsNumReplicaList, null);
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
index f432ccd1207..b942cc25330 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardRoutingTest.java
@@ -341,10 +341,10 @@ public class ShardRoutingTest extends AbstractFullDistribZkTestBase {
 // TODO: refactor some of this stuff up into a base class for use by other tests
 void doQuery(String expectedDocs, String... queryParams) throws Exception {
- Set expectedIds = new HashSet( StrUtils.splitSmart(expectedDocs, ",", true) );
+ Set expectedIds = new HashSet<>( StrUtils.splitSmart(expectedDocs, ",", true) );
 QueryResponse rsp = cloudClient.query(params(queryParams));
- Set obtainedIds = new HashSet();
+ Set obtainedIds = new HashSet<>();
 for (SolrDocument doc : rsp.getResults()) {
 obtainedIds.add((String) doc.get("id"));
 }
@@ -355,10 +355,10 @@ public class ShardRoutingTest extends AbstractFullDistribZkTestBase {
 void doRTG(String ids) throws Exception {
 cloudClient.query(params("qt","/get", "ids",ids));
- Set expectedIds = new HashSet( StrUtils.splitSmart(ids, ",", true) );
+ Set expectedIds = new HashSet<>( StrUtils.splitSmart(ids, ",", true) );
 QueryResponse rsp = cloudClient.query(params("qt","/get", "ids",ids));
- Set obtainedIds = new HashSet();
+ Set obtainedIds = new HashSet<>();
 for (SolrDocument doc : rsp.getResults()) {
 obtainedIds.add((String) doc.get("id"));
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
index f3d0e27f512..be4a6755f20 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java
@@ -115,7 +115,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
 DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
- List subRanges = new ArrayList();
+ List subRanges = new ArrayList<>();
 List ranges = router.partitionRange(4, shard1Range);
 // test with only one range
@@ -158,7 +158,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 final DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
 Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
 DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
- List subRanges = new ArrayList();
+ List subRanges = new ArrayList<>();
 if (usually()) {
 List ranges = router.partitionRange(4, shard1Range);
 // 75% of range goes to shard1_0 and the rest to shard1_1
@@ -185,7 +185,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 int max = atLeast(random, 401);
 int sleep = atLeast(random, 25);
 log.info("SHARDSPLITTEST: Going to add " + max + " number of docs at 1 doc per " + sleep + "ms");
- Set deleted = new HashSet();
+ Set deleted = new HashSet<>();
 for (int id = 101; id < max; id++) {
 try {
 indexAndUpdateCount(router, ranges, docCounts, String.valueOf(id), id);
@@ -246,7 +246,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
 .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
- HashMap> collectionInfos = new HashMap>();
+ HashMap> collectionInfos = new HashMap<>();
 CloudSolrServer client = null;
 String shard_fld = "shard_s";
 try {
@@ -324,7 +324,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
 .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
- HashMap> collectionInfos = new HashMap>();
+ HashMap> collectionInfos = new HashMap<>();
 CloudSolrServer client = null;
 try {
 client = createCloudClient(null);
@@ -566,9 +566,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
 log.info("Actual docCount for shard1_0 = {}", shard10Count);
 log.info("Actual docCount for shard1_1 = {}", shard11Count);
- Map idVsVersion = new HashMap();
- Map shard10Docs = new HashMap();
- Map shard11Docs = new HashMap();
+ Map idVsVersion = new HashMap<>();
+ Map shard10Docs = new HashMap<>();
+ Map shard11Docs = new HashMap<>();
 for (int i = 0; i < response.getResults().size(); i++) {
 SolrDocument document = response.getResults().get(i);
 idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
diff --git a/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java b/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java
index cdf001a1393..3b7f7694141 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java
@@ -34,13 +34,13 @@ import java.util.Set;
 public class SliceStateTest extends SolrTestCaseJ4 {
 @Test
 public void testDefaultSliceState() throws Exception {
- Map collectionStates = new HashMap();
- Set liveNodes = new HashSet();
+ Map collectionStates = new HashMap<>();
+ Set liveNodes = new HashSet<>();
 liveNodes.add("node1");
- Map slices = new HashMap();
- Map sliceToProps = new HashMap();
- Map props = new HashMap();
+ Map slices = new HashMap<>();
+ Map sliceToProps = new HashMap<>();
+ Map props = new HashMap<>();
 Replica replica = new Replica("node1", props);
 sliceToProps.put("node1", replica);
diff --git a/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java b/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java
index 8dde806418b..50f5ba1be0d 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SliceStateUpdateTest.java
@@ -86,7 +86,7 @@ public class SliceStateUpdateTest extends SolrTestCaseJ4 {
 .getZkAddress(), "solrconfig.xml", "schema.xml");
 log.info("####SETUP_START " + getTestName());
- Map props2 = new HashMap();
+ Map props2 = new HashMap<>();
 props2.put("configName", "conf1");
 ZkNodeProps zkProps2 = new ZkNodeProps(props2);
@@ -147,7 +147,7 @@ public class SliceStateUpdateTest extends SolrTestCaseJ4 {
 // new LinkedHashMap(clusterState.getCollectionStates());
 Map slicesMap = clusterState.getSlicesMap("collection1");
- Map props = new HashMap(1);
+ Map props = new HashMap<>(1);
 Slice slice = slicesMap.get("shard1");
 Map prop = slice.getProperties();
 prop.put("state", "inactive");
diff --git a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
index b1f04d8599b..adfceeaaded 100644
--- a/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/SyncSliceTest.java
@@ -96,7 +96,7 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase {
 waitForThingsToLevelOut(30);
 del("*:*");
- List skipServers = new ArrayList();
+ List skipServers = new ArrayList<>();
 int docId = 0;
 indexDoc(skipServers, id, docId++, i1, 50, tlong, 50, t1,
 "to come to the aid of their country.");
@@ -156,7 +156,7 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase {
 "to come to the aid of their country.");
- Set jetties = new HashSet();
+ Set jetties = new HashSet<>();
 jetties.addAll(shardToJetty.get("shard1"));
 jetties.remove(leaderJetty);
 assertEquals(shardCount - 1, jetties.size());
@@ -217,7 +217,7 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase {
 "Test Setup Failure: shard1 should have just been set up to be inconsistent - but it's still consistent. Leader:" + leaderJetty.url + " Dead Guy:" + deadJetty.url + "skip list:" + skipServers, shardFailMessage);
- jetties = new HashSet();
+ jetties = new HashSet<>();
 jetties.addAll(shardToJetty.get("shard1"));
 jetties.remove(leaderJetty);
 assertEquals(shardCount - 1, jetties.size());
@@ -292,8 +292,8 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase {
 }
 private List getRandomOtherJetty(CloudJettyRunner leader, CloudJettyRunner down) {
- List skipServers = new ArrayList();
- List candidates = new ArrayList();
+ List skipServers = new ArrayList<>();
+ List candidates = new ArrayList<>();
 candidates.addAll(shardToJetty.get("shard1"));
 if (leader != null) {
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java b/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
index 86043b0f011..8e3b0b0c4a7 100755
--- a/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestDistribDocBasedVersion.java
@@ -290,7 +290,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
 void doQuery(String expectedDocs, String... queryParams) throws Exception {
 List strs = StrUtils.splitSmart(expectedDocs, ",", true);
- Map expectedIds = new HashMap();
+ Map expectedIds = new HashMap<>();
 for (int i=0; i obtainedIds = new HashMap();
+ Map obtainedIds = new HashMap<>();
 for (SolrDocument doc : rsp.getResults()) {
 obtainedIds.put((String) doc.get("id"), doc.get(vfield));
 }
@@ -309,7 +309,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
 void doRTG(String ids, String versions) throws Exception {
- Map expectedIds = new HashMap();
+ Map expectedIds = new HashMap<>();
 List strs = StrUtils.splitSmart(ids, ",", true);
 List verS = StrUtils.splitSmart(versions, ",", true);
 for (int i=0; i obtainedIds = new HashMap();
+ Map obtainedIds = new HashMap<>();
 for (SolrDocument doc : rsp.getResults()) {
 obtainedIds.put((String) doc.get("id"), doc.get(vfield));
 }
@@ -330,10 +330,10 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
 void doRTG(String ids) throws Exception {
 ss.query(params("qt","/get", "ids",ids));
- Set expectedIds = new HashSet( StrUtils.splitSmart(ids, ",", true) );
+ Set expectedIds = new HashSet<>( StrUtils.splitSmart(ids, ",", true) );
 QueryResponse rsp = cloudClient.query(params("qt","/get", "ids",ids));
- Set obtainedIds = new HashSet();
+ Set obtainedIds = new HashSet<>();
 for (SolrDocument doc : rsp.getResults()) {
 obtainedIds.add((String) doc.get("id"));
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
index 37f9ca72110..8ddca1d2e9b 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java
@@ -146,8 +146,8 @@ public class TestHashPartitioner extends SolrTestCaseJ4 {
 List expectedShardStr = StrUtils.splitSmart(expectedShards, ",", true);
- HashSet expectedSet = new HashSet(expectedShardStr);
- HashSet obtainedSet = new HashSet();
+ HashSet expectedSet = new HashSet<>(expectedShardStr);
+ HashSet obtainedSet = new HashSet<>();
 for (Slice slice : slices) {
 obtainedSet.add(slice.getName());
 }
@@ -222,7 +222,7 @@ public class TestHashPartitioner extends SolrTestCaseJ4 {
 DocCollection createCollection(int nSlices, DocRouter router) {
 List ranges = router.partitionRange(nSlices, router.fullRange());
- Map slices = new HashMap();
+ Map slices = new HashMap<>();
 for (int i=0; i idMap = new HashMap();
+ HashMap idMap = new HashMap<>();
 for (int i = 1; i <= sliceCount; i++) {
@@ -122,7 +122,7 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
 commit();
- HashMap idMap = new HashMap();
+ HashMap idMap = new HashMap<>();
 for (int i = 1; i <= sliceCount; i++) {
@@ -142,7 +142,7 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
 Set doQueryGetUniqueIdKeys(String... queryParams) throws Exception {
 QueryResponse rsp = cloudClient.query(params(queryParams));
- Set obtainedIdKeys = new HashSet();
+ Set obtainedIdKeys = new HashSet<>();
 for (SolrDocument doc : rsp.getResults()) {
 obtainedIdKeys.add(getKey((String) doc.get("id")));
 }
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
index b143e99f5a5..64ce1246535 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkControllerTest.java
@@ -175,7 +175,7 @@ public class ZkControllerTest extends SolrTestCaseJ4 {
 zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + actualConfigName, true);
- Map props = new HashMap();
+ Map props = new HashMap<>();
 props.put("configName", actualConfigName);
 ZkNodeProps zkProps = new ZkNodeProps(props);
 zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/"
diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java
index f47091acc8a..5f68019ec58 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ZkNodePropsTest.java
@@ -30,7 +30,7 @@ public class ZkNodePropsTest extends SolrTestCaseJ4 {
 @Test
 public void testBasic() throws IOException {
- Map props = new HashMap();
+ Map props = new HashMap<>();
 props.put("prop1", "value1");
 props.put("prop2", "value2");
 props.put("prop3", "value3");
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
index 6dae9b555ce..1384cb270d0 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsTestUtil.java
@@ -36,7 +36,7 @@ public class HdfsTestUtil {
 private static Locale savedLocale;
- private static Map timers = new ConcurrentHashMap();
+ private static Map timers = new ConcurrentHashMap<>();
 public static MiniDFSCluster setupClass(String dataDir) throws Exception {
 LuceneTestCase.assumeFalse("HDFS tests were disabled by -Dtests.disableHdfs",
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
index 5a737826566..a43ef3c27ef 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/HdfsWriteToMultipleCollectionsTest.java
@@ -98,8 +98,8 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
 for (int i = 0; i < cnt; i++) {
 waitForRecoveriesToFinish(ACOLLECTION + i, false);
 }
- List cloudServers = new ArrayList();
- List threads = new ArrayList();
+ List cloudServers = new ArrayList<>();
+ List threads = new ArrayList<>();
 for (int i = 0; i < cnt; i++) {
 CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
 server.setDefaultCollection(ACOLLECTION + i);
diff --git a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
index 9004b9113f5..232536d8dce 100644
--- a/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/hdfs/StressHdfsTest.java
@@ -155,7 +155,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
 }
 // collect the data dirs
- List dataDirs = new ArrayList();
+ List dataDirs = new ArrayList<>();
 int i = 0;
 for (SolrServer client : clients) {
diff --git a/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java b/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java
index e8129ae42fc..aa503b9c87d 100644
--- a/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/core/CachingDirectoryFactoryTest.java
@@ -33,7 +33,7 @@ import org.junit.Test;
 */
 public class CachingDirectoryFactoryTest extends SolrTestCaseJ4 {
- private Map dirs = new HashMap();
+ private Map dirs = new HashMap<>();
 private volatile boolean stop = false;
 private class Tracker {
@@ -46,7 +46,7 @@ public class CachingDirectoryFactoryTest extends SolrTestCaseJ4 {
 public void stressTest() throws Exception {
 final CachingDirectoryFactory df = new RAMDirectoryFactory();
- List threads = new ArrayList();
+ List threads = new ArrayList<>();
 int threadCount = 11;
 for (int i = 0; i < threadCount; i++) {
 Thread getDirThread = new GetDirThread(df);
@@ -126,7 +126,7 @@ public class CachingDirectoryFactoryTest extends SolrTestCaseJ4 {
 synchronized (dirs) {
 int sz = dirs.size();
- List dirsList = new ArrayList();
+ List dirsList = new ArrayList<>();
 dirsList.addAll(dirs.values());
 if (sz > 0) {
 Tracker tracker = dirsList.get(Math.min(dirsList.size() - 1,
diff --git a/solr/core/src/test/org/apache/solr/core/CountUsageValueSourceParser.java b/solr/core/src/test/org/apache/solr/core/CountUsageValueSourceParser.java
index 60f454ed4e9..fa8eb85edce 100644
--- a/solr/core/src/test/org/apache/solr/core/CountUsageValueSourceParser.java
+++ b/solr/core/src/test/org/apache/solr/core/CountUsageValueSourceParser.java
@@ -40,7 +40,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 public class CountUsageValueSourceParser extends ValueSourceParser {
 private static final ConcurrentMap counters
- = new ConcurrentHashMap();
+ = new ConcurrentHashMap<>();
 public static void clearCounters() {
 counters.clear();
diff --git a/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java b/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
index bf921ef1d6c..73171e1fb2d 100644
--- a/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
+++ b/solr/core/src/test/org/apache/solr/core/OpenCloseCoreStressTest.java
@@ -69,8 +69,8 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
 File solrHomeDirectory;
- List indexingServers = new ArrayList(indexingThreads);
- List queryServers = new ArrayList(queryThreads);
+ List indexingServers = new ArrayList<>(indexingThreads);
+ List queryServers = new ArrayList<>(queryThreads);
 static String savedFactory;
@@ -81,8 +81,8 @@
 @Before
 public void setupServer() throws Exception {
- coreCounts = new TreeMap();
- coreNames = new ArrayList();
+ coreCounts = new TreeMap<>();
+ coreNames = new ArrayList<>();
 cumulativeDocs = 0;
 solrHomeDirectory = new File(TEMP_DIR, "OpenCloseCoreStressTest_");
@@ -328,7 +328,7 @@ class Indexer {
 static volatile int lastCount;
 static volatile long nextTime;
- ArrayList _threads = new ArrayList();
+ ArrayList _threads = new ArrayList<>();
 public Indexer(OpenCloseCoreStressTest OCCST, String url, List servers, int numThreads, int secondsToRun, Random random) {
 stopTime = System.currentTimeMillis() + (secondsToRun * 1000);
@@ -436,7 +436,7 @@ class OneIndexer extends Thread {
 class Queries {
 static AtomicBoolean _keepon = new AtomicBoolean(true);
- List _threads = new ArrayList();
+ List _threads = new ArrayList<>();
 static AtomicInteger _errors = new AtomicInteger(0);
 String baseUrl;
diff --git a/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java b/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java
index c97d500e188..d6aa2c60ae0 100644
--- a/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java
+++ b/solr/core/src/test/org/apache/solr/core/QueryResultKeyTest.java
@@ -178,7 +178,7 @@ public class QueryResultKeyTest extends SolrTestCaseJ4 {
 * specified ints
 */
 private List buildFiltersFromNumbers(int[] values) {
- ArrayList filters = new ArrayList(values.length);
+ ArrayList filters = new ArrayList<>(values.length);
 for (int val : values) {
 filters.add(new FlatHashTermQuery(String.valueOf(val)));
 }
diff --git a/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java b/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java
index ead3bc809f6..1ae9277053b 100644
--- a/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java
+++ b/solr/core/src/test/org/apache/solr/core/SolrCoreTest.java
@@ -172,7 +172,7 @@ public class SolrCoreTest extends SolrTestCaseJ4 {
 final int LOOP = 100;
 final int MT = 16;
 ExecutorService service = Executors.newFixedThreadPool(MT, new DefaultSolrThreadFactory("refCountMT"));
- List> callees = new ArrayList>(MT);
+ List> callees = new ArrayList<>(MT);
 final CoreContainer cores = h.getCoreContainer();
 for (int i = 0; i < MT; ++i) {
 Callable call = new Callable() {
diff --git a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
index 25d1e6a78b6..ba94f7ee9a3 100644
--- a/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
+++ b/solr/core/src/test/org/apache/solr/core/TestCoreContainer.java
@@ -130,7 +130,7 @@ public class TestCoreContainer extends SolrTestCaseJ4 {
 }
 }
- List threads = new ArrayList();
+ List threads = new ArrayList<>();
 int numThreads = 4;
 for (int i = 0; i < numThreads; i++) {
 threads.add(new TestThread());
diff --git a/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
index 42cbfed71fd..a837d5662ff 100644
--- a/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
+++ b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
@@ -178,7 +178,7 @@ public class TestJmxIntegration extends AbstractSolrTestCase {
 private ObjectName getObjectName(String key, SolrInfoMBean infoBean)
 throws MalformedObjectNameException {
- Hashtable map = new Hashtable();
+ Hashtable map = new Hashtable<>();
 map.put("type", key);
 map.put("id", infoBean.getName());
 String coreName = h.getCore().getName();
diff --git a/solr/core/src/test/org/apache/solr/core/TestJmxMonitoredMap.java b/solr/core/src/test/org/apache/solr/core/TestJmxMonitoredMap.java
index f60402faaec..11c5a045a00 100644
--- a/solr/core/src/test/org/apache/solr/core/TestJmxMonitoredMap.java
+++ b/solr/core/src/test/org/apache/solr/core/TestJmxMonitoredMap.java
@@ -83,7 +83,7 @@ public class TestJmxMonitoredMap extends LuceneTestCase {
 AbstractSolrTestCase.log.info("Using port: " + port);
 String url = "service:jmx:rmi:///jndi/rmi://127.0.0.1:"+port+"/solrjmx";
 JmxConfiguration config = new JmxConfiguration(true, null, url, null);
- monitoredMap = new JmxMonitoredMap("", "", config);
+ monitoredMap = new JmxMonitoredMap<>("", "", config);
 JMXServiceURL u = new JMXServiceURL(url);
      mbeanServer = connector.getMBeanServerConnection();
diff --git a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
index 53f414e33ca..4e456aa5107 100644
--- a/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
+++ b/solr/core/src/test/org/apache/solr/core/TestLazyCores.java
@@ -251,7 +251,7 @@ public class TestLazyCores extends SolrTestCaseJ4 {
   @Test
   public void testRace() throws Exception {
-    final List theCores = new ArrayList();
+    final List theCores = new ArrayList<>();
     final CoreContainer cc = init();
     try {
@@ -713,9 +713,9 @@ public class TestLazyCores extends SolrTestCaseJ4 {
     }
     NamedList.NamedListEntry[] entries = new NamedList.NamedListEntry[q.length / 2];
     for (int i = 0; i < q.length; i += 2) {
-      entries[i / 2] = new NamedList.NamedListEntry(q[i], q[i + 1]);
+      entries[i / 2] = new NamedList.NamedListEntry<>(q[i], q[i + 1]);
     }
-    return new LocalSolrQueryRequest(core, new NamedList(entries));
+    return new LocalSolrQueryRequest(core, new NamedList<>(entries));
   }
   private final static String LOTS_SOLR_XML = " " +
diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java b/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java
index 0443f2d0ea4..775fcb052b1 100644
--- a/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrXMLSerializer.java
@@ -116,12 +116,12 @@ public class TestSolrXMLSerializer extends LuceneTestCase {
       String adminPathVal, String shareSchemaKey, String shareSchemaVal,
       String instanceDirKey, String instanceDirVal) {
     //
-    Map rootSolrAttribs = new HashMap();
+    Map rootSolrAttribs = new HashMap<>();
     rootSolrAttribs.put(sharedLibKey, sharedLibVal);
     rootSolrAttribs.put(peristentKey, persistentVal);
     //
-    Map coresAttribs = new HashMap();
+    Map coresAttribs = new HashMap<>();
     coresAttribs.put(adminPathKey, adminPathVal);
     coresAttribs.put(shareSchemaKey, shareSchemaVal);
     coresAttribs.put(defaultCoreNameKey, defaultCoreNameVal);
@@ -129,9 +129,9 @@ public class TestSolrXMLSerializer extends LuceneTestCase {
     SolrXMLDef solrXMLDef = new SolrXMLDef();
     //
-    List solrCoreXMLDefs = new ArrayList();
+    List solrCoreXMLDefs = new ArrayList<>();
     SolrCoreXMLDef coreDef = new SolrCoreXMLDef();
-    Map coreAttribs = new HashMap();
+    Map coreAttribs = new HashMap<>();
     coreAttribs.put(instanceDirKey, instanceDirVal);
     coreDef.coreAttribs = coreAttribs ;
     coreDef.coreProperties = new Properties();
@@ -142,9 +142,9 @@ public class TestSolrXMLSerializer extends LuceneTestCase {
     solrXMLDef.containerProperties = containerProperties ;
     solrXMLDef.solrAttribs = rootSolrAttribs;
     solrXMLDef.coresAttribs = coresAttribs;
-    solrXMLDef.loggingAttribs = new HashMap();
-    solrXMLDef.loggingAttribs = new HashMap();
-    solrXMLDef.watcherAttribs = new HashMap();
+    solrXMLDef.loggingAttribs = new HashMap<>();
+    solrXMLDef.loggingAttribs = new HashMap<>();
+    solrXMLDef.watcherAttribs = new HashMap<>();
     return solrXMLDef;
   }
diff --git a/solr/core/src/test/org/apache/solr/core/TestSolrXmlPersistence.java b/solr/core/src/test/org/apache/solr/core/TestSolrXmlPersistence.java
index 22a8c8aa3d4..2977e7d631a 100644
--- a/solr/core/src/test/org/apache/solr/core/TestSolrXmlPersistence.java
+++ b/solr/core/src/test/org/apache/solr/core/TestSolrXmlPersistence.java
@@ -497,7 +497,7 @@ public class TestSolrXmlPersistence extends SolrTestCaseJ4 {
   private String[] getAllNodes(InputStream is) throws ParserConfigurationException, IOException, SAXException {
-    List expressions = new ArrayList(); // XPATH and value for all elements in the indicated XML
+    List expressions = new ArrayList<>(); // XPATH and value for all elements in the indicated XML
     DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory
         .newInstance();
     DocumentBuilder docBuilder = docBuilderFactory.newDocumentBuilder();
diff --git a/solr/core/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java
index 6c8d25a2ac9..28c0cd03372 100644
--- a/solr/core/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/MoreLikeThisHandlerTest.java
@@ -59,7 +59,7 @@ public class MoreLikeThisHandlerTest extends SolrTestCaseJ4 {
     // requires 'q' or single content stream
     try {
-      ArrayList streams = new ArrayList( 2 );
+      ArrayList streams = new ArrayList<>( 2 );
       streams.add( new ContentStreamBase.StringStream( "hello" ) );
       streams.add( new ContentStreamBase.StringStream( "there" ) );
       req.setContentStreams( streams );
diff --git a/solr/core/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java
index 797e73d61d4..1a617ae8e1e 100644
--- a/solr/core/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/StandardRequestHandlerTest.java
@@ -52,7 +52,7 @@ public class StandardRequestHandlerTest extends AbstractSolrTestCase {
     assertU(adoc("id", "12", "title", "test", "val_s1", "ccc"));
     assertU(commit());
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put( CommonParams.Q, "title:test" );
     args.put( "indent", "true" );
     SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
diff --git a/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java b/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java
index a51187ce6e8..4000d478d5a 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestCSVLoader.java
@@ -93,7 +93,7 @@ public class TestCSVLoader extends SolrTestCaseJ4 {
     // TODO: stop using locally defined streams once stream.file and
     // stream.body work everywhere
-    List cs = new ArrayList(1);
+    List cs = new ArrayList<>(1);
     ContentStreamBase f = new ContentStreamBase.FileStream(new File(filename));
     f.setContentType("text/csv");
     cs.add(f);
diff --git a/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java
index b2ad0875679..fd198c2de42 100644
--- a/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/XmlUpdateRequestHandlerTest.java
@@ -191,7 +191,7 @@ public class XmlUpdateRequestHandlerTest extends SolrTestCaseJ4 {
   private class MockUpdateRequestProcessor extends UpdateRequestProcessor {
-    private Queue deleteCommands = new LinkedList();
+    private Queue deleteCommands = new LinkedList<>();
     public MockUpdateRequestProcessor(UpdateRequestProcessor next) {
       super(next);
diff --git a/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java
index 1273abaf9ad..a7fb7743fcd 100644
--- a/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/XsltUpdateRequestHandlerTest.java
@@ -68,12 +68,12 @@ public class XsltUpdateRequestHandlerTest extends SolrTestCaseJ4 {
       " " +
       "";
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put(CommonParams.TR, "xsl-update-handler-test.xsl");
     SolrCore core = h.getCore();
     LocalSolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
-    ArrayList streams = new ArrayList();
+    ArrayList streams = new ArrayList<>();
     streams.add(new ContentStreamBase.StringStream(xml));
     req.setContentStreams(streams);
     SolrQueryResponse rsp = new SolrQueryResponse();
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java
index ce0a8d54d6a..66ba14c25b9 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/MBeansHandlerTest.java
@@ -43,7 +43,7 @@ public class MBeansHandlerTest extends SolrTestCaseJ4 {
         "stats","true",
         CommonParams.WT,"xml" ));
-    List streams = new ArrayList();
+    List streams = new ArrayList<>();
     streams.add(new ContentStreamBase.StringStream(xml));
     LocalSolrQueryRequest req = lrf.makeRequest(
diff --git a/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java
index fccc1a0e418..527b39166fb 100644
--- a/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/admin/SystemInfoHandlerTest.java
@@ -31,13 +31,13 @@ public class SystemInfoHandlerTest extends LuceneTestCase {
     OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
     // make one directly
-    SimpleOrderedMap info = new SimpleOrderedMap();
+    SimpleOrderedMap info = new SimpleOrderedMap<>();
     info.add( "name", os.getName() );
     info.add( "version", os.getVersion() );
     info.add( "arch", os.getArch() );
     // make another using addGetterIfAvaliable
-    SimpleOrderedMap info2 = new SimpleOrderedMap();
+    SimpleOrderedMap info2 = new SimpleOrderedMap<>();
     SystemInfoHandler.addGetterIfAvaliable( os, "name", info2 );
     SystemInfoHandler.addGetterIfAvaliable( os, "version", info2 );
     SystemInfoHandler.addGetterIfAvaliable( os, "arch", info2 );
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DebugComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/DebugComponentTest.java
index 521cd05707c..3d1b3f272a1 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DebugComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DebugComponentTest.java
@@ -161,7 +161,7 @@ public class DebugComponentTest extends SolrTestCaseJ4 {
   @Test
   public void testModifyRequestTrack() {
     DebugComponent component = new DebugComponent();
-    List components = new ArrayList(1);
+    List components = new ArrayList<>(1);
     components.add(component);
     for(int i = 0; i < 10; i++) {
       SolrQueryRequest req = req("q", "test query", "distrib", "true", CommonParams.REQUEST_ID, "123456-my_rid");
@@ -196,7 +196,7 @@ public class DebugComponentTest extends SolrTestCaseJ4 {
   @Test
   public void testPrepare() throws IOException {
     DebugComponent component = new DebugComponent();
-    List components = new ArrayList(1);
+    List components = new ArrayList<>(1);
     components.add(component);
     SolrQueryRequest req;
     ResponseBuilder rb;
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java
index c4b356ef331..38f71cc95ac 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java
@@ -187,7 +187,7 @@ public class DistributedSpellCheckComponentTest extends BaseDistributedSearchTes
         collate, "true", maxCollationTries, "0", maxCollations, "1", collateExtended, "true"));
   }
   private Object[] buildRequest(String q, boolean useSpellcheckQ, String handlerName, boolean useGrouping, String... addlParams) {
-    List params = new ArrayList();
+    List params = new ArrayList<>();
     params.add("q");
     params.add(useSpellcheckQ ? "*:*" : q);
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DistributedSuggestComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/DistributedSuggestComponentTest.java
index ddbdacf5b1e..86f8e8de26e 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DistributedSuggestComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DistributedSuggestComponentTest.java
@@ -126,7 +126,7 @@ public class DistributedSuggestComponentTest extends BaseDistributedSearchTestCa
   }
   private Object[] buildRequest(String q, boolean useSuggestQ, String handlerName, String... addlParams) {
-    List params = new ArrayList();
+    List params = new ArrayList<>();
     if(useSuggestQ) {
       params.add("suggest.q");
diff --git a/solr/core/src/test/org/apache/solr/handler/component/DummyCustomParamSpellChecker.java b/solr/core/src/test/org/apache/solr/handler/component/DummyCustomParamSpellChecker.java
index bc78dfcd324..d107e9a3d30 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/DummyCustomParamSpellChecker.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/DummyCustomParamSpellChecker.java
@@ -54,7 +54,7 @@ public class DummyCustomParamSpellChecker extends SolrSpellChecker {
     // sort the keys to make ordering predictable
     Iterator iterator = options.customParams.getParameterNamesIterator();
-    List lst = new ArrayList();
+    List lst = new ArrayList<>();
     while (iterator.hasNext()) {
       lst.add(iterator.next());
     }
diff --git a/solr/core/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java
index 4b19022c462..e1646e6913e 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java
@@ -348,7 +348,7 @@ public class QueryElevationComponentTest extends SolrTestCaseJ4 {
     init("schema12.xml");
     SolrCore core = h.getCore();
-    NamedList args = new NamedList();
+    NamedList args = new NamedList<>();
     args.add(QueryElevationComponent.FIELD_TYPE, "string");
     args.add(QueryElevationComponent.CONFIG_FILE, "elevate.xml");
@@ -371,7 +371,7 @@ public class QueryElevationComponentTest extends SolrTestCaseJ4 {
     assertEquals(null, map.get("zzzz"));
     // Now test the same thing with a lowercase filter: 'lowerfilt'
-    args = new NamedList();
+    args = new NamedList<>();
     args.add(QueryElevationComponent.FIELD_TYPE, "lowerfilt");
     args.add(QueryElevationComponent.CONFIG_FILE, "elevate.xml");
@@ -535,7 +535,7 @@ public class QueryElevationComponentTest extends SolrTestCaseJ4 {
     String query = "title:ipod";
"title:ipod"; - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(CommonParams.Q, query); args.put(CommonParams.QT, "/elevate"); args.put(CommonParams.FL, "id,score"); @@ -699,7 +699,7 @@ public class QueryElevationComponentTest extends SolrTestCaseJ4 { writeFile(f, "aaa", "A"); QueryElevationComponent comp = (QueryElevationComponent) h.getCore().getSearchComponent("elevate"); - NamedList args = new NamedList(); + NamedList args = new NamedList<>(); args.add(QueryElevationComponent.CONFIG_FILE, testfile); comp.init(args); comp.inform(h.getCore()); diff --git a/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java b/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java index 014c3810f4f..5106bdf5298 100644 --- a/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java +++ b/solr/core/src/test/org/apache/solr/handler/component/SearchHandlerTest.java @@ -43,7 +43,7 @@ public class SearchHandlerTest extends SolrTestCaseJ4 // Build an explicit list //----------------------------------------------- - List names0 = new ArrayList(); + List names0 = new ArrayList<>(); names0.add( MoreLikeThisComponent.COMPONENT_NAME ); NamedList args = new NamedList(); @@ -58,7 +58,7 @@ public class SearchHandlerTest extends SolrTestCaseJ4 // Build an explicit list that includes the debug comp. //----------------------------------------------- - names0 = new ArrayList(); + names0 = new ArrayList<>(); names0.add( FacetComponent.COMPONENT_NAME ); names0.add( DebugComponent.COMPONENT_NAME ); names0.add( MoreLikeThisComponent.COMPONENT_NAME ); @@ -80,10 +80,10 @@ public class SearchHandlerTest extends SolrTestCaseJ4 // First/Last list //----------------------------------------------- - names0 = new ArrayList(); + names0 = new ArrayList<>(); names0.add( MoreLikeThisComponent.COMPONENT_NAME ); - List names1 = new ArrayList(); + List names1 = new ArrayList<>(); names1.add( FacetComponent.COMPONENT_NAME ); args = new NamedList(); diff --git a/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java index 338ee571137..1b0c1bd5660 100644 --- a/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java +++ b/solr/core/src/test/org/apache/solr/handler/component/StatsComponentTest.java @@ -166,7 +166,7 @@ public class StatsComponentTest extends AbstractSolrTestCase { assertU(adoc("id", "4")); assertU(commit()); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(CommonParams.Q, "*:*"); args.put(StatsParams.STATS, "true"); args.put(StatsParams.STATS_FIELD, "active_s"); @@ -197,7 +197,7 @@ public class StatsComponentTest extends AbstractSolrTestCase { assertU(adoc("id", "3")); assertU(commit()); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(CommonParams.Q, "*:*"); args.put(StatsParams.STATS, "true"); args.put(StatsParams.STATS_FIELD, "active_dt"); @@ -322,7 +322,7 @@ public class StatsComponentTest extends AbstractSolrTestCase { assertU(adoc("id", "4")); assertU(commit()); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(CommonParams.Q, "*:*"); args.put(StatsParams.STATS, "true"); args.put(StatsParams.STATS_FIELD, "active_i"); @@ -342,7 +342,7 @@ public class StatsComponentTest extends AbstractSolrTestCase { assertU(adoc("id", "4")); assertU(commit()); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put(CommonParams.Q, "*:*"); args.put(StatsParams.STATS, "true"); 
     args.put(StatsParams.STATS_FIELD, "active_s");
@@ -363,7 +363,7 @@ public class StatsComponentTest extends AbstractSolrTestCase {
     assertU(adoc("id", "3"));
     assertU(commit());
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put(CommonParams.Q, "*:*");
     args.put(StatsParams.STATS, "true");
     args.put(StatsParams.STATS_FIELD, "active_dt");
diff --git a/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java
index 155a140dabe..c9a6c63a23d 100644
--- a/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java
+++ b/solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java
@@ -210,7 +210,7 @@ public class TermVectorComponentTest extends SolrTestCaseJ4 {
     );
     // test each combination at random
-    final List list = new ArrayList();
+    final List list = new ArrayList<>();
     list.addAll(Arrays.asList("json.nl","map", "qt",tv, "q", "id:0", TermVectorComponent.COMPONENT_NAME, "true"));
     String[][] options = new String[][] { { TermVectorParams.TF, "'tf':1" },
       { TermVectorParams.OFFSETS, "'offsets':{'start':20, 'end':27}" },
diff --git a/solr/core/src/test/org/apache/solr/highlight/FastVectorHighlighterTest.java b/solr/core/src/test/org/apache/solr/highlight/FastVectorHighlighterTest.java
index 7b4fe383f0c..27dc3914521 100644
--- a/solr/core/src/test/org/apache/solr/highlight/FastVectorHighlighterTest.java
+++ b/solr/core/src/test/org/apache/solr/highlight/FastVectorHighlighterTest.java
@@ -67,7 +67,7 @@ public class FastVectorHighlighterTest extends SolrTestCaseJ4 {
   @Test
   public void test() {
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "tv_text");
     args.put("hl.snippets", "2");
diff --git a/solr/core/src/test/org/apache/solr/highlight/HighlighterConfigTest.java b/solr/core/src/test/org/apache/solr/highlight/HighlighterConfigTest.java
index 0460b5992e4..20040910321 100644
--- a/solr/core/src/test/org/apache/solr/highlight/HighlighterConfigTest.java
+++ b/solr/core/src/test/org/apache/solr/highlight/HighlighterConfigTest.java
@@ -52,7 +52,7 @@ public class HighlighterConfigTest extends AbstractSolrTestCase {
     assertTrue( highlighter instanceof DummyHighlighter );
     // check to see that doHighlight is called from the DummyHighlighter
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("df", "t_text");
     args.put("hl.fl", "");
diff --git a/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java b/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java
index 22078edd5b5..749897417d0 100644
--- a/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java
+++ b/solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java
@@ -83,7 +83,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   @Test
   public void testMergeContiguous() throws Exception {
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put(HighlightParams.HIGHLIGHT, "true");
     args.put("df", "t_text");
     args.put(HighlightParams.FIELDS, "");
@@ -132,7 +132,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testTermVecHighlight() {
     // do summarization using term vectors
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "tv_text");
     args.put("hl.snippets", "2");
@@ -154,7 +154,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   @Test
   public void testTermVectorWithoutOffsetsHighlight() {
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "tv_no_off_text");
@@ -197,7 +197,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testTermVecMultiValuedHighlight() throws Exception {
     // do summarization using term vectors on multivalued field
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "tv_mv_text");
     args.put("hl.snippets", "2");
@@ -224,7 +224,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testTermVecMultiValuedHighlight2() throws Exception {
     // do summarization using term vectors on multivalued field
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "tv_mv_text");
     args.put("hl.snippets", "2");
@@ -249,7 +249,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testDisMaxHighlight() {
     // same test run through dismax handler
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "tv_text");
     args.put("qf", "tv_text");
@@ -277,7 +277,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testMultiValueAnalysisHighlight() {
     // do summarization using re-analysis of the field
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "textgap");
     args.put("df", "textgap");
@@ -299,7 +299,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   @Test
   public void testMultiValueBestFragmentHighlight() {
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "textgap");
     args.put("df", "textgap");
@@ -338,7 +338,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testDefaultFieldHighlight() {
     // do summarization using re-analysis of the field
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("df", "t_text");
     args.put("hl.fl", "");
@@ -361,7 +361,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testHighlightDisabled() {
     // ensure highlighting can be explicitly disabled
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "false");
     args.put("hl.fl", "t_text");
     TestHarness.LocalRequestFactory sumLRF = h.getRequestFactory(
@@ -379,7 +379,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testTwoFieldHighlight() {
     // do summarization using re-analysis of the field
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "t_text tv_text");
     TestHarness.LocalRequestFactory sumLRF = h.getRequestFactory(
@@ -405,7 +405,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
     assertU(commit());
     assertU(optimize());
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "t_text1 t_text2");
@@ -449,7 +449,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   public void testCustomSimpleFormatterHighlight() {
     // do summarization using a custom formatter
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "t_text");
     args.put("hl.simple.pre","");
@@ -482,7 +482,7 @@ public class HighlighterTest extends SolrTestCaseJ4 {
   @Test
   public void testLongFragment() {
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put("hl", "true");
     args.put("hl.fl", "tv_text");
"tv_text"); TestHarness.LocalRequestFactory sumLRF = h.getRequestFactory( @@ -503,7 +503,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { @Test public void testMaxChars() { - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("fl", "id score"); args.put("hl", "true"); args.put("hl.snippets", "10"); @@ -538,7 +538,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { @Test public void testRegexFragmenter() { - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("fl", "id score"); args.put("hl", "true"); args.put("hl.snippets", "10"); @@ -585,7 +585,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { assertU(optimize()); // default length - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("hl", "true"); args.put("hl.fl", "tv_text"); TestHarness.LocalRequestFactory sumLRF = h.getRequestFactory( @@ -628,7 +628,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { assertU(optimize()); // do summarization - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("hl", "true"); args.put("hl.fragsize","0"); args.put("hl.fl", "t_text"); @@ -674,7 +674,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { @Test public void testPhraseHighlighter() { - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("hl", "true"); args.put("hl.fl", "t_text"); args.put("hl.fragsize", "40"); @@ -731,7 +731,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { @Test public void testGetHighlightFields() { - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("fl", "id score"); args.put("hl", "true"); args.put("hl.fl", "t*"); @@ -776,7 +776,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { public void testDefaultFieldPrefixWildcardHighlight() { // do summarization using re-analysis of the field - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("hl", "true"); args.put("df", "t_text"); args.put("hl.fl", ""); @@ -800,7 +800,7 @@ public class HighlighterTest extends SolrTestCaseJ4 { public void testDefaultFieldNonPrefixWildcardHighlight() { // do summarization using re-analysis of the field - HashMap args = new HashMap(); + HashMap args = new HashMap<>(); args.put("hl", "true"); args.put("df", "t_text"); args.put("hl.fl", ""); diff --git a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java index b5162009435..a49adbcadbf 100644 --- a/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java +++ b/solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java @@ -48,7 +48,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 { assertU(commit()); } - static ArrayList pendingDocs = new ArrayList(); + static ArrayList pendingDocs = new ArrayList<>(); // committing randomly gives different looking segments each time static void add_doc(String... 
diff --git a/solr/core/src/test/org/apache/solr/request/TestFaceting.java b/solr/core/src/test/org/apache/solr/request/TestFaceting.java
index a6c1daeb9ac..05fabc24712 100644
--- a/solr/core/src/test/org/apache/solr/request/TestFaceting.java
+++ b/solr/core/src/test/org/apache/solr/request/TestFaceting.java
@@ -272,7 +272,7 @@ public class TestFaceting extends SolrTestCaseJ4 {
   public void testTrieFields() {
     // make sure that terms are correctly filtered even for trie fields that index several
     // terms for a single value
-    List fields = new ArrayList();
+    List fields = new ArrayList<>();
     fields.add("id");
     fields.add("7");
     final String[] suffixes = new String[] {"ti", "tis", "tf", "tfs", "tl", "tls", "td", "tds"};
diff --git a/solr/core/src/test/org/apache/solr/request/TestWriterPerf.java b/solr/core/src/test/org/apache/solr/request/TestWriterPerf.java
index ab404df0c1a..d9a760f7f97 100644
--- a/solr/core/src/test/org/apache/solr/request/TestWriterPerf.java
+++ b/solr/core/src/test/org/apache/solr/request/TestWriterPerf.java
@@ -67,7 +67,7 @@ public class TestWriterPerf extends AbstractSolrTestCase {
   void index(Object... olst) {
-    ArrayList lst = new ArrayList();
+    ArrayList lst = new ArrayList<>();
     for (Object o : olst) lst.add(o.toString());
     assertU(adoc(lst.toArray(new String[lst.size()])));
   }
diff --git a/solr/core/src/test/org/apache/solr/response/TestPHPSerializedResponseWriter.java b/solr/core/src/test/org/apache/solr/response/TestPHPSerializedResponseWriter.java
index d31190e0368..722fac3b6af 100644
--- a/solr/core/src/test/org/apache/solr/response/TestPHPSerializedResponseWriter.java
+++ b/solr/core/src/test/org/apache/solr/response/TestPHPSerializedResponseWriter.java
@@ -81,7 +81,7 @@ public class TestPHPSerializedResponseWriter extends SolrTestCaseJ4 {
     // we use LinkedHashMap because we are doing a string comparison
     // later and we need predictible ordering
-    LinkedHashMap nl = new LinkedHashMap();
+    LinkedHashMap nl = new LinkedHashMap<>();
     nl.put("data4.1", "hashmap");
     nl.put("data4.2", "hello");
     d.addField("data4",nl);
diff --git a/solr/core/src/test/org/apache/solr/rest/SolrRestletTestBase.java b/solr/core/src/test/org/apache/solr/rest/SolrRestletTestBase.java
index 5ab06818c5c..ce01b05f1bb 100644
--- a/solr/core/src/test/org/apache/solr/rest/SolrRestletTestBase.java
+++ b/solr/core/src/test/org/apache/solr/rest/SolrRestletTestBase.java
@@ -27,7 +27,7 @@ import java.util.TreeMap;
 abstract public class SolrRestletTestBase extends RestTestBase {
   @BeforeClass
   public static void init() throws Exception {
-    final SortedMap extraServlets = new TreeMap();
+    final SortedMap extraServlets = new TreeMap<>();
     final ServletHolder solrRestApi = new ServletHolder("SolrRestApi", ServerServlet.class);
     solrRestApi.setInitParameter("org.restlet.application", "org.apache.solr.rest.SolrRestApi");
     extraServlets.put(solrRestApi, "/schema/*");  // '/schema/*' matches '/schema', '/schema/', and '/schema/whatever...'
diff --git a/solr/core/src/test/org/apache/solr/rest/schema/TestClassNameShortening.java b/solr/core/src/test/org/apache/solr/rest/schema/TestClassNameShortening.java
index ef5cd46c54f..d73c5ef8804 100644
--- a/solr/core/src/test/org/apache/solr/rest/schema/TestClassNameShortening.java
+++ b/solr/core/src/test/org/apache/solr/rest/schema/TestClassNameShortening.java
@@ -30,7 +30,7 @@ public class TestClassNameShortening extends RestTestBase {
   @BeforeClass
   public static void init() throws Exception {
-    final SortedMap extraServlets = new TreeMap();
+    final SortedMap extraServlets = new TreeMap<>();
     final ServletHolder solrRestApi = new ServletHolder("SolrRestApi", ServerServlet.class);
     solrRestApi.setInitParameter("org.restlet.application", "org.apache.solr.rest.SolrRestApi");
     extraServlets.put(solrRestApi, "/schema/*");  // '/schema/*' matches '/schema', '/schema/', and '/schema/whatever...'
diff --git a/solr/core/src/test/org/apache/solr/rest/schema/TestManagedSchemaFieldResource.java b/solr/core/src/test/org/apache/solr/rest/schema/TestManagedSchemaFieldResource.java
index 7c679b390e9..71e1864d241 100644
--- a/solr/core/src/test/org/apache/solr/rest/schema/TestManagedSchemaFieldResource.java
+++ b/solr/core/src/test/org/apache/solr/rest/schema/TestManagedSchemaFieldResource.java
@@ -46,7 +46,7 @@ public class TestManagedSchemaFieldResource extends RestTestBase {
     tmpConfDir = new File(tmpSolrHome, confDir);
     FileUtils.copyDirectory(new File(TEST_HOME()), tmpSolrHome.getAbsoluteFile());
-    final SortedMap extraServlets = new TreeMap();
+    final SortedMap extraServlets = new TreeMap<>();
     final ServletHolder solrRestApi = new ServletHolder("SolrRestApi", ServerServlet.class);
     solrRestApi.setInitParameter("org.restlet.application", "org.apache.solr.rest.SolrRestApi");
     extraServlets.put(solrRestApi, "/schema/*");  // '/schema/*' matches '/schema', '/schema/', and '/schema/whatever...'
diff --git a/solr/core/src/test/org/apache/solr/rest/schema/TestSerializedLuceneMatchVersion.java b/solr/core/src/test/org/apache/solr/rest/schema/TestSerializedLuceneMatchVersion.java
index cfe66fce098..c9d4816cea5 100644
--- a/solr/core/src/test/org/apache/solr/rest/schema/TestSerializedLuceneMatchVersion.java
+++ b/solr/core/src/test/org/apache/solr/rest/schema/TestSerializedLuceneMatchVersion.java
@@ -30,7 +30,7 @@ public class TestSerializedLuceneMatchVersion extends RestTestBase {
   @BeforeClass
   public static void init() throws Exception {
-    final SortedMap extraServlets = new TreeMap();
+    final SortedMap extraServlets = new TreeMap<>();
     final ServletHolder solrRestApi = new ServletHolder("SolrRestApi", ServerServlet.class);
     solrRestApi.setInitParameter("org.restlet.application", "org.apache.solr.rest.SolrRestApi");
     extraServlets.put(solrRestApi, "/schema/*");  // '/schema/*' matches '/schema', '/schema/', and '/schema/whatever...'
diff --git a/solr/core/src/test/org/apache/solr/schema/ChangedSchemaMergeTest.java b/solr/core/src/test/org/apache/solr/schema/ChangedSchemaMergeTest.java
index 56247c1f5f6..6a09ef8ebc5 100644
--- a/solr/core/src/test/org/apache/solr/schema/ChangedSchemaMergeTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/ChangedSchemaMergeTest.java
@@ -45,7 +45,7 @@ public class ChangedSchemaMergeTest extends SolrTestCaseJ4 {
   private void addDoc(SolrCore core, String... fieldValues) throws IOException {
     UpdateHandler updater = core.getUpdateHandler();
-    AddUpdateCommand cmd = new AddUpdateCommand(new LocalSolrQueryRequest(core, new NamedList()));
+    AddUpdateCommand cmd = new AddUpdateCommand(new LocalSolrQueryRequest(core, new NamedList<>()));
     cmd.solrDoc = sdoc((Object[]) fieldValues);
     updater.addDoc(cmd);
   }
@@ -83,7 +83,7 @@ public class ChangedSchemaMergeTest extends SolrTestCaseJ4 {
       addDoc(changed, "id", "2", "which", "15", "text", "some stuff with which");
       addDoc(changed, "id", "3", "which", "15", "text", "some stuff with which");
       addDoc(changed, "id", "4", "which", "15", "text", "some stuff with which");
-      SolrQueryRequest req = new LocalSolrQueryRequest(changed, new NamedList());
+      SolrQueryRequest req = new LocalSolrQueryRequest(changed, new NamedList<>());
       changed.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
       // write the new schema out and make it current
diff --git a/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java b/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java
index 31b003799b7..3b09d82df87 100644
--- a/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/CopyFieldTest.java
@@ -128,7 +128,7 @@ public class CopyFieldTest extends SolrTestCaseJ4 {
     assertU(adoc("id", "10", "title", "test copy field", "text_en", "this is a simple test of the copy field functionality"));
     assertU(commit());
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put( CommonParams.Q, "text_en:simple" );
     args.put( "indent", "true" );
     SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
@@ -138,7 +138,7 @@ public class CopyFieldTest extends SolrTestCaseJ4 {
             ,"//result/doc[1]/int[@name='id'][.='10']"
             );
-    args = new HashMap();
+    args = new HashMap<>();
     args.put( CommonParams.Q, "highlight:simple" );
     args.put( "indent", "true" );
     req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
@@ -148,14 +148,14 @@ public class CopyFieldTest extends SolrTestCaseJ4 {
             ,"//result/doc[1]/arr[@name='highlight']/str[.='this is a simple test of ']"
             );
-    args = new HashMap();
+    args = new HashMap<>();
     args.put( CommonParams.Q, "text_en:functionality" );
     args.put( "indent", "true" );
     req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
     assertQ("Make sure they got in", req
            ,"//*[@numFound='1']");
-    args = new HashMap();
+    args = new HashMap<>();
     args.put( CommonParams.Q, "highlight:functionality" );
     args.put( "indent", "true" );
     req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
@@ -189,7 +189,7 @@ public class CopyFieldTest extends SolrTestCaseJ4 {
     assertU(adoc("id", "A5", "sku1", "10-1839ACX-93", "sku2", "AAM46"));
     assertU(commit());
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put( CommonParams.Q, "text:AAM46" );
     args.put( "indent", "true" );
     SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
@@ -198,7 +198,7 @@ public class CopyFieldTest extends SolrTestCaseJ4 {
             ,"//result/doc[1]/str[@name='id'][.='A5']"
             );
-    args = new HashMap();
+    args = new HashMap<>();
     args.put( CommonParams.Q, "1_s:10-1839ACX-93" );
     args.put( "indent", "true" );
     req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
@@ -208,14 +208,14 @@ public class CopyFieldTest extends SolrTestCaseJ4 {
             ,"//result/doc[1]/arr[@name='sku1']/str[.='10-1839ACX-93']"
             );
-    args = new HashMap();
+    args = new HashMap<>();
     args.put( CommonParams.Q, "1_dest_sub_s:10-1839ACX-93" );
     args.put( "indent", "true" );
"true" ); req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); assertQ("sku1 copied to *_dest_sub_s (*_s subset pattern)", req ,"//*[@numFound='1']"); - args = new HashMap(); + args = new HashMap<>(); args.put( CommonParams.Q, "dest_sub_no_ast_s:AAM46" ); args.put( "indent", "true" ); req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); @@ -237,7 +237,7 @@ public class CopyFieldTest extends SolrTestCaseJ4 { assertU(adoc("id", "A5", "sku1", "10-1839ACX-93", "testing123_s", "AAM46")); assertU(commit()); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put( CommonParams.Q, "text:AAM46" ); args.put( "indent", "true" ); SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); diff --git a/solr/core/src/test/org/apache/solr/schema/IndexSchemaTest.java b/solr/core/src/test/org/apache/solr/schema/IndexSchemaTest.java index 5777e166fc3..1e997207e74 100644 --- a/solr/core/src/test/org/apache/solr/schema/IndexSchemaTest.java +++ b/solr/core/src/test/org/apache/solr/schema/IndexSchemaTest.java @@ -48,7 +48,7 @@ public class IndexSchemaTest extends SolrTestCaseJ4 { assertU(adoc("id", "10", "title", "test", "aaa_dynamic", "aaa")); assertU(commit()); - Map args = new HashMap(); + Map args = new HashMap<>(); args.put( CommonParams.Q, "title:test" ); args.put( "indent", "true" ); SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); @@ -58,7 +58,7 @@ public class IndexSchemaTest extends SolrTestCaseJ4 { ,"//result/doc[1]/int[@name='id'][.='10']" ); - args = new HashMap(); + args = new HashMap<>(); args.put( CommonParams.Q, "aaa_dynamic:aaa" ); args.put( "indent", "true" ); req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); @@ -67,7 +67,7 @@ public class IndexSchemaTest extends SolrTestCaseJ4 { ,"//result/doc[1]/int[@name='id'][.='10']" ); - args = new HashMap(); + args = new HashMap<>(); args.put( CommonParams.Q, "dynamic_aaa:aaa" ); args.put( "indent", "true" ); req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); diff --git a/solr/core/src/test/org/apache/solr/schema/MockExchangeRateProvider.java b/solr/core/src/test/org/apache/solr/schema/MockExchangeRateProvider.java index a8656aa71b3..e342e015828 100644 --- a/solr/core/src/test/org/apache/solr/schema/MockExchangeRateProvider.java +++ b/solr/core/src/test/org/apache/solr/schema/MockExchangeRateProvider.java @@ -29,7 +29,7 @@ import org.apache.solr.common.SolrException.ErrorCode; * Simple mock provider with fixed rates and some assertions */ public class MockExchangeRateProvider implements ExchangeRateProvider { - private static Map map = new HashMap(); + private static Map map = new HashMap<>(); static { map.put("USD,EUR", 0.8); map.put("EUR,USD", 1.2); @@ -59,7 +59,7 @@ public class MockExchangeRateProvider implements ExchangeRateProvider { Set currenciesPairs = map.keySet(); Set returnSet; - returnSet = new HashSet(); + returnSet = new HashSet<>(); for (String c : currenciesPairs) { String[] pairs = c.split(","); returnSet.add(pairs[0]); diff --git a/solr/core/src/test/org/apache/solr/schema/ModifyConfFileTest.java b/solr/core/src/test/org/apache/solr/schema/ModifyConfFileTest.java index 23bc5d5bf75..e4c4e270b10 100644 --- a/solr/core/src/test/org/apache/solr/schema/ModifyConfFileTest.java +++ b/solr/core/src/test/org/apache/solr/schema/ModifyConfFileTest.java @@ -82,7 +82,7 @@ public class ModifyConfFileTest extends SolrTestCaseJ4 { core.execute(handler, new LocalSolrQueryRequest(core, params), rsp); 
     assertEquals(rsp.getException().getMessage(), "No file name specified for write operation.");
-    ArrayList streams = new ArrayList( 2 );
+    ArrayList streams = new ArrayList<>( 2 );
     streams.add(new ContentStreamBase.StringStream("Testing rewrite of schema.xml file." ) );
     params = params("op", "write", "file", "bogus.txt");
diff --git a/solr/core/src/test/org/apache/solr/schema/OpenExchangeRatesOrgProviderTest.java b/solr/core/src/test/org/apache/solr/schema/OpenExchangeRatesOrgProviderTest.java
index 5aa603be044..b8baff584e2 100644
--- a/solr/core/src/test/org/apache/solr/schema/OpenExchangeRatesOrgProviderTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/OpenExchangeRatesOrgProviderTest.java
@@ -34,7 +34,7 @@ public class OpenExchangeRatesOrgProviderTest extends SolrTestCaseJ4 {
   OpenExchangeRatesOrgProvider oerp;
   ResourceLoader loader;
-  private final Map mockParams = new HashMap();
+  private final Map mockParams = new HashMap<>();
   @Override
@@ -59,7 +59,7 @@ public class OpenExchangeRatesOrgProviderTest extends SolrTestCaseJ4 {
         "open-exchange-rates.json", oerp.ratesFileLocation);
     assertEquals("Wrong default interval", (1440*60), oerp.refreshIntervalSeconds);
-    Map params = new HashMap();
+    Map params = new HashMap<>();
     params.put(OpenExchangeRatesOrgProvider.PARAM_RATES_FILE_LOCATION, "http://foo.bar/baz");
     params.put(OpenExchangeRatesOrgProvider.PARAM_REFRESH_INTERVAL, "100");
diff --git a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldTest.java b/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldTest.java
index 119ee2f67f6..f7135f46302 100644
--- a/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/PreAnalyzedFieldTest.java
@@ -85,7 +85,7 @@ public class PreAnalyzedFieldTest extends SolrTestCaseJ4 {
   public void testValidSimple() {
     PreAnalyzedField paf = new PreAnalyzedField();
     // use Simple format
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put(PreAnalyzedField.PARSER_IMPL, SimplePreAnalyzedParser.class.getName());
     paf.init(h.getCore().getLatestSchema(), args);
     PreAnalyzedParser parser = new SimplePreAnalyzedParser();
@@ -128,7 +128,7 @@ public class PreAnalyzedFieldTest extends SolrTestCaseJ4 {
   public void testParsers() {
     PreAnalyzedField paf = new PreAnalyzedField();
     // use Simple format
-    HashMap args = new HashMap();
+    HashMap args = new HashMap<>();
     args.put(PreAnalyzedField.PARSER_IMPL, SimplePreAnalyzedParser.class.getName());
     paf.init(h.getCore().getLatestSchema(), args);
     try {
diff --git a/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java b/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java
index 4649103abed..6581e4f8958 100644
--- a/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java
+++ b/solr/core/src/test/org/apache/solr/schema/PrimitiveFieldTypeTest.java
@@ -45,7 +45,7 @@ public class PrimitiveFieldTypeTest extends SolrTestCaseJ4 {
     System.setProperty("solr.test.sys.prop2", "proptwo");
     System.setProperty("solr.allow.unsafe.resourceloading", "true");
-    initMap = new HashMap();
+    initMap = new HashMap<>();
     config = new SolrConfig(new SolrResourceLoader("solr/collection1"), testConfHome + "solrconfig.xml", null);
   }
diff --git a/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java b/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java
index 042ea10560b..ea423abca59 100644
--- a/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java
+++ b/solr/core/src/test/org/apache/solr/schema/SortableBinaryField.java
@@ -45,7 +45,7 @@ public class SortableBinaryField extends BinaryField {
   @Override
   public List createFields(SchemaField field, Object value, float boost) {
     if (field.hasDocValues()) {
-      List fields = new ArrayList();
+      List fields = new ArrayList<>();
       StorableField storedField = createField(field, value, boost);
       fields.add(storedField);
       ByteBuffer byteBuffer = toObject(storedField);
diff --git a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaAddField.java b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaAddField.java
index b525e4e653d..b60b3a071d1 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaAddField.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestCloudManagedSchemaAddField.java
@@ -57,14 +57,14 @@ public class TestCloudManagedSchemaAddField extends AbstractFullDistribZkTestBas
   @Override
   public SortedMap getExtraServlets() {
-    final SortedMap extraServlets = new TreeMap();
+    final SortedMap extraServlets = new TreeMap<>();
     final ServletHolder solrRestApi = new ServletHolder("SolrRestApi", ServerServlet.class);
     solrRestApi.setInitParameter("org.restlet.application", "org.apache.solr.rest.SolrRestApi");
     extraServlets.put(solrRestApi, "/schema/*");  // '/schema/*' matches '/schema', '/schema/', and '/schema/whatever...'
     return extraServlets;
   }
-  private List restTestHarnesses = new ArrayList();
+  private List restTestHarnesses = new ArrayList<>();
   private void setupHarnesses() {
     for (int i = 0 ; i < clients.size() ; ++i) {
diff --git a/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java b/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java
index a127b67c6b1..2c95ef6c86d 100644
--- a/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java
+++ b/solr/core/src/test/org/apache/solr/schema/TestManagedSchema.java
@@ -176,7 +176,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase {
     String managedSchemaContents = FileUtils.readFileToString(managedSchemaFile, "UTF-8");
     assertFalse(managedSchemaContents.contains("\"new_field\""));
-    Map options = new HashMap();
+    Map options = new HashMap<>();
     options.put("stored", "false");
     IndexSchema oldSchema = h.getCore().getLatestSchema();
     String fieldName = "new_field";
@@ -225,7 +225,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase {
     assertU(commit());
     assertQ(req("new_field:thing1"), "//*[@numFound='0']");
-    Map options = new HashMap();
+    Map options = new HashMap<>();
     options.put("stored", "false");
     IndexSchema oldSchema = h.getCore().getLatestSchema();
     String fieldName = "new_field";
@@ -252,7 +252,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase {
     String errString = "Field 'str' already exists.";
     ignoreException(Pattern.quote(errString));
     try {
-      Map options = new HashMap();
+      Map options = new HashMap<>();
       IndexSchema oldSchema = h.getCore().getLatestSchema();
       String fieldName = "str";
       String fieldType = "string";
@@ -280,7 +280,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase {
     System.setProperty("managed.schema.mutable", "true");
     initCore("solrconfig-managed-schema.xml", "schema-one-field-no-dynamic-field.xml", tmpSolrHome.getPath());
-    Map options = new HashMap();
+    Map options = new HashMap<>();
     options.put("stored", "false");
     IndexSchema oldSchema = h.getCore().getLatestSchema();
     String fieldName = "new_field";
@@ -320,7 +320,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase {
String errString = "Can't add dynamic field '*_s'."; ignoreException(Pattern.quote(errString)); try { - Map options = new HashMap(); + Map options = new HashMap<>(); IndexSchema oldSchema = h.getCore().getLatestSchema(); String fieldName = "*_s"; String fieldType = "string"; @@ -356,7 +356,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase { assertNull("Field '" + fieldName + "' is present in the schema", h.getCore().getLatestSchema().getFieldOrNull(fieldName)); - Map options = new HashMap(); + Map options = new HashMap<>(); IndexSchema oldSchema = h.getCore().getLatestSchema(); String fieldType = "string_disk"; SchemaField newField = oldSchema.newField(fieldName, fieldType, options); @@ -384,7 +384,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase { assertNull("Field '" + fieldName + "' is present in the schema", h.getCore().getLatestSchema().getFieldOrNull(fieldName)); - Map options = new HashMap(); + Map options = new HashMap<>(); IndexSchema oldSchema = h.getCore().getLatestSchema(); String fieldType = "text"; SchemaField newField = oldSchema.newField(fieldName, fieldType, options); @@ -409,7 +409,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase { String managedSchemaContents = FileUtils.readFileToString(managedSchemaFile, "UTF-8"); assertFalse(managedSchemaContents.contains("\"new_field\"")); - Map options = new HashMap(); + Map options = new HashMap<>(); options.put("stored", "false"); IndexSchema oldSchema = h.getCore().getLatestSchema(); assertEquals("str", oldSchema.getUniqueKeyField().getName()); @@ -445,7 +445,7 @@ public class TestManagedSchema extends AbstractBadConfigTestBase { assertNull("Field '" + fieldName + "' is present in the schema", h.getCore().getLatestSchema().getFieldOrNull(fieldName)); - Map options = new HashMap(); + Map options = new HashMap<>(); IndexSchema oldSchema = h.getCore().getLatestSchema(); String fieldType = "text"; SchemaField newField = oldSchema.newField(fieldName, fieldType, options); diff --git a/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java b/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java index 5a822984925..950d9366993 100644 --- a/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java +++ b/solr/core/src/test/org/apache/solr/search/CursorMarkTest.java @@ -248,7 +248,7 @@ public class CursorMarkTest extends SolrTestCaseJ4 { * a list of the fields in the schema - excluding _version_ */ private Collection getAllFieldNames() { - ArrayList names = new ArrayList(37); + ArrayList names = new ArrayList<>(37); for (String f : h.getCore().getLatestSchema().getFields().keySet()) { if (! 
f.equals("_version_")) { names.add(f); diff --git a/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java b/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java index ad6db3c177b..a8d42317c52 100644 --- a/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java +++ b/solr/core/src/test/org/apache/solr/search/QueryEqualityTest.java @@ -71,9 +71,9 @@ public class QueryEqualityTest extends SolrTestCaseJ4 { /** @see #testParserCoverage */ private static boolean doAssertParserCoverage = false; /** @see #testParserCoverage */ - private static final Set qParsersTested = new HashSet(); + private static final Set qParsersTested = new HashSet<>(); /** @see #testParserCoverage */ - private static final Set valParsersTested = new HashSet(); + private static final Set valParsersTested = new HashSet<>(); public void testDateMathParsingEquality() throws Exception { diff --git a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java index 37f2247d14f..e4436e3771b 100644 --- a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java +++ b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java @@ -1146,7 +1146,7 @@ public class TestExtendedDismaxParser extends SolrTestCaseJ4 { public FuzzyQueryParser(QParser parser, String defaultField) { super(parser, defaultField); - frequentlyMisspelledWords = new HashSet(); + frequentlyMisspelledWords = new HashSet<>(); frequentlyMisspelledWords.add("absence"); } diff --git a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java index fe336987cff..40ebb987fcb 100644 --- a/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java +++ b/solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java @@ -38,8 +38,8 @@ import java.util.concurrent.atomic.AtomicInteger; public class TestFastLRUCache extends LuceneTestCase { public void testPercentageAutowarm() throws IOException { - FastLRUCache fastCache = new FastLRUCache(); - Map params = new HashMap(); + FastLRUCache fastCache = new FastLRUCache<>(); + Map params = new HashMap<>(); params.put("size", "100"); params.put("initialSize", "10"); params.put("autowarmCount", "100%"); @@ -56,7 +56,7 @@ public class TestFastLRUCache extends LuceneTestCase { assertEquals(1L, nl.get("hits")); assertEquals(101L, nl.get("inserts")); assertEquals(null, fastCache.get(1)); // first item put in should be the first out - FastLRUCache fastCacheNew = new FastLRUCache(); + FastLRUCache fastCacheNew = new FastLRUCache<>(); fastCacheNew.init(params, o, cr); fastCacheNew.warm(null, fastCache); fastCacheNew.setState(SolrCache.State.LIVE); @@ -84,8 +84,8 @@ public class TestFastLRUCache extends LuceneTestCase { } private void doTestPercentageAutowarm(int limit, int percentage, int[] hits, int[]misses) { - FastLRUCache fastCache = new FastLRUCache(); - Map params = new HashMap(); + FastLRUCache fastCache = new FastLRUCache<>(); + Map params = new HashMap<>(); params.put("size", String.valueOf(limit)); params.put("initialSize", "10"); params.put("autowarmCount", percentage + "%"); @@ -96,7 +96,7 @@ public class TestFastLRUCache extends LuceneTestCase { fastCache.put(i, "" + i);//adds numbers from 1 to 100 } - FastLRUCache fastCacheNew = new FastLRUCache(); + FastLRUCache fastCacheNew = new FastLRUCache<>(); fastCacheNew.init(params, o, cr); fastCacheNew.warm(null, fastCache); 
     fastCacheNew.setState(SolrCache.State.LIVE);
@@ -84,8 +84,8 @@ public class TestFastLRUCache extends LuceneTestCase {
   }
   private void doTestPercentageAutowarm(int limit, int percentage, int[] hits, int[]misses) {
-    FastLRUCache fastCache = new FastLRUCache();
-    Map params = new HashMap();
+    FastLRUCache fastCache = new FastLRUCache<>();
+    Map params = new HashMap<>();
     params.put("size", String.valueOf(limit));
     params.put("initialSize", "10");
     params.put("autowarmCount", percentage + "%");
@@ -96,7 +96,7 @@ public class TestFastLRUCache extends LuceneTestCase {
       fastCache.put(i, "" + i);//adds numbers from 1 to 100
     }
-    FastLRUCache fastCacheNew = new FastLRUCache();
+    FastLRUCache fastCacheNew = new FastLRUCache<>();
     fastCacheNew.init(params, o, cr);
     fastCacheNew.warm(null, fastCache);
     fastCacheNew.setState(SolrCache.State.LIVE);
@@ -116,8 +116,8 @@ public class TestFastLRUCache extends LuceneTestCase {
   }
   public void testNoAutowarm() throws IOException {
-    FastLRUCache fastCache = new FastLRUCache();
-    Map params = new HashMap();
+    FastLRUCache fastCache = new FastLRUCache<>();
+    Map params = new HashMap<>();
     params.put("size", "100");
     params.put("initialSize", "10");
     CacheRegenerator cr = new NoOpRegenerator();
@@ -133,7 +133,7 @@ public class TestFastLRUCache extends LuceneTestCase {
     assertEquals(1L, nl.get("hits"));
     assertEquals(101L, nl.get("inserts"));
     assertEquals(null, fastCache.get(1)); // first item put in should be the first out
-    FastLRUCache fastCacheNew = new FastLRUCache();
+    FastLRUCache fastCacheNew = new FastLRUCache<>();
     fastCacheNew.init(params, o, cr);
     fastCacheNew.warm(null, fastCache);
     fastCacheNew.setState(SolrCache.State.LIVE);
@@ -145,8 +145,8 @@ public class TestFastLRUCache extends LuceneTestCase {
   }
   public void testFullAutowarm() throws IOException {
-    FastLRUCache cache = new FastLRUCache();
-    Map params = new HashMap();
+    FastLRUCache cache = new FastLRUCache<>();
+    Map params = new HashMap<>();
     params.put("size", "100");
     params.put("initialSize", "10");
     params.put("autowarmCount", "-1");
@@ -162,7 +162,7 @@ public class TestFastLRUCache extends LuceneTestCase {
     assertEquals(null, cache.get(1)); // first item put in should be the first out
-    FastLRUCache cacheNew = new FastLRUCache();
+    FastLRUCache cacheNew = new FastLRUCache<>();
     cacheNew.init(params, o, cr);
     cacheNew.warm(null, cache);
     cacheNew.setState(SolrCache.State.LIVE);
@@ -217,7 +217,7 @@ public class TestFastLRUCache extends LuceneTestCase {
   }
   public void testOldestItems() {
-    ConcurrentLRUCache cache = new ConcurrentLRUCache(100, 90);
+    ConcurrentLRUCache cache = new ConcurrentLRUCache<>(100, 90);
     for (int i = 0; i < 50; i++) {
       cache.put(i + 1, "" + (i + 1));
     }
@@ -246,7 +246,7 @@ public class TestFastLRUCache extends LuceneTestCase {
     int sz = random().nextInt(100)+5;
     int lowWaterMark = random().nextInt(sz-3)+1;
     int keyrange = random().nextInt(sz*3)+1;
-    ConcurrentLRUCache cache = new ConcurrentLRUCache(sz, lowWaterMark);
+    ConcurrentLRUCache cache = new ConcurrentLRUCache<>(sz, lowWaterMark);
     for (int i=0; i<10000; i++) {
       cache.put(random().nextInt(keyrange), "");
       cache.get(random().nextInt(keyrange));
diff --git a/solr/core/src/test/org/apache/solr/search/TestFiltering.java b/solr/core/src/test/org/apache/solr/search/TestFiltering.java
index 53665d08b07..f81d36efd24 100644
--- a/solr/core/src/test/org/apache/solr/search/TestFiltering.java
+++ b/solr/core/src/test/org/apache/solr/search/TestFiltering.java
@@ -271,7 +271,7 @@ public class TestFiltering extends SolrTestCaseJ4 {
     int nonZeros=0;
     for (int qiter=0; qiter
-      List params = new ArrayList();
+      List params = new ArrayList<>();
       params.add("q"); params.add(makeRandomQuery(model, true, false));
       int nFilters = random().nextInt(5);
diff --git a/solr/core/src/test/org/apache/solr/search/TestLFUCache.java b/solr/core/src/test/org/apache/solr/search/TestLFUCache.java
index 40c75007583..3835af3f180 100644
--- a/solr/core/src/test/org/apache/solr/search/TestLFUCache.java
+++ b/solr/core/src/test/org/apache/solr/search/TestLFUCache.java
@@ -186,7 +186,7 @@ public class TestLFUCache extends SolrTestCaseJ4 {
   @Test
   public void testItemOrdering() {
-    ConcurrentLFUCache cache = new ConcurrentLFUCache(100, 90);
+    ConcurrentLFUCache cache = new ConcurrentLFUCache<>(100, 90);
     try {
       for (int i = 0; i < 50; i++) {
         cache.put(i + 1, "" + (i + 1));
@@ -252,7 +252,7 @@ public class TestLFUCache extends SolrTestCaseJ4 {
@@ -252,7 +252,7 @@ public class TestLFUCache extends SolrTestCaseJ4 {
   @Test
   public void testTimeDecay() {
-    ConcurrentLFUCache cacheDecay = new ConcurrentLFUCache(10, 9);
+    ConcurrentLFUCache cacheDecay = new ConcurrentLFUCache<>(10, 9);
     try {
       for (int i = 1; i < 21; i++) {
         cacheDecay.put(i, Integer.toString(i));
@@ -326,7 +326,7 @@ public class TestLFUCache extends SolrTestCaseJ4 {
   @Test
   public void testTimeNoDecay() {
-    ConcurrentLFUCache cacheNoDecay = new ConcurrentLFUCache(10, 9,
+    ConcurrentLFUCache cacheNoDecay = new ConcurrentLFUCache<>(10, 9,
         (int) Math.floor((9 + 10) / 2), (int) Math.ceil(0.75 * 10), false, false, null, false);
     try {
       for (int i = 1; i < 21; i++) {
diff --git a/solr/core/src/test/org/apache/solr/search/TestLRUCache.java b/solr/core/src/test/org/apache/solr/search/TestLRUCache.java
index 42c916f6c0f..90763ba28ea 100644
--- a/solr/core/src/test/org/apache/solr/search/TestLRUCache.java
+++ b/solr/core/src/test/org/apache/solr/search/TestLRUCache.java
@@ -31,8 +31,8 @@ import org.apache.solr.common.util.NamedList;
 public class TestLRUCache extends LuceneTestCase {
   public void testFullAutowarm() throws IOException {
-    LRUCache lruCache = new LRUCache();
-    Map params = new HashMap();
+    LRUCache lruCache = new LRUCache<>();
+    Map params = new HashMap<>();
     params.put("size", "100");
     params.put("initialSize", "10");
     params.put("autowarmCount", "100%");
@@ -45,7 +45,7 @@ public class TestLRUCache extends LuceneTestCase {
     assertEquals("25", lruCache.get(25));
     assertEquals(null, lruCache.get(110));
     assertEquals(null, lruCache.get(1)); // first item put in should be the first out
-    LRUCache lruCacheNew = new LRUCache();
+    LRUCache lruCacheNew = new LRUCache<>();
     lruCacheNew.init(params, o, cr);
     lruCacheNew.warm(null, lruCache);
     lruCacheNew.setState(SolrCache.State.LIVE);
@@ -64,8 +64,8 @@ public class TestLRUCache extends LuceneTestCase {
   }
   private void doTestPercentageAutowarm(int limit, int percentage, int[] hits, int[]misses) {
-    LRUCache lruCache = new LRUCache();
-    Map params = new HashMap();
+    LRUCache lruCache = new LRUCache<>();
+    Map params = new HashMap<>();
     params.put("size", String.valueOf(limit));
     params.put("initialSize", "10");
     params.put("autowarmCount", percentage + "%");
@@ -76,7 +76,7 @@ public class TestLRUCache extends LuceneTestCase {
       lruCache.put(i, "" + i);//adds numbers from 1 to 100
     }
-    LRUCache lruCacheNew = new LRUCache();
+    LRUCache lruCacheNew = new LRUCache<>();
     lruCacheNew.init(params, o, cr);
     lruCacheNew.warm(null, lruCache);
     lruCacheNew.setState(SolrCache.State.LIVE);
@@ -94,8 +94,8 @@ public class TestLRUCache extends LuceneTestCase {
   @SuppressWarnings("unchecked")
   public void testNoAutowarm() throws IOException {
-    LRUCache lruCache = new LRUCache();
-    Map params = new HashMap();
+    LRUCache lruCache = new LRUCache<>();
+    Map params = new HashMap<>();
     params.put("size", "100");
     params.put("initialSize", "10");
     CacheRegenerator cr = new NoOpRegenerator();
@@ -111,7 +111,7 @@ public class TestLRUCache extends LuceneTestCase {
     assertEquals(1L, nl.get("hits"));
     assertEquals(101L, nl.get("inserts"));
     assertEquals(null, lruCache.get(1)); // first item put in should be the first out
-    LRUCache lruCacheNew = new LRUCache();
+    LRUCache lruCacheNew = new LRUCache<>();
     lruCacheNew.init(params, o, cr);
     lruCacheNew.warm(null, lruCache);
     lruCacheNew.setState(SolrCache.State.LIVE);
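[Editorial aside, not part of the patch: every hunk above applies the same Java 7 "diamond operator" rewrite, in which the compiler infers a generic constructor's type arguments from the declaration instead of having them spelled out twice. A minimal self-contained sketch of the before/after pattern, with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    class DiamondBasics {
        // Pre-Java 7: type arguments repeated on both sides of the assignment.
        Map<String, String> verbose = new HashMap<String, String>();
        // Java 7+: the diamond <> lets the compiler infer them from the left-hand side.
        Map<String, String> concise = new HashMap<>();
    }
]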
diff --git a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
index 2023e5b1187..3929b358aa0 100644
--- a/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestMaxScoreQueryParser.java
@@ -135,7 +135,7 @@ public class TestMaxScoreQueryParser extends AbstractSolrTestCase {
   private Query parse(String q, String... params) {
     try {
       ModifiableSolrParams p = new ModifiableSolrParams();
-      ArrayList al = new ArrayList(Arrays.asList(params));
+      ArrayList al = new ArrayList<>(Arrays.asList(params));
       while(al.size() >= 2) {
         p.add(al.remove(0), al.remove(0));
       }
diff --git a/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java b/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java
index f1da7c29c3d..f0a68a65a24 100644
--- a/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java
+++ b/solr/core/src/test/org/apache/solr/search/TestPseudoReturnFields.java
@@ -471,7 +471,7 @@ public class TestPseudoReturnFields extends SolrTestCaseJ4 {
             ,"//result/doc[count(*)=6]"
             );
-    final List params = new ArrayList((fl.size()*2) + 4);
+    final List params = new ArrayList<>((fl.size()*2) + 4);
     final StringBuilder info = new StringBuilder();
     params.addAll(Arrays.asList("q","*:*", "rows", "1"));
     for (String item : fl) {
diff --git a/solr/core/src/test/org/apache/solr/search/TestRTGBase.java b/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
index d107a8ba687..aa4c3beec36 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRTGBase.java
@@ -55,8 +55,8 @@ public class TestRTGBase extends SolrTestCaseJ4 {
     }
   }
-  protected final ConcurrentHashMap model = new ConcurrentHashMap();
-  protected Map committedModel = new HashMap();
+  protected final ConcurrentHashMap model = new ConcurrentHashMap<>();
+  protected Map committedModel = new HashMap<>();
   protected long snapshotCount;
   protected long committedModelClock;
   protected volatile int lastId;
diff --git a/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java b/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java
index 22e36ffe425..ad2dcadd321 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java
@@ -88,7 +88,7 @@ public class TestRangeQuery extends SolrTestCaseJ4 {
     String[] dates = {"0299-12-31T23:59:59.999Z","2000-01-01T00:00:00.000Z","2000-01-01T00:00:00.001Z", "0299-12-31T23:59:59.998Z","2000-01-01T00:00:00.002Z" };
     // fields that normal range queries should work on
-    Map norm_fields = new HashMap();
+    Map norm_fields = new HashMap<>();
     norm_fields.put("foo_i", ints);
     norm_fields.put("foo_l", longs);
     norm_fields.put("foo_d", doubles);
@@ -103,7 +103,7 @@ public class TestRangeQuery extends SolrTestCaseJ4 {
     // fields that frange queries should work on
-    Map frange_fields = new HashMap();
+    Map frange_fields = new HashMap<>();
     frange_fields.put("foo_i", ints);
     frange_fields.put("foo_l", longs);
     frange_fields.put("foo_d", doubles);
@@ -120,12 +120,12 @@ public class TestRangeQuery extends SolrTestCaseJ4 {
     frange_fields.put("foo_s", strings);
     frange_fields.put("foo_dt", dates);
-    Map all_fields = new HashMap();
+    Map all_fields = new HashMap<>();
     all_fields.putAll(norm_fields);
     all_fields.putAll(frange_fields);
     for (int j=0; j fields = new ArrayList();
+      List fields = new ArrayList<>();
       fields.add("id");
       fields.add(""+j);
       for (Map.Entry entry : all_fields.entrySet()) {
@@ -239,7 +239,7 @@ public class TestRangeQuery extends SolrTestCaseJ4 {
     // lower=2; upper=2; inclusive=true;
     // inclusive=true; lowerMissing=true; upperMissing=true;
-    List qs = new ArrayList();
+    List qs = new ArrayList<>();
     for (String field : norm_fields) {
       String q = field + ':' + (inclusive?'[':'{') + (lowerMissing?"*":lower)
diff --git a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
index 43942b03096..1581f64afcf 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
@@ -438,7 +438,7 @@ public class TestRealTimeGet extends TestRTGBase {
     final AtomicInteger numCommitting = new AtomicInteger();
-    List threads = new ArrayList();
+    List threads = new ArrayList<>();
     for (int i=0; i(model); // take a snapshot
+          newCommittedModel = new HashMap<>(model); // take a snapshot
           version = snapshotCount++;
           verbose("took snapshot version=",version);
         }
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecovery.java b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
index 4b5ed59259d..1b8bc633cd9 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecovery.java
@@ -124,7 +124,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
     clearIndex();
     assertU(commit());
-    Deque versions = new ArrayDeque();
+    Deque versions = new ArrayDeque<>();
     versions.addFirst(addAndGetVersion(sdoc("id", "A1"), null));
     versions.addFirst(addAndGetVersion(sdoc("id", "A11"), null));
    versions.addFirst(addAndGetVersion(sdoc("id", "A12"), null));
@@ -771,7 +771,7 @@ public class TestRecovery extends SolrTestCaseJ4 {
     int start = 0;
     int maxReq = 50;
-    LinkedList versions = new LinkedList();
+    LinkedList versions = new LinkedList<>();
     addDocs(10, start, versions); start+=10;
     assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
     assertU(commit());
diff --git a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
index 2a86d45d702..9de9d3bf73f 100644
--- a/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
+++ b/solr/core/src/test/org/apache/solr/search/TestRecoveryHdfs.java
@@ -154,7 +154,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     clearIndex();
     assertU(commit());
-    Deque versions = new ArrayDeque();
+    Deque versions = new ArrayDeque<>();
     versions.addFirst(addAndGetVersion(sdoc("id", "A1"), null));
     versions.addFirst(addAndGetVersion(sdoc("id", "A11"), null));
     versions.addFirst(addAndGetVersion(sdoc("id", "A12"), null));
@@ -768,7 +768,7 @@ public class TestRecoveryHdfs extends SolrTestCaseJ4 {
     int start = 0;
     int maxReq = 50;
-    LinkedList versions = new LinkedList();
+    LinkedList versions = new LinkedList<>();
     addDocs(10, start, versions); start+=10;
     assertJQ(req("qt","/get", "getVersions",""+maxReq), "/versions==" + versions.subList(0,Math.min(maxReq,start)));
     assertU(commit());
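[Editorial aside, not part of the patch: the Deque and LinkedList hunks above show that diamond inference also works when the declared type is an interface or a supertype; the type arguments are taken from the declaration, not from the concrete class. A hedged sketch with hypothetical names:

    import java.util.ArrayDeque;
    import java.util.Deque;

    class DiamondWithInterfaces {
        // The compiler infers ArrayDeque<Long> from the declared Deque<Long>.
        Deque<Long> versions = new ArrayDeque<>();
    }
]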
diff --git a/solr/core/src/test/org/apache/solr/search/TestSearchPerf.java b/solr/core/src/test/org/apache/solr/search/TestSearchPerf.java
index 71d7a7c69b5..5bff259bb31 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSearchPerf.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSearchPerf.java
@@ -77,7 +77,7 @@ public class TestSearchPerf extends AbstractSolrTestCase {
   // Skip encoding for updating the index
   void createIndex2(int nDocs, String... fields) throws IOException {
-    Set fieldSet = new HashSet(Arrays.asList(fields));
+    Set fieldSet = new HashSet<>(Arrays.asList(fields));
     SolrQueryRequest req = lrf.makeRequest();
     SolrQueryResponse rsp = new SolrQueryResponse();
@@ -233,7 +233,7 @@ public class TestSearchPerf extends AbstractSolrTestCase {
     QParser parser = QParser.getParser("foomany_s:[" + l + " TO " + u + "]", null, req);
     Query rangeQ = parser.getQuery();
-    List filters = new ArrayList();
+    List filters = new ArrayList<>();
     filters.add(rangeQ);
     req.close();
diff --git a/solr/core/src/test/org/apache/solr/search/TestSort.java b/solr/core/src/test/org/apache/solr/search/TestSort.java
index 520ee7329dd..b31952191b3 100644
--- a/solr/core/src/test/org/apache/solr/search/TestSort.java
+++ b/solr/core/src/test/org/apache/solr/search/TestSort.java
@@ -237,7 +237,7 @@ public class TestSort extends SolrTestCaseJ4 {
     final boolean sortMissingLast = !luceneSort && r.nextBoolean();
     final boolean sortMissingFirst = !luceneSort && !sortMissingLast;
     final boolean reverse = r.nextBoolean();
-    List sfields = new ArrayList();
+    List sfields = new ArrayList<>();
     final boolean secondary = r.nextBoolean();
     final boolean luceneSort2 = r.nextBoolean();
@@ -263,7 +263,7 @@ public class TestSort extends SolrTestCaseJ4 {
     boolean scoreInOrder = r.nextBoolean();
     final TopFieldCollector topCollector = TopFieldCollector.create(sort, top, true, trackScores, trackMaxScores, scoreInOrder);
-    final List collectedDocs = new ArrayList();
+    final List collectedDocs = new ArrayList<>();
     // delegate and collect docs ourselves
     Collector myCollector = new Collector() {
       int docBase;
diff --git a/solr/core/src/test/org/apache/solr/search/TestStandardQParsers.java b/solr/core/src/test/org/apache/solr/search/TestStandardQParsers.java
index 3a74e6731ad..5fbef6abde1 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStandardQParsers.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStandardQParsers.java
@@ -50,9 +50,9 @@ public class TestStandardQParsers extends LuceneTestCase {
   public void testRegisteredName() throws Exception {
     Map> standardPlugins = getStandardQParsers();
-    List notStatic = new ArrayList(standardPlugins.size());
-    List notFinal = new ArrayList(standardPlugins.size());
-    List mismatch = new ArrayList(standardPlugins.size());
+    List notStatic = new ArrayList<>(standardPlugins.size());
+    List notFinal = new ArrayList<>(standardPlugins.size());
+    List mismatch = new ArrayList<>(standardPlugins.size());
     for (Map.Entry> pair : standardPlugins.entrySet()) {
       String regName = pair.getKey();
@@ -93,7 +93,7 @@ public class TestStandardQParsers extends LuceneTestCase {
     Object[] standardPluginsValue = QParserPlugin.standardPlugins;
     Map> standardPlugins
-        = new HashMap>(standardPluginsValue.length / 2);
+        = new HashMap<>(standardPluginsValue.length / 2);
     for (int i = 0; i < standardPluginsValue.length; i += 2) {
       @SuppressWarnings("unchecked")
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressLucene.java b/solr/core/src/test/org/apache/solr/search/TestStressLucene.java
index f27e363d115..c90d33dbe44 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressLucene.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressLucene.java
@@ -79,7 +79,7 @@ public class TestStressLucene extends TestRTGBase {
     final AtomicInteger numCommitting = new AtomicInteger();
-    List threads = new ArrayList();
+    List threads = new ArrayList<>();
     final FieldType idFt = new FieldType();
@@ -143,7 +143,7 @@ public class TestStressLucene extends TestRTGBase {
           if (reopenLock != null) reopenLock.lock();
           synchronized(globalLock) {
-            newCommittedModel = new HashMap(model); // take a snapshot
+            newCommittedModel = new HashMap<>(model); // take a snapshot
             version = snapshotCount++;
             oldReader = reader;
             oldReader.incRef(); // increment the reference since we will use this for reopening
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java b/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java
index b403c8d16c6..412908d0a5f 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressRecovery.java
@@ -82,7 +82,7 @@ public class TestStressRecovery extends TestRTGBase {
     final AtomicInteger numCommitting = new AtomicInteger();
-    List threads = new ArrayList();
+    List threads = new ArrayList<>();
     final AtomicLong testVersion = new AtomicLong(0);
@@ -119,7 +119,7 @@ public class TestStressRecovery extends TestRTGBase {
           long version;
           synchronized(globalLock) {
-            newCommittedModel = new HashMap(model); // take a snapshot
+            newCommittedModel = new HashMap<>(model); // take a snapshot
             version = snapshotCount++;
           }
@@ -348,7 +348,7 @@ public class TestStressRecovery extends TestRTGBase {
     // before we start buffering updates, we want to point
     // visibleModel away from the live model.
-    visibleModel = new ConcurrentHashMap(model);
+    visibleModel = new ConcurrentHashMap<>(model);
     synchronized (stateChangeLock) {
       uLog.bufferUpdates();
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressReorder.java b/solr/core/src/test/org/apache/solr/search/TestStressReorder.java
index f4e62f08fee..5d3ce31a1e0 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressReorder.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressReorder.java
@@ -99,7 +99,7 @@ public class TestStressReorder extends TestRTGBase {
     final AtomicInteger numCommitting = new AtomicInteger();
-    List threads = new ArrayList();
+    List threads = new ArrayList<>();
     final AtomicLong testVersion = new AtomicLong(0);
@@ -120,7 +120,7 @@ public class TestStressReorder extends TestRTGBase {
           long version;
           synchronized(TestStressReorder.this) {
-            newCommittedModel = new HashMap(model); // take a snapshot
+            newCommittedModel = new HashMap<>(model); // take a snapshot
             version = snapshotCount++;
           }
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java b/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java
index 2778051d8a5..807221a930f 100755
--- a/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressUserVersions.java
@@ -102,7 +102,7 @@ public class TestStressUserVersions extends TestRTGBase {
     final AtomicInteger numCommitting = new AtomicInteger();
-    List threads = new ArrayList();
+    List threads = new ArrayList<>();
     final AtomicLong testVersion = new AtomicLong(0);
@@ -123,7 +123,7 @@ public class TestStressUserVersions extends TestRTGBase {
           long version;
           synchronized(TestStressUserVersions.this) {
-            newCommittedModel = new HashMap(model); // take a snapshot
+            newCommittedModel = new HashMap<>(model); // take a snapshot
             version = snapshotCount++;
           }
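[Editorial aside, not part of the patch: the stress-test hunks above repeatedly rewrite copy constructors such as new HashMap<>(model); the inference still comes from the assignment target, not from the constructor argument. A hedged sketch of that snapshot pattern, with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class DiamondSnapshot {
        final Map<Integer, Long> model = new ConcurrentHashMap<>();

        Map<Integer, Long> snapshot() {
            // Copies the live model; <> infers <Integer, Long> from the target type.
            return new HashMap<>(model);
        }
    }
]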
diff --git a/solr/core/src/test/org/apache/solr/search/TestStressVersions.java b/solr/core/src/test/org/apache/solr/search/TestStressVersions.java
index 5d3f6f6f0e0..d91a2cc6a96 100644
--- a/solr/core/src/test/org/apache/solr/search/TestStressVersions.java
+++ b/solr/core/src/test/org/apache/solr/search/TestStressVersions.java
@@ -69,7 +69,7 @@ public class TestStressVersions extends TestRTGBase {
     final AtomicInteger numCommitting = new AtomicInteger();
-    List threads = new ArrayList();
+    List threads = new ArrayList<>();
     for (int i=0; i(model); // take a snapshot
+            newCommittedModel = new HashMap<>(model); // take a snapshot
             version = snapshotCount++;
           }
diff --git a/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java b/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
index 1ed6dbcf4f3..7f1bba79bbc 100644
--- a/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
+++ b/solr/core/src/test/org/apache/solr/search/function/TestFunctionQuery.java
@@ -103,7 +103,7 @@ public class TestFunctionQuery extends SolrTestCaseJ4 {
   void singleTest(String field, String funcTemplate, List args, float... results) {
     String parseableQuery = func(field, funcTemplate);
-    List nargs = new ArrayList(Arrays.asList("q", parseableQuery
+    List nargs = new ArrayList<>(Arrays.asList("q", parseableQuery
         ,"fl", "*,score"
         ,"indent","on"
         ,"rows","100"));
@@ -114,7 +114,7 @@ public class TestFunctionQuery extends SolrTestCaseJ4 {
       }
     }
-    List tests = new ArrayList();
+    List tests = new ArrayList<>();
     // Construct xpaths like the following:
     // "//doc[./float[@name='foo_pf']='10.0' and ./float[@name='score']='10.0']"
diff --git a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java
index 8e1e2821c48..3d8086201e7 100644
--- a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java
+++ b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java
@@ -81,7 +81,7 @@ public class BJQParserTest extends SolrTestCaseJ4 {
   private static int id=0;
   private static List> createBlocks() {
-    List> blocks = new ArrayList>();
+    List> blocks = new ArrayList<>();
     for (String parent : abcdef) {
       List block = createChildrenBlock(parent);
       block.add(new String[] {"parent_s", parent});
@@ -92,7 +92,7 @@ public class BJQParserTest extends SolrTestCaseJ4 {
   }
   private static List createChildrenBlock(String parent) {
-    List block = new ArrayList();
+    List block = new ArrayList<>();
     for (String child : klm) {
       block
           .add(new String[] {"child_s", child, "parentchild_s", parent + child});
@@ -103,7 +103,7 @@ public class BJQParserTest extends SolrTestCaseJ4 {
   }
   private static void addGrandChildren(List block) {
-    List grandChildren = new ArrayList(xyz);
+    List grandChildren = new ArrayList<>(xyz);
     // add grandchildren after children
     for (ListIterator iter = block.listIterator(); iter.hasNext();) {
       String[] child = iter.next();
diff --git a/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTestBase.java b/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTestBase.java
index d8c5468878e..b34538ddd5f 100644
--- a/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTestBase.java
+++ b/solr/core/src/test/org/apache/solr/servlet/CacheHeaderTestBase.java
@@ -39,7 +39,7 @@ public abstract class CacheHeaderTestBase extends SolrJettyTestBase {
     HttpSolrServer httpserver = (HttpSolrServer)getSolrServer();
     HttpRequestBase m = null;
-    ArrayList qparams = new ArrayList();
+    ArrayList qparams = new ArrayList<>();
     if(params.length==0) {
       qparams.add(new BasicNameValuePair("q", "solr"));
       qparams.add(new BasicNameValuePair("qt", "standard"));
@@ -66,7 +66,7 @@ public abstract class CacheHeaderTestBase extends SolrJettyTestBase {
     HttpSolrServer httpserver = (HttpSolrServer)getSolrServer();
     HttpRequestBase m = null;
-    ArrayList qparams = new ArrayList();
+    ArrayList qparams = new ArrayList<>();
     for(int i=0;i args = new HashMap();
+    Map args = new HashMap<>();
     args.put( CommonParams.STREAM_BODY, new String[] {body1} );
     // Make sure it got a single stream in and out ok
-    List streams = new ArrayList();
+    List streams = new ArrayList<>();
     SolrQueryRequest req = parser.buildRequestFrom( core, new MultiMapSolrParams( args ), streams );
     assertEquals( 1, streams.size() );
     assertEquals( body1, IOUtils.toString( streams.get(0).getReader() ) );
     req.close();
     // Now add three and make sure they come out ok
-    streams = new ArrayList();
+    streams = new ArrayList<>();
     args.put( CommonParams.STREAM_BODY, new String[] {body1,body2,body3} );
     req = parser.buildRequestFrom( core, new MultiMapSolrParams( args ), streams );
     assertEquals( 3, streams.size() );
-    ArrayList input = new ArrayList();
-    ArrayList output = new ArrayList();
+    ArrayList input = new ArrayList<>();
+    ArrayList output = new ArrayList<>();
     input.add( body1 );
     input.add( body2 );
     input.add( body3 );
@@ -107,7 +107,7 @@ public class SolrRequestParserTest extends SolrTestCaseJ4 {
     // set the contentType and make sure tat gets set
     String ctype = "text/xxx";
-    streams = new ArrayList();
+    streams = new ArrayList<>();
     args.put( CommonParams.STREAM_CONTENTTYPE, new String[] {ctype} );
     req = parser.buildRequestFrom( core, new MultiMapSolrParams( args ), streams );
     for( ContentStream s : streams ) {
@@ -138,11 +138,11 @@ public class SolrRequestParserTest extends SolrTestCaseJ4 {
     SolrCore core = h.getCore();
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put( CommonParams.STREAM_URL, new String[] {url} );
     // Make sure it got a single stream in and out ok
-    List streams = new ArrayList();
+    List streams = new ArrayList<>();
     SolrQueryRequest req = parser.buildRequestFrom( core, new MultiMapSolrParams( args ), streams );
     assertEquals( 1, streams.size() );
     try {
@@ -358,11 +358,11 @@ public class SolrRequestParserTest extends SolrTestCaseJ4 {
     expect(request.getMethod()).andReturn("GET").anyTimes();
     expect(request.getContentType()).andReturn( "application/x-www-form-urlencoded" ).anyTimes();
     expect(request.getQueryString()).andReturn("q=title:solr").anyTimes();
-    Map headers = new HashMap();
+    Map headers = new HashMap<>();
     headers.put("X-Forwarded-For", "10.0.0.1");
-    expect(request.getHeaderNames()).andReturn(new Vector(headers.keySet()).elements()).anyTimes();
+    expect(request.getHeaderNames()).andReturn(new Vector<>(headers.keySet()).elements()).anyTimes();
     for(Map.Entry entry:headers.entrySet()) {
-      Vector v = new Vector();
+      Vector v = new Vector<>();
       v.add(entry.getValue());
       expect(request.getHeaders(entry.getKey())).andReturn(v.elements()).anyTimes();
     }
diff --git a/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java b/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java
index 69d70ab644a..c8ca39ef635 100644
--- a/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java
+++ b/solr/core/src/test/org/apache/solr/spelling/SimpleQueryConverter.java
@@ -40,7 +40,7 @@ class SimpleQueryConverter extends SpellingQueryConverter {
   @Override
   public Collection convert(String origQuery) {
-    Collection result = new HashSet();
+    Collection result = new HashSet<>();
     WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(LuceneTestCase.TEST_VERSION_CURRENT);
     try (TokenStream ts = analyzer.tokenStream("", origQuery)) {
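[Editorial aside, not part of the patch: the biggest readability win, visible in the SpellPossibilityIteratorTest hunks below, comes with nested type arguments, where something like a map of maps no longer has to be spelled out twice. A hedged sketch with hypothetical names:

    import java.util.LinkedHashMap;
    import java.util.Map;

    class DiamondNested {
        // One set of type arguments instead of two.
        Map<String, Map<String, Integer>> suggestions = new LinkedHashMap<>();
    }
]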
diff --git a/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java b/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java
index 95bda284b22..5e71e14d95d 100644
--- a/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/SpellCheckCollatorTest.java
@@ -348,7 +348,7 @@ public class SpellCheckCollatorTest extends SolrTestCaseJ4 {
     spellCheck = (NamedList) values.get("spellcheck");
     suggestions = (NamedList) spellCheck.get("suggestions");
     List expandedCollationList = suggestions.getAll("collation");
-    Set usedcollations = new HashSet();
+    Set usedcollations = new HashSet<>();
     assertTrue(expandedCollationList.size() == 2);
     for (NamedList expandedCollation : expandedCollationList) {
       String multipleCollation = (String) expandedCollation.get("collationQuery");
diff --git a/solr/core/src/test/org/apache/solr/spelling/SpellPossibilityIteratorTest.java b/solr/core/src/test/org/apache/solr/spelling/SpellPossibilityIteratorTest.java
index cca7b76308c..9c621512767 100644
--- a/solr/core/src/test/org/apache/solr/spelling/SpellPossibilityIteratorTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/SpellPossibilityIteratorTest.java
@@ -43,7 +43,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
   public void setUp() throws Exception {
     super.setUp();
-    AYE = new LinkedHashMap();
+    AYE = new LinkedHashMap<>();
     AYE.put("I", 0);
     AYE.put("II", 0);
     AYE.put("III", 0);
@@ -53,7 +53,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
     AYE.put("VII", 0);
     AYE.put("VIII", 0);
-    BEE = new LinkedHashMap();
+    BEE = new LinkedHashMap<>();
     BEE.put("alpha", 0);
     BEE.put("beta", 0);
     BEE.put("gamma", 0);
@@ -64,7 +64,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
     BEE.put("theta", 0);
     BEE.put("iota", 0);
-    AYE_BEE = new LinkedHashMap();
+    AYE_BEE = new LinkedHashMap<>();
     AYE_BEE.put("one-alpha", 0);
     AYE_BEE.put("two-beta", 0);
     AYE_BEE.put("three-gamma", 0);
@@ -76,7 +76,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
     AYE_BEE.put("nine-iota", 0);
-    CEE = new LinkedHashMap();
+    CEE = new LinkedHashMap<>();
     CEE.put("one", 0);
     CEE.put("two", 0);
     CEE.put("three", 0);
@@ -91,7 +91,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
   @Test
   public void testScalability() throws Exception {
-    Map> lotsaSuggestions = new LinkedHashMap>();
+    Map> lotsaSuggestions = new LinkedHashMap<>();
     lotsaSuggestions.put(TOKEN_AYE , AYE);
     lotsaSuggestions.put(TOKEN_BEE , BEE);
     lotsaSuggestions.put(TOKEN_CEE , CEE);
@@ -135,7 +135,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
   @Test
   public void testSpellPossibilityIterator() throws Exception {
-    Map> suggestions = new LinkedHashMap>();
+    Map> suggestions = new LinkedHashMap<>();
     suggestions.put(TOKEN_AYE , AYE);
     suggestions.put(TOKEN_BEE , BEE);
     suggestions.put(TOKEN_CEE , CEE);
@@ -185,7 +185,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
   @Test
   public void testOverlappingTokens() throws Exception {
-    Map> overlappingSuggestions = new LinkedHashMap>();
+    Map> overlappingSuggestions = new LinkedHashMap<>();
     overlappingSuggestions.put(TOKEN_AYE, AYE);
     overlappingSuggestions.put(TOKEN_BEE, BEE);
     overlappingSuggestions.put(TOKEN_AYE_BEE, AYE_BEE);
@@ -194,7 +194,7 @@ public class SpellPossibilityIteratorTest extends SolrTestCaseJ4 {
     PossibilityIterator iter = new PossibilityIterator(overlappingSuggestions, Integer.MAX_VALUE, Integer.MAX_VALUE, true);
     int aCount = 0;
     int abCount = 0;
-    Set dupChecker = new HashSet();
+    Set dupChecker = new HashSet<>();
     while (iter.hasNext()) {
       PossibilityIterator.RankedSpellPossibility rsp = iter.next();
       Token a = null;
diff --git a/solr/core/src/test/org/apache/solr/spelling/SpellingQueryConverterTest.java b/solr/core/src/test/org/apache/solr/spelling/SpellingQueryConverterTest.java
index b76cad428e2..bccdbbcf56b 100644
--- a/solr/core/src/test/org/apache/solr/spelling/SpellingQueryConverterTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/SpellingQueryConverterTest.java
@@ -136,49 +136,49 @@ public class SpellingQueryConverterTest extends LuceneTestCase {
     converter.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
     {
-      List tokens = new ArrayList(converter.convert("aaa bbb ccc"));
+      List tokens = new ArrayList<>(converter.convert("aaa bbb ccc"));
       assertTrue("Should have 3 tokens", tokens != null && tokens.size()==3);
       assertTrue("token 1 should be optional", !hasRequiredFlag(tokens.get(0)) && !hasProhibitedFlag(tokens.get(0)));
       assertTrue("token 2 should be optional", !hasRequiredFlag(tokens.get(1)) && !hasProhibitedFlag(tokens.get(1)));
       assertTrue("token 3 should be optional", !hasRequiredFlag(tokens.get(2)) && !hasProhibitedFlag(tokens.get(2)));
     }
     {
-      List tokens = new ArrayList(converter.convert("+aaa bbb -ccc"));
+      List tokens = new ArrayList<>(converter.convert("+aaa bbb -ccc"));
       assertTrue("Should have 3 tokens", tokens != null && tokens.size()==3);
       assertTrue("token 1 should be required", hasRequiredFlag(tokens.get(0)) && !hasProhibitedFlag(tokens.get(0)));
       assertTrue("token 2 should be optional", !hasRequiredFlag(tokens.get(1)) && !hasProhibitedFlag(tokens.get(1)));
       assertTrue("token 3 should be prohibited", !hasRequiredFlag(tokens.get(2)) && hasProhibitedFlag(tokens.get(2)));
     }
     {
-      List tokens = new ArrayList(converter.convert("aaa AND bbb ccc"));
+      List tokens = new ArrayList<>(converter.convert("aaa AND bbb ccc"));
       assertTrue("Should have 3 tokens", tokens != null && tokens.size()==3);
       assertTrue("token 1 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(0)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 2 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(1)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 3 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(2)) && hasInBooleanFlag(tokens.get(0)));
     }
     {
-      List tokens = new ArrayList(converter.convert("aaa OR bbb OR ccc"));
+      List tokens = new ArrayList<>(converter.convert("aaa OR bbb OR ccc"));
       assertTrue("Should have 3 tokens", tokens != null && tokens.size()==3);
       assertTrue("token 1 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(0)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 2 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(1)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 3 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(2)) && hasInBooleanFlag(tokens.get(0)));
     }
     {
-      List tokens = new ArrayList(converter.convert("aaa AND bbb NOT ccc"));
+      List tokens = new ArrayList<>(converter.convert("aaa AND bbb NOT ccc"));
       assertTrue("Should have 3 tokens", tokens != null && tokens.size()==3);
       assertTrue("token 1 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(0)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 2 precedes n.b.o.", hasNBOFlag(tokens.get(1)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 3 doesn't precede n.b.o.", !hasNBOFlag(tokens.get(2)) && hasInBooleanFlag(tokens.get(0)));
     }
     {
-      List tokens = new ArrayList(converter.convert("aaa NOT bbb AND ccc"));
+      List tokens = new ArrayList<>(converter.convert("aaa NOT bbb AND ccc"));
       assertTrue("Should have 3 tokens", tokens != null && tokens.size()==3);
       assertTrue("token 1 precedes n.b.o.", hasNBOFlag(tokens.get(0)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 2 precedes n.b.o.", hasNBOFlag(tokens.get(1)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 3 doesn't precedes n.b.o.", !hasNBOFlag(tokens.get(2)) && hasInBooleanFlag(tokens.get(0)));
     }
     {
-      List tokens = new ArrayList(converter.convert("aaa AND NOT bbb AND ccc"));
+      List tokens = new ArrayList<>(converter.convert("aaa AND NOT bbb AND ccc"));
       assertTrue("Should have 3 tokens", tokens != null && tokens.size()==3);
       assertTrue("token 1 precedes n.b.o.", hasNBOFlag(tokens.get(0)) && hasInBooleanFlag(tokens.get(0)));
       assertTrue("token 2 precedes n.b.o.", hasNBOFlag(tokens.get(1)) && hasInBooleanFlag(tokens.get(0)));
diff --git a/solr/core/src/test/org/apache/solr/spelling/WordBreakSolrSpellCheckerTest.java b/solr/core/src/test/org/apache/solr/spelling/WordBreakSolrSpellCheckerTest.java
index 8d3e7243f9c..dfa4a6c0bc7 100644
--- a/solr/core/src/test/org/apache/solr/spelling/WordBreakSolrSpellCheckerTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/WordBreakSolrSpellCheckerTest.java
@@ -58,7 +58,7 @@ public class WordBreakSolrSpellCheckerTest extends SolrTestCaseJ4 {
   public void testStandAlone() throws Exception {
     SolrCore core = h.getCore();
     WordBreakSolrSpellChecker checker = new WordBreakSolrSpellChecker();
-    NamedList params = new NamedList();
+    NamedList params = new NamedList<>();
     params.add("field", "lowerfilt");
     params.add(WordBreakSolrSpellChecker.PARAM_BREAK_WORDS, "true");
    params.add(WordBreakSolrSpellChecker.PARAM_COMBINE_WORDS, "true");
diff --git a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
index 23491121d54..34b2dc06f36 100644
--- a/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
+++ b/solr/core/src/test/org/apache/solr/store/hdfs/HdfsDirectoryTest.java
@@ -150,7 +150,7 @@ public class HdfsDirectoryTest extends SolrTestCaseJ4 {
   public void testRandomAccessWrites() throws IOException {
     int i = 0;
     try {
-      Set names = new HashSet();
+      Set names = new HashSet<>();
      for (; i< 10; i++) {
        Directory fsDir = new RAMDirectory();
        String name = getName();
diff --git a/solr/core/src/test/org/apache/solr/update/AddBlockUpdateTest.java b/solr/core/src/test/org/apache/solr/update/AddBlockUpdateTest.java
index f0404453ec3..e9aae01b2f6 100644
--- a/solr/core/src/test/org/apache/solr/update/AddBlockUpdateTest.java
+++ b/solr/core/src/test/org/apache/solr/update/AddBlockUpdateTest.java
@@ -188,7 +188,7 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
   @Test
   public void testBasics() throws Exception {
-    List blocks = new ArrayList(Arrays.asList(
+    List blocks = new ArrayList<>(Arrays.asList(
         block("abcD"),
         block("efgH"),
         merge(block("ijkL"), block("mnoP")),
@@ -268,7 +268,7 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
   public void testSolrJXML() throws IOException {
     UpdateRequest req = new UpdateRequest();
-    List docs = new ArrayList();
+    List docs = new ArrayList<>();
     SolrInputDocument document1 = new SolrInputDocument() {
       {
@@ -276,7 +276,7 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
         addField("id", id);
         addField("parent_s", "X");
-        ArrayList ch1 = new ArrayList(
+        ArrayList ch1 = new ArrayList<>(
             Arrays.asList(new SolrInputDocument() {
               {
                 addField("id", id());
@@ -336,7 +336,7 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
   public void testXML() throws IOException, XMLStreamException {
     UpdateRequest req = new UpdateRequest();
-    List docs = new ArrayList();
+    List docs = new ArrayList<>();
     String xml_doc1 =
@@ -571,7 +571,7 @@ public class AddBlockUpdateTest extends SolrTestCaseJ4 {
   }
   private Collection> callables(List blocks) {
-    final List> rez = new ArrayList>();
+    final List> rez = new ArrayList<>();
     for (Document block : blocks) {
       final String msg = block.asXML();
       if (msg.length() > 0) {
diff --git a/solr/core/src/test/org/apache/solr/update/AutoCommitTest.java b/solr/core/src/test/org/apache/solr/update/AutoCommitTest.java
index fbf35ec5d5f..db2b672385e 100644
--- a/solr/core/src/test/org/apache/solr/update/AutoCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/update/AutoCommitTest.java
@@ -142,7 +142,7 @@ public class AutoCommitTest extends AbstractSolrTestCase {
    */
  public static Collection toContentStreams( final String str, final String contentType )
  {
-    ArrayList streams = new ArrayList();
+    ArrayList streams = new ArrayList<>();
    ContentStreamBase stream = new ContentStreamBase.StringStream( str );
    stream.setContentType( contentType );
    streams.add( stream );
diff --git a/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java b/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java
index 77267cb01d7..5fb1e14c65d 100644
--- a/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java
+++ b/solr/core/src/test/org/apache/solr/update/DirectUpdateHandlerTest.java
@@ -171,7 +171,7 @@ public class DirectUpdateHandlerTest extends SolrTestCaseJ4 {
     ureq.close();
     // search - "B" should not be found.
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put( CommonParams.Q, "id:A OR id:B" );
     args.put( "indent", "true" );
     SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
@@ -217,7 +217,7 @@ public class DirectUpdateHandlerTest extends SolrTestCaseJ4 {
     ureq.close();
     // search - "A","B" should be found.
-    Map args = new HashMap();
+    Map args = new HashMap<>();
     args.put( CommonParams.Q, "id:A OR id:B" );
     args.put( "indent", "true" );
     SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) );
diff --git a/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java b/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java
index 8ca9f3e2b50..d1b00ce37a4 100644
--- a/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java
@@ -346,9 +346,9 @@ public class SoftAutoCommitTest extends AbstractSolrTestCase {
 class MockEventListener implements SolrEventListener {
   // use capacity bound Queues just so we're sure we don't OOM
-  public final BlockingQueue soft = new LinkedBlockingQueue(1000);
-  public final BlockingQueue hard = new LinkedBlockingQueue(1000);
-  public final BlockingQueue searcher = new LinkedBlockingQueue(1000);
+  public final BlockingQueue soft = new LinkedBlockingQueue<>(1000);
+  public final BlockingQueue hard = new LinkedBlockingQueue<>(1000);
+  public final BlockingQueue searcher = new LinkedBlockingQueue<>(1000);
   // if non enpty, then at least one offer failed (queues full)
   private StringBuffer fail = new StringBuffer();
diff --git a/solr/core/src/test/org/apache/solr/update/SolrCmdDistributorTest.java b/solr/core/src/test/org/apache/solr/update/SolrCmdDistributorTest.java
index 05e1f8dc7cf..07ccd71324e 100644
--- a/solr/core/src/test/org/apache/solr/update/SolrCmdDistributorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/SolrCmdDistributorTest.java
@@ -133,7 +133,7 @@ public class SolrCmdDistributorTest extends BaseDistributedSearchTestCase {
     ModifiableSolrParams params = new ModifiableSolrParams();
-    List nodes = new ArrayList();
+    List nodes = new ArrayList<>();
     ZkNodeProps nodeProps = new ZkNodeProps(ZkStateReader.BASE_URL_PROP,
         ((HttpSolrServer) controlClient).getBaseURL(),
@@ -325,7 +325,7 @@ public class SolrCmdDistributorTest extends BaseDistributedSearchTestCase {
     final MockStreamingSolrServers ss = new MockStreamingSolrServers(updateShardHandler);
     SolrCmdDistributor cmdDistrib = new SolrCmdDistributor(ss, 5, 0);
     ss.setExp(Exp.CONNECT_EXCEPTION);
-    ArrayList nodes = new ArrayList();
+    ArrayList nodes = new ArrayList<>();
     final HttpSolrServer solrclient1 = (HttpSolrServer) clients.get(0);
     final AtomicInteger retries = new AtomicInteger();
@@ -359,7 +359,7 @@ public class SolrCmdDistributorTest extends BaseDistributedSearchTestCase {
     final MockStreamingSolrServers ss = new MockStreamingSolrServers(updateShardHandler);
     SolrCmdDistributor cmdDistrib = new SolrCmdDistributor(ss, 5, 0);
     ss.setExp(Exp.CONNECT_EXCEPTION);
-    ArrayList nodes = new ArrayList();
+    ArrayList nodes = new ArrayList<>();
     ZkNodeProps nodeProps = new ZkNodeProps(ZkStateReader.BASE_URL_PROP,
         solrclient.getBaseURL(), ZkStateReader.CORE_NAME_PROP, "");
@@ -405,7 +405,7 @@ public class SolrCmdDistributorTest extends BaseDistributedSearchTestCase {
     final MockStreamingSolrServers ss = new MockStreamingSolrServers(updateShardHandler);
     SolrCmdDistributor cmdDistrib = new SolrCmdDistributor(ss, 5, 0);
     ss.setExp(Exp.SOCKET_EXCEPTION);
-    ArrayList nodes = new ArrayList();
+    ArrayList nodes = new ArrayList<>();
     ZkNodeProps nodeProps = new ZkNodeProps(ZkStateReader.BASE_URL_PROP,
         solrclient.getBaseURL(), ZkStateReader.CORE_NAME_PROP, "");
@@ -453,7 +453,7 @@ public class SolrCmdDistributorTest extends BaseDistributedSearchTestCase {
     long numFoundBefore = solrclient.query(new SolrQuery("*:*")).getResults()
         .getNumFound();
-    ArrayList nodes = new ArrayList();
+    ArrayList nodes = new ArrayList<>();
     ZkNodeProps nodeProps = new ZkNodeProps(ZkStateReader.BASE_URL_PROP,
         "[ff01::114]:33332" + context, ZkStateReader.CORE_NAME_PROP, "");
     RetryNode retryNode = new RetryNode(new ZkCoreNodeProps(nodeProps), null, "collection1", "shard1") {
diff --git a/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java b/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java
index 21ceabaa2df..89e898c8adb 100755
--- a/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java
+++ b/solr/core/src/test/org/apache/solr/update/TestDocBasedVersionConstraints.java
@@ -367,7 +367,7 @@ public class TestDocBasedVersionConstraints extends SolrTestCaseJ4 {
     final int winner = TestUtil.nextInt(random(), 0, numAdds - 1);
     final int winnerVersion = atLeast(100);
     final boolean winnerIsDeleted = (0 == TestUtil.nextInt(random(), 0, 4));
-    List> tasks = new ArrayList>(numAdds);
+    List> tasks = new ArrayList<>(numAdds);
     for (int variant = 0; variant < numAdds; variant++) {
       final boolean iShouldWin = (variant==winner);
       final long version = (iShouldWin ? winnerVersion
diff --git a/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java b/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java
index 9e3fe697b5a..48ac66c9f78 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/FieldMutatingUpdateProcessorTest.java
@@ -415,7 +415,7 @@ public class FieldMutatingUpdateProcessorTest extends UpdateProcessorTestBase {
     // test something that's definitely a SortedSet
     special = new SolrInputField("foo_s");
-    special.setValue(new TreeSet
+    special.setValue(new TreeSet<>
                      (Arrays.asList("ggg", "first", "last", "hhh")), 1.2F);
     d = processAdd("last-value",
@@ -443,7 +443,7 @@ public class FieldMutatingUpdateProcessorTest extends UpdateProcessorTestBase {
     // (ie: get default behavior of Collection using iterator)
     special = new SolrInputField("foo_s");
-    special.setValue(new LinkedHashSet
+    special.setValue(new LinkedHashSet<>
                      (Arrays.asList("first", "ggg", "hhh", "last")), 1.2F);
     d = processAdd("last-value",
diff --git a/solr/core/src/test/org/apache/solr/update/processor/ParsingFieldUpdateProcessorsTest.java b/solr/core/src/test/org/apache/solr/update/processor/ParsingFieldUpdateProcessorsTest.java
index 4be6c21580d..691e8ff620c 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/ParsingFieldUpdateProcessorsTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/ParsingFieldUpdateProcessorsTest.java
@@ -240,7 +240,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNull(schema.getFieldOrNull("not_in_schema"));
     DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC();
-    Map mixed = new HashMap();
+    Map mixed = new HashMap<>();
     String[] dateStrings = { "2020-05-13T18:47", "1989-12-14", "1682-07-22T18:33:00.000Z" };
     for (String dateString : dateStrings) {
       mixed.put(dateTimeFormatter.parseDateTime(dateString).toDate(), dateString);
@@ -339,7 +339,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
   public void testFailedParseMixedInt() throws Exception {
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNull(schema.getFieldOrNull("not_in_schema"));
-    Map mixed = new HashMap();
+    Map mixed = new HashMap<>();
     Float floatVal = 294423.0f;
     mixed.put(85, "85");
     mixed.put(floatVal, floatVal); // Float-typed field value
@@ -422,7 +422,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
   public void testFailedParseMixedLong() throws Exception {
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNull(schema.getFieldOrNull("not_in_schema"));
-    Map mixed = new HashMap();
+    Map mixed = new HashMap<>();
     Float floatVal = 294423.0f;
     mixed.put(85L, "85");
     mixed.put(floatVal, floatVal); // Float-typed field value
@@ -506,7 +506,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
   public void testMixedFloats() throws Exception {
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNotNull(schema.getFieldOrNull("float_tf")); // should match dynamic field "*_tf"
-    Map mixedFloats = new HashMap();
+    Map mixedFloats = new HashMap<>();
     mixedFloats.put(85.0f, "85");
     mixedFloats.put(2894518.0f, "2,894,518");
     mixedFloats.put(2.94423E-9f, 2.94423E-9f); // Float-typed field value
@@ -524,7 +524,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
   public void testFailedParseMixedFloat() throws Exception {
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNull(schema.getFieldOrNull("not_in_schema"));
-    Map mixed = new HashMap();
+    Map mixed = new HashMap<>();
     Long longVal = 294423L;
     mixed.put(85L, "85");
     mixed.put(longVal, longVal); // Float-typed field value
@@ -608,7 +608,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
   public void testFailedParseMixedDouble() throws Exception {
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNull(schema.getFieldOrNull("not_in_schema"));
-    Map mixed = new HashMap();
+    Map mixed = new HashMap<>();
     Long longVal = 294423L;
     mixed.put(85, "85.0");
     mixed.put(longVal, longVal); // Float-typed field value
@@ -710,7 +710,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
   public void testFailedParseMixedBoolean() throws Exception {
     IndexSchema schema = h.getCore().getLatestSchema();
     assertNull(schema.getFieldOrNull("not_in_schema"));
-    Map mixed = new HashMap();
+    Map mixed = new HashMap<>();
     Long longVal = 294423L;
     mixed.put(true, "true");
     mixed.put(longVal, longVal); // Float-typed field value
@@ -739,7 +739,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     SolrInputDocument d = null;
     String chain = "cascading-parsers-no-run-processor";
-    Map booleans = new HashMap();
+    Map booleans = new HashMap<>();
     booleans.put(true, "truE");
     booleans.put(false, "False");
     d = processAdd(chain, doc(f("id", "341"), f(fieldName, booleans.values())));
@@ -750,7 +750,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     assertTrue(booleans.isEmpty());
-    Map ints = new HashMap();
+    Map ints = new HashMap<>();
     ints.put(2, "2");
     ints.put(50928, "50928");
     ints.put(86942008, "86,942,008");
@@ -762,7 +762,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     assertTrue(ints.isEmpty());
-    Map longs = new HashMap();
+    Map longs = new HashMap<>();
     longs.put(2L, "2");
     longs.put(50928L, "50928");
     longs.put(86942008987654L, "86,942,008,987,654");
@@ -789,7 +789,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     */
-    Map doubles = new HashMap();
+    Map doubles = new HashMap<>();
     doubles.put(2.0, "2.");
     doubles.put(509.28, "509.28");
     doubles.put(86942.008, "86,942.008");
@@ -801,7 +801,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC();
-    Map dates = new HashMap();
+    Map dates = new HashMap<>();
     String[] dateStrings = { "2020-05-13T18:47", "1989-12-14", "1682-07-22T18:33:00.000Z" };
     for (String dateString : dateStrings) {
       dates.put(dateTimeFormatter.parseDateTime(dateString).toDate(), dateString);
@@ -814,7 +814,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     assertTrue(dates.isEmpty());
-    Map mixedLongsAndDoubles = new LinkedHashMap(); // preserve order
+    Map mixedLongsAndDoubles = new LinkedHashMap<>(); // preserve order
     mixedLongsAndDoubles.put(85.0, "85");
     mixedLongsAndDoubles.put(2.94423E-9, "2.94423E-9");
     mixedLongsAndDoubles.put(2894518.0, "2,894,518");
@@ -827,7 +827,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     assertTrue(mixedLongsAndDoubles.isEmpty());
-    Set mixed = new HashSet();
+    Set mixed = new HashSet<>();
     mixed.add("true");
     mixed.add("1682-07-22T18:33:00.000Z");
     mixed.add("2,894,518");
@@ -839,7 +839,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
       assertTrue(o instanceof String);
     }
-    Map mixedDoubles = new LinkedHashMap(); // preserve order
+    Map mixedDoubles = new LinkedHashMap<>(); // preserve order
     mixedDoubles.put(85.0, "85");
     mixedDoubles.put(2.94423E-9, 2.94423E-9); // Double-typed field value
     mixedDoubles.put(2894518.0, "2,894,518");
@@ -852,7 +852,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     assertTrue(mixedDoubles.isEmpty());
-    Map mixedInts = new LinkedHashMap(); // preserve order
+    Map mixedInts = new LinkedHashMap<>(); // preserve order
     mixedInts.put(85, "85");
     mixedInts.put(294423, 294423); // Integer-typed field value
     mixedInts.put(-2894518, "-2,894,518");
@@ -865,7 +865,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     assertTrue(mixedInts.isEmpty());
-    Map mixedLongs = new LinkedHashMap(); // preserve order
+    Map mixedLongs = new LinkedHashMap<>(); // preserve order
     mixedLongs.put(85L, "85");
     mixedLongs.put(42944233L, 42944233L); // Long-typed field value
     mixedLongs.put(2894518L, "2,894,518");
@@ -878,7 +878,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     }
     assertTrue(mixedLongs.isEmpty());
-    Map mixedBooleans = new LinkedHashMap(); // preserve order
+    Map mixedBooleans = new LinkedHashMap<>(); // preserve order
     mixedBooleans.put(true, "true");
     mixedBooleans.put(false, false); // Boolean-typed field value
     mixedBooleans.put(false, "false");
@@ -892,7 +892,7 @@ public class ParsingFieldUpdateProcessorsTest extends UpdateProcessorTestBase {
     assertTrue(mixedBooleans.isEmpty());
     dateTimeFormatter = ISODateTimeFormat.dateOptionalTimeParser().withZoneUTC();
-    Map mixedDates = new HashMap();
+    Map mixedDates = new HashMap<>();
     dateStrings = new String[] { "2020-05-13T18:47", "1989-12-14", "1682-07-22T18:33:00.000Z" };
     for (String dateString : dateStrings) {
       mixedDates.put(dateTimeFormatter.parseDateTime(dateString).toDate(), dateString);
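[Editorial aside, not part of the patch: as the hunks above and below show, the rewrite only removes the repeated type arguments; constructor arguments such as initial capacities or source collections are left untouched. A hedged sketch with hypothetical names:

    import java.util.ArrayList;
    import java.util.List;

    class DiamondWithArgs {
        // The sizing argument survives; only the element type is inferred.
        List<String> streams = new ArrayList<>(2);
    }
]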
diff --git a/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java
index ce0d72afbdc..dde06e4145b 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/SignatureUpdateProcessorFactoryTest.java
@@ -253,7 +253,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
   public void testFailNonIndexedSigWithOverwriteDupes() throws Exception {
     SolrCore core = h.getCore();
     SignatureUpdateProcessorFactory f = new SignatureUpdateProcessorFactory();
-    NamedList initArgs = new NamedList();
+    NamedList initArgs = new NamedList<>();
     initArgs.add("overwriteDupes", "true");
     initArgs.add("signatureField", "signatureField_sS");
     f.init(initArgs);
@@ -278,7 +278,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
         .getFactories()[0]);
     factory.setEnabled(true);
-    Map params = new HashMap();
+    Map params = new HashMap<>();
     MultiMapSolrParams mmparams = new MultiMapSolrParams(params);
     params.put(UpdateParams.UPDATE_CHAIN, new String[] {chain});
@@ -307,7 +307,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
     SolrInputDocument docA = new SolrInputDocument();
     SolrInputDocument docB = new SolrInputDocument();
-    UnusualList ints = new UnusualList(3);
+    UnusualList ints = new UnusualList<>(3);
     for (int val : new int[] {42, 66, 34}) {
       docA.addField("ints_is", new Integer(val));
       ints.add(val);
@@ -333,7 +333,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
     }
-    ArrayList streams = new ArrayList(2);
+    ArrayList streams = new ArrayList<>(2);
     streams.add(new BinaryRequestWriter().getContentStream(ureq));
     LocalSolrQueryRequest req = new LocalSolrQueryRequest(h.getCore(), mmparams);
     try {
@@ -368,7 +368,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
   }
   static void addDoc(String doc, String chain) throws Exception {
-    Map params = new HashMap();
+    Map params = new HashMap<>();
     MultiMapSolrParams mmparams = new MultiMapSolrParams(params);
     params.put(UpdateParams.UPDATE_CHAIN, new String[] { chain });
     SolrQueryRequestBase req = new SolrQueryRequestBase(h.getCore(),
@@ -377,7 +377,7 @@ public class SignatureUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
     UpdateRequestHandler handler = new UpdateRequestHandler();
     handler.init(null);
-    ArrayList streams = new ArrayList(2);
+    ArrayList streams = new ArrayList<>(2);
     streams.add(new ContentStreamBase.StringStream(doc));
     req.setContentStreams(streams);
     handler.handleRequestBody(req, new SolrQueryResponse());
diff --git a/solr/core/src/test/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactoryTest.java
index 9dad69cbdfb..7ea4b6b8157 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/StatelessScriptUpdateProcessorFactoryTest.java
@@ -77,7 +77,7 @@ public class StatelessScriptUpdateProcessorFactoryTest extends UpdateProcessorTe
     SolrCore core = h.getCore();
     UpdateRequestProcessorChain chained = core.getUpdateProcessingChain("single-script");
     final StatelessScriptUpdateProcessorFactory factory = ((StatelessScriptUpdateProcessorFactory) chained.getFactories()[0]);
-    final List functionMessages = new ArrayList();
+    final List functionMessages = new ArrayList<>();
     factory.setScriptEngineCustomizer(new ScriptEngineCustomizer() {
       @Override
       public void customize(ScriptEngine engine) {
@@ -122,7 +122,7 @@ public class StatelessScriptUpdateProcessorFactoryTest extends UpdateProcessorTe
     UpdateRequestProcessorChain chained = core.getUpdateProcessingChain(chain);
     final StatelessScriptUpdateProcessorFactory factory = ((StatelessScriptUpdateProcessorFactory) chained.getFactories()[0]);
-    final List functionMessages = new ArrayList();
+    final List functionMessages = new ArrayList<>();
     ScriptEngineCustomizer customizer = new ScriptEngineCustomizer() {
       @Override
       public void customize(ScriptEngine engine) {
diff --git a/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
index 78ac58af31a..b2e5ff0df11 100644
--- a/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
@@ -106,7 +106,7 @@ public class UniqFieldsUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
   }
   private void addDoc(String doc) throws Exception {
-    Map params = new HashMap();
+    Map params = new HashMap<>();
     MultiMapSolrParams mmparams = new MultiMapSolrParams(params);
     params.put(UpdateParams.UPDATE_CHAIN, new String[] { "uniq-fields" });
     SolrQueryRequestBase req = new SolrQueryRequestBase(h.getCore(),
@@ -115,7 +115,7 @@ public class UniqFieldsUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
     UpdateRequestHandler handler = new UpdateRequestHandler();
     handler.init(null);
-    ArrayList streams = new ArrayList(2);
+    ArrayList streams = new ArrayList<>(2);
     streams.add(new ContentStreamBase.StringStream(doc));
     req.setContentStreams(streams);
     handler.handleRequestBody(req, new SolrQueryResponse());
diff --git a/solr/core/src/test/org/apache/solr/util/CircularListTest.java b/solr/core/src/test/org/apache/solr/util/CircularListTest.java
index f4c4b9de47d..526742814d2 100644
--- a/solr/core/src/test/org/apache/solr/util/CircularListTest.java
+++ b/solr/core/src/test/org/apache/solr/util/CircularListTest.java
@@ -30,7 +30,7 @@ public class CircularListTest extends LuceneTestCase {
   @Test
   public void testCircularList() throws IOException {
-    CircularList list = new CircularList(10);
+    CircularList list = new CircularList<>(10);
     for(int i=0;i<10; i++) {
       list.add(new Integer(i));
     }
diff --git a/solr/core/src/test/org/apache/solr/util/DOMUtilTest.java b/solr/core/src/test/org/apache/solr/util/DOMUtilTest.java
index 8b4fc9c5e04..b80f622aa37 100644
--- a/solr/core/src/test/org/apache/solr/util/DOMUtilTest.java
+++ b/solr/core/src/test/org/apache/solr/util/DOMUtilTest.java
@@ -23,7 +23,7 @@ import org.apache.solr.common.util.SimpleOrderedMap;
 public class DOMUtilTest extends DOMUtilTestBase {
   public void testAddToNamedListPrimitiveTypes() throws Exception {
-    NamedList namedList = new SimpleOrderedMap();
+    NamedList namedList = new SimpleOrderedMap<>();
     DOMUtil.addToNamedList( getNode( "STRING", "/str" ), namedList, null );
     assertTypeAndValue( namedList, "String", "STRING" );
     DOMUtil.addToNamedList( getNode( "100", "/int" ), namedList, null );
diff --git a/solr/core/src/test/org/apache/solr/util/DateMathParserTest.java b/solr/core/src/test/org/apache/solr/util/DateMathParserTest.java
index 715055bb600..a17ed662007 100644
--- a/solr/core/src/test/org/apache/solr/util/DateMathParserTest.java
+++ b/solr/core/src/test/org/apache/solr/util/DateMathParserTest.java
@@ -314,7 +314,7 @@ public class DateMathParserTest extends LuceneTestCase {
     DateMathParser p = new DateMathParser(UTC, Locale.ROOT);
p.setNow(parser.parse("2001-07-04T12:08:56.235")); - Map<String,Integer> badCommands = new HashMap<String,Integer>(); + Map<String,Integer> badCommands = new HashMap<>(); badCommands.put("/", 1); badCommands.put("+", 1); badCommands.put("-", 1); diff --git a/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java b/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java index 08dbcc16909..8655bd95c02 100644 --- a/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java +++ b/solr/core/src/test/org/apache/solr/util/SimplePostToolTest.java @@ -177,8 +177,8 @@ public class SimplePostToolTest extends SolrTestCaseJ4 { } static class MockPageFetcher extends PageFetcher { - HashMap<String,String> htmlMap = new HashMap<String,String>(); - HashMap<String,Set<URL>> linkMap = new HashMap<String,Set<URL>>(); + HashMap<String,String> htmlMap = new HashMap<>(); + HashMap<String,Set<URL>> linkMap = new HashMap<>(); public MockPageFetcher() throws IOException { (new SimplePostTool()).super(); @@ -190,18 +190,18 @@ public class SimplePostToolTest extends SolrTestCaseJ4 { htmlMap.put("http://[ff01::114]/page2", ""); htmlMap.put("http://[ff01::114]/disallowed", ""); - Set<URL> s = new HashSet<URL>(); + Set<URL> s = new HashSet<>(); s.add(new URL("http://[ff01::114]/page1")); s.add(new URL("http://[ff01::114]/page2")); linkMap.put("http://[ff01::114]", s); linkMap.put("http://[ff01::114]/index.html", s); - s = new HashSet<URL>(); + s = new HashSet<>(); s.add(new URL("http://[ff01::114]/page1/foo")); linkMap.put("http://[ff01::114]/page1", s); - s = new HashSet<URL>(); + s = new HashSet<>(); s.add(new URL("http://[ff01::114]/page1/foo/bar")); linkMap.put("http://[ff01::114]/page1/foo", s); - s = new HashSet<URL>(); + s = new HashSet<>(); s.add(new URL("http://[ff01::114]/disallowed")); linkMap.put("http://[ff01::114]/page2", s); @@ -237,7 +237,7 @@ public class SimplePostToolTest extends SolrTestCaseJ4 { public Set<URL> getLinksFromWebPage(URL u, InputStream is, String type, URL postUrl) { Set<URL> s = linkMap.get(SimplePostTool.normalizeUrlEnding(u.toString())); if(s == null) - s = new HashSet<URL>(); + s = new HashSet<>(); return s; } } diff --git a/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java b/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java index 7894d341c38..a4d90925673 100644 --- a/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java +++ b/solr/core/src/test/org/apache/solr/util/SolrPluginUtilsTest.java @@ -76,7 +76,7 @@ public class SolrPluginUtilsTest extends SolrTestCaseJ4 { DocList docs = qr.getDocList(); assertEquals("wrong docs size", 3, docs.size()); - Set<String> fields = new HashSet<String>(); + Set<String> fields = new HashSet<>(); fields.add("val_dynamic"); fields.add("dynamic_val"); fields.add("range_facet_l"); // copied from id @@ -149,7 +149,7 @@ public class SolrPluginUtilsTest extends SolrTestCaseJ4 { @Test public void testParseFieldBoosts() throws Exception { - Map<String,Float> e1 = new HashMap<String,Float>(); + Map<String,Float> e1 = new HashMap<>(); e1.put("fieldOne",2.3f); e1.put("fieldTwo",null); e1.put("fieldThree",-0.4f); @@ -165,7 +165,7 @@ public class SolrPluginUtilsTest extends SolrTestCaseJ4 { " fieldTwo fieldThree^-0.4 ", " "})); - Map<String,Float> e2 = new HashMap<String,Float>(); + Map<String,Float> e2 = new HashMap<>(); assertEquals("empty e2", e2, SolrPluginUtils.parseFieldBoosts ("")); assertEquals("spacey e2", e2, SolrPluginUtils.parseFieldBoosts diff --git a/solr/core/src/test/org/apache/solr/util/TestFastWriter.java b/solr/core/src/test/org/apache/solr/util/TestFastWriter.java index 47dd8f79567..b503cbc9a09 100644 --- a/solr/core/src/test/org/apache/solr/util/TestFastWriter.java +++ b/solr/core/src/test/org/apache/solr/util/TestFastWriter.java @@ -28,7 +28,7 @@ import
java.util.Random; class MemWriter extends FastWriter { - public List<char[]> buffers = new LinkedList<char[]>(); + public List<char[]> buffers = new LinkedList<>(); Random r; public MemWriter(char[] tempBuffer, Random r) { diff --git a/solr/core/src/test/org/apache/solr/util/TestNumberUtils.java b/solr/core/src/test/org/apache/solr/util/TestNumberUtils.java index 61e2bfaa152..8e5f3f3ae6f 100644 --- a/solr/core/src/test/org/apache/solr/util/TestNumberUtils.java +++ b/solr/core/src/test/org/apache/solr/util/TestNumberUtils.java @@ -126,7 +126,7 @@ public class TestNumberUtils extends LuceneTestCase { int iter=1000; // INTEGERS - List<Converter> converters = new ArrayList<Converter>(); + List<Converter> converters = new ArrayList<>(); converters.add( new Int2Int() ); converters.add( new SortInt() ); converters.add( new Base10kS() ); diff --git a/solr/core/src/test/org/apache/solr/util/TestUtils.java b/solr/core/src/test/org/apache/solr/util/TestUtils.java index 4c06f0ba422..d057fc9deee 100644 --- a/solr/core/src/test/org/apache/solr/util/TestUtils.java +++ b/solr/core/src/test/org/apache/solr/util/TestUtils.java @@ -74,20 +74,20 @@ public class TestUtils extends LuceneTestCase { public void testNamedLists() { - SimpleOrderedMap<Integer> map = new SimpleOrderedMap<Integer>(); + SimpleOrderedMap<Integer> map = new SimpleOrderedMap<>(); map.add( "test", 10 ); SimpleOrderedMap<Integer> clone = map.clone(); assertEquals( map.toString(), clone.toString() ); assertEquals( new Integer(10), clone.get( "test" ) ); - Map<String,Integer> realMap = new HashMap<String,Integer>(); + Map<String,Integer> realMap = new HashMap<>(); realMap.put( "one", 1 ); realMap.put( "two", 2 ); realMap.put( "three", 3 ); - map = new SimpleOrderedMap<Integer>(); + map = new SimpleOrderedMap<>(); map.addAll( realMap ); assertEquals( 3, map.size() ); - map = new SimpleOrderedMap<Integer>(); + map = new SimpleOrderedMap<>(); map.add( "one", 1 ); map.add( "two", 2 ); map.add( "three", 3 ); @@ -101,7 +101,7 @@ public class TestUtils extends LuceneTestCase { assertEquals( 4, map.indexOf( null, 1 ) ); assertEquals( null, map.get( null, 1 ) ); - map = new SimpleOrderedMap<Integer>(); + map = new SimpleOrderedMap<>(); map.add( "one", 1 ); map.add( "two", 2 ); Iterator<Map.Entry<String,Integer>> iter = map.iterator(); diff --git a/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java b/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java index e4f695f5ed8..727accb6e87 100644 --- a/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java +++ b/solr/core/src/test/org/apache/solr/util/TimeZoneUtilsTest.java @@ -46,7 +46,7 @@ public class TimeZoneUtilsTest extends LuceneTestCase { public void testValidIds() throws Exception { - final Set<String> idsTested = new HashSet<String>(); + final Set<String> idsTested = new HashSet<>(); // brain dead: anything the JVM supports, should work for (String validId : TimeZone.getAvailableIDs()) { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/SolrQuery.java b/solr/solrj/src/java/org/apache/solr/client/solrj/SolrQuery.java index e53dac90dcb..9e0924595a6 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/SolrQuery.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/SolrQuery.java @@ -620,7 +620,7 @@ public class SolrQuery extends ModifiableSolrParams * @since 4.2 */ public SolrQuery setSorts(List<SortClause> value) { - sortClauses = new ArrayList<SortClause>(value); + sortClauses = new ArrayList<>(value); serializeSorts(); return this; } @@ -674,7 +674,7 @@ public class SolrQuery extends ModifiableSolrParams * @since 4.2 */ public SolrQuery addSort(SortClause sortClause) { - if (sortClauses == null) sortClauses = new ArrayList<SortClause>(); + if (sortClauses == null) sortClauses = new
ArrayList<>(); sortClauses.add(sortClause); serializeSorts(); return this; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/SolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/SolrServer.java index dcb0c83835d..9da6e719df1 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/SolrServer.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/SolrServer.java @@ -86,7 +86,7 @@ public abstract class SolrServer implements Serializable */ public UpdateResponse addBeans(Collection beans, int commitWithinMs) throws SolrServerException, IOException { DocumentObjectBinder binder = this.getBinder(); - ArrayList docs = new ArrayList(beans.size()); + ArrayList docs = new ArrayList<>(beans.size()); for (Object bean : beans) { docs.add(binder.toSolrInputDocument(bean)); } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java b/solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java index b772ea04bad..877c1c3792d 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/beans/DocumentObjectBinder.java @@ -34,14 +34,14 @@ import java.nio.ByteBuffer; */ public class DocumentObjectBinder { - private final Map> infocache = new ConcurrentHashMap>(); + private final Map> infocache = new ConcurrentHashMap<>(); public DocumentObjectBinder() { } public List getBeans(Class clazz, SolrDocumentList solrDocList) { List fields = getDocFields(clazz); - List result = new ArrayList(solrDocList.size()); + List result = new ArrayList<>(solrDocList.size()); for (SolrDocument sdoc : solrDocList) { result.add(getBean(clazz, fields, sdoc)); @@ -103,9 +103,9 @@ public class DocumentObjectBinder { } private List collectInfo(Class clazz) { - List fields = new ArrayList(); + List fields = new ArrayList<>(); Class superClazz = clazz; - List members = new ArrayList(); + List members = new ArrayList<>(); while (superClazz != null && superClazz != Object.class) { members.addAll(Arrays.asList(superClazz.getDeclaredFields())); @@ -275,7 +275,7 @@ public class DocumentObjectBinder { Map allValuesMap = null; List allValuesList = null; if (isContainedInMap) { - allValuesMap = new HashMap(); + allValuesMap = new HashMap<>(); } else { allValuesList = new ArrayList(); } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java index 5d2f81e0907..67274c2cd27 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java @@ -46,7 +46,7 @@ public class BinaryRequestWriter extends RequestWriter { && (updateRequest.getDocIterator() == null) ) { return null; } - List l = new ArrayList(); + List l = new ArrayList<>(); l.add(new LazyContentStream(updateRequest)); return l; } else { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java index 5a162203b7f..62c3e37412e 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java @@ -97,7 +97,7 @@ public class CloudSolrServer extends SolrServer { private String idField = "id"; private final Set NON_ROUTABLE_PARAMS; { - NON_ROUTABLE_PARAMS = new HashSet(); + 
NON_ROUTABLE_PARAMS = new HashSet<>(); NON_ROUTABLE_PARAMS.add(UpdateParams.EXPUNGE_DELETES); NON_ROUTABLE_PARAMS.add(UpdateParams.MAX_OPTIMIZE_SEGMENTS); NON_ROUTABLE_PARAMS.add(UpdateParams.COMMIT); @@ -322,7 +322,7 @@ public class CloudSolrServer extends SolrServer { long start = System.nanoTime(); if (parallelUpdates) { - final Map>> responseFutures = new HashMap>>(routes.size()); + final Map>> responseFutures = new HashMap<>(routes.size()); for (final Map.Entry entry : routes.entrySet()) { final String url = entry.getKey(); final LBHttpSolrServer.Req lbRequest = entry.getValue(); @@ -373,7 +373,7 @@ public class CloudSolrServer extends SolrServer { Set paramNames = nonRoutableParams.getParameterNames(); - Set intersection = new HashSet(paramNames); + Set intersection = new HashSet<>(paramNames); intersection.retainAll(NON_ROUTABLE_PARAMS); if (nonRoutableRequest != null || intersection.size() > 0) { @@ -381,7 +381,7 @@ public class CloudSolrServer extends SolrServer { nonRoutableRequest = new UpdateRequest(); } nonRoutableRequest.setParams(nonRoutableParams); - List urlList = new ArrayList(); + List urlList = new ArrayList<>(); urlList.addAll(routes.keySet()); Collections.shuffle(urlList, rand); LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(nonRoutableRequest, urlList); @@ -402,13 +402,13 @@ public class CloudSolrServer extends SolrServer { } private Map> buildUrlMap(DocCollection col) { - Map> urlMap = new HashMap>(); + Map> urlMap = new HashMap<>(); Collection slices = col.getActiveSlices(); Iterator sliceIterator = slices.iterator(); while (sliceIterator.hasNext()) { Slice slice = sliceIterator.next(); String name = slice.getName(); - List urls = new ArrayList(); + List urls = new ArrayList<>(); Replica leader = slice.getLeader(); if (leader == null) { // take unoptimized general path - we cannot find a leader yet @@ -514,14 +514,14 @@ public class CloudSolrServer extends SolrServer { } } sendToLeaders = true; - replicas = new ArrayList(); + replicas = new ArrayList<>(); } SolrParams reqParams = request.getParams(); if (reqParams == null) { reqParams = new ModifiableSolrParams(); } - List theUrlList = new ArrayList(); + List theUrlList = new ArrayList<>(); if (request.getPath().equals("/admin/collections") || request.getPath().equals("/admin/cores")) { Set liveNodes = clusterState.getLiveNodes(); @@ -549,7 +549,7 @@ public class CloudSolrServer extends SolrServer { // Retrieve slices from the cloud state and, for each collection // specified, // add it to the Map of slices. 
- Map slices = new HashMap(); + Map slices = new HashMap<>(); for (String collectionName : collectionsList) { Collection colSlices = clusterState .getActiveSlices(collectionName); @@ -567,8 +567,8 @@ public class CloudSolrServer extends SolrServer { // build a map of unique nodes // TODO: allow filtering by group, role, etc - Map nodes = new HashMap(); - List urlList2 = new ArrayList(); + Map nodes = new HashMap<>(); + List urlList2 = new ArrayList<>(); for (Slice slice : slices.values()) { for (ZkNodeProps nodeProps : slice.getReplicasMap().values()) { ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps); @@ -609,15 +609,15 @@ public class CloudSolrServer extends SolrServer { } if (sendToLeaders) { - theUrlList = new ArrayList(leaderUrlList.size()); + theUrlList = new ArrayList<>(leaderUrlList.size()); theUrlList.addAll(leaderUrlList); } else { - theUrlList = new ArrayList(urlList.size()); + theUrlList = new ArrayList<>(urlList.size()); theUrlList.addAll(urlList); } Collections.shuffle(theUrlList, rand); if (sendToLeaders) { - ArrayList theReplicas = new ArrayList( + ArrayList theReplicas = new ArrayList<>( replicasList.size()); theReplicas.addAll(replicasList); Collections.shuffle(theReplicas, rand); @@ -640,7 +640,7 @@ public class CloudSolrServer extends SolrServer { String collection) { // Extract each comma separated collection name and store in a List. List rawCollectionsList = StrUtils.splitSmart(collection, ",", true); - Set collectionsList = new HashSet(); + Set collectionsList = new HashSet<>(); // validate collections for (String collectionName : rawCollectionsList) { if (!clusterState.getCollections().contains(collectionName)) { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrServer.java index 6e1a65b4cf2..dadc235773e 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrServer.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrServer.java @@ -121,9 +121,9 @@ public class ConcurrentUpdateSolrServer extends SolrServer { HttpClient client, int queueSize, int threadCount, ExecutorService es, boolean streamDeletes) { this.server = new HttpSolrServer(solrServerUrl, client); this.server.setFollowRedirects(false); - queue = new LinkedBlockingQueue(queueSize); + queue = new LinkedBlockingQueue<>(queueSize); this.threadCount = threadCount; - runners = new LinkedList(); + runners = new LinkedList<>(); scheduler = es; this.streamDeletes = streamDeletes; } @@ -364,7 +364,7 @@ public class ConcurrentUpdateSolrServer extends SolrServer { } // RETURN A DUMMY result - NamedList dummy = new NamedList(); + NamedList dummy = new NamedList<>(); dummy.add("NOTE", "the request is processed in a background stream"); return dummy; } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java index 7ba878a7317..e026d9a71b0 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java @@ -269,7 +269,7 @@ public class HttpSolrServer extends SolrServer { } } - LinkedList postParams = new LinkedList(); + LinkedList postParams = new LinkedList<>(); if (streams == null || isMultipart) { HttpPost post = new HttpPost(url + ClientUtils.toQueryString( queryParams, false )); post.setHeader("Content-Charset", 
"UTF-8"); @@ -278,7 +278,7 @@ public class HttpSolrServer extends SolrServer { "application/x-www-form-urlencoded; charset=UTF-8"); } - List parts = new LinkedList(); + List parts = new LinkedList<>(); Iterator iter = wparams.getParameterNamesIterator(); while (iter.hasNext()) { String p = iter.next(); @@ -436,7 +436,7 @@ public class HttpSolrServer extends SolrServer { if (processor == null) { // no processor specified, return raw stream - NamedList rsp = new NamedList(); + NamedList rsp = new NamedList<>(); rsp.add("stream", respBody); // Only case where stream should not be closed shouldClose = false; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java index 3687c97b19e..a1ebefe9ddd 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java @@ -76,7 +76,7 @@ import java.util.*; * @since solr 1.4 */ public class LBHttpSolrServer extends SolrServer { - private static Set RETRY_CODES = new HashSet(4); + private static Set RETRY_CODES = new HashSet<>(4); static { RETRY_CODES.add(404); @@ -87,10 +87,10 @@ public class LBHttpSolrServer extends SolrServer { // keys to the maps are currently of the form "http://localhost:8983/solr" // which should be equivalent to CommonsHttpSolrServer.getBaseURL() - private final Map aliveServers = new LinkedHashMap(); + private final Map aliveServers = new LinkedHashMap<>(); // access to aliveServers should be synchronized on itself - protected final Map zombieServers = new ConcurrentHashMap(); + protected final Map zombieServers = new ConcurrentHashMap<>(); // changes to aliveServers are reflected in this array, no need to synchronize private volatile ServerWrapper[] aliveServerList = new ServerWrapper[0]; @@ -283,7 +283,7 @@ public class LBHttpSolrServer extends SolrServer { Rsp rsp = new Rsp(); Exception ex = null; boolean isUpdate = req.request instanceof IsUpdateRequest; - List skipped = new ArrayList(req.getNumDeadServersToTry()); + List skipped = new ArrayList<>(req.getNumDeadServersToTry()); for (String serverStr : req.getServers()) { serverStr = normalize(serverStr); @@ -505,7 +505,7 @@ public class LBHttpSolrServer extends SolrServer { if (e.getRootCause() instanceof IOException) { ex = e; moveAliveToDead(wrapper); - if (justFailed == null) justFailed = new HashMap(); + if (justFailed == null) justFailed = new HashMap<>(); justFailed.put(wrapper.getKey(), wrapper); } else { throw e; @@ -619,7 +619,7 @@ public class LBHttpSolrServer extends SolrServer { aliveCheckExecutor = Executors.newSingleThreadScheduledExecutor( new SolrjNamedThreadFactory("aliveCheckExecutor")); aliveCheckExecutor.scheduleAtFixedRate( - getAliveCheckRunner(new WeakReference(this)), + getAliveCheckRunner(new WeakReference<>(this)), this.interval, this.interval, TimeUnit.MILLISECONDS); } } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/XMLResponseParser.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/XMLResponseParser.java index 491caaf481b..ad5a54898fa 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/XMLResponseParser.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/XMLResponseParser.java @@ -135,7 +135,7 @@ public class XMLResponseParser extends ResponseParser response = readNamedList( parser ); } else if( name.equals( "solr" ) ) { - return new SimpleOrderedMap(); + return new 
SimpleOrderedMap<>(); } else { throw new Exception( "really needs to be response or result. " + @@ -212,7 +212,7 @@ public class XMLResponseParser extends ResponseParser } StringBuilder builder = new StringBuilder(); - NamedList nl = new SimpleOrderedMap(); + NamedList nl = new SimpleOrderedMap<>(); KnownType type = null; String name = null; @@ -284,7 +284,7 @@ public class XMLResponseParser extends ResponseParser StringBuilder builder = new StringBuilder(); KnownType type = null; - List vals = new ArrayList(); + List vals = new ArrayList<>(); int depth = 0; while( true ) diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java index c9d464d6126..6b02486c956 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java @@ -45,7 +45,7 @@ public class ContentStreamUpdateRequest extends AbstractUpdateRequest { */ public ContentStreamUpdateRequest(String url) { super(METHOD.POST, url); - contentStreams = new ArrayList(); + contentStreams = new ArrayList<>(); } @Override diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java index 1e41ca4fb81..fa841118569 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java @@ -42,7 +42,7 @@ import java.util.concurrent.TimeUnit; */ public class DocumentAnalysisRequest extends SolrRequest { - private List documents = new ArrayList(); + private List documents = new ArrayList<>(); private String query; private boolean showMatch = false; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java index e5821b7ec99..47f340d7ce6 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java @@ -212,7 +212,7 @@ public class FieldAnalysisRequest extends SolrRequest { */ public FieldAnalysisRequest addFieldName(String fieldName) { if (fieldNames == null) { - fieldNames = new LinkedList(); + fieldNames = new LinkedList<>(); } fieldNames.add(fieldName); return this; @@ -249,7 +249,7 @@ public class FieldAnalysisRequest extends SolrRequest { */ public FieldAnalysisRequest addFieldType(String fieldTypeName) { if (fieldTypes == null) { - fieldTypes = new LinkedList(); + fieldTypes = new LinkedList<>(); } fieldTypes.add(fieldTypeName); return this; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java index 052c189f2c0..8cd530529f7 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java @@ -59,7 +59,7 @@ public class LukeRequest extends SolrRequest public void addField( String f ) { if( fields == null ) { - fields = new ArrayList(); + fields = new ArrayList<>(); } fields.add( f ); } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java 
b/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java index 8e901ea84ff..2990653c6ea 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java @@ -44,7 +44,7 @@ public class RequestWriter { if (req instanceof UpdateRequest) { UpdateRequest updateRequest = (UpdateRequest) req; if (isEmpty(updateRequest)) return null; - List l = new ArrayList(); + List l = new ArrayList<>(); l.add(new LazyContentStream(updateRequest)); return l; } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java b/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java index 154d7e090f1..962d24771e4 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java @@ -86,7 +86,7 @@ public class UpdateRequest extends AbstractUpdateRequest { public UpdateRequest add(final SolrInputDocument doc) { if (documents == null) { - documents = new LinkedHashMap>(); + documents = new LinkedHashMap<>(); } documents.put(doc, null); return this; @@ -103,9 +103,9 @@ public class UpdateRequest extends AbstractUpdateRequest { public UpdateRequest add(final SolrInputDocument doc, Integer commitWithin, Boolean overwrite) { if (documents == null) { - documents = new LinkedHashMap>(); + documents = new LinkedHashMap<>(); } - Map params = new HashMap(2); + Map params = new HashMap<>(2); if (commitWithin != null) params.put(COMMIT_WITHIN, commitWithin); if (overwrite != null) params.put(OVERWRITE, overwrite); @@ -116,7 +116,7 @@ public class UpdateRequest extends AbstractUpdateRequest { public UpdateRequest add(final Collection docs) { if (documents == null) { - documents = new LinkedHashMap>(); + documents = new LinkedHashMap<>(); } for (SolrInputDocument doc : docs) { documents.put(doc, null); @@ -126,7 +126,7 @@ public class UpdateRequest extends AbstractUpdateRequest { public UpdateRequest deleteById(String id) { if (deleteById == null) { - deleteById = new LinkedHashMap>(); + deleteById = new LinkedHashMap<>(); } deleteById.put(id, null); return this; @@ -134,7 +134,7 @@ public class UpdateRequest extends AbstractUpdateRequest { public UpdateRequest deleteById(List ids) { if (deleteById == null) { - deleteById = new LinkedHashMap>(); + deleteById = new LinkedHashMap<>(); } for (String id : ids) { @@ -146,9 +146,9 @@ public class UpdateRequest extends AbstractUpdateRequest { public UpdateRequest deleteById(String id, Long version) { if (deleteById == null) { - deleteById = new LinkedHashMap>(); + deleteById = new LinkedHashMap<>(); } - Map params = new HashMap(1); + Map params = new HashMap<>(1); params.put(VER, version); deleteById.put(id, params); return this; @@ -156,7 +156,7 @@ public class UpdateRequest extends AbstractUpdateRequest { public UpdateRequest deleteByQuery(String q) { if (deleteQuery == null) { - deleteQuery = new ArrayList(); + deleteQuery = new ArrayList<>(); } deleteQuery.add(q); return this; @@ -179,7 +179,7 @@ public class UpdateRequest extends AbstractUpdateRequest { return null; } - Map routes = new HashMap(); + Map routes = new HashMap<>(); if (documents != null) { Set>> entries = documents.entrySet(); for (Entry> entry : entries) { @@ -278,7 +278,7 @@ public class UpdateRequest extends AbstractUpdateRequest { } private List>> getDocLists(Map> documents) { - List>> docLists = new ArrayList>>(); + List>> docLists = new 
ArrayList<>(); Map> docList = null; if (this.documents != null) { @@ -297,7 +297,7 @@ public class UpdateRequest extends AbstractUpdateRequest { } if (overwrite != lastOverwrite || commitWithin != lastCommitWithin || docLists.size() == 0) { - docList = new LinkedHashMap>(); + docList = new LinkedHashMap<>(); docLists.add(docList); } docList.put(entry.getKey(), entry.getValue()); @@ -307,7 +307,7 @@ public class UpdateRequest extends AbstractUpdateRequest { } if (docIterator != null) { - docList = new LinkedHashMap>(); + docList = new LinkedHashMap<>(); docLists.add(docList); while (docIterator.hasNext()) { SolrInputDocument doc = docIterator.next(); @@ -404,7 +404,7 @@ public class UpdateRequest extends AbstractUpdateRequest { public List getDocuments() { if (documents == null) return null; - List docs = new ArrayList(documents.size()); + List docs = new ArrayList<>(documents.size()); docs.addAll(documents.keySet()); return docs; } @@ -419,7 +419,7 @@ public class UpdateRequest extends AbstractUpdateRequest { public List getDeleteById() { if (deleteById == null) return null; - List deletes = new ArrayList(deleteById.keySet()); + List deletes = new ArrayList<>(deleteById.keySet()); return deletes; } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/AnalysisResponseBase.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/AnalysisResponseBase.java index 42732a2d330..1e399f381b9 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/AnalysisResponseBase.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/AnalysisResponseBase.java @@ -63,7 +63,7 @@ public class AnalysisResponseBase extends SolrResponseBase { * @return The built analysis phases list. */ protected List buildPhases(NamedList>> phaseNL) { - List phases = new ArrayList(phaseNL.size()); + List phases = new ArrayList<>(phaseNL.size()); for (Map.Entry>> phaseEntry : phaseNL) { AnalysisPhase phase = new AnalysisPhase(phaseEntry.getKey()); List> tokens = phaseEntry.getValue(); @@ -116,7 +116,7 @@ public class AnalysisResponseBase extends SolrResponseBase { public static class AnalysisPhase { private final String className; - private List tokens = new ArrayList(); + private List tokens = new ArrayList<>(); AnalysisPhase(String className) { this.className = className; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java index 3a6b643c77c..8acf2e2c43b 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/CollectionAdminResponse.java @@ -46,7 +46,7 @@ public class CollectionAdminResponse extends SolrResponseBase @SuppressWarnings("unchecked") public Map> getCollectionCoresStatus() { - Map> res = new HashMap>(); + Map> res = new HashMap<>(); NamedList> cols = getCollectionStatus(); if( cols != null ) { for (Map.Entry> e : cols) { @@ -64,7 +64,7 @@ public class CollectionAdminResponse extends SolrResponseBase @SuppressWarnings("unchecked") public Map> getCollectionNodesStatus() { - Map> res = new HashMap>(); + Map> res = new HashMap<>(); NamedList> cols = getCollectionStatus(); if( cols != null ) { for (Map.Entry> e : cols) { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/DocumentAnalysisResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/DocumentAnalysisResponse.java index 
3ec590abc58..2f11d78bddc 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/DocumentAnalysisResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/DocumentAnalysisResponse.java @@ -33,7 +33,7 @@ import java.util.Map; */ public class DocumentAnalysisResponse extends AnalysisResponseBase implements Iterable> { - private final Map documentAnalysisByKey = new HashMap(); + private final Map documentAnalysisByKey = new HashMap<>(); /** * {@inheritDoc} @@ -116,7 +116,7 @@ public class DocumentAnalysisResponse extends AnalysisResponseBase implements It public static class DocumentAnalysis implements Iterable> { private final String documentKey; - private Map fieldAnalysisByFieldName = new HashMap(); + private Map fieldAnalysisByFieldName = new HashMap<>(); private DocumentAnalysis(String documentKey) { this.documentKey = documentKey; @@ -168,7 +168,7 @@ public class DocumentAnalysisResponse extends AnalysisResponseBase implements It private final String fieldName; private List queryPhases; - private Map> indexPhasesByFieldValue = new HashMap>(); + private Map> indexPhasesByFieldValue = new HashMap<>(); private FieldAnalysis(String fieldName) { this.fieldName = fieldName; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/FacetField.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/FacetField.java index 65920619d91..086c9991d70 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/FacetField.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/FacetField.java @@ -125,7 +125,7 @@ import org.apache.solr.client.solrj.util.ClientUtils; public void add( String name, long cnt ) { if( _values == null ) { - _values = new ArrayList( 30 ); + _values = new ArrayList<>( 30 ); } _values.add( new Count( this, name, cnt ) ); } @@ -136,7 +136,7 @@ import org.apache.solr.client.solrj.util.ClientUtils; public void insert( String name, long cnt ) { if( _values == null ) { - _values = new ArrayList( 30 ); + _values = new ArrayList<>( 30 ); } _values.add( 0, new Count( this, name, cnt ) ); } @@ -158,7 +158,7 @@ import org.apache.solr.client.solrj.util.ClientUtils; { FacetField ff = new FacetField( _name ); if( _values != null ) { - ff._values = new ArrayList( _values.size() ); + ff._values = new ArrayList<>( _values.size() ); for( Count c : _values ) { if( c._count < max ) { // !equal to ff._values.add( c ); diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldAnalysisResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldAnalysisResponse.java index d6d2092f483..9c542d7f837 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldAnalysisResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldAnalysisResponse.java @@ -32,8 +32,8 @@ import java.util.Map; */ public class FieldAnalysisResponse extends AnalysisResponseBase { - private Map analysisByFieldTypeName = new HashMap(); - private Map analysisByFieldName = new HashMap(); + private Map analysisByFieldTypeName = new HashMap<>(); + private Map analysisByFieldName = new HashMap<>(); /** * {@inheritDoc} diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldStatsInfo.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldStatsInfo.java index b56938ba543..3c178481bbb 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldStatsInfo.java +++ 
b/solr/solrj/src/java/org/apache/solr/client/solrj/response/FieldStatsInfo.java @@ -85,9 +85,9 @@ public class FieldStatsInfo implements Serializable { else if( "facets".equals( entry.getKey() ) ) { @SuppressWarnings("unchecked") NamedList fields = (NamedList)entry.getValue(); - facets = new HashMap>(); + facets = new HashMap<>(); for( Map.Entry ev : fields ) { - List vals = new ArrayList(); + List vals = new ArrayList<>(); facets.put( ev.getKey(), vals ); @SuppressWarnings("unchecked") NamedList> vnl = (NamedList>) ev.getValue(); diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupCommand.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupCommand.java index 80c7726e0da..c2c81274bb0 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupCommand.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupCommand.java @@ -45,7 +45,7 @@ import java.util.List; public class GroupCommand implements Serializable { private final String _name; - private final List _values = new ArrayList(); + private final List _values = new ArrayList<>(); private final int _matches; private final Integer _ngroups; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupResponse.java index 9ec54622252..210bb5e8341 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/GroupResponse.java @@ -34,7 +34,7 @@ import java.util.List; */ public class GroupResponse implements Serializable { - private final List _values = new ArrayList(); + private final List _values = new ArrayList<>(); /** * Adds a grouping command to the response. 
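Every hunk in this patch applies the same mechanical Java 7 refactoring: wherever a generic class is instantiated and the type arguments are already fixed by the declaration on the left-hand side, the explicit arguments on the constructor call are replaced with the diamond operator (<>) and javac infers them. A minimal self-contained sketch of the before/after pattern (the class and field names here are illustrative, not taken from the patch):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class DiamondExample {
      // Before: the type arguments are spelled out twice, once in the
      // declaration and again in the constructor call.
      Map<String, List<Integer>> verbose = new HashMap<String, List<Integer>>();

      // After: the diamond operator lets the compiler infer
      // <String, List<Integer>> from the declared type; because type
      // arguments are erased, the emitted bytecode is identical.
      Map<String, List<Integer>> concise = new HashMap<>();
    }

Since only constructor-call sites change and every declared type is left alone, callers of the touched classes compile unchanged and the refactoring is behavior-preserving by construction.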
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/LukeResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/LukeResponse.java index 21099804577..5d2f3282da1 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/LukeResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/LukeResponse.java @@ -91,7 +91,7 @@ public class LukeResponse extends SolrResponseBase { String key = entry.getKey(); if ("fields".equals(key) && entry.getValue() != null) { List theFields = (List) entry.getValue(); - fields = new ArrayList(theFields); + fields = new ArrayList<>(theFields); } else if ("tokenized".equals(key) == true) { tokenized = Boolean.parseBoolean(entry.getValue().toString()); } else if ("analyzer".equals(key) == true) { @@ -202,7 +202,7 @@ public class LukeResponse extends SolrResponseBase { flds = (NamedList) schema.get("fields"); } if (flds != null) { - fieldInfo = new HashMap(); + fieldInfo = new HashMap<>(); for (Map.Entry field : flds) { FieldInfo f = new FieldInfo(field.getKey()); f.read((NamedList) field.getValue()); @@ -213,7 +213,7 @@ public class LukeResponse extends SolrResponseBase { if( schema != null ) { NamedList fldTypes = (NamedList) schema.get("types"); if (fldTypes != null) { - fieldTypeInfo = new HashMap(); + fieldTypeInfo = new HashMap<>(); for (Map.Entry fieldType : fldTypes) { FieldTypeInfo ft = new FieldTypeInfo(fieldType.getKey()); ft.read((NamedList) fieldType.getValue()); diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java index 0901147bb9d..6deedebbc38 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/QueryResponse.java @@ -158,7 +158,7 @@ public class QueryResponse extends SolrResponseBase private void extractStatsInfo(NamedList info) { if( info != null ) { - _fieldStatsInfo = new HashMap(); + _fieldStatsInfo = new HashMap<>(); NamedList> ff = (NamedList>) info.get( "stats_fields" ); if( ff != null ) { for( Map.Entry> entry : ff ) { @@ -174,13 +174,13 @@ public class QueryResponse extends SolrResponseBase private void extractDebugInfo( NamedList debug ) { - _debugMap = new LinkedHashMap(); // keep the order + _debugMap = new LinkedHashMap<>(); // keep the order for( Map.Entry info : debug ) { _debugMap.put( info.getKey(), info.getValue() ); } // Parse out interesting bits from the debug info - _explainMap = new HashMap(); + _explainMap = new HashMap<>(); NamedList explain = (NamedList)_debugMap.get( "explain" ); if( explain != null ) { for( Map.Entry info : explain ) { @@ -246,9 +246,9 @@ public class QueryResponse extends SolrResponseBase private void extractHighlightingInfo( NamedList info ) { - _highlighting = new HashMap>>(); + _highlighting = new HashMap<>(); for( Map.Entry doc : info ) { - Map> fieldMap = new HashMap>(); + Map> fieldMap = new HashMap<>(); _highlighting.put( doc.getKey(), fieldMap ); NamedList> fnl = (NamedList>)doc.getValue(); @@ -261,7 +261,7 @@ public class QueryResponse extends SolrResponseBase private void extractFacetInfo( NamedList info ) { // Parse the queries - _facetQuery = new LinkedHashMap(); + _facetQuery = new LinkedHashMap<>(); NamedList fq = (NamedList) info.get( "facet_queries" ); if (fq != null) { for( Map.Entry entry : fq ) { @@ -273,8 +273,8 @@ public class QueryResponse extends SolrResponseBase // TODO?? The list could be or ? 
If always then we can switch to NamedList> ff = (NamedList>) info.get( "facet_fields" ); if( ff != null ) { - _facetFields = new ArrayList( ff.size() ); - _limitingFacets = new ArrayList( ff.size() ); + _facetFields = new ArrayList<>( ff.size() ); + _limitingFacets = new ArrayList<>( ff.size() ); long minsize = _results == null ? Long.MAX_VALUE :_results.getNumFound(); for( Map.Entry> facet : ff ) { @@ -295,7 +295,7 @@ public class QueryResponse extends SolrResponseBase NamedList> df = (NamedList>) info.get("facet_dates"); if (df != null) { // System.out.println(df); - _facetDates = new ArrayList( df.size() ); + _facetDates = new ArrayList<>( df.size() ); for (Map.Entry> facet : df) { // System.out.println("Key: " + facet.getKey() + " Value: " + facet.getValue()); NamedList values = facet.getValue(); @@ -318,7 +318,7 @@ public class QueryResponse extends SolrResponseBase //Parse range facets NamedList> rf = (NamedList>) info.get("facet_ranges"); if (rf != null) { - _facetRanges = new ArrayList( rf.size() ); + _facetRanges = new ArrayList<>( rf.size() ); for (Map.Entry> facet : rf) { NamedList values = facet.getValue(); Object rawGap = values.get("gap"); @@ -358,7 +358,7 @@ public class QueryResponse extends SolrResponseBase //Parse pivot facets NamedList pf = (NamedList) info.get("facet_pivot"); if (pf != null) { - _facetPivot = new NamedList>(); + _facetPivot = new NamedList<>(); for( int i=0; i)pf.getVal(i) ) ); } @@ -367,7 +367,7 @@ public class QueryResponse extends SolrResponseBase protected List readPivots( List list ) { - ArrayList values = new ArrayList( list.size() ); + ArrayList values = new ArrayList<>( list.size() ); for( NamedList nl : list ) { // NOTE, this is cheating, but we know the order they are written in, so no need to check String f = (String)nl.getVal( 0 ); @@ -386,7 +386,7 @@ public class QueryResponse extends SolrResponseBase * Remove the field facet info */ public void removeFacets() { - _facetFields = new ArrayList(); + _facetFields = new ArrayList<>(); } //------------------------------------------------------ diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/RangeFacet.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/RangeFacet.java index 520746e5d52..22708ced563 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/RangeFacet.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/RangeFacet.java @@ -26,7 +26,7 @@ import java.util.List; public abstract class RangeFacet { private final String name; - private final List counts = new ArrayList(); + private final List counts = new ArrayList<>(); private final B start; private final B end; diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java index a38fdb63dcf..69e53a6c831 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/SpellCheckResponse.java @@ -32,8 +32,8 @@ import java.util.Map; public class SpellCheckResponse { private boolean correctlySpelled; private List collations; - private List suggestions = new ArrayList(); - Map suggestionMap = new LinkedHashMap(); + private List suggestions = new ArrayList<>(); + Map suggestionMap = new LinkedHashMap<>(); public SpellCheckResponse(NamedList> spellInfo) { NamedList sugg = spellInfo.get("suggestions"); @@ -49,7 +49,7 @@ public class SpellCheckResponse { //continue; } else 
if ("collation".equals(n)) { List collationInfo = sugg.getAll(n); - collations = new ArrayList(collationInfo.size()); + collations = new ArrayList<>(collationInfo.size()); for (Object o : collationInfo) { if (o instanceof String) { collations.add(new Collation() @@ -138,7 +138,7 @@ public class SpellCheckResponse { private int startOffset; private int endOffset; private int originalFrequency; - private List alternatives = new ArrayList(); + private List alternatives = new ArrayList<>(); private List alternativeFrequencies; public Suggestion(String token, NamedList suggestion) { @@ -161,7 +161,7 @@ public class SpellCheckResponse { // extended results detected @SuppressWarnings("unchecked") List extended = (List)list; - alternativeFrequencies = new ArrayList(); + alternativeFrequencies = new ArrayList<>(); for (NamedList nl : extended) { alternatives.add((String)nl.get("word")); alternativeFrequencies.add((Integer)nl.get("freq")); @@ -221,7 +221,7 @@ public class SpellCheckResponse { public class Collation { private String collationQueryString; - private List misspellingsAndCorrections = new ArrayList(); + private List misspellingsAndCorrections = new ArrayList<>(); private long numberOfHits; public long getNumberOfHits() { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java b/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java index 92f6e55450a..5d8c0b7db48 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/response/TermsResponse.java @@ -27,12 +27,12 @@ import java.util.Map; * Encapsulates responses from TermsComponent */ public class TermsResponse { - private Map> termMap = new HashMap>(); + private Map> termMap = new HashMap<>(); public TermsResponse(NamedList> termsInfo) { for (int i = 0; i < termsInfo.size(); i++) { String fieldName = termsInfo.getName(i); - List itemList = new ArrayList(); + List itemList = new ArrayList<>(); NamedList items = termsInfo.getVal(i); for (int j = 0; j < items.size(); j++) { diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java b/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java index cb7660594d8..a4bcca0aba1 100644 --- a/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java @@ -61,7 +61,7 @@ public class ClientUtils if( str == null ) return null; - ArrayList streams = new ArrayList( 1 ); + ArrayList streams = new ArrayList<>( 1 ); ContentStreamBase ccc = new ContentStreamBase.StringStream( str ); ccc.setContentType( contentType ); streams.add( ccc ); diff --git a/solr/solrj/src/java/org/apache/solr/common/SolrDocument.java b/solr/solrj/src/java/org/apache/solr/common/SolrDocument.java index 50a302c7059..10377791f95 100644 --- a/solr/solrj/src/java/org/apache/solr/common/SolrDocument.java +++ b/solr/solrj/src/java/org/apache/solr/common/SolrDocument.java @@ -46,7 +46,7 @@ public class SolrDocument implements Map, Iterable(); + _fields = new LinkedHashMap<>(); } /** @@ -96,7 +96,7 @@ public class SolrDocument implements Map, Iterable lst = new ArrayList(); + ArrayList lst = new ArrayList<>(); for( Object o : (Iterable)value ) { lst.add( o ); } @@ -122,7 +122,7 @@ public class SolrDocument implements Map, Iterable c = new ArrayList( 3 ); + Collection c = new ArrayList<>( 3 ); for ( Object o : (Collection)value ) { c.add(o); } @@ -138,7 +138,7 @@ public class 
SolrDocument implements Map, Iterable)existing; } else { - vals = new ArrayList( 3 ); + vals = new ArrayList<>( 3 ); vals.add( existing ); } @@ -193,7 +193,7 @@ public class SolrDocument implements Map, Iterable)v; } if( v != null ) { - ArrayList arr = new ArrayList(1); + ArrayList arr = new ArrayList<>(1); arr.add( v ); return arr; } diff --git a/solr/solrj/src/java/org/apache/solr/common/SolrInputDocument.java b/solr/solrj/src/java/org/apache/solr/common/SolrInputDocument.java index 99ec58e99ef..bb3e1068582 100644 --- a/solr/solrj/src/java/org/apache/solr/common/SolrInputDocument.java +++ b/solr/solrj/src/java/org/apache/solr/common/SolrInputDocument.java @@ -42,7 +42,7 @@ public class SolrInputDocument implements Map, Iterable _childDocuments; public SolrInputDocument() { - _fields = new LinkedHashMap(); + _fields = new LinkedHashMap<>(); } public SolrInputDocument(Map fields) { @@ -208,7 +208,7 @@ public class SolrInputDocument implements Map, Iterable(_childDocuments.size()); + clone._childDocuments = new ArrayList<>(_childDocuments.size()); for (SolrInputDocument child : _childDocuments) { clone._childDocuments.add(child.deepCopy()); } @@ -322,7 +322,7 @@ public class SolrInputDocument implements Map, Iterable(); + _childDocuments = new ArrayList<>(); } _childDocuments.add(child); } diff --git a/solr/solrj/src/java/org/apache/solr/common/SolrInputField.java b/solr/solrj/src/java/org/apache/solr/common/SolrInputField.java index f5cb5d1aff0..e68f5f8f226 100644 --- a/solr/solrj/src/java/org/apache/solr/common/SolrInputField.java +++ b/solr/solrj/src/java/org/apache/solr/common/SolrInputField.java @@ -50,7 +50,7 @@ public class SolrInputField implements Iterable, Serializable if( v instanceof Object[] ) { Object[] arr = (Object[])v; - Collection c = new ArrayList( arr.length ); + Collection c = new ArrayList<>( arr.length ); for( Object o : arr ) { c.add( o ); } @@ -69,7 +69,7 @@ public class SolrInputField implements Iterable, Serializable public void addValue(Object v, float b) { if( value == null ) { if ( v instanceof Collection ) { - Collection c = new ArrayList( 3 ); + Collection c = new ArrayList<>( 3 ); for ( Object o : (Collection)v ) { c.add( o ); } @@ -92,7 +92,7 @@ public class SolrInputField implements Iterable, Serializable vals = (Collection)value; } else { - vals = new ArrayList( 3 ); + vals = new ArrayList<>( 3 ); vals.add( value ); value = vals; } @@ -146,7 +146,7 @@ public class SolrInputField implements Iterable, Serializable return (Collection)value; } if( value != null ) { - Collection vals = new ArrayList(1); + Collection vals = new ArrayList<>(1); vals.add( value ); return vals; } @@ -221,7 +221,7 @@ public class SolrInputField implements Iterable, Serializable // We can't clone here, so we rely on simple primitives if (value instanceof Collection) { Collection values = (Collection) value; - Collection cloneValues = new ArrayList(values.size()); + Collection cloneValues = new ArrayList<>(values.size()); cloneValues.addAll(values); clone.value = cloneValues; } else { diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Aliases.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Aliases.java index 28e76bb4047..1d18323c056 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/Aliases.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Aliases.java @@ -30,7 +30,7 @@ public class Aliases { } public Aliases() { - this.aliasMap = new HashMap>(); + this.aliasMap = new HashMap<>(); } public Map getCollectionAliasMap() { diff --git 
a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java index 0c8549d200d..237cfda379b 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java @@ -74,9 +74,9 @@ public class ClusterState implements JSONWriter.Writable { public ClusterState(Integer zkClusterStateVersion, Set liveNodes, Map collectionStates, ZkStateReader stateReader) { this.zkClusterStateVersion = zkClusterStateVersion; - this.liveNodes = new HashSet(liveNodes.size()); + this.liveNodes = new HashSet<>(liveNodes.size()); this.liveNodes.addAll(liveNodes); - this.collectionStates = new LinkedHashMap(collectionStates.size()); + this.collectionStates = new LinkedHashMap<>(collectionStates.size()); this.collectionStates.putAll(collectionStates); this.stateReader = stateReader; @@ -258,7 +258,7 @@ public class ClusterState implements JSONWriter.Writable { return new ClusterState(version, liveNodes, Collections.emptyMap(),stateReader); } Map stateMap = (Map) ZkStateReader.fromJSON(bytes); - Map collections = new LinkedHashMap(stateMap.size()); + Map collections = new LinkedHashMap<>(stateMap.size()); for (Entry entry : stateMap.entrySet()) { String collectionName = entry.getKey(); DocCollection coll = collectionFromObjects(collectionName, (Map)entry.getValue()); @@ -289,7 +289,7 @@ public class ClusterState implements JSONWriter.Writable { props = Collections.emptyMap(); } else { slices = makeSlices(sliceObjs); - props = new HashMap(objs); + props = new HashMap<>(objs); objs.remove(DocCollection.SHARDS); } @@ -310,7 +310,7 @@ public class ClusterState implements JSONWriter.Writable { private static Map makeSlices(Map genericSlices) { if (genericSlices == null) return Collections.emptyMap(); - Map result = new LinkedHashMap(genericSlices.size()); + Map result = new LinkedHashMap<>(genericSlices.size()); for (Map.Entry entry : genericSlices.entrySet()) { String name = entry.getKey(); Object val = entry.getValue(); diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java b/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java index e649ad112a7..d01644ff6aa 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/CompositeIdRouter.java @@ -90,7 +90,7 @@ public class CompositeIdRouter extends HashBasedRouter { Range completeRange = new KeyParser(id).getRange(); - List targetSlices = new ArrayList(1); + List targetSlices = new ArrayList<>(1); for (Slice slice : collection.getActiveSlices()) { Range range = slice.getRange(); if (range != null && range.overlaps(completeRange)) { @@ -102,7 +102,7 @@ public class CompositeIdRouter extends HashBasedRouter { } public List partitionRangeByKey(String key, Range range) { - List result = new ArrayList(3); + List result = new ArrayList<>(3); Range keyRange = keyHashRange(key); if (!keyRange.overlaps(range)) { throw new IllegalArgumentException("Key range does not overlap given range"); @@ -133,7 +133,7 @@ public class CompositeIdRouter extends HashBasedRouter { long rangeSize = (long) max - (long) min; long rangeStep = Math.max(1, rangeSize / partitions); - List ranges = new ArrayList(partitions); + List ranges = new ArrayList<>(partitions); long start = min; long end = start; diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java 
b/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java index 17ac7323ed8..5755d717875 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/DocCollection.java @@ -45,11 +45,11 @@ public class DocCollection extends ZkNodeProps { * @param props The properties of the slice. This is used directly and a copy is not made. */ public DocCollection(String name, Map slices, Map props, DocRouter router) { - super( props==null ? props = new HashMap() : props); + super( props==null ? props = new HashMap<>() : props); this.name = name; this.slices = slices; - this.activeSlices = new HashMap(); + this.activeSlices = new HashMap<>(); Iterator> iter = slices.entrySet().iterator(); @@ -115,7 +115,7 @@ public class DocCollection extends ZkNodeProps { @Override public void write(JSONWriter jsonWriter) { - LinkedHashMap all = new LinkedHashMap(slices.size() + 1); + LinkedHashMap all = new LinkedHashMap<>(slices.size() + 1); all.putAll(propMap); all.put(SHARDS, slices); jsonWriter.write(all); diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java b/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java index c19eebb2c7d..088d272a26c 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/DocRouter.java @@ -63,7 +63,7 @@ public abstract class DocRouter { } public static Map getRouterSpec(ZkNodeProps props){ - Map map = new LinkedHashMap(); + Map map = new LinkedHashMap<>(); for (String s : props.keySet()) { if(s.startsWith("router.")){ map.put(s.substring(7), props.get(s)); @@ -81,7 +81,7 @@ public abstract class DocRouter { // currently just an implementation detail... private final static Map routerMap; static { - routerMap = new HashMap(); + routerMap = new HashMap<>(); PlainIdRouter plain = new PlainIdRouter(); // instead of doing back compat this way, we could always convert the clusterstate on first read to "plain" if it doesn't have any properties. 
routerMap.put(null, plain); // back compat with 4.0 @@ -174,7 +174,7 @@ public abstract class DocRouter { long rangeSize = (long)max - (long)min; long rangeStep = Math.max(1, rangeSize / partitions); - List ranges = new ArrayList(partitions); + List ranges = new ArrayList<>(partitions); long start = min; long end = start; @@ -216,7 +216,7 @@ public abstract class DocRouter { } List shardKeyList = StrUtils.splitSmart(shardKeys, ",", true); - HashSet allSlices = new HashSet(); + HashSet allSlices = new HashSet<>(); for (String shardKey : shardKeyList) { allSlices.addAll( getSearchSlicesSingle(shardKey, params, collection) ); } diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java b/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java index 5f7658f66dd..69f810fecbc 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/RoutingRule.java @@ -37,7 +37,7 @@ public class RoutingRule extends ZkNodeProps { this.routeRangesStr = (String) propMap.get("routeRanges"); String[] rangesArr = this.routeRangesStr.split(","); if (rangesArr != null && rangesArr.length > 0) { - this.routeRanges = new ArrayList(); + this.routeRanges = new ArrayList<>(); for (String r : rangesArr) { routeRanges.add(DocRouter.DEFAULT.fromString(r)); } diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java index 0200ff66362..70abf6e1d26 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java @@ -54,7 +54,7 @@ public class Slice extends ZkNodeProps { * @param props The properties of the slice - a shallow copy will always be made. */ public Slice(String name, Map replicas, Map props) { - super( props==null ? new LinkedHashMap(2) : new LinkedHashMap(props)); + super( props==null ? 
new LinkedHashMap(2) : new LinkedHashMap<>(props)); this.name = name; Object rangeObj = propMap.get(RANGE); @@ -92,7 +92,7 @@ public class Slice extends ZkNodeProps { Map rules = (Map) propMap.get("routingRules"); if (rules != null) { - this.routingRules = new HashMap(); + this.routingRules = new HashMap<>(); for (Map.Entry entry : rules.entrySet()) { Object o = entry.getValue(); if (o instanceof Map) { @@ -112,8 +112,8 @@ public class Slice extends ZkNodeProps { private Map makeReplicas(Map genericReplicas) { - if (genericReplicas == null) return new HashMap(1); - Map result = new LinkedHashMap(genericReplicas.size()); + if (genericReplicas == null) return new HashMap<>(1); + Map result = new LinkedHashMap<>(genericReplicas.size()); for (Map.Entry entry : genericReplicas.entrySet()) { String name = entry.getKey(); Object val = entry.getValue(); @@ -157,7 +157,7 @@ public class Slice extends ZkNodeProps { } public Map getReplicasCopy() { - return new LinkedHashMap(replicas); + return new LinkedHashMap<>(replicas); } public Replica getLeader() { diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZooKeeper.java b/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZooKeeper.java index df352cdd7cf..dd89e1c2c18 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZooKeeper.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/SolrZooKeeper.java @@ -31,7 +31,7 @@ import org.apache.zookeeper.ZooKeeper; // we use this class to expose nasty stuff for tests public class SolrZooKeeper extends ZooKeeper { - final Set spawnedThreads = new CopyOnWriteArraySet(); + final Set spawnedThreads = new CopyOnWriteArraySet<>(); // for test debug //static Map clients = new ConcurrentHashMap(); diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkClientConnectionStrategy.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkClientConnectionStrategy.java index c1465b8e93d..5dec65d105e 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkClientConnectionStrategy.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkClientConnectionStrategy.java @@ -33,8 +33,8 @@ import org.slf4j.LoggerFactory; public abstract class ZkClientConnectionStrategy { private static Logger log = LoggerFactory.getLogger(ZkClientConnectionStrategy.class); - private List disconnectedListeners = new ArrayList(); - private List connectedListeners = new ArrayList(); + private List disconnectedListeners = new ArrayList<>(); + private List connectedListeners = new ArrayList<>(); public abstract void connect(String zkServerAddress, int zkClientTimeout, Watcher watcher, ZkUpdate updater) throws IOException, InterruptedException, TimeoutException; public abstract void reconnect(String serverAddress, int zkClientTimeout, Watcher watcher, ZkUpdate updater) throws IOException, InterruptedException, TimeoutException; diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java index 45f735cd5a4..5ddfa2414b4 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkNodeProps.java @@ -58,7 +58,7 @@ public class ZkNodeProps implements JSONWriter.Writable { if ((keyVals.length & 0x01) != 0) { throw new IllegalArgumentException("arguments should be key,value"); } - Map propMap = new LinkedHashMap(keyVals.length>>1); + Map propMap = new LinkedHashMap<>(keyVals.length>>1); for (int i = 0; i < keyVals.length; i+=2) { 
propMap.put(keyVals[i].toString(), keyVals[i+1]); } @@ -82,7 +82,7 @@ public class ZkNodeProps implements JSONWriter.Writable { /** Returns a shallow writable copy of the properties */ public Map shallowCopy() { - return new LinkedHashMap(propMap); + return new LinkedHashMap<>(propMap); } /** diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java index 12dc700c896..09c06c1ba60 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java @@ -313,7 +313,7 @@ public class ZkStateReader { List liveNodes = zkClient.getChildren( LIVE_NODES_ZKNODE, this, true); log.info("Updating live nodes... ({})", liveNodes.size()); - Set liveNodesSet = new HashSet(); + Set liveNodesSet = new HashSet<>(); liveNodesSet.addAll(liveNodes); ClusterState clusterState = new ClusterState( ZkStateReader.this.clusterState.getZkClusterStateVersion(), @@ -340,7 +340,7 @@ public class ZkStateReader { }, true); - Set liveNodeSet = new HashSet(); + Set liveNodeSet = new HashSet<>(); liveNodeSet.addAll(liveNodes); ClusterState clusterState = ClusterState.load(zkClient, liveNodeSet, ZkStateReader.this); this.clusterState = clusterState; @@ -403,7 +403,7 @@ public class ZkStateReader { synchronized (getUpdateLock()) { List liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE, null, true); - Set liveNodesSet = new HashSet(); + Set liveNodesSet = new HashSet<>(); liveNodesSet.addAll(liveNodes); if (!onlyLiveNodes) { @@ -439,7 +439,7 @@ public class ZkStateReader { try { List liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE, null, true); - Set liveNodesSet = new HashSet(); + Set liveNodesSet = new HashSet<>(); liveNodesSet.addAll(liveNodes); if (!onlyLiveNodes) { @@ -575,7 +575,7 @@ public class ZkStateReader { } Map shardMap = replicas.getReplicasMap(); - List nodes = new ArrayList(shardMap.size()); + List nodes = new ArrayList<>(shardMap.size()); for (Entry entry : shardMap.entrySet()) { ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue()); diff --git a/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java b/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java index c2dd5d3dc19..bea20516e00 100644 --- a/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java +++ b/solr/solrj/src/java/org/apache/solr/common/params/DefaultSolrParams.java @@ -52,7 +52,7 @@ public class DefaultSolrParams extends SolrParams { @Override public Iterator getParameterNamesIterator() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); c.addIterator(defaults.getParameterNamesIterator()); c.addIterator(params.getParameterNamesIterator()); return c; diff --git a/solr/solrj/src/java/org/apache/solr/common/params/ModifiableSolrParams.java b/solr/solrj/src/java/org/apache/solr/common/params/ModifiableSolrParams.java index 05a6c43141a..b84f4aaa117 100644 --- a/solr/solrj/src/java/org/apache/solr/common/params/ModifiableSolrParams.java +++ b/solr/solrj/src/java/org/apache/solr/common/params/ModifiableSolrParams.java @@ -39,7 +39,7 @@ public class ModifiableSolrParams extends SolrParams public ModifiableSolrParams() { // LinkedHashMap so params show up in CGI in the same order as they are entered - vals = new LinkedHashMap(); + vals = new LinkedHashMap<>(); } /** Constructs a new ModifiableSolrParams directly using the provided Map<String,String[]> */ @@ -51,7 
+51,7 @@ public class ModifiableSolrParams extends SolrParams /** Constructs a new ModifiableSolrParams, copying values from an existing SolrParams */ public ModifiableSolrParams(SolrParams params) { - vals = new LinkedHashMap(); + vals = new LinkedHashMap<>(); if( params != null ) { this.add( params ); } diff --git a/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java b/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java index 02501d2259c..9a263961df2 100644 --- a/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java +++ b/solr/solrj/src/java/org/apache/solr/common/params/SolrParams.java @@ -270,7 +270,7 @@ public abstract class SolrParams implements Serializable { /** Create a Map<String,String> from a NamedList given no keys are repeated */ public static Map toMap(NamedList params) { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); for (int i=0; i toMultiMap(NamedList params) { - HashMap map = new HashMap(); + HashMap map = new HashMap<>(); for (int i=0; i map = new HashMap(); + HashMap map = new HashMap<>(); for (int i=0; i names) { - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); for (Iterator it = getParameterNamesIterator(); it.hasNext();) { final String name = it.next(); if (names.contains(name)) { @@ -316,7 +316,7 @@ public abstract class SolrParams implements Serializable { /** Convert this to a NamedList */ public NamedList toNamedList() { - final SimpleOrderedMap result = new SimpleOrderedMap(); + final SimpleOrderedMap result = new SimpleOrderedMap<>(); for(Iterator it=getParameterNamesIterator(); it.hasNext(); ) { final String name = it.next(); diff --git a/solr/solrj/src/java/org/apache/solr/common/util/DateUtil.java b/solr/solrj/src/java/org/apache/solr/common/util/DateUtil.java index e1dc6b1ede3..c77080ae8ef 100644 --- a/solr/solrj/src/java/org/apache/solr/common/util/DateUtil.java +++ b/solr/solrj/src/java/org/apache/solr/common/util/DateUtil.java @@ -71,7 +71,7 @@ public class DateUtil { /** * A suite of default date formats that can be parsed, and thus transformed to the Solr specific format */ - public static final Collection DEFAULT_DATE_FORMATS = new ArrayList(); + public static final Collection DEFAULT_DATE_FORMATS = new ArrayList<>(); static { DEFAULT_DATE_FORMATS.add("yyyy-MM-dd'T'HH:mm:ss'Z'"); diff --git a/solr/solrj/src/java/org/apache/solr/common/util/IteratorChain.java b/solr/solrj/src/java/org/apache/solr/common/util/IteratorChain.java index a4109a517c7..ad384f5586f 100644 --- a/solr/solrj/src/java/org/apache/solr/common/util/IteratorChain.java +++ b/solr/solrj/src/java/org/apache/solr/common/util/IteratorChain.java @@ -27,7 +27,7 @@ import java.util.List; public class IteratorChain implements Iterator { - private final List> iterators = new ArrayList>(); + private final List> iterators = new ArrayList<>(); private Iterator> itit; private Iterator current; diff --git a/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java b/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java index 41e7f54d0c8..338490362a3 100644 --- a/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java +++ b/solr/solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java @@ -119,7 +119,7 @@ public class JavaBinCodec { public SimpleOrderedMap readOrderedMap(DataInputInputStream dis) throws IOException { int sz = readSize(dis); - SimpleOrderedMap nl = new SimpleOrderedMap(); + SimpleOrderedMap nl = new SimpleOrderedMap<>(); for (int i = 0; i < sz; i++) { String name = (String) 
readVal(dis); Object val = readVal(dis); @@ -130,7 +130,7 @@ public class JavaBinCodec { public NamedList readNamedList(DataInputInputStream dis) throws IOException { int sz = readSize(dis); - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); for (int i = 0; i < sz; i++) { String name = (String) readVal(dis); Object val = readVal(dis); @@ -364,7 +364,7 @@ public class JavaBinCodec { public void writeSolrDocumentList(SolrDocumentList docs) throws IOException { writeTag(SOLRDOCLST); - List l = new ArrayList(3); + List l = new ArrayList<>(3); l.add(docs.getNumFound()); l.add(docs.getStart()); l.add(docs.getMaxScore()); @@ -419,7 +419,7 @@ public class JavaBinCodec { public Map readMap(DataInputInputStream dis) throws IOException { int sz = readVInt(dis); - Map m = new LinkedHashMap(); + Map m = new LinkedHashMap<>(); for (int i = 0; i < sz; i++) { Object key = readVal(dis); Object val = readVal(dis); @@ -438,7 +438,7 @@ public class JavaBinCodec { } public List readIterator(DataInputInputStream fis) throws IOException { - ArrayList l = new ArrayList(); + ArrayList l = new ArrayList<>(); while (true) { Object o = readVal(fis); if (o == END_OBJ) break; @@ -472,7 +472,7 @@ public class JavaBinCodec { public List readArray(DataInputInputStream dis) throws IOException { int sz = readSize(dis); - ArrayList l = new ArrayList(sz); + ArrayList l = new ArrayList<>(sz); for (int i = 0; i < sz; i++) { l.add(readVal(dis)); } @@ -778,7 +778,7 @@ public class JavaBinCodec { writeTag(EXTERN_STRING, idx); if (idx == 0) { writeStr(s); - if (stringsMap == null) stringsMap = new HashMap(); + if (stringsMap == null) stringsMap = new HashMap<>(); stringsMap.put(s, ++stringsCount); } @@ -790,7 +790,7 @@ public class JavaBinCodec { return stringsList.get(idx - 1); } else {// idx == 0 means it has a string value String s = (String) readVal(fis); - if (stringsList == null) stringsList = new ArrayList(); + if (stringsList == null) stringsList = new ArrayList<>(); stringsList.add(s); return s; } diff --git a/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java b/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java index 63309c4bef6..de6be33268d 100644 --- a/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java +++ b/solr/solrj/src/java/org/apache/solr/common/util/NamedList.java @@ -60,7 +60,7 @@ public class NamedList implements Cloneable, Serializable, Iterable(); + nvPairs = new ArrayList<>(); } /** @@ -109,7 +109,7 @@ public class NamedList implements Cloneable, Serializable, Iterable nameValueMapToList(Map.Entry[] nameValuePairs) { - List result = new ArrayList(); + List result = new ArrayList<>(); for (Map.Entry ent : nameValuePairs) { result.add(ent.getKey()); result.add(ent.getValue()); @@ -251,7 +251,7 @@ public class NamedList implements Cloneable, Serializable, Iterable getAll(String name) { - List result = new ArrayList(); + List result = new ArrayList<>(); int sz = size(); for (int i = 0; i < sz; i++) { String n = getName(i); @@ -418,9 +418,9 @@ public class NamedList implements Cloneable, Serializable, Iterable clone() { - ArrayList newList = new ArrayList(nvPairs.size()); + ArrayList newList = new ArrayList<>(nvPairs.size()); newList.addAll(nvPairs); - return new NamedList(newList); + return new NamedList<>(newList); } //---------------------------------------------------------------------------- @@ -501,7 +501,7 @@ public class NamedList implements Cloneable, Serializable, Iterable removeAll(String name) { - List result = new ArrayList(); + List result = new 
ArrayList<>(); result = getAll(name); if (result.size() > 0 ) { killAll(name); @@ -575,7 +575,7 @@ public class NamedList implements Cloneable, Serializable, Iterable removeConfigArgs(final String name) throws SolrException { List objects = getAll(name); - List collection = new ArrayList(size() / 2); + List collection = new ArrayList<>(size() / 2); final String err = "init arg '" + name + "' must be a string " + "(ie: 'str'), or an array (ie: 'arr') containing strings; found: "; diff --git a/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java b/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java index 736ad18e992..4608f905f7c 100644 --- a/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java +++ b/solr/solrj/src/java/org/apache/solr/common/util/SimpleOrderedMap.java @@ -60,8 +60,8 @@ public class SimpleOrderedMap extends NamedList { @Override public SimpleOrderedMap clone() { - ArrayList newList = new ArrayList(nvPairs.size()); + ArrayList newList = new ArrayList<>(nvPairs.size()); newList.addAll(nvPairs); - return new SimpleOrderedMap(newList); + return new SimpleOrderedMap<>(newList); } } diff --git a/solr/solrj/src/java/org/apache/solr/common/util/StrUtils.java b/solr/solrj/src/java/org/apache/solr/common/util/StrUtils.java index 16f9cd5dd79..3c4ddfe2a73 100644 --- a/solr/solrj/src/java/org/apache/solr/common/util/StrUtils.java +++ b/solr/solrj/src/java/org/apache/solr/common/util/StrUtils.java @@ -38,7 +38,7 @@ public class StrUtils { * outside strings. */ public static List splitSmart(String s, char separator) { - ArrayList lst = new ArrayList(4); + ArrayList lst = new ArrayList<>(4); int pos=0, start=0, end=s.length(); char inString=0; char ch=0; @@ -85,7 +85,7 @@ public class StrUtils { * @param decode decode backslash escaping */ public static List splitSmart(String s, String separator, boolean decode) { - ArrayList lst = new ArrayList(2); + ArrayList lst = new ArrayList<>(2); StringBuilder sb = new StringBuilder(); int pos=0, end=s.length(); while (pos < end) { @@ -135,7 +135,7 @@ public class StrUtils { if (fileNames == null) return Collections.emptyList(); - List result = new ArrayList(); + List result = new ArrayList<>(); for (String file : fileNames.split("(? 
splitWS(String s, boolean decode) { - ArrayList lst = new ArrayList(2); + ArrayList lst = new ArrayList<>(2); StringBuilder sb = new StringBuilder(); int pos=0, end=s.length(); while (pos < end) { @@ -207,7 +207,7 @@ public class StrUtils { } public static List toLower(List strings) { - ArrayList ret = new ArrayList(strings.size()); + ArrayList ret = new ArrayList<>(strings.size()); for (String str : strings) { ret.add(str.toLowerCase(Locale.ROOT)); } diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/LargeVolumeTestBase.java b/solr/solrj/src/test/org/apache/solr/client/solrj/LargeVolumeTestBase.java index e467bbc4ff5..287f00a2847 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/LargeVolumeTestBase.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/LargeVolumeTestBase.java @@ -87,12 +87,12 @@ public abstract class LargeVolumeTestBase extends SolrJettyTestBase public void run() { try { UpdateResponse resp = null; - List docs = new ArrayList(); + List docs = new ArrayList<>(); for (int i = 0; i < numdocs; i++) { if (i > 0 && i % 200 == 0) { resp = tserver.add(docs); assertEquals(0, resp.getStatus()); - docs = new ArrayList(); + docs = new ArrayList<>(); } if (i > 0 && i % 5000 == 0) { log.info(getName() + " - Committing " + i); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java index 86341d023c9..1c68f18d296 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java @@ -114,7 +114,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase Assert.assertEquals(docID, response.getResults().get(0).getFieldValue("id") ); // Now add a few docs for facet testing... - List docs = new ArrayList(); + List docs = new ArrayList<>(); SolrInputDocument doc2 = new SolrInputDocument(); doc2.addField( "id", "2", 1.0f ); doc2.addField( "inStock", true, 1.0f ); @@ -241,7 +241,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase doc2.addField( "name", "h\uD866\uDF05llo", 1.0f ); doc2.addField( "price", 20 ); - Collection docs = new ArrayList(); + Collection docs = new ArrayList<>(); docs.add( doc1 ); docs.add( doc2 ); @@ -358,7 +358,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase // Empty the database... server.deleteByQuery("*:*");// delete everything! - List docs = new ArrayList(); + List docs = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { // Now add something... 
SolrInputDocument doc = new SolrInputDocument(); @@ -739,7 +739,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase server.commit(); assertNumFound( "*:*", 0 ); // make sure it got in - ArrayList docs = new ArrayList(10); + ArrayList docs = new ArrayList<>(10); for( int i=1; i<=10; i++ ) { SolrInputDocument doc = new SolrInputDocument(); doc.setField( "id", i+"", 1.0f ); @@ -816,7 +816,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase assertNumFound( "*:*", 0 ); // make sure it got in int id = 1; - ArrayList docs = new ArrayList(); + ArrayList docs = new ArrayList<>(); docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", true ) ); docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", false ) ); docs.add( makeTestDoc( "id", id++, "features", "aaa", "cat", "a", "inStock", true ) ); @@ -1120,7 +1120,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase assertEquals(1.0f, resp.getResults().get(0).getFirstValue("price_f")); //update "price" with incorrect version (optimistic locking) - HashMap oper = new HashMap(); //need better api for this??? + HashMap oper = new HashMap<>(); //need better api for this??? oper.put("set",100); doc = new SolrInputDocument(); @@ -1174,7 +1174,7 @@ abstract public class SolrExampleTests extends SolrExampleTestsBase QueryResponse response = solrServer.query(new SolrQuery("id:123")); assertEquals("Failed to add doc to cloud server", 1, response.getResults().getNumFound()); - Map> operation = new HashMap>(); + Map> operation = new HashMap<>(); operation.put("set", Arrays.asList("first", "second", "third")); doc.addField("multi_ss", operation); solrServer.add(doc); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java index 72947d4add0..5e568193c88 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTestsBase.java @@ -202,7 +202,7 @@ abstract public class SolrExampleTestsBase extends SolrJettyTestBase { assertNumFound("*:*", 3); // make sure it got in // should be able to handle multiple delete commands in a single go - List ids = new ArrayList(); + List ids = new ArrayList<>(); for (SolrInputDocument d : doc) { ids.add(d.getFieldValue("id").toString()); } diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java index 6279df945b3..afe712d641c 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/TestLBHttpSolrServer.java @@ -99,7 +99,7 @@ public class TestLBHttpSolrServer extends SolrTestCaseJ4 { } private void addDocs(SolrInstance solrInstance) throws IOException, SolrServerException { - List docs = new ArrayList(); + List docs = new ArrayList<>(); for (int i = 0; i < 10; i++) { SolrInputDocument doc = new SolrInputDocument(); doc.addField("id", i); @@ -135,7 +135,7 @@ public class TestLBHttpSolrServer extends SolrTestCaseJ4 { LBHttpSolrServer lbHttpSolrServer = new LBHttpSolrServer(httpClient, s); lbHttpSolrServer.setAliveCheckInterval(500); SolrQuery solrQuery = new SolrQuery("*:*"); - Set names = new HashSet(); + Set names = new HashSet<>(); QueryResponse resp = null; for (String value : s) { resp = lbHttpSolrServer.query(solrQuery); diff --git 
a/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java b/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java index 0fa5291fdf6..5c830165b6a 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/beans/TestDocumentObjectBinder.java @@ -111,11 +111,11 @@ public class TestDocumentObjectBinder extends LuceneTestCase { item.features = Arrays.asList(item.categories); List supA = Arrays.asList("supA1", "supA2", "supA3"); List supB = Arrays.asList("supB1", "supB2", "supB3"); - item.supplier = new HashMap>(); + item.supplier = new HashMap<>(); item.supplier.put("supplier_supA", supA); item.supplier.put("supplier_supB", supB); - item.supplier_simple = new HashMap(); + item.supplier_simple = new HashMap<>(); item.supplier_simple.put("sup_simple_supA", "supA_val"); item.supplier_simple.put("sup_simple_supB", "supB_val"); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java index 35abddeefba..fa9521c185d 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/SolrExampleStreamingTest.java @@ -78,7 +78,7 @@ public class SolrExampleStreamingTest extends SolrExampleTests { public void testWaitOptions() throws Exception { // SOLR-3903 - final List failures = new ArrayList(); + final List failures = new ArrayList<>(); ConcurrentUpdateSolrServer s = new ConcurrentUpdateSolrServer (jetty.getBaseUrl().toString() + "/collection1", 2, 2) { @Override diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestEmbeddedSolrServer.java b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestEmbeddedSolrServer.java index 4baddaea3f3..5d17d7826dd 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestEmbeddedSolrServer.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/embedded/TestEmbeddedSolrServer.java @@ -60,7 +60,7 @@ public class TestEmbeddedSolrServer extends AbstractEmbeddedSolrServerTestCase { EmbeddedSolrServer solrServer = (EmbeddedSolrServer)getSolrCore0(); Assert.assertEquals(3, cores.getCores().size()); - List solrCores = new ArrayList(); + List solrCores = new ArrayList<>(); for (SolrCore solrCore : cores.getCores()) { Assert.assertEquals(false, solrCore.isClosed()); solrCores.add(solrCore); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java index 5e801060919..7b33b705fc3 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/BasicHttpSolrServerTest.java @@ -99,7 +99,7 @@ public class BasicHttpSolrServerTest extends SolrJettyTestBase { private void setHeaders(HttpServletRequest req) { Enumeration headerNames = req.getHeaderNames(); - headers = new HashMap(); + headers = new HashMap<>(); while (headerNames.hasMoreElements()) { final String name = headerNames.nextElement(); headers.put(name, req.getHeader(name)); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java index d03ed948faa..5a126d36e5f 
100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/request/TestUpdateRequestCodec.java @@ -73,7 +73,7 @@ public class TestUpdateRequestCodec extends LuceneTestCase { updateRequest.add(doc); doc = new SolrInputDocument(); - Collection foobar = new HashSet(); + Collection foobar = new HashSet<>(); foobar.add("baz1"); foobar.add("baz2"); doc.addField("foobar",foobar); @@ -85,7 +85,7 @@ public class TestUpdateRequestCodec extends LuceneTestCase { JavaBinUpdateRequestCodec codec = new JavaBinUpdateRequestCodec(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); codec.marshal(updateRequest, baos); - final List docs = new ArrayList(); + final List docs = new ArrayList<>(); JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = new JavaBinUpdateRequestCodec.StreamingUpdateHandler() { @Override public void update(SolrInputDocument document, UpdateRequest req, Integer commitWithin, Boolean overwrite) { @@ -114,7 +114,7 @@ public class TestUpdateRequestCodec extends LuceneTestCase { @Test public void testIteratable() throws IOException { - final List values = new ArrayList(); + final List values = new ArrayList<>(); values.add("iterItem1"); values.add("iterItem2"); @@ -136,7 +136,7 @@ public class TestUpdateRequestCodec extends LuceneTestCase { JavaBinUpdateRequestCodec codec = new JavaBinUpdateRequestCodec(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); codec.marshal(updateRequest, baos); - final List docs = new ArrayList(); + final List docs = new ArrayList<>(); JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = new JavaBinUpdateRequestCodec.StreamingUpdateHandler() { @Override public void update(SolrInputDocument document, UpdateRequest req, Integer commitWithin, Boolean overwrite) { @@ -191,7 +191,7 @@ public class TestUpdateRequestCodec extends LuceneTestCase { updateRequest.add(doc); doc = new SolrInputDocument(); - Collection foobar = new HashSet(); + Collection foobar = new HashSet<>(); foobar.add("baz1"); foobar.add("baz2"); doc.addField("foobar",foobar); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/response/AnlysisResponseBaseTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/response/AnlysisResponseBaseTest.java index d2a7948039d..62bd947c2a2 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/response/AnlysisResponseBaseTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/response/AnlysisResponseBaseTest.java @@ -102,7 +102,7 @@ public class AnlysisResponseBaseTest extends LuceneTestCase { //================================================ Helper Methods ================================================== private List buildFakeTokenInfoList(int numberOfTokens) { - List list = new ArrayList(numberOfTokens); + List list = new ArrayList<>(numberOfTokens); for (int i = 0; i < numberOfTokens; i++) { list.add(new NamedList()); } diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/response/DocumentAnalysisResponseTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/response/DocumentAnalysisResponseTest.java index 4003230b76a..f07a14031db 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/response/DocumentAnalysisResponseTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/response/DocumentAnalysisResponseTest.java @@ -40,7 +40,7 @@ public class DocumentAnalysisResponseTest extends LuceneTestCase { // the parsing of the analysis phases is already tested in the 
AnalysisResponseBaseTest. So we can just fake // the phases list here and use it. - final List phases = new ArrayList(1); + final List phases = new ArrayList<>(1); AnalysisResponseBase.AnalysisPhase expectedPhase = new AnalysisResponseBase.AnalysisPhase("Tokenizer"); phases.add(expectedPhase); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/response/FieldAnalysisResponseTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/response/FieldAnalysisResponseTest.java index 7611ff80a41..59a154bcef0 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/response/FieldAnalysisResponseTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/response/FieldAnalysisResponseTest.java @@ -42,7 +42,7 @@ public class FieldAnalysisResponseTest extends LuceneTestCase { // the parsing of the analysis phases is already tested in the AnalysisResponseBaseTest. So we can just fake // the phases list here and use it. - final List phases = new ArrayList(1); + final List phases = new ArrayList<>(1); AnalysisResponseBase.AnalysisPhase expectedPhase = new AnalysisResponseBase.AnalysisPhase("Tokenizer"); phases.add(expectedPhase); diff --git a/solr/solrj/src/test/org/apache/solr/common/SolrDocumentTest.java b/solr/solrj/src/test/org/apache/solr/common/SolrDocumentTest.java index a67c603c835..52fc832f55d 100644 --- a/solr/solrj/src/test/org/apache/solr/common/SolrDocumentTest.java +++ b/solr/solrj/src/test/org/apache/solr/common/SolrDocumentTest.java @@ -55,7 +55,7 @@ public class SolrDocumentTest extends LuceneTestCase assertNull( doc.getFieldValue( "xxxxx" ) ); assertNull( doc.getFieldValues( "xxxxx" ) ); - List keys = new ArrayList(); + List keys = new ArrayList<>(); for( String s : doc.getFieldNames() ) { keys.add( s ); } @@ -105,7 +105,7 @@ public class SolrDocumentTest extends LuceneTestCase public void testAddCollections() { - final List c0 = new ArrayList(); + final List c0 = new ArrayList<>(); c0.add( "aaa" ); c0.add( "aaa" ); c0.add( "aaa" ); @@ -170,7 +170,7 @@ public class SolrDocumentTest extends LuceneTestCase // set field using a collection is documented to be backed by // that collection, so changes should affect it. 
- Collection tmp = new ArrayList(3); + Collection tmp = new ArrayList<>(3); tmp.add("one"); doc.setField( "collection_backed", tmp ); assertEquals("collection not the same", diff --git a/solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java b/solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java index 5aa3bd756af..f3eb1417d06 100644 --- a/solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java +++ b/solr/solrj/src/test/org/apache/solr/common/params/SolrParamTest.java @@ -28,7 +28,7 @@ import org.apache.solr.common.SolrException; public class SolrParamTest extends LuceneTestCase { public void testGetParams() { - Map pmap = new HashMap(); + Map pmap = new HashMap<>(); pmap.put( "str" , "string" ); pmap.put( "bool" , "true" ); pmap.put( "true-0" , "true" ); @@ -161,7 +161,7 @@ public class SolrParamTest extends LuceneTestCase required.getInt( "f.bad.nnnn", pint ) ); // Check default SolrParams - Map dmap = new HashMap(); + Map dmap = new HashMap<>(); // these are not defined in params dmap.put( "dstr" , "default" ); dmap.put( "dint" , "123" ); diff --git a/solr/solrj/src/test/org/apache/solr/common/util/IteratorChainTest.java b/solr/solrj/src/test/org/apache/solr/common/util/IteratorChainTest.java index c2ce6e50d55..99d73405926 100644 --- a/solr/solrj/src/test/org/apache/solr/common/util/IteratorChainTest.java +++ b/solr/solrj/src/test/org/apache/solr/common/util/IteratorChainTest.java @@ -27,7 +27,7 @@ import org.apache.solr.common.util.IteratorChain; public class IteratorChainTest extends LuceneTestCase { private Iterator makeIterator(String marker,int howMany) { - final List c = new ArrayList(); + final List c = new ArrayList<>(); for(int i = 1; i <= howMany; i++) { c.add(marker + i); } @@ -35,13 +35,13 @@ public class IteratorChainTest extends LuceneTestCase { } public void testNoIterator() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); assertFalse("Empty IteratorChain.hastNext() is false",c.hasNext()); assertEquals("",getString(c)); } public void testCallNextTooEarly() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); c.addIterator(makeIterator("a",3)); try { c.next(); @@ -52,7 +52,7 @@ public class IteratorChainTest extends LuceneTestCase { } public void testCallAddTooLate() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); c.hasNext(); try { c.addIterator(makeIterator("a",3)); @@ -63,7 +63,7 @@ public class IteratorChainTest extends LuceneTestCase { } public void testRemove() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); try { c.remove(); fail("Calling remove should throw UnsupportedOperationException"); @@ -73,20 +73,20 @@ public class IteratorChainTest extends LuceneTestCase { } public void testOneIterator() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); c.addIterator(makeIterator("a",3)); assertEquals("a1a2a3",getString(c)); } public void testTwoIterators() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); c.addIterator(makeIterator("a",3)); c.addIterator(makeIterator("b",2)); assertEquals("a1a2a3b1b2",getString(c)); } public void testEmptyIteratorsInTheMiddle() { - final IteratorChain c = new IteratorChain(); + final IteratorChain c = new IteratorChain<>(); c.addIterator(makeIterator("a",3)); c.addIterator(makeIterator("b",0)); 
c.addIterator(makeIterator("c",1)); diff --git a/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java b/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java index f7e869c6b70..7bf3f4d09ef 100644 --- a/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java +++ b/solr/solrj/src/test/org/apache/solr/common/util/NamedListTest.java @@ -25,7 +25,7 @@ import org.apache.solr.common.SolrException; public class NamedListTest extends LuceneTestCase { public void testRemove() { - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); nl.add("key1", "value1"); nl.add("key2", "value2"); assertEquals(2, nl.size()); @@ -39,7 +39,7 @@ public class NamedListTest extends LuceneTestCase { } public void testRemoveAll() { - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); nl.add("key1", "value1-1"); nl.add("key2", "value2-1"); nl.add("key1", "value1-2"); @@ -64,7 +64,7 @@ public class NamedListTest extends LuceneTestCase { } public void testRemoveArgs() { - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); nl.add("key1", "value1-1"); nl.add("key2", "value2-1"); nl.add("key1", "value1-2"); @@ -114,23 +114,23 @@ public class NamedListTest extends LuceneTestCase { // - key3c // this is a varied NL structure. - NamedList nl2b = new NamedList(); + NamedList nl2b = new NamedList<>(); nl2b.add("key2b1", "value2b1"); nl2b.add("key2b2", "value2b2"); - NamedList nl3a = new NamedList(); + NamedList nl3a = new NamedList<>(); nl3a.add("key3a1", "value3a1"); nl3a.add("key3a2", "value3a2"); nl3a.add("key3a3", "value3a3"); - NamedList nl2 = new NamedList(); + NamedList nl2 = new NamedList<>(); nl2.add("key2a", "value2a"); nl2.add("key2b", nl2b); nl2.add("k2int1", (int) 5); - NamedList nl3 = new NamedList(); + NamedList nl3 = new NamedList<>(); nl3.add("key3a", nl3a); nl3.add("key3b", "value3b"); nl3.add("key3c", "value3c"); nl3.add("key3c", "value3c2"); - NamedList nl = new NamedList(); + NamedList nl = new NamedList<>(); nl.add("key1", "value1"); nl.add("key2", nl2); nl.add("key3", nl3); @@ -165,8 +165,8 @@ public class NamedListTest extends LuceneTestCase { // - key1a // - key1b // key2 (null list) - NamedList> enl = new NamedList>(); - NamedList enlkey1 = new NamedList(); + NamedList> enl = new NamedList<>(); + NamedList enlkey1 = new NamedList<>(); NamedList enlkey2 = null; enlkey1.add("key1a", "value1a"); enlkey1.add("key1b", "value1b"); diff --git a/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java b/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java index 312d994482c..ae2a03645db 100644 --- a/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java +++ b/solr/solrj/src/test/org/apache/solr/common/util/TestNamedListCodec.java @@ -128,7 +128,7 @@ public class TestNamedListCodec extends LuceneTestCase { NamedList r = new NamedList(); - Map map = new HashMap(); + Map map = new HashMap<>(); map.put("foo", "bar"); map.put("junk", "funk"); map.put("ham", "burger"); diff --git a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java index 5b96642a90c..11f88831e7b 100644 --- a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java +++ b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java @@ -187,8 +187,8 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { protected boolean fixShardCount = false; 
protected JettySolrRunner controlJetty; - protected List clients = new ArrayList(); - protected List jettys = new ArrayList(); + protected List clients = new ArrayList<>(); + protected List jettys = new ArrayList<>(); protected String context; protected String[] deadServers; @@ -211,7 +211,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { public static int UNORDERED = 8; protected int flags; - protected Map handle = new HashMap(); + protected Map handle = new HashMap<>(); protected String id = "id"; public static Logger log = LoggerFactory.getLogger(BaseDistributedSearchTestCase.class); @@ -328,7 +328,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 { if (sb.length() > 0) sb.append(','); int nDeadServers = r.nextInt(deadServers.length+1); if (nDeadServers > 0) { - List replicas = new ArrayList(Arrays.asList(deadServers)); + List replicas = new ArrayList<>(Arrays.asList(deadServers)); Collections.shuffle(replicas, r); replicas.add(r.nextInt(nDeadServers+1), shard); for (int i=0; i(); + path = new ArrayList<>(); } public CollectionTester(Object val) { this(val, JSONTestUtil.DEFAULT_DELTA); @@ -259,7 +259,7 @@ class CollectionTester { return true; } - private static Set reserved = new HashSet(Arrays.asList("_SKIP_","_MATCH_","_ORDERED_","_UNORDERED_")); + private static Set reserved = new HashSet<>(Arrays.asList("_SKIP_","_MATCH_","_ORDERED_","_UNORDERED_")); boolean matchMap() { Map expectedMap = (Map)expected; @@ -286,7 +286,7 @@ class CollectionTester { } Set keys = match != null ? match : expectedMap.keySet(); - Set visited = new HashSet(); + Set visited = new HashSet<>(); Iterator> iter = ordered ? v.entrySet().iterator() : null; @@ -346,7 +346,7 @@ class CollectionTester { if (v.containsKey(skipStr)) skipped++; } if (numExpected != (v.size() - skipped)) { - HashSet set = new HashSet(v.keySet()); + HashSet set = new HashSet<>(v.keySet()); set.removeAll(expectedMap.keySet()); setErr("unexpected map keys " + set); return false; diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java index 2840a50d7f1..bbd433b11f4 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java @@ -437,7 +437,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { /** Causes an exception matching the regex pattern to not be logged. 
*/ public static void ignoreException(String pattern) { if (SolrException.ignorePatterns == null) - SolrException.ignorePatterns = new HashSet(); + SolrException.ignorePatterns = new HashSet<>(); SolrException.ignorePatterns.add(pattern); } @@ -1374,7 +1374,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { } public Map toObject(IndexSchema schema) { - Map result = new HashMap(); + Map result = new HashMap<>(); for (Fld fld : fields) { SchemaField sf = schema.getField(fld.ftype.fname); if (!sf.multiValued()) { @@ -1419,7 +1419,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { public List createValues() { int nVals = numValues.getInt(); if (nVals <= 0) return null; - List vals = new ArrayList(nVals); + List vals = new ArrayList<>(nVals); for (int i=0; i indexDocs(List descriptor, Map model, int nDocs) throws Exception { if (model == null) { - model = new LinkedHashMap(); + model = new LinkedHashMap<>(); } // commit an average of 10 times for large sets, or 10% of the time for small sets @@ -1499,7 +1499,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { public static Doc createDoc(List descriptor) { Doc doc = new Doc(); - doc.fields = new ArrayList(); + doc.fields = new ArrayList<>(); for (FldType ftype : descriptor) { Fld fld = ftype.createField(); if (fld != null) { @@ -1514,7 +1514,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { public static Comparator createSort(IndexSchema schema, List fieldTypes, String[] out) { StringBuilder sortSpec = new StringBuilder(); int nSorts = random().nextInt(4); - List> comparators = new ArrayList>(); + List> comparators = new ArrayList<>(); for (int i=0; i0) sortSpec.append(','); @@ -1653,7 +1653,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { /** Return a Map from field value to a list of document ids */ public Map> invertField(Map model, String field) { - Map> value_to_id = new HashMap>(); + Map> value_to_id = new HashMap<>(); // invert field for (Comparable key : model.keySet()) { @@ -1663,7 +1663,7 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { for (Comparable val : vals) { List ids = value_to_id.get(val); if (ids == null) { - ids = new ArrayList(2); + ids = new ArrayList<>(2); value_to_id.put(val, ids); } ids.add(key); diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java index 66a3adf468a..4203ff47be2 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractFullDistribZkTestBase.java @@ -119,12 +119,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes protected CloudSolrServer controlClientCloud; // cloud version of the control client protected volatile CloudSolrServer cloudClient; - protected List cloudJettys = new ArrayList(); - protected Map> shardToJetty = new HashMap>(); + protected List cloudJettys = new ArrayList<>(); + protected Map> shardToJetty = new HashMap<>(); private AtomicInteger jettyIntCntr = new AtomicInteger(0); protected ChaosMonkey chaosMonkey; - protected Map shardToLeaderJetty = new HashMap(); + protected Map shardToLeaderJetty = new HashMap<>(); private boolean cloudInit; protected boolean checkCreatedVsState; protected boolean useJettyDataDir = true; @@ -354,8 +354,8 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes * 
be the case */ protected List createJettys(int numJettys, boolean checkCreatedVsState) throws Exception { - List jettys = new ArrayList(); - List clients = new ArrayList(); + List jettys = new ArrayList<>(); + List clients = new ArrayList<>(); StringBuilder sb = new StringBuilder(); for (int i = 1; i <= numJettys; i++) { if (sb.length() > 0) sb.append(','); @@ -531,7 +531,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes ClusterState clusterState = zkStateReader.getClusterState(); DocCollection coll = clusterState.getCollection(DEFAULT_COLLECTION); - List theClients = new ArrayList(); + List theClients = new ArrayList<>(); for (SolrServer client : clients) { // find info for this client in zk nextClient: @@ -570,7 +570,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes if (replica.getStr(ZkStateReader.BASE_URL_PROP).contains(":" + port)) { List list = shardToJetty.get(slice.getName()); if (list == null) { - list = new ArrayList(); + list = new ArrayList<>(); shardToJetty.put(slice.getName(), list); } boolean isLeader = slice.getLeader() == replica; @@ -915,7 +915,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes public QueryResponse queryAndCompareReplicas(SolrParams params, String shard) throws Exception { - ArrayList shardClients = new ArrayList(7); + ArrayList shardClients = new ArrayList<>(7); updateMappingsFromZk(jettys, clients); ZkStateReader zkStateReader = cloudClient.getZkStateReader(); @@ -947,7 +947,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes public void queryAndCompareShards(SolrParams params) throws Exception { updateMappingsFromZk(jettys, clients); - List shards = new ArrayList(shardToJetty.keySet()); + List shards = new ArrayList<>(shardToJetty.keySet()); for (String shard : shards) { queryAndCompareReplicas(params, shard); } @@ -1121,19 +1121,19 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes // System.err.println("######"+bName+ ": " + toStr(b,10)); //System.err.println("###### sizes=" + a.size() + "," + b.size()); boolean legal = true; - Set setA = new HashSet(); + Set setA = new HashSet<>(); for (SolrDocument sdoc : a) { setA.add(sdoc); } - Set setB = new HashSet(); + Set setB = new HashSet<>(); for (SolrDocument sdoc : b) { setB.add(sdoc); } - Set onlyInA = new HashSet(setA); + Set onlyInA = new HashSet<>(setA); onlyInA.removeAll(setB); - Set onlyInB = new HashSet(setB); + Set onlyInB = new HashSet<>(setB); onlyInB.removeAll(setA); if (onlyInA.size() > 0) { @@ -1168,19 +1168,19 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes System.err.println("######"+bName+ ": " + toStr(b,10)); System.err.println("###### sizes=" + a.size() + "," + b.size()); - Set setA = new HashSet(); + Set setA = new HashSet<>(); for (SolrDocument sdoc : a) { setA.add(new HashMap(sdoc)); } - Set setB = new HashSet(); + Set setB = new HashSet<>(); for (SolrDocument sdoc : b) { setB.add(new HashMap(sdoc)); } - Set onlyInA = new HashSet(setA); + Set onlyInA = new HashSet<>(setA); onlyInA.removeAll(setB); - Set onlyInB = new HashSet(setB); + Set onlyInB = new HashSet<>(setB); onlyInB.removeAll(setA); if (onlyInA.size() > 0) { @@ -1590,7 +1590,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes } int clientIndex = random().nextInt(2); - List list = new ArrayList(); + List list = new ArrayList<>(); list.add(numShards); list.add(numReplicas); if 
(collectionInfos != null) { diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java index ec3625ab3d0..d8b7d623bfe 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractZkTestCase.java @@ -92,7 +92,7 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 { zkClient = new SolrZkClient(zkAddress, AbstractZkTestCase.TIMEOUT); - Map props = new HashMap(); + Map props = new HashMap<>(); props.put("configName", "conf1"); final ZkNodeProps zkProps = new ZkNodeProps(props); diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java index b9a3b12324b..487d327f870 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/ChaosMonkey.java @@ -80,7 +80,7 @@ public class ChaosMonkey { private Map shardToLeaderJetty; private volatile long startTime; - private List deadPool = new ArrayList(); + private List deadPool = new ArrayList<>(); private Thread monkeyThread; @@ -305,7 +305,7 @@ public class ChaosMonkey { private String getRandomSlice() { Map slices = zkStateReader.getClusterState().getSlicesMap(collection); - List sliceKeyList = new ArrayList(slices.size()); + List sliceKeyList = new ArrayList<>(slices.size()); sliceKeyList.addAll(slices.keySet()); String sliceName = sliceKeyList.get(LuceneTestCase.random().nextInt(sliceKeyList.size())); return sliceName; diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/StopableIndexingThread.java b/solr/test-framework/src/java/org/apache/solr/cloud/StopableIndexingThread.java index 8446f086849..1b0e97fef83 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/StopableIndexingThread.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/StopableIndexingThread.java @@ -33,9 +33,9 @@ public class StopableIndexingThread extends AbstractFullDistribZkTestBase.Stopab private static String i1 = "a_si"; private volatile boolean stop = false; protected final String id; - protected final List deletes = new ArrayList(); - protected Set addFails = new HashSet(); - protected Set deleteFails = new HashSet(); + protected final List deletes = new ArrayList<>(); + protected Set addFails = new HashSet<>(); + protected Set deleteFails = new HashSet<>(); protected boolean doDeletes; private int numCycles; private SolrServer controlClient; diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java b/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java index 776945633a6..b890c23b9b1 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/ZkTestServer.java @@ -328,7 +328,7 @@ public class ZkTestServer { } public static List parseHostPortList(String hplist) { - ArrayList alist = new ArrayList(); + ArrayList alist = new ArrayList<>(); for (String hp : hplist.split(",")) { int idx = hp.lastIndexOf(':'); String host = hp.substring(0, idx); diff --git a/solr/test-framework/src/java/org/apache/solr/update/processor/BufferingRequestProcessor.java b/solr/test-framework/src/java/org/apache/solr/update/processor/BufferingRequestProcessor.java index a57f25fa92e..658d29bbbce 100644 --- 
a/solr/test-framework/src/java/org/apache/solr/update/processor/BufferingRequestProcessor.java +++ b/solr/test-framework/src/java/org/apache/solr/update/processor/BufferingRequestProcessor.java @@ -28,10 +28,10 @@ import org.apache.solr.update.RollbackUpdateCommand; public class BufferingRequestProcessor extends UpdateRequestProcessor { - public List addCommands = new ArrayList(); - public List deleteCommands = new ArrayList(); - public List commitCommands = new ArrayList(); - public List rollbackCommands = new ArrayList(); + public List addCommands = new ArrayList<>(); + public List deleteCommands = new ArrayList<>(); + public List commitCommands = new ArrayList<>(); + public List rollbackCommands = new ArrayList<>(); public BufferingRequestProcessor(UpdateRequestProcessor next) { super(next); diff --git a/solr/test-framework/src/java/org/apache/solr/util/AbstractSolrTestCase.java b/solr/test-framework/src/java/org/apache/solr/util/AbstractSolrTestCase.java index b73962379c8..b2bb7d3b744 100644 --- a/solr/test-framework/src/java/org/apache/solr/util/AbstractSolrTestCase.java +++ b/solr/test-framework/src/java/org/apache/solr/util/AbstractSolrTestCase.java @@ -67,7 +67,7 @@ public abstract class AbstractSolrTestCase extends SolrTestCaseJ4 { /** Causes an exception matching the regex pattern to not be logged. */ public static void ignoreException(String pattern) { if (SolrException.ignorePatterns == null) - SolrException.ignorePatterns = new HashSet(); + SolrException.ignorePatterns = new HashSet<>(); SolrException.ignorePatterns.add(pattern); } diff --git a/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java index 083c81dc01c..b51fc659bcf 100644 --- a/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java +++ b/solr/test-framework/src/java/org/apache/solr/util/BaseTestHarness.java @@ -33,8 +33,8 @@ import java.io.StringWriter; import java.io.UnsupportedEncodingException; abstract public class BaseTestHarness { - private static final ThreadLocal builderTL = new ThreadLocal(); - private static final ThreadLocal xpathTL = new ThreadLocal(); + private static final ThreadLocal builderTL = new ThreadLocal<>(); + private static final ThreadLocal xpathTL = new ThreadLocal<>(); public static DocumentBuilder getXmlDocumentBuilder() { try { diff --git a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java index 793a7b30c0a..187b578870a 100644 --- a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java +++ b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java @@ -388,7 +388,7 @@ public class TestHarness extends BaseTestHarness { public String qtype = null; public int start = 0; public int limit = 1000; - public Map args = new HashMap(); + public Map args = new HashMap<>(); public LocalRequestFactory() { } /** @@ -423,7 +423,7 @@ public class TestHarness extends BaseTestHarness { } Map.Entry [] entries = new NamedListEntry[q.length / 2]; for (int i = 0; i < q.length; i += 2) { - entries[i/2] = new NamedListEntry(q[i], q[i+1]); + entries[i/2] = new NamedListEntry<>(q[i], q[i+1]); } return new LocalSolrQueryRequest(TestHarness.this.getCore(), new NamedList(entries)); }
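
For reviewers less familiar with the Java 7 diamond operator that the hunks above substitute for repeated constructor type arguments, here is a minimal standalone sketch. It is not part of the patch, and the class and field names are invented for illustration only; it just demonstrates the transformation pattern. The compiler infers the constructor's type arguments from the declared type on the left-hand side, so the right-hand side no longer repeats them:

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.Iterator;
  import java.util.List;
  import java.util.Map;

  public class DiamondExample {
    // Pre-Java-7 style: the type arguments are spelled out twice.
    private final Map<String, List<String>> verbose =
        new HashMap<String, List<String>>();

    // Diamond style: <> tells the compiler to infer <String, List<String>>
    // from the declared type on the left-hand side.
    private final Map<String, List<String>> inferred = new HashMap<>();

    // Inference also covers the constructor shapes that dominate this patch:
    // an initial capacity, a copy constructor, and nested type arguments.
    private final List<String> sized = new ArrayList<>(16);
    private final List<String> copied = new ArrayList<>(sized);
    private final List<Iterator<String>> chain = new ArrayList<>();
  }

One caveat worth knowing when reviewing a blanket conversion like this: under Java 7 the diamond cannot be combined with an anonymous inner class (for example, new Comparator<>() { ... }); that form only became legal in Java 9, so anonymous-class instantiations necessarily keep their explicit type arguments.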