LUCENE-5512: remove redundant typing (diamond operator) in trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1576755 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2014-03-12 14:39:17 +00:00
parent 0e2966b6c0
commit 3b67b17493
1243 changed files with 4143 additions and 4141 deletions
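Every hunk below applies the same mechanical rewrite: a generic instantiation that repeats its type arguments on the right-hand side is replaced by the Java 7 diamond operator (<>), and the compiler infers the arguments from the declared type. A minimal, self-contained sketch of the pattern (class and variable names here are illustrative, not taken from the patch):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondExample {
  public static void main(String[] args) {
    // Before (Java 5/6 style): type arguments repeated on both sides.
    Map<String, List<Integer>> verbose = new HashMap<String, List<Integer>>();

    // After (Java 7+): the diamond infers <String, List<Integer>> from the
    // declared type on the left; runtime behavior is identical.
    Map<String, List<Integer>> concise = new HashMap<>();

    // Inference also works when the constructor takes arguments, as in the
    // CharArrayMap and FST Builder hunks in this commit.
    List<String> copy = new ArrayList<>(Arrays.asList("a", "b"));

    System.out.println(verbose.equals(concise)); // true: both are empty maps
  }
}

One caveat at this language level: on Java 7 the diamond cannot be used with anonymous inner classes (that restriction was lifted in Java 9), so such sites keep their explicit type arguments.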


@ -184,6 +184,9 @@ Build
* LUCENE-5511: "ant precommit" / "ant check-svn-working-copy" now work again
with any working copy format (thanks to svnkit 1.8.4). (Uwe Schindler)
+ * LUCENE-5512: Remove redundant typing (use diamond operator) throughout
+   the codebase. (Furkan KAMACI via Robert Muir)
======================= Lucene 4.7.0 =======================
New Features


@ -63,7 +63,7 @@ CharacterEntities = ( "AElig" | "Aacute" | "Acirc" | "Agrave" | "Alpha"
| "zwj" | "zwnj" )
%{
private static final Map<String,String> upperCaseVariantsAccepted
- = new HashMap<String,String>();
+ = new HashMap<>();
static {
upperCaseVariantsAccepted.put("quot", "QUOT");
upperCaseVariantsAccepted.put("copy", "COPY");
@ -73,7 +73,7 @@ CharacterEntities = ( "AElig" | "Aacute" | "Acirc" | "Agrave" | "Alpha"
upperCaseVariantsAccepted.put("amp", "AMP");
}
private static final CharArrayMap<Character> entityValues
- = new CharArrayMap<Character>(Version.LUCENE_CURRENT, 253, false);
+ = new CharArrayMap<>(Version.LUCENE_CURRENT, 253, false);
static {
String[] entities = {
"AElig", "\u00C6", "Aacute", "\u00C1", "Acirc", "\u00C2",


@ -30663,7 +30663,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
/* user code: */
private static final Map<String,String> upperCaseVariantsAccepted
- = new HashMap<String,String>();
+ = new HashMap<>();
static {
upperCaseVariantsAccepted.put("quot", "QUOT");
upperCaseVariantsAccepted.put("copy", "COPY");
@ -30673,7 +30673,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
upperCaseVariantsAccepted.put("amp", "AMP");
}
private static final CharArrayMap<Character> entityValues
- = new CharArrayMap<Character>(Version.LUCENE_CURRENT, 253, false);
+ = new CharArrayMap<>(Version.LUCENE_CURRENT, 253, false);
static {
String[] entities = {
"AElig", "\u00C6", "Aacute", "\u00C1", "Acirc", "\u00C2",


@ -43,7 +43,7 @@ public class MappingCharFilter extends BaseCharFilter {
private final FST<CharsRef> map;
private final FST.BytesReader fstReader;
private final RollingCharBuffer buffer = new RollingCharBuffer();
- private final FST.Arc<CharsRef> scratchArc = new FST.Arc<CharsRef>();
+ private final FST.Arc<CharsRef> scratchArc = new FST.Arc<>();
private final Map<Character,FST.Arc<CharsRef>> cachedRootArcs;
private CharsRef replacement;


@ -69,7 +69,7 @@ public class MappingCharFilterFactory extends CharFilterFactory implements
wlist = getLines(loader, mapping);
} else {
List<String> files = splitFileNames(mapping);
- wlist = new ArrayList<String>();
+ wlist = new ArrayList<>();
for (String file : files) {
List<String> lines = getLines(loader, file.trim());
wlist.addAll(lines);


@ -40,7 +40,7 @@ import org.apache.lucene.util.fst.Util;
public class NormalizeCharMap {
final FST<CharsRef> map;
- final Map<Character,FST.Arc<CharsRef>> cachedRootArcs = new HashMap<Character,FST.Arc<CharsRef>>();
+ final Map<Character,FST.Arc<CharsRef>> cachedRootArcs = new HashMap<>();
// Use the builder to create:
private NormalizeCharMap(FST<CharsRef> map) {
@ -48,7 +48,7 @@ public class NormalizeCharMap {
if (map != null) {
try {
// Pre-cache root arcs:
- final FST.Arc<CharsRef> scratchArc = new FST.Arc<CharsRef>();
+ final FST.Arc<CharsRef> scratchArc = new FST.Arc<>();
final FST.BytesReader fstReader = map.getBytesReader();
map.getFirstArc(scratchArc);
if (FST.targetHasArcs(scratchArc)) {
@ -78,7 +78,7 @@ public class NormalizeCharMap {
*/
public static class Builder {
- private final Map<String,String> pendingPairs = new TreeMap<String,String>();
+ private final Map<String,String> pendingPairs = new TreeMap<>();
/** Records a replacement to be applied to the input
* stream. Whenever <code>singleMatch</code> occurs in
@ -108,7 +108,7 @@ public class NormalizeCharMap {
final FST<CharsRef> map;
try {
final Outputs<CharsRef> outputs = CharSequenceOutputs.getSingleton();
- final org.apache.lucene.util.fst.Builder<CharsRef> builder = new org.apache.lucene.util.fst.Builder<CharsRef>(FST.INPUT_TYPE.BYTE2, outputs);
+ final org.apache.lucene.util.fst.Builder<CharsRef> builder = new org.apache.lucene.util.fst.Builder<>(FST.INPUT_TYPE.BYTE2, outputs);
final IntsRef scratch = new IntsRef();
for(Map.Entry<String,String> ent : pendingPairs.entrySet()) {
builder.add(Util.toUTF16(ent.getKey(), scratch),


@ -84,7 +84,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
protected CompoundWordTokenFilterBase(Version matchVersion, TokenStream input, CharArraySet dictionary, int minWordSize, int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
super(input);
this.matchVersion = matchVersion;
- this.tokens=new LinkedList<CompoundToken>();
+ this.tokens=new LinkedList<>();
if (minWordSize < 0) {
throw new IllegalArgumentException("minWordSize cannot be negative");
}


@ -54,7 +54,7 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer {
private transient TernaryTree ivalues;
public HyphenationTree() {
- stoplist = new HashMap<String,ArrayList<Object>>(23); // usually a small table
+ stoplist = new HashMap<>(23); // usually a small table
classmap = new TernaryTree();
vspace = new ByteVector();
vspace.alloc(1); // this reserves index 0, which we don't use


@ -188,7 +188,7 @@ public class PatternParser extends DefaultHandler {
}
protected ArrayList<Object> normalizeException(ArrayList<?> ex) {
- ArrayList<Object> res = new ArrayList<Object>();
+ ArrayList<Object> res = new ArrayList<>();
for (int i = 0; i < ex.size(); i++) {
Object item = ex.get(i);
if (item instanceof String) {
@ -287,7 +287,7 @@ public class PatternParser extends DefaultHandler {
currElement = ELEM_PATTERNS;
} else if (local.equals("exceptions")) {
currElement = ELEM_EXCEPTIONS;
- exception = new ArrayList<Object>();
+ exception = new ArrayList<>();
} else if (local.equals("hyphen")) {
if (token.length() > 0) {
exception.add(token.toString());


@ -503,7 +503,7 @@ public class TernaryTree implements Cloneable {
public Iterator() {
cur = -1;
- ns = new Stack<Item>();
+ ns = new Stack<>();
ks = new StringBuilder();
rewind();
}


@ -52,6 +52,6 @@ public class LowerCaseTokenizerFactory extends TokenizerFactory implements Multi
@Override
public AbstractAnalysisFactory getMultiTermComponent() {
- return new LowerCaseFilterFactory(new HashMap<String,String>(getOriginalArgs()));
+ return new LowerCaseFilterFactory(new HashMap<>(getOriginalArgs()));
}
}


@ -58,7 +58,7 @@ public class TypeTokenFilterFactory extends TokenFilterFactory implements Resour
public void inform(ResourceLoader loader) throws IOException {
List<String> files = splitFileNames(stopTypesFiles);
if (files.size() > 0) {
- stopTypes = new HashSet<String>();
+ stopTypes = new HashSet<>();
for (String file : files) {
List<String> typesLines = getLines(loader, file.trim());
stopTypes.addAll(typesLines);


@ -280,7 +280,7 @@ public class KStemmer {
DictEntry defaultEntry;
DictEntry entry;
- CharArrayMap<DictEntry> d = new CharArrayMap<DictEntry>(Version.LUCENE_CURRENT, 1000, false);
+ CharArrayMap<DictEntry> d = new CharArrayMap<>(Version.LUCENE_CURRENT, 1000, false);
for (int i = 0; i < exceptionWords.length; i++) {
if (!d.containsKey(exceptionWords[i])) {
entry = new DictEntry(exceptionWords[i], true);
@ -574,7 +574,7 @@ public class KStemmer {
return matchedEntry != null;
}
- // Set<String> lookups = new HashSet<String>();
+ // Set<String> lookups = new HashSet<>();
/* convert past tense (-ed) to present, and `-ied' to `y' */
private void pastTense() {


@ -189,7 +189,7 @@ public class Dictionary {
// read dictionary entries
IntSequenceOutputs o = IntSequenceOutputs.getSingleton();
- Builder<IntsRef> b = new Builder<IntsRef>(FST.INPUT_TYPE.BYTE4, o);
+ Builder<IntsRef> b = new Builder<>(FST.INPUT_TYPE.BYTE4, o);
readDictionaryFiles(dictionaries, decoder, b);
words = b.finish();
aliases = null; // no longer needed
@ -502,7 +502,7 @@ public class Dictionary {
List<Character> list = affixes.get(affixArg);
if (list == null) {
- list = new ArrayList<Character>();
+ list = new ArrayList<>();
affixes.put(affixArg, list);
}


@ -82,10 +82,10 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
String dicts[] = dictionaryFiles.split(",");
InputStream affix = null;
- List<InputStream> dictionaries = new ArrayList<InputStream>();
+ List<InputStream> dictionaries = new ArrayList<>();
try {
- dictionaries = new ArrayList<InputStream>();
+ dictionaries = new ArrayList<>();
for (String file : dicts) {
dictionaries.add(loader.openResource(file));
}


@ -84,7 +84,7 @@ final class Stemmer {
word = scratchBuffer;
}
- List<CharsRef> stems = new ArrayList<CharsRef>();
+ List<CharsRef> stems = new ArrayList<>();
IntsRef forms = dictionary.lookupWord(word, 0, length);
if (forms != null) {
// TODO: some forms should not be added, e.g. ONLYINCOMPOUND
@ -158,7 +158,7 @@ final class Stemmer {
private List<CharsRef> stem(char word[], int length, int previous, int prevFlag, int prefixFlag, int recursionDepth, boolean doPrefix, boolean doSuffix, boolean previousWasPrefix, boolean circumfix) {
// TODO: allow this stuff to be reused by tokenfilter
- List<CharsRef> stems = new ArrayList<CharsRef>();
+ List<CharsRef> stems = new ArrayList<>();
if (doPrefix && dictionary.prefixes != null) {
for (int i = length - 1; i >= 0; i--) {
@ -323,7 +323,7 @@ final class Stemmer {
condition >>>= 1;
char append = (char) (affixReader.readShort() & 0xffff);
- List<CharsRef> stems = new ArrayList<CharsRef>();
+ List<CharsRef> stems = new ArrayList<>();
IntsRef forms = dictionary.lookupWord(strippedWord, 0, length);
if (forms != null) {


@ -43,7 +43,7 @@ public class IndicNormalizer {
}
private static final IdentityHashMap<Character.UnicodeBlock,ScriptData> scripts =
- new IdentityHashMap<Character.UnicodeBlock,ScriptData>(9);
+ new IdentityHashMap<>(9);
private static int flag(Character.UnicodeBlock ub) {
return scripts.get(ub).flag;


@ -88,7 +88,7 @@ public class CapitalizationFilterFactory extends TokenFilterFactory {
k = getSet(args, OK_PREFIX);
if (k != null) {
- okPrefix = new ArrayList<char[]>();
+ okPrefix = new ArrayList<>();
for (String item : k) {
okPrefix.add(item.toCharArray());
}


@ -33,7 +33,7 @@ import java.util.Map;
*
* <pre class="prettyprint">
* {@code
- * Map<String,Analyzer> analyzerPerField = new HashMap<String,Analyzer>();
+ * Map<String,Analyzer> analyzerPerField = new HashMap<>();
* analyzerPerField.put("firstname", new KeywordAnalyzer());
* analyzerPerField.put("lastname", new KeywordAnalyzer());
*


@ -44,7 +44,7 @@ public final class StemmerOverrideFilter extends TokenFilter {
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
private final BytesReader fstReader;
- private final Arc<BytesRef> scratchArc = new FST.Arc<BytesRef>();
+ private final Arc<BytesRef> scratchArc = new FST.Arc<>();
private final CharsRef spare = new CharsRef();
/**
@ -145,7 +145,7 @@ public final class StemmerOverrideFilter extends TokenFilter {
public static class Builder {
private final BytesRefHash hash = new BytesRefHash();
private final BytesRef spare = new BytesRef();
- private final ArrayList<CharSequence> outputValues = new ArrayList<CharSequence>();
+ private final ArrayList<CharSequence> outputValues = new ArrayList<>();
private final boolean ignoreCase;
private final CharsRef charsSpare = new CharsRef();
@ -200,7 +200,7 @@ public final class StemmerOverrideFilter extends TokenFilter {
*/
public StemmerOverrideMap build() throws IOException {
ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
- org.apache.lucene.util.fst.Builder<BytesRef> builder = new org.apache.lucene.util.fst.Builder<BytesRef>(
+ org.apache.lucene.util.fst.Builder<BytesRef> builder = new org.apache.lucene.util.fst.Builder<>(
FST.INPUT_TYPE.BYTE4, outputs);
final int[] sort = hash.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
IntsRef intsSpare = new IntsRef();


@ -104,7 +104,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
}
if (types != null) {
List<String> files = splitFileNames( types );
- List<String> wlist = new ArrayList<String>();
+ List<String> wlist = new ArrayList<>();
for( String file : files ){
List<String> lines = getLines(loader, file.trim());
wlist.addAll( lines );
@ -124,7 +124,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
// parses a list of MappingCharFilter style rules into a custom byte[] type table
private byte[] parseTypes(List<String> rules) {
- SortedMap<Character,Byte> typeMap = new TreeMap<Character,Byte>();
+ SortedMap<Character,Byte> typeMap = new TreeMap<>();
for( String rule : rules ){
Matcher m = typePattern.matcher(rule);
if( !m.find() )


@ -82,7 +82,7 @@ public final class DutchAnalyzer extends Analyzer {
throw new RuntimeException("Unable to load default stopword set");
}
- DEFAULT_STEM_DICT = new CharArrayMap<String>(Version.LUCENE_CURRENT, 4, false);
+ DEFAULT_STEM_DICT = new CharArrayMap<>(Version.LUCENE_CURRENT, 4, false);
DEFAULT_STEM_DICT.put("fiets", "fiets"); //otherwise fiet
DEFAULT_STEM_DICT.put("bromfiets", "bromfiets"); //otherwise bromfiet
DEFAULT_STEM_DICT.put("ei", "eier");


@ -98,7 +98,7 @@ public class ReversePathHierarchyTokenizer extends Tokenizer {
this.skip = skip;
resultToken = new StringBuilder(bufferSize);
resultTokenBuffer = new char[bufferSize];
- delimiterPositions = new ArrayList<Integer>(bufferSize/10);
+ delimiterPositions = new ArrayList<>(bufferSize/10);
}
private static final int DEFAULT_BUFFER_SIZE = 1024;


@ -248,7 +248,7 @@ public abstract class RSLPStemmerBase {
try {
InputStream is = clazz.getResourceAsStream(resource);
LineNumberReader r = new LineNumberReader(new InputStreamReader(is, "UTF-8"));
- Map<String,Step> steps = new HashMap<String,Step>();
+ Map<String,Step> steps = new HashMap<>();
String step;
while ((step = readLine(r)) != null) {
Step s = parseStep(r, step);
@ -285,7 +285,7 @@ public abstract class RSLPStemmerBase {
}
private static Rule[] parseRules(LineNumberReader r, int type) throws IOException {
- List<Rule> rules = new ArrayList<Rule>();
+ List<Rule> rules = new ArrayList<>();
String line;
while ((line = readLine(r)) != null) {
Matcher matcher = stripPattern.matcher(line);


@ -46,7 +46,7 @@ import org.apache.lucene.util.Version;
public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
private final Analyzer delegate;
- private final Map<String, Set<String>> stopWordsPerField = new HashMap<String, Set<String>>();
+ private final Map<String, Set<String>> stopWordsPerField = new HashMap<>();
//The default maximum percentage (40%) of index documents which
//can contain a term, after which the term is considered to be a stop word.
public static final float defaultMaxDocFreqPercent = 0.4f;
@ -153,7 +153,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
this.delegate = delegate;
for (String field : fields) {
- Set<String> stopWords = new HashSet<String>();
+ Set<String> stopWords = new HashSet<>();
Terms terms = MultiFields.getTerms(indexReader, field);
CharsRef spare = new CharsRef();
if (terms != null) {
@ -204,7 +204,7 @@ public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper {
* @return the stop words (as terms)
*/
public Term[] getStopWords() {
- List<Term> allStopWords = new ArrayList<Term>();
+ List<Term> allStopWords = new ArrayList<>();
for (String fieldName : stopWordsPerField.keySet()) {
Set<String> stopWords = stopWordsPerField.get(fieldName);
for (String text : stopWords) {


@ -74,7 +74,7 @@ public final class ShingleFilter extends TokenFilter {
* that will be composed to form output shingles.
*/
private LinkedList<InputWindowToken> inputWindow
- = new LinkedList<InputWindowToken>();
+ = new LinkedList<>();
/**
* The number of input tokens in the next output token. This is the "n" in


@ -75,7 +75,7 @@ sink2.consumeAllTokens();
* <p>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
*/
public final class TeeSinkTokenFilter extends TokenFilter {
- private final List<WeakReference<SinkTokenStream>> sinks = new LinkedList<WeakReference<SinkTokenStream>>();
+ private final List<WeakReference<SinkTokenStream>> sinks = new LinkedList<>();
/**
* Instantiates a new TeeSinkTokenFilter.
@ -98,7 +98,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
*/
public SinkTokenStream newSinkTokenStream(SinkFilter filter) {
SinkTokenStream sink = new SinkTokenStream(this.cloneAttributes(), filter);
- this.sinks.add(new WeakReference<SinkTokenStream>(sink));
+ this.sinks.add(new WeakReference<>(sink));
return sink;
}
@ -116,7 +116,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
for (Iterator<AttributeImpl> it = this.cloneAttributes().getAttributeImplsIterator(); it.hasNext(); ) {
sink.addAttributeImpl(it.next());
}
- this.sinks.add(new WeakReference<SinkTokenStream>(sink));
+ this.sinks.add(new WeakReference<>(sink));
}
/**
@ -186,7 +186,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
* TokenStream output from a tee with optional filtering.
*/
public static final class SinkTokenStream extends TokenStream {
- private final List<AttributeSource.State> cachedStates = new LinkedList<AttributeSource.State>();
+ private final List<AttributeSource.State> cachedStates = new LinkedList<>();
private AttributeSource.State finalState;
private Iterator<AttributeSource.State> it = null;
private SinkFilter filter;


@ -130,7 +130,7 @@ public class SolrSynonymParser extends SynonymMap.Parser {
}
private static String[] split(String s, String separator) {
- ArrayList<String> list = new ArrayList<String>(2);
+ ArrayList<String> list = new ArrayList<>(2);
StringBuilder sb = new StringBuilder();
int pos=0, end=s.length();
while (pos < end) {


@ -282,7 +282,7 @@ public final class SynonymFilter extends TokenFilter {
//System.out.println("FSTFilt maxH=" + synonyms.maxHorizontalContext);
- scratchArc = new FST.Arc<BytesRef>();
+ scratchArc = new FST.Arc<>();
}
private void capture() {


@ -83,7 +83,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
private final String format;
private final boolean expand;
private final String analyzerName;
- private final Map<String, String> tokArgs = new HashMap<String, String>();
+ private final Map<String, String> tokArgs = new HashMap<>();
private SynonymMap map;


@ -67,7 +67,7 @@ public class SynonymMap {
* @lucene.experimental
*/
public static class Builder {
- private final HashMap<CharsRef,MapEntry> workingSet = new HashMap<CharsRef,MapEntry>();
+ private final HashMap<CharsRef,MapEntry> workingSet = new HashMap<>();
private final BytesRefHash words = new BytesRefHash();
private final BytesRef utf8Scratch = new BytesRef(8);
private int maxHorizontalContext;
@ -82,7 +82,7 @@ public class SynonymMap {
private static class MapEntry {
boolean includeOrig;
// we could sort for better sharing ultimately, but it could confuse people
- ArrayList<Integer> ords = new ArrayList<Integer>();
+ ArrayList<Integer> ords = new ArrayList<>();
}
/** Sugar: just joins the provided terms with {@link
@ -210,7 +210,7 @@ public class SynonymMap {
ByteSequenceOutputs outputs = ByteSequenceOutputs.getSingleton();
// TODO: are we using the best sharing options?
org.apache.lucene.util.fst.Builder<BytesRef> builder =
- new org.apache.lucene.util.fst.Builder<BytesRef>(FST.INPUT_TYPE.BYTE4, outputs);
+ new org.apache.lucene.util.fst.Builder<>(FST.INPUT_TYPE.BYTE4, outputs);
BytesRef scratch = new BytesRef(64);
ByteArrayDataOutput scratchOutput = new ByteArrayDataOutput();
@ -218,7 +218,7 @@ public class SynonymMap {
final Set<Integer> dedupSet;
if (dedup) {
- dedupSet = new HashSet<Integer>();
+ dedupSet = new HashSet<>();
} else {
dedupSet = null;
}


@ -65,7 +65,7 @@ public abstract class AbstractAnalysisFactory {
* Initialize this factory via a set of key-value pairs.
*/
protected AbstractAnalysisFactory(Map<String,String> args) {
- originalArgs = Collections.unmodifiableMap(new HashMap<String,String>(args));
+ originalArgs = Collections.unmodifiableMap(new HashMap<>(args));
String version = get(args, LUCENE_MATCH_VERSION_PARAM);
luceneMatchVersion = version == null ? null : Version.parseLeniently(version);
args.remove(CLASS_NAME); // consume the class arg
@ -202,7 +202,7 @@ public abstract class AbstractAnalysisFactory {
Set<String> set = null;
Matcher matcher = ITEM_PATTERN.matcher(s);
if (matcher.find()) {
- set = new HashSet<String>();
+ set = new HashSet<>();
set.add(matcher.group(0));
while (matcher.find()) {
set.add(matcher.group(0));
@ -296,7 +296,7 @@ public abstract class AbstractAnalysisFactory {
if (fileNames == null)
return Collections.<String>emptyList();
- List<String> result = new ArrayList<String>();
+ List<String> result = new ArrayList<>();
for (String file : fileNames.split("(?<!\\\\),")) {
result.add(file.replaceAll("\\\\(?=,)", ""));
}


@ -73,7 +73,7 @@ final class AnalysisSPILoader<S extends AbstractAnalysisFactory> {
*/
public synchronized void reload(ClassLoader classloader) {
final LinkedHashMap<String,Class<? extends S>> services =
- new LinkedHashMap<String,Class<? extends S>>(this.services);
+ new LinkedHashMap<>(this.services);
final SPIClassIterator<S> loader = SPIClassIterator.get(clazz, classloader);
while (loader.hasNext()) {
final Class<? extends S> service = loader.next();


@ -52,7 +52,7 @@ import org.apache.lucene.util.Version;
*/
public class CharArrayMap<V> extends AbstractMap<Object,V> {
// private only because missing generics
- private static final CharArrayMap<?> EMPTY_MAP = new EmptyCharArrayMap<Object>();
+ private static final CharArrayMap<?> EMPTY_MAP = new EmptyCharArrayMap<>();
private final static int INIT_SIZE = 8;
private final CharacterUtils charUtils;
@ -559,7 +559,7 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
return emptyMap();
if (map instanceof UnmodifiableCharArrayMap)
return map;
- return new UnmodifiableCharArrayMap<V>(map);
+ return new UnmodifiableCharArrayMap<>(map);
}
/**
@ -595,12 +595,12 @@ public class CharArrayMap<V> extends AbstractMap<Object,V> {
System.arraycopy(m.keys, 0, keys, 0, keys.length);
final V[] values = (V[]) new Object[m.values.length];
System.arraycopy(m.values, 0, values, 0, values.length);
- m = new CharArrayMap<V>(m);
+ m = new CharArrayMap<>(m);
m.keys = keys;
m.values = values;
return m;
}
- return new CharArrayMap<V>(matchVersion, map, false);
+ return new CharArrayMap<>(matchVersion, map, false);
}
/** Returns an empty, unmodifiable map. */


@ -74,7 +74,7 @@ public class CharArraySet extends AbstractSet<Object> {
* otherwise <code>true</code>.
*/
public CharArraySet(Version matchVersion, int startSize, boolean ignoreCase) {
- this(new CharArrayMap<Object>(matchVersion, startSize, ignoreCase));
+ this(new CharArrayMap<>(matchVersion, startSize, ignoreCase));
}
/**


@ -30,7 +30,7 @@ import org.apache.lucene.analysis.CharFilter;
public abstract class CharFilterFactory extends AbstractAnalysisFactory {
private static final AnalysisSPILoader<CharFilterFactory> loader =
- new AnalysisSPILoader<CharFilterFactory>(CharFilterFactory.class);
+ new AnalysisSPILoader<>(CharFilterFactory.class);
/** looks up a charfilter by name from context classpath */
public static CharFilterFactory forName(String name, Map<String,String> args) {


@ -29,7 +29,7 @@ import org.apache.lucene.analysis.TokenStream;
public abstract class TokenFilterFactory extends AbstractAnalysisFactory {
private static final AnalysisSPILoader<TokenFilterFactory> loader =
- new AnalysisSPILoader<TokenFilterFactory>(TokenFilterFactory.class,
+ new AnalysisSPILoader<>(TokenFilterFactory.class,
new String[] { "TokenFilterFactory", "FilterFactory" });
/** looks up a tokenfilter by name from context classpath */


@ -31,7 +31,7 @@ import java.util.Set;
public abstract class TokenizerFactory extends AbstractAnalysisFactory {
private static final AnalysisSPILoader<TokenizerFactory> loader =
- new AnalysisSPILoader<TokenizerFactory>(TokenizerFactory.class);
+ new AnalysisSPILoader<>(TokenizerFactory.class);
/** looks up a tokenizer by name from context classpath */
public static TokenizerFactory forName(String name, Map<String,String> args) {


@ -219,7 +219,7 @@ public class WordlistLoader {
try {
input = getBufferedReader(IOUtils.getDecodingReader(stream, charset));
- lines = new ArrayList<String>();
+ lines = new ArrayList<>();
for (String word=null; (word=input.readLine())!=null;) {
// skip initial bom marker
if (lines.isEmpty() && word.length() > 0 && word.charAt(0) == '\uFEFF')


@ -215,7 +215,7 @@ public final class WikipediaTokenizer extends Tokenizer {
int lastPos = theStart + numAdded;
int tmpTokType;
int numSeen = 0;
- List<AttributeSource.State> tmp = new ArrayList<AttributeSource.State>();
+ List<AttributeSource.State> tmp = new ArrayList<>();
setupSavedToken(0, type);
tmp.add(captureState());
//while we can get a token and that token is the same type and we have not transitioned to a new wiki-item of the same type


@ -114,7 +114,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testGamma() throws Exception {
String test = "&Gamma;";
String gold = "\u0393";
- Set<String> set = new HashSet<String>();
+ Set<String> set = new HashSet<>();
set.add("reserved");
Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
StringBuilder builder = new StringBuilder();
@ -129,7 +129,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testEntities() throws Exception {
String test = "&nbsp; &lt;foo&gt; &Uuml;bermensch &#61; &Gamma; bar &#x393;";
String gold = " <foo> \u00DCbermensch = \u0393 bar \u0393";
- Set<String> set = new HashSet<String>();
+ Set<String> set = new HashSet<>();
set.add("reserved");
Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
StringBuilder builder = new StringBuilder();
@ -144,7 +144,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testMoreEntities() throws Exception {
String test = "&nbsp; &lt;junk/&gt; &nbsp; &#33; &#64; and &#8217;";
String gold = " <junk/> ! @ and ";
- Set<String> set = new HashSet<String>();
+ Set<String> set = new HashSet<>();
set.add("reserved");
Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
StringBuilder builder = new StringBuilder();
@ -158,7 +158,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testReserved() throws Exception {
String test = "aaa bbb <reserved ccc=\"ddddd\"> eeee </reserved> ffff <reserved ggg=\"hhhh\"/> <other/>";
- Set<String> set = new HashSet<String>();
+ Set<String> set = new HashSet<>();
set.add("reserved");
Reader reader = new HTMLStripCharFilter(new StringReader(test), set);
StringBuilder builder = new StringBuilder();
@ -588,7 +588,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testEscapeScript() throws Exception {
String test = "one<script no-value-attr>callSomeMethod();</script>two";
String gold = "one<script no-value-attr></script>two";
Set<String> escapedTags = new HashSet<String>(Arrays.asList("SCRIPT"));
Set<String> escapedTags = new HashSet<>(Arrays.asList("SCRIPT"));
Reader reader = new HTMLStripCharFilter
(new StringReader(test), escapedTags);
int ch = 0;
@ -628,7 +628,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testEscapeStyle() throws Exception {
String test = "one<style type=\"text/css\"> body,font,a { font-family:arial; } </style>two";
String gold = "one<style type=\"text/css\"></style>two";
Set<String> escapedTags = new HashSet<String>(Arrays.asList("STYLE"));
Set<String> escapedTags = new HashSet<>(Arrays.asList("STYLE"));
Reader reader = new HTMLStripCharFilter
(new StringReader(test), escapedTags);
int ch = 0;
@ -668,7 +668,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
public void testEscapeBR() throws Exception {
String test = "one<BR class='whatever'>two</\nBR\n>";
String gold = "one<BR class='whatever'>two</\nBR\n>";
Set<String> escapedTags = new HashSet<String>(Arrays.asList("BR"));
Set<String> escapedTags = new HashSet<>(Arrays.asList("BR"));
Reader reader = new HTMLStripCharFilter
(new StringReader(test), escapedTags);
int ch = 0;


@ -270,7 +270,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
Random random = random();
NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
// we can't add duplicate keys, or NormalizeCharMap gets angry
- Set<String> keys = new HashSet<String>();
+ Set<String> keys = new HashSet<>();
int num = random.nextInt(5);
//System.out.println("NormalizeCharMap=");
for (int i = 0; i < num; i++) {
@ -296,7 +296,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
final char endLetter = (char) TestUtil.nextInt(random, 'b', 'z');
- final Map<String,String> map = new HashMap<String,String>();
+ final Map<String,String> map = new HashMap<>();
final NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
final int numMappings = atLeast(5);
if (VERBOSE) {
@ -333,7 +333,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
final StringBuilder output = new StringBuilder();
// Maps output offset to input offset:
- final List<Integer> inputOffsets = new ArrayList<Integer>();
+ final List<Integer> inputOffsets = new ArrayList<>();
int cumDiff = 0;
int charIdx = 0;
@ -416,7 +416,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
final MappingCharFilter mapFilter = new MappingCharFilter(charMap, new StringReader(content));
final StringBuilder actualBuilder = new StringBuilder();
- final List<Integer> actualInputOffsets = new ArrayList<Integer>();
+ final List<Integer> actualInputOffsets = new ArrayList<>();
// Now consume the actual mapFilter, somewhat randomly:
while (true) {


@ -117,7 +117,7 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
continue;
}
- Map<String,String> args = new HashMap<String,String>();
+ Map<String,String> args = new HashMap<>();
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
if (Tokenizer.class.isAssignableFrom(c)) {


@ -122,7 +122,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
/** tries to initialize a factory with no arguments */
private AbstractAnalysisFactory initialize(Class<? extends AbstractAnalysisFactory> factoryClazz) throws IOException {
- Map<String,String> args = new HashMap<String,String>();
+ Map<String,String> args = new HashMap<>();
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
Constructor<? extends AbstractAnalysisFactory> ctor;
try {


@ -110,7 +110,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
};
};
- private static final Map<Constructor<?>,Predicate<Object[]>> brokenConstructors = new HashMap<Constructor<?>, Predicate<Object[]>>();
+ private static final Map<Constructor<?>,Predicate<Object[]>> brokenConstructors = new HashMap<>();
static {
try {
brokenConstructors.put(
@ -158,7 +158,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
// TODO: also fix these and remove (maybe):
// Classes/options that don't produce consistent graph offsets:
- private static final Map<Constructor<?>,Predicate<Object[]>> brokenOffsetsConstructors = new HashMap<Constructor<?>, Predicate<Object[]>>();
+ private static final Map<Constructor<?>,Predicate<Object[]>> brokenOffsetsConstructors = new HashMap<>();
static {
try {
for (Class<?> c : Arrays.<Class<?>>asList(
@ -188,9 +188,9 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
@BeforeClass
public static void beforeClass() throws Exception {
List<Class<?>> analysisClasses = getClassesForPackage("org.apache.lucene.analysis");
- tokenizers = new ArrayList<Constructor<? extends Tokenizer>>();
- tokenfilters = new ArrayList<Constructor<? extends TokenFilter>>();
- charfilters = new ArrayList<Constructor<? extends CharFilter>>();
+ tokenizers = new ArrayList<>();
+ tokenfilters = new ArrayList<>();
+ charfilters = new ArrayList<>();
for (final Class<?> c : analysisClasses) {
final int modifiers = c.getModifiers();
if (
@ -257,7 +257,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
}
public static List<Class<?>> getClassesForPackage(String pckgname) throws Exception {
- final List<Class<?>> classes = new ArrayList<Class<?>>();
+ final List<Class<?>> classes = new ArrayList<>();
collectClassesForPackage(pckgname, classes);
assertFalse("No classes found in package '"+pckgname+"'; maybe your test classes are packaged as JAR file?", classes.isEmpty());
return classes;
@ -358,7 +358,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
put(Set.class, new ArgProducer() {
@Override public Object create(Random random) {
// TypeTokenFilter
- Set<String> set = new HashSet<String>();
+ Set<String> set = new HashSet<>();
int num = random.nextInt(5);
for (int i = 0; i < num; i++) {
set.add(StandardTokenizer.TOKEN_TYPES[random.nextInt(StandardTokenizer.TOKEN_TYPES.length)]);
@ -369,7 +369,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
put(Collection.class, new ArgProducer() {
@Override public Object create(Random random) {
// CapitalizationFilter
- Collection<char[]> col = new ArrayList<char[]>();
+ Collection<char[]> col = new ArrayList<>();
int num = random.nextInt(5);
for (int i = 0; i < num; i++) {
col.add(TestUtil.randomSimpleString(random).toCharArray());
@ -459,7 +459,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
@Override public Object create(Random random) {
NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
// we can't add duplicate keys, or NormalizeCharMap gets angry
- Set<String> keys = new HashSet<String>();
+ Set<String> keys = new HashSet<>();
int num = random.nextInt(5);
//System.out.println("NormalizeCharMap=");
for (int i = 0; i < num; i++) {
@ -489,7 +489,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
put(CharArrayMap.class, new ArgProducer() {
@Override public Object create(Random random) {
int num = random.nextInt(10);
- CharArrayMap<String> map = new CharArrayMap<String>(TEST_VERSION_CURRENT, num, random.nextBoolean());
+ CharArrayMap<String> map = new CharArrayMap<>(TEST_VERSION_CURRENT, num, random.nextBoolean());
for (int i = 0; i < num; i++) {
// TODO: make nastier
map.put(TestUtil.randomSimpleString(random), TestUtil.randomSimpleString(random));


@ -32,7 +32,7 @@ import java.util.HashSet;
public class TestStopAnalyzer extends BaseTokenStreamTestCase {
private StopAnalyzer stop = new StopAnalyzer(TEST_VERSION_CURRENT);
- private Set<Object> inValidTokens = new HashSet<Object>();
+ private Set<Object> inValidTokens = new HashSet<>();
@Override
public void setUp() throws Exception {


@ -59,7 +59,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
*/
public void testStopPositons() throws IOException {
StringBuilder sb = new StringBuilder();
- ArrayList<String> a = new ArrayList<String>();
+ ArrayList<String> a = new ArrayList<>();
for (int i=0; i<20; i++) {
String w = English.intToEnglish(i).trim();
sb.append(w).append(" ");
@ -76,8 +76,8 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
StopFilter stpf = new StopFilter(Version.LUCENE_40, in, stopSet);
doTestStopPositons(stpf);
// with increments, concatenating two stop filters
- ArrayList<String> a0 = new ArrayList<String>();
- ArrayList<String> a1 = new ArrayList<String>();
+ ArrayList<String> a0 = new ArrayList<>();
+ ArrayList<String> a1 = new ArrayList<>();
for (int i=0; i<a.size(); i++) {
if (i%2==0) {
a0.add(a.get(i));


@ -287,7 +287,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
BufferedReader bufferedReader = null;
String[] urls;
try {
- List<String> urlList = new ArrayList<String>();
+ List<String> urlList = new ArrayList<>();
bufferedReader = new BufferedReader(new InputStreamReader
(getClass().getResourceAsStream("LuceneResourcesWikiPageURLs.txt"), "UTF-8"));
String line;
@ -331,7 +331,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
BufferedReader bufferedReader = null;
String[] emails;
try {
- List<String> emailList = new ArrayList<String>();
+ List<String> emailList = new ArrayList<>();
bufferedReader = new BufferedReader(new InputStreamReader
(getClass().getResourceAsStream
("email.addresses.from.random.text.with.email.addresses.txt"), "UTF-8"));
@ -401,7 +401,7 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
BufferedReader bufferedReader = null;
String[] urls;
try {
- List<String> urlList = new ArrayList<String>();
+ List<String> urlList = new ArrayList<>();
bufferedReader = new BufferedReader(new InputStreamReader
(getClass().getResourceAsStream
("urls.from.random.text.with.urls.txt"), "UTF-8"));


@ -73,7 +73,7 @@ public class TestKStemmer extends BaseTokenStreamTestCase {
// tf = new KStemFilter(tf);
KStemmer kstem = new KStemmer();
- Map<String,String> map = new TreeMap<String,String>();
+ Map<String,String> map = new TreeMap<>();
for(;;) {
Token t = tf.next();
if (t==null) break;


@ -1888,8 +1888,8 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
};
// Construct input text and expected output tokens
- List<String> expectedUnfoldedTokens = new ArrayList<String>();
- List<String> expectedFoldedTokens = new ArrayList<String>();
+ List<String> expectedUnfoldedTokens = new ArrayList<>();
+ List<String> expectedFoldedTokens = new ArrayList<>();
StringBuilder inputText = new StringBuilder();
for (int n = 0 ; n < foldings.length ; n += 2) {
if (n > 0) {


@ -78,7 +78,7 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase {
true, keep, true, null, 0, DEFAULT_MAX_WORD_COUNT, DEFAULT_MAX_TOKEN_LENGTH);
// Now try some prefixes
- List<char[]> okPrefix = new ArrayList<char[]>();
+ List<char[]> okPrefix = new ArrayList<>();
okPrefix.add("McK".toCharArray());
assertCapitalizesTo("McKinley",


@ -32,7 +32,7 @@ public class TestKeepWordFilter extends BaseTokenStreamTestCase {
public void testStopAndGo() throws Exception
{
- Set<String> words = new HashSet<String>();
+ Set<String> words = new HashSet<>();
words.add( "aaa" );
words.add( "bbb" );
@ -51,7 +51,7 @@ public class TestKeepWordFilter extends BaseTokenStreamTestCase {
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
- final Set<String> words = new HashSet<String>();
+ final Set<String> words = new HashSet<>();
words.add( "a" );
words.add( "b" );


@ -31,7 +31,7 @@ public class TestPerFieldAnalyzerWrapper extends BaseTokenStreamTestCase {
public void testPerField() throws Exception {
String text = "Qwerty";
- Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>();
+ Map<String, Analyzer> analyzerPerField = new HashMap<>();
analyzerPerField.put("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));
PerFieldAnalyzerWrapper analyzer =


@ -78,7 +78,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
}
public void testRandomRealisticWhiteSpace() throws IOException {
- Map<String,String> map = new HashMap<String,String>();
+ Map<String,String> map = new HashMap<>();
int numTerms = atLeast(50);
for (int i = 0; i < numTerms; i++) {
String randomRealisticUnicodeString = TestUtil
@ -105,7 +105,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
StemmerOverrideFilter.Builder builder = new StemmerOverrideFilter.Builder(random().nextBoolean());
Set<Entry<String,String>> entrySet = map.entrySet();
StringBuilder input = new StringBuilder();
- List<String> output = new ArrayList<String>();
+ List<String> output = new ArrayList<>();
for (Entry<String,String> entry : entrySet) {
builder.add(entry.getKey(), entry.getValue());
if (random().nextBoolean() || output.isEmpty()) {
@ -121,7 +121,7 @@ public class TestStemmerOverrideFilter extends BaseTokenStreamTestCase {
}
public void testRandomRealisticKeyword() throws IOException {
- Map<String,String> map = new HashMap<String,String>();
+ Map<String,String> map = new HashMap<>();
int numTerms = atLeast(50);
for (int i = 0; i < numTerms; i++) {
String randomRealisticUnicodeString = TestUtil


@ -210,7 +210,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
@Test
public void testPositionIncrements() throws Exception {
final int flags = GENERATE_WORD_PARTS | GENERATE_NUMBER_PARTS | CATENATE_ALL | SPLIT_ON_CASE_CHANGE | SPLIT_ON_NUMERICS | STEM_ENGLISH_POSSESSIVE;
- final CharArraySet protWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<String>(Arrays.asList("NUTCH")), false);
+ final CharArraySet protWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("NUTCH")), false);
/* analyzer that uses whitespace + wdf */
Analyzer a = new Analyzer() {
@ -332,7 +332,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
final int flags = random().nextInt(512);
final CharArraySet protectedWords;
if (random().nextBoolean()) {
- protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<String>(Arrays.asList("a", "b", "cd")), false);
+ protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("a", "b", "cd")), false);
} else {
protectedWords = null;
}
@ -355,7 +355,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
final int flags = i;
final CharArraySet protectedWords;
if (random.nextBoolean()) {
- protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<String>(Arrays.asList("a", "b", "cd")), false);
+ protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet<>(Arrays.asList("a", "b", "cd")), false);
} else {
protectedWords = null;
}


@ -78,7 +78,7 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase
final String INPUT = "G&uuml;nther G&uuml;nther is here";
// create MappingCharFilter
- List<String> mappingRules = new ArrayList<String>();
+ List<String> mappingRules = new ArrayList<>();
mappingRules.add( "\"&uuml;\" => \"ü\"" );
NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
builder.add("&uuml;", "ü");


@ -396,8 +396,8 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
final int numSyn = atLeast(5);
//final int numSyn = 2;
- final Map<String,OneSyn> synMap = new HashMap<String,OneSyn>();
- final List<OneSyn> syns = new ArrayList<OneSyn>();
+ final Map<String,OneSyn> synMap = new HashMap<>();
+ final List<OneSyn> syns = new ArrayList<>();
final boolean dedup = random().nextBoolean();
if (VERBOSE) {
System.out.println(" dedup=" + dedup);
@ -410,7 +410,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
s = new OneSyn();
s.in = synIn;
syns.add(s);
- s.out = new ArrayList<String>();
+ s.out = new ArrayList<>();
synMap.put(synIn, s);
s.keepOrig = random().nextBoolean();
}
@ -453,7 +453,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
}
private void pruneDups(List<OneSyn> syns) {
- Set<String> seen = new HashSet<String>();
+ Set<String> seen = new HashSet<>();
for(OneSyn syn : syns) {
int idx = 0;
while(idx < syn.out.size()) {


@ -47,7 +47,7 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest
if (keysAndValues.length % 2 == 1) {
throw new IllegalArgumentException("invalid keysAndValues map");
}
- Map<String,String> args = new HashMap<String,String>();
+ Map<String,String> args = new HashMap<>();
for (int i = 0; i < keysAndValues.length; i += 2) {
String previous = args.put(keysAndValues[i], keysAndValues[i+1]);
assertNull("duplicate values for key: " + keysAndValues[i], previous);


@ -25,8 +25,8 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestCharArrayMap extends LuceneTestCase {
public void doRandom(int iter, boolean ignoreCase) {
- CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 1, ignoreCase);
- HashMap<String,Integer> hmap = new HashMap<String,Integer>();
+ CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 1, ignoreCase);
+ HashMap<String,Integer> hmap = new HashMap<>();
char[] key;
for (int i=0; i<iter; i++) {
@ -64,8 +64,8 @@ public class TestCharArrayMap extends LuceneTestCase {
}
public void testMethods() {
- CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
- HashMap<String,Integer> hm = new HashMap<String,Integer>();
+ CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
+ HashMap<String,Integer> hm = new HashMap<>();
hm.put("foo",1);
hm.put("bar",2);
cm.putAll(hm);
@ -133,7 +133,7 @@ public class TestCharArrayMap extends LuceneTestCase {
}
public void testModifyOnUnmodifiable(){
- CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
+ CharArrayMap<Integer> map = new CharArrayMap<>(TEST_VERSION_CURRENT, 2, false);
map.put("foo",1);
map.put("bar",2);
final int size = map.size();
@ -230,7 +230,7 @@ public class TestCharArrayMap extends LuceneTestCase {
}
public void testToString() {
- CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
+ CharArrayMap<Integer> cm = new CharArrayMap<>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
assertEquals("[test]",cm.keySet().toString());
assertEquals("[1]",cm.values().toString());
assertEquals("[test=1]",cm.entrySet().toString());


@ -256,7 +256,7 @@ public class TestCharArraySet extends LuceneTestCase {
CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
- List<String> stopwordsUpper = new ArrayList<String>();
+ List<String> stopwordsUpper = new ArrayList<>();
for (String string : stopwords) {
stopwordsUpper.add(string.toUpperCase(Locale.ROOT));
}
@ -278,7 +278,7 @@ public class TestCharArraySet extends LuceneTestCase {
assertFalse(copyCaseSens.contains(string));
}
// test adding terms to the copy
- List<String> newWords = new ArrayList<String>();
+ List<String> newWords = new ArrayList<>();
for (String string : stopwords) {
newWords.add(string+"_1");
}
@ -303,7 +303,7 @@ public class TestCharArraySet extends LuceneTestCase {
CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
- List<String> stopwordsUpper = new ArrayList<String>();
+ List<String> stopwordsUpper = new ArrayList<>();
for (String string : stopwords) {
stopwordsUpper.add(string.toUpperCase(Locale.ROOT));
}
@ -325,7 +325,7 @@ public class TestCharArraySet extends LuceneTestCase {
assertFalse(copyCaseSens.contains(string));
}
// test adding terms to the copy
- List<String> newWords = new ArrayList<String>();
+ List<String> newWords = new ArrayList<>();
for (String string : stopwords) {
newWords.add(string+"_1");
}
@ -346,10 +346,10 @@ public class TestCharArraySet extends LuceneTestCase {
* Test the static #copy() function with a JDK {@link Set} as a source
*/
public void testCopyJDKSet() {
- Set<String> set = new HashSet<String>();
+ Set<String> set = new HashSet<>();
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
- List<String> stopwordsUpper = new ArrayList<String>();
+ List<String> stopwordsUpper = new ArrayList<>();
for (String string : stopwords) {
stopwordsUpper.add(string.toUpperCase(Locale.ROOT));
}
@ -365,7 +365,7 @@ public class TestCharArraySet extends LuceneTestCase {
assertFalse(copy.contains(string));
}
- List<String> newWords = new ArrayList<String>();
+ List<String> newWords = new ArrayList<>();
for (String string : stopwords) {
newWords.add(string+"_1");
}


@ -51,7 +51,7 @@ public class TestElision extends BaseTokenStreamTestCase {
}
private List<String> filter(TokenFilter filter) throws IOException {
- List<String> tas = new ArrayList<String>();
+ List<String> tas = new ArrayList<>();
CharTermAttribute termAtt = filter.getAttribute(CharTermAttribute.class);
filter.reset();
while (filter.incrementToken()) {


@ -130,7 +130,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
}
public void testLucene1133() throws Exception {
- Set<String> untoks = new HashSet<String>();
+ Set<String> untoks = new HashSet<>();
untoks.add(WikipediaTokenizer.CATEGORY);
untoks.add(WikipediaTokenizer.ITALICS);
//should be exactly the same, regardless of untoks
@ -150,7 +150,7 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase {
}
public void testBoth() throws Exception {
- Set<String> untoks = new HashSet<String>();
+ Set<String> untoks = new HashSet<>();
untoks.add(WikipediaTokenizer.CATEGORY);
untoks.add(WikipediaTokenizer.ITALICS);
String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h i j]]";


@ -111,7 +111,7 @@ public class GenerateJflexTLDMacros {
* @throws java.io.IOException if there is a problem downloading the database
*/
private SortedSet<String> getIANARootZoneDatabase() throws IOException {
- final SortedSet<String> TLDs = new TreeSet<String>();
+ final SortedSet<String> TLDs = new TreeSet<>();
final URLConnection connection = tldFileURL.openConnection();
connection.setUseCaches(false);
connection.addRequestProperty("Cache-Control", "no-cache");


@ -84,7 +84,7 @@ public class ICUTokenizerFactory extends TokenizerFactory implements ResourceLoa
/** Creates a new ICUTokenizerFactory */
public ICUTokenizerFactory(Map<String,String> args) {
super(args);
- tailored = new HashMap<Integer,String>();
+ tailored = new HashMap<>();
String rulefilesArg = get(args, RULEFILES);
if (rulefilesArg != null) {
List<String> scriptAndResourcePaths = splitFileNames(rulefilesArg);


@ -32,7 +32,7 @@ public class TestICUTransformFilterFactory extends BaseTokenStreamTestCase {
/** ensure the transform is working */
public void test() throws Exception {
Reader reader = new StringReader("簡化字");
- Map<String,String> args = new HashMap<String,String>();
+ Map<String,String> args = new HashMap<>();
args.put("id", "Traditional-Simplified");
ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args);
TokenStream stream = whitespaceMockTokenizer(reader);
@ -44,7 +44,7 @@ public class TestICUTransformFilterFactory extends BaseTokenStreamTestCase {
public void testForwardDirection() throws Exception {
// forward
Reader reader = new StringReader("Российская Федерация");
- Map<String,String> args = new HashMap<String,String>();
+ Map<String,String> args = new HashMap<>();
args.put("id", "Cyrillic-Latin");
ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args);
TokenStream stream = whitespaceMockTokenizer(reader);
@ -55,7 +55,7 @@ public class TestICUTransformFilterFactory extends BaseTokenStreamTestCase {
public void testReverseDirection() throws Exception {
// backward (invokes Latin-Cyrillic)
Reader reader = new StringReader("Rossijskaâ Federaciâ");
- Map<String,String> args = new HashMap<String,String>();
+ Map<String,String> args = new HashMap<>();
args.put("id", "Cyrillic-Latin");
args.put("direction", "reverse");
ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args);


@ -44,7 +44,7 @@ public class TestICUTokenizerFactory extends BaseTokenStreamTestCase {
// U+201C LEFT DOUBLE QUOTATION MARK; U+201D RIGHT DOUBLE QUOTATION MARK
Reader reader = new StringReader
(" Don't,break.at?/(punct)! \u201Cnice\u201D\r\n\r\n85_At:all; `really\" +2=3$5,&813 !@#%$^)(*@#$ ");
- final Map<String,String> args = new HashMap<String,String>();
+ final Map<String,String> args = new HashMap<>();
args.put(ICUTokenizerFactory.RULEFILES, "Latn:Latin-break-only-on-whitespace.rbbi");
ICUTokenizerFactory factory = new ICUTokenizerFactory(args);
factory.inform(new ClasspathResourceLoader(this.getClass()));
@ -58,7 +58,7 @@ public class TestICUTokenizerFactory extends BaseTokenStreamTestCase {
public void testTokenizeLatinDontBreakOnHyphens() throws Exception {
Reader reader = new StringReader
("One-two punch. Brang-, not brung-it. This one--not that one--is the right one, -ish.");
- final Map<String,String> args = new HashMap<String,String>();
+ final Map<String,String> args = new HashMap<>();
args.put(ICUTokenizerFactory.RULEFILES, "Latn:Latin-dont-break-on-hyphens.rbbi");
ICUTokenizerFactory factory = new ICUTokenizerFactory(args);
factory.inform(new ClasspathResourceLoader(getClass()));
@ -78,7 +78,7 @@ public class TestICUTokenizerFactory extends BaseTokenStreamTestCase {
public void testKeywordTokenizeCyrillicAndThai() throws Exception {
Reader reader = new StringReader
("Some English. Немного русский. ข้อความภาษาไทยเล็ก ๆ น้อย ๆ More English.");
- final Map<String,String> args = new HashMap<String,String>();
+ final Map<String,String> args = new HashMap<>();
args.put(ICUTokenizerFactory.RULEFILES, "Cyrl:KeywordTokenizer.rbbi,Thai:KeywordTokenizer.rbbi");
ICUTokenizerFactory factory = new ICUTokenizerFactory(args);
factory.inform(new ClasspathResourceLoader(getClass()));


@ -76,7 +76,7 @@ public class GenerateHTMLStripCharFilterSupplementaryMacros {
System.out.println("\t []");
}
- HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<Character,UnicodeSet>();
+ HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<>();
for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) {
char utf16[] = Character.toChars(it.codepoint);
UnicodeSet trails = utf16ByLead.get(utf16[0]);
@ -87,7 +87,7 @@ public class GenerateHTMLStripCharFilterSupplementaryMacros {
trails.add(utf16[1]);
}
- Map<String,UnicodeSet> utf16ByTrail = new HashMap<String,UnicodeSet>();
+ Map<String,UnicodeSet> utf16ByTrail = new HashMap<>();
for (Map.Entry<Character,UnicodeSet> entry : utf16ByLead.entrySet()) {
String trail = entry.getValue().getRegexEquivalent();
UnicodeSet leads = utf16ByTrail.get(trail);


@ -95,7 +95,7 @@ public class GenerateJFlexSupplementaryMacros {
System.out.println("\t []");
}
- HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<Character,UnicodeSet>();
+ HashMap<Character,UnicodeSet> utf16ByLead = new HashMap<>();
for (UnicodeSetIterator it = new UnicodeSetIterator(set); it.next();) {
char utf16[] = Character.toChars(it.codepoint);
UnicodeSet trails = utf16ByLead.get(utf16[0]);


@ -188,7 +188,7 @@ public class GenerateUTR30DataFiles {
if (matcher.matches()) {
final String leftHandSide = matcher.group(1);
final String rightHandSide = matcher.group(2).trim();
- List<String> diacritics = new ArrayList<String>();
+ List<String> diacritics = new ArrayList<>();
for (String outputCodePoint : rightHandSide.split("\\s+")) {
int ch = Integer.parseInt(outputCodePoint, 16);
if (UCharacter.hasBinaryProperty(ch, UProperty.DIACRITIC)


@ -48,7 +48,7 @@ public class GraphvizFormatter {
public GraphvizFormatter(ConnectionCosts costs) {
this.costs = costs;
- this.bestPathMap = new HashMap<String, String>();
+ this.bestPathMap = new HashMap<>();
sb.append(formatHeader());
sb.append(" init [style=invis]\n");
sb.append(" init -> 0.0 [label=\"" + BOS_LABEL + "\"]\n");


@ -73,7 +73,7 @@ public class JapaneseAnalyzer extends StopwordAnalyzerBase {
try {
DEFAULT_STOP_SET = loadStopwordSet(true, JapaneseAnalyzer.class, "stopwords.txt", "#"); // ignore case
final CharArraySet tagset = loadStopwordSet(false, JapaneseAnalyzer.class, "stoptags.txt", "#");
- DEFAULT_STOP_TAGS = new HashSet<String>();
+ DEFAULT_STOP_TAGS = new HashSet<>();
for (Object element : tagset) {
char chars[] = (char[]) element;
DEFAULT_STOP_TAGS.add(new String(chars));


@ -58,7 +58,7 @@ public class JapanesePartOfSpeechStopFilterFactory extends TokenFilterFactory im
stopTags = null;
CharArraySet cas = getWordSet(loader, stopTagFiles, false);
if (cas != null) {
- stopTags = new HashSet<String>();
+ stopTags = new HashSet<>();
for (Object element : cas) {
char chars[] = (char[]) element;
stopTags.add(new String(chars));


@ -132,7 +132,7 @@ public final class JapaneseTokenizer extends Tokenizer {
private static final int MAX_UNKNOWN_WORD_LENGTH = 1024;
private static final int MAX_BACKTRACE_GAP = 1024;
- private final EnumMap<Type, Dictionary> dictionaryMap = new EnumMap<Type, Dictionary>(Type.class);
+ private final EnumMap<Type, Dictionary> dictionaryMap = new EnumMap<>(Type.class);
private final TokenInfoFST fst;
private final TokenInfoDictionary dictionary;
@ -141,7 +141,7 @@ public final class JapaneseTokenizer extends Tokenizer {
private final UserDictionary userDictionary;
private final CharacterDefinition characterDefinition;
private final FST.Arc<Long> arc = new FST.Arc<Long>();
private final FST.Arc<Long> arc = new FST.Arc<>();
private final FST.BytesReader fstReader;
private final IntsRef wordIdRef = new IntsRef();
@ -174,7 +174,7 @@ public final class JapaneseTokenizer extends Tokenizer {
private int pos;
// Already parsed, but not yet passed to caller, tokens:
private final List<Token> pending = new ArrayList<Token>();
private final List<Token> pending = new ArrayList<>();
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
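
The dictionaryMap hunk above shows that the diamond also cooperates with constructors taking ordinary value arguments: EnumMap still receives its key class at runtime while the type parameters are inferred. A small self-contained sketch (the enum and the values are invented for illustration):

import java.util.EnumMap;

public class EnumMapDiamondDemo {
  enum Type { KNOWN, UNKNOWN, USER }

  public static void main(String[] args) {
    // Type.class is a runtime argument (EnumMap sizes its internal
    // array from it); <Type, String> is inferred from the declaration.
    EnumMap<Type, String> dictionaryMap = new EnumMap<>(Type.class);
    dictionaryMap.put(Type.USER, "user dictionary");
    System.out.println(dictionaryMap.get(Type.USER));
  }
}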

@ -44,7 +44,7 @@ public final class TokenInfoDictionary extends BinaryDictionary {
try {
is = getResource(FST_FILENAME_SUFFIX);
is = new BufferedInputStream(is);
fst = new FST<Long>(new InputStreamDataInput(is), PositiveIntOutputs.getSingleton());
fst = new FST<>(new InputStreamDataInput(is), PositiveIntOutputs.getSingleton());
} catch (IOException ioe) {
priorE = ioe;
} finally {

@ -51,9 +51,9 @@ public final class TokenInfoFST {
@SuppressWarnings({"rawtypes","unchecked"})
private FST.Arc<Long>[] cacheRootArcs() throws IOException {
FST.Arc<Long> rootCache[] = new FST.Arc[1+(cacheCeiling-0x3040)];
FST.Arc<Long> firstArc = new FST.Arc<Long>();
FST.Arc<Long> firstArc = new FST.Arc<>();
fst.getFirstArc(firstArc);
FST.Arc<Long> arc = new FST.Arc<Long>();
FST.Arc<Long> arc = new FST.Arc<>();
final FST.BytesReader fstReader = fst.getBytesReader();
// TODO: jump to 3040, readNextRealArc to ceiling? (just be careful we don't add bugs)
for (int i = 0; i < rootCache.length; i++) {
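
The cacheRootArcs hunk is a useful contrast: the raw new FST.Arc[...] and its @SuppressWarnings({"rawtypes","unchecked"}) are untouched, because Java forbids generic array creation and the diamond offers no help there. A sketch of the same workaround with plain JDK types:

import java.util.ArrayList;
import java.util.List;

public class GenericArrayDemo {
  @SuppressWarnings({"rawtypes", "unchecked"})
  public static void main(String[] args) {
    // "new List<String>[4]" and "new List<>[4]" are both illegal, so the
    // usual workaround is a raw-typed array plus an unchecked conversion:
    List<String>[] buckets = new List[4];
    for (int i = 0; i < buckets.length; i++) {
      buckets[i] = new ArrayList<>();  // diamond is fine for the elements
    }
    buckets[0].add("hello");
    System.out.println(buckets[0]);  // [hello]
  }
}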

@ -60,7 +60,7 @@ public final class UserDictionary implements Dictionary {
BufferedReader br = new BufferedReader(reader);
String line = null;
int wordId = CUSTOM_DICTIONARY_WORD_ID_OFFSET;
List<String[]> featureEntries = new ArrayList<String[]>();
List<String[]> featureEntries = new ArrayList<>();
// text, segmentation, readings, POS
while ((line = br.readLine()) != null) {
@ -85,11 +85,11 @@ public final class UserDictionary implements Dictionary {
}
});
List<String> data = new ArrayList<String>(featureEntries.size());
List<int[]> segmentations = new ArrayList<int[]>(featureEntries.size());
List<String> data = new ArrayList<>(featureEntries.size());
List<int[]> segmentations = new ArrayList<>(featureEntries.size());
PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton();
Builder<Long> fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, fstOutput);
Builder<Long> fstBuilder = new Builder<>(FST.INPUT_TYPE.BYTE2, fstOutput);
IntsRef scratch = new IntsRef();
long ord = 0;
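
The `});` near the top of this hunk closes what is presumably an anonymous Comparator over featureEntries, and it keeps its explicit type argument: until Java 9 the diamond could not be used with anonymous inner classes, so such sites are deliberately left alone by this commit. A standalone illustration (the comparator body is invented):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class AnonymousClassDemo {
  public static void main(String[] args) {
    List<String[]> entries = new ArrayList<>();       // diamond: fine
    entries.add(new String[] {"b"});
    entries.add(new String[] {"a"});

    // An anonymous class must spell out its type argument under Java 7/8;
    // "new Comparator<>() { ... }" only became legal in Java 9.
    Collections.sort(entries, new Comparator<String[]>() {
      @Override
      public int compare(String[] left, String[] right) {
        return left[0].compareTo(right[0]);
      }
    });
    System.out.println(entries.get(0)[0]);  // a
  }
}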
@ -136,12 +136,12 @@ public final class UserDictionary implements Dictionary {
*/
public int[][] lookup(char[] chars, int off, int len) throws IOException {
// TODO: can we avoid this treemap/toIndexArray?
TreeMap<Integer, int[]> result = new TreeMap<Integer, int[]>(); // index, [length, length...]
TreeMap<Integer, int[]> result = new TreeMap<>(); // index, [length, length...]
boolean found = false; // true if we found any results
final FST.BytesReader fstReader = fst.getBytesReader();
FST.Arc<Long> arc = new FST.Arc<Long>();
FST.Arc<Long> arc = new FST.Arc<>();
int end = off + len;
for (int startOffset = off; startOffset < end; startOffset++) {
arc = fst.getFirstArc(arc);
@ -175,7 +175,7 @@ public final class UserDictionary implements Dictionary {
* @return array of {wordId, index, length}
*/
private int[][] toIndexArray(Map<Integer, int[]> input) {
ArrayList<int[]> result = new ArrayList<int[]>();
ArrayList<int[]> result = new ArrayList<>();
for (int i : input.keySet()) {
int[] wordIdAndLength = input.get(i);
int wordId = wordIdAndLength[0];

@ -42,7 +42,7 @@ public final class CSVUtil {
*/
public static String[] parse(String line) {
boolean insideQuote = false;
ArrayList<String> result = new ArrayList<String>();
ArrayList<String> result = new ArrayList<>();
int quoteCount = 0;
StringBuilder sb = new StringBuilder();
for(int i = 0; i < line.length(); i++) {
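
For context on the insideQuote and quoteCount state visible in this hunk: CSVUtil.parse is a hand-rolled splitter in which a comma inside double quotes must not end a field. A much-simplified sketch of that idea (not the actual CSVUtil logic, which also handles escaped quotes):

import java.util.ArrayList;
import java.util.List;

public class QuoteAwareSplit {
  static List<String> split(String line) {
    List<String> result = new ArrayList<>();
    StringBuilder sb = new StringBuilder();
    boolean insideQuote = false;
    for (int i = 0; i < line.length(); i++) {
      char c = line.charAt(i);
      if (c == '"') {
        insideQuote = !insideQuote;   // toggle on every quote character
      } else if (c == ',' && !insideQuote) {
        result.add(sb.toString());    // an unquoted comma ends the field
        sb.setLength(0);
      } else {
        sb.append(c);
      }
    }
    result.add(sb.toString());        // the last field has no trailing comma
    return result;
  }

  public static void main(String[] args) {
    System.out.println(split("a,\"b,c\",d"));  // [a, b,c, d]
  }
}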

@ -26,7 +26,7 @@ import java.util.HashMap;
*/
public class ToStringUtil {
// a translation map for parts of speech, only used for reflectWith
private static final HashMap<String,String> posTranslations = new HashMap<String,String>();
private static final HashMap<String,String> posTranslations = new HashMap<>();
static {
posTranslations.put("名詞", "noun");
posTranslations.put("名詞-一般", "noun-common");
@ -127,7 +127,7 @@ public class ToStringUtil {
}
// a translation map for inflection types, only used for reflectWith
private static final HashMap<String,String> inflTypeTranslations = new HashMap<String,String>();
private static final HashMap<String,String> inflTypeTranslations = new HashMap<>();
static {
inflTypeTranslations.put("*", "*");
inflTypeTranslations.put("形容詞・アウオ段", "adj-group-a-o-u");
@ -197,7 +197,7 @@ public class ToStringUtil {
}
// a translation map for inflection forms, only used for reflectWith
private static final HashMap<String,String> inflFormTranslations = new HashMap<String,String>();
private static final HashMap<String,String> inflFormTranslations = new HashMap<>();
static {
inflFormTranslations.put("*", "*");
inflFormTranslations.put("基本形", "base");

@ -59,7 +59,7 @@ public class TestJapaneseIterationMarkCharFilterFactory extends BaseTokenStreamT
JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(new HashMap<String,String>());
tokenizerFactory.inform(new StringMockResourceLoader(""));
Map<String, String> filterArgs = new HashMap<String, String>();
Map<String, String> filterArgs = new HashMap<>();
filterArgs.put("normalizeKanji", "true");
filterArgs.put("normalizeKana", "false");
JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(filterArgs);
@ -76,7 +76,7 @@ public class TestJapaneseIterationMarkCharFilterFactory extends BaseTokenStreamT
JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(new HashMap<String,String>());
tokenizerFactory.inform(new StringMockResourceLoader(""));
Map<String, String> filterArgs = new HashMap<String, String>();
Map<String, String> filterArgs = new HashMap<>();
filterArgs.put("normalizeKanji", "false");
filterArgs.put("normalizeKana", "true");
JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(filterArgs);

@ -39,7 +39,7 @@ public class TestJapanesePartOfSpeechStopFilterFactory extends BaseTokenStreamTe
tokenizerFactory.inform(new StringMockResourceLoader(""));
TokenStream ts = tokenizerFactory.create();
((Tokenizer)ts).setReader(new StringReader("私は制限スピードを超える。"));
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
args.put("tags", "stoptags.txt");
JapanesePartOfSpeechStopFilterFactory factory = new JapanesePartOfSpeechStopFilterFactory(args);

@ -60,7 +60,7 @@ public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase {
* Test mode parameter: specifying normal mode
*/
public void testMode() throws IOException {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("mode", "normal");
JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args);
factory.inform(new StringMockResourceLoader(""));
@ -81,7 +81,7 @@ public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase {
"関西国際空港,関西 国際 空港,カンサイ コクサイ クウコウ,テスト名詞\n" +
"# Custom reading for sumo wrestler\n" +
"朝青龍,朝青龍,アサショウリュウ,カスタム人名\n";
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("userDictionary", "userdict.txt");
JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args);
factory.inform(new StringMockResourceLoader(userDict));
@ -96,7 +96,7 @@ public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase {
* Test preserving punctuation
*/
public void testPreservePunctuation() throws IOException {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("discardPunctuation", "false");
JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args);
factory.inform(new StringMockResourceLoader(""));

@ -37,7 +37,7 @@ public class TestTokenInfoDictionary extends LuceneTestCase {
TokenInfoDictionary tid = TokenInfoDictionary.getInstance();
ConnectionCosts matrix = ConnectionCosts.getInstance();
FST<Long> fst = tid.getFST().getInternalFST();
IntsRefFSTEnum<Long> fstEnum = new IntsRefFSTEnum<Long>(fst);
IntsRefFSTEnum<Long> fstEnum = new IntsRefFSTEnum<>(fst);
InputOutput<Long> mapping;
IntsRef scratch = new IntsRef();
while ((mapping = fstEnum.next()) != null) {

@ -40,7 +40,7 @@ public abstract class BinaryDictionaryWriter {
private int targetMapEndOffset = 0, lastWordId = -1, lastSourceId = -1;
private int[] targetMap = new int[8192];
private int[] targetMapOffsets = new int[8192];
private final ArrayList<String> posDict = new ArrayList<String>();
private final ArrayList<String> posDict = new ArrayList<>();
public BinaryDictionaryWriter(Class<? extends BinaryDictionary> implClazz, int size) {
this.implClazz = implClazz;

@ -69,7 +69,7 @@ public class TokenInfoDictionaryBuilder {
return name.endsWith(".csv");
}
};
ArrayList<File> csvFiles = new ArrayList<File>();
ArrayList<File> csvFiles = new ArrayList<>();
for (File file : new File(dirname).listFiles(filter)) {
csvFiles.add(file);
}
@ -82,7 +82,7 @@ public class TokenInfoDictionaryBuilder {
// all lines in the file
System.out.println(" parse...");
List<String[]> lines = new ArrayList<String[]>(400000);
List<String[]> lines = new ArrayList<>(400000);
for (File file : csvFiles){
FileInputStream inputStream = new FileInputStream(file);
Charset cs = Charset.forName(encoding);
@ -132,7 +132,7 @@ public class TokenInfoDictionaryBuilder {
System.out.println(" encode...");
PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton();
Builder<Long> fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true, PackedInts.DEFAULT, true, 15);
Builder<Long> fstBuilder = new Builder<>(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true, PackedInts.DEFAULT, true, 15);
IntsRef scratch = new IntsRef();
long ord = -1; // first ord will be 0
String lastValue = null;
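
The surrounding loop (FileInputStream plus Charset.forName(encoding)) reads every dictionary CSV into memory before the FST is encoded, which is why the list is presized to 400000. A hedged sketch of that loading step using plain JDK I/O; the file name and the EUC-JP encoding are assumptions for illustration, not taken from the commit:

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;

public class CsvLoader {
  static List<String> readLines(String path, String encoding) throws IOException {
    List<String> lines = new ArrayList<>(400000);  // presize: the dictionary is large
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(new FileInputStream(path), Charset.forName(encoding)))) {
      String line;
      while ((line = reader.readLine()) != null) {
        lines.add(line);
      }
    }
    return lines;
  }

  public static void main(String[] args) throws IOException {
    // Path and encoding are illustrative; IPADIC sources are commonly EUC-JP.
    System.out.println(readLines("Noun.csv", "EUC-JP").size());
  }
}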

@ -66,7 +66,7 @@ public class UnknownDictionaryBuilder {
dictionary.put(CSVUtil.parse(NGRAM_DICTIONARY_ENTRY));
List<String[]> lines = new ArrayList<String[]>();
List<String[]> lines = new ArrayList<>();
String line = null;
while ((line = lineReader.readLine()) != null) {
// note: unk.def only has 10 fields, it simplifies the writer to just append empty reading and pronunciation,

@ -56,7 +56,7 @@ public class MorfologikFilter extends TokenFilter {
private final IStemmer stemmer;
private List<WordData> lemmaList;
private final ArrayList<StringBuilder> tagsList = new ArrayList<StringBuilder>();
private final ArrayList<StringBuilder> tagsList = new ArrayList<>();
private int lemmaListIndex;

@ -82,7 +82,7 @@ public class MorphosyntacticTagsAttributeImpl extends AttributeImpl
public void copyTo(AttributeImpl target) {
List<StringBuilder> cloned = null;
if (tags != null) {
cloned = new ArrayList<StringBuilder>(tags.size());
cloned = new ArrayList<>(tags.size());
for (StringBuilder b : tags) {
cloned.add(new StringBuilder(b));
}
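
The copyTo hunk clones each StringBuilder instead of copying the list reference, because the builders are mutable and the source and target attribute instances must not share state. A standalone demonstration of the difference:

import java.util.ArrayList;
import java.util.List;

public class DeepCopyDemo {
  public static void main(String[] args) {
    List<StringBuilder> tags = new ArrayList<>();
    tags.add(new StringBuilder("subst"));

    // Copy the list AND clone each element, as copyTo does above:
    List<StringBuilder> cloned = new ArrayList<>(tags.size());
    for (StringBuilder b : tags) {
      cloned.add(new StringBuilder(b));
    }

    cloned.get(0).append("!");          // mutate only the clone
    System.out.println(tags.get(0));    // subst   (original unchanged)
    System.out.println(cloned.get(0));  // subst!
  }
}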

@ -124,8 +124,8 @@ public class TestMorfologikAnalyzer extends BaseTokenStreamTestCase {
ts.incrementToken();
assertEquals(term, ts.getAttribute(CharTermAttribute.class).toString());
TreeSet<String> actual = new TreeSet<String>();
TreeSet<String> expected = new TreeSet<String>();
TreeSet<String> actual = new TreeSet<>();
TreeSet<String> expected = new TreeSet<>();
for (StringBuilder b : ts.getAttribute(MorphosyntacticTagsAttribute.class).getTags()) {
actual.add(b.toString());
}

@ -32,7 +32,7 @@ public final class DoubleMetaphoneFilter extends TokenFilter {
private static final String TOKEN_TYPE = "DoubleMetaphone";
private final LinkedList<State> remainingTokens = new LinkedList<State>();
private final LinkedList<State> remainingTokens = new LinkedList<>();
private final DoubleMetaphone encoder = new DoubleMetaphone();
private final boolean inject;
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

@ -73,7 +73,7 @@ public class PhoneticFilterFactory extends TokenFilterFactory implements Resourc
private static final String PACKAGE_CONTAINING_ENCODERS = "org.apache.commons.codec.language.";
//Effectively constants; uppercase keys
private static final Map<String, Class<? extends Encoder>> registry = new HashMap<String, Class<? extends Encoder>>(6);
private static final Map<String, Class<? extends Encoder>> registry = new HashMap<>(6);
static {
registry.put("DoubleMetaphone".toUpperCase(Locale.ROOT), DoubleMetaphone.class);

@ -38,7 +38,7 @@ public class TestBeiderMorseFilterFactory extends BaseTokenStreamTestCase {
}
public void testLanguageSet() throws Exception {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("languageSet", "polish");
BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(args);
TokenStream ts = factory.create(whitespaceMockTokenizer("Weinberg"));
@ -50,7 +50,7 @@ public class TestBeiderMorseFilterFactory extends BaseTokenStreamTestCase {
}
public void testOptions() throws Exception {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("nameType", "ASHKENAZI");
args.put("ruleType", "EXACT");
BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(args);

@ -39,7 +39,7 @@ public class TestDoubleMetaphoneFilterFactory extends BaseTokenStreamTestCase {
}
public void testSettingSizeAndInject() throws Exception {
Map<String,String> parameters = new HashMap<String,String>();
Map<String,String> parameters = new HashMap<>();
parameters.put("inject", "false");
parameters.put("maxCodeLength", "8");
DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(parameters);

@ -36,7 +36,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
* Case: default
*/
public void testFactoryDefaults() throws IOException {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put(PhoneticFilterFactory.ENCODER, "Metaphone");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
factory.inform(new ClasspathResourceLoader(factory.getClass()));
@ -45,7 +45,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
}
public void testInjectFalse() throws IOException {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put(PhoneticFilterFactory.ENCODER, "Metaphone");
args.put(PhoneticFilterFactory.INJECT, "false");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
@ -54,7 +54,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
}
public void testMaxCodeLength() throws IOException {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put(PhoneticFilterFactory.ENCODER, "Metaphone");
args.put(PhoneticFilterFactory.MAX_CODE_LENGTH, "2");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
@ -76,7 +76,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
public void testUnknownEncoder() throws IOException {
try {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("encoder", "XXX");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
factory.inform(new ClasspathResourceLoader(factory.getClass()));
@ -88,7 +88,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
public void testUnknownEncoderReflection() throws IOException {
try {
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("encoder", "org.apache.commons.codec.language.NonExistence");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
factory.inform(new ClasspathResourceLoader(factory.getClass()));
@ -102,7 +102,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
* Case: Reflection
*/
public void testFactoryReflection() throws IOException {
Map<String,String> args = new HashMap<String, String>();
Map<String,String> args = new HashMap<>();
args.put(PhoneticFilterFactory.ENCODER, "org.apache.commons.codec.language.Metaphone");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
factory.inform(new ClasspathResourceLoader(factory.getClass()));
@ -115,7 +115,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
* so this effectively tests reflection without package name
*/
public void testFactoryReflectionCaverphone2() throws IOException {
Map<String,String> args = new HashMap<String, String>();
Map<String,String> args = new HashMap<>();
args.put(PhoneticFilterFactory.ENCODER, "Caverphone2");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
factory.inform(new ClasspathResourceLoader(factory.getClass()));
@ -124,7 +124,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
}
public void testFactoryReflectionCaverphone() throws IOException {
Map<String,String> args = new HashMap<String, String>();
Map<String,String> args = new HashMap<>();
args.put(PhoneticFilterFactory.ENCODER, "Caverphone");
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);
factory.inform(new ClasspathResourceLoader(factory.getClass()));
@ -182,7 +182,7 @@ public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase {
static void assertAlgorithm(String algName, String inject, String input,
String[] expected) throws Exception {
Tokenizer tokenizer = whitespaceMockTokenizer(input);
Map<String,String> args = new HashMap<String,String>();
Map<String,String> args = new HashMap<>();
args.put("encoder", algName);
args.put("inject", inject);
PhoneticFilterFactory factory = new PhoneticFilterFactory(args);

@ -34,7 +34,7 @@ import org.apache.lucene.analysis.cn.smart.Utility;
*/
class BiSegGraph {
private Map<Integer,ArrayList<SegTokenPair>> tokenPairListTable = new HashMap<Integer,ArrayList<SegTokenPair>>();
private Map<Integer,ArrayList<SegTokenPair>> tokenPairListTable = new HashMap<>();
private List<SegToken> segTokenList;
@ -144,7 +144,7 @@ class BiSegGraph {
public void addSegTokenPair(SegTokenPair tokenPair) {
int to = tokenPair.to;
if (!isToExist(to)) {
ArrayList<SegTokenPair> newlist = new ArrayList<SegTokenPair>();
ArrayList<SegTokenPair> newlist = new ArrayList<>();
newlist.add(tokenPair);
tokenPairListTable.put(to, newlist);
} else {
@ -168,7 +168,7 @@ class BiSegGraph {
public List<SegToken> getShortPath() {
int current;
int nodeCount = getToCount();
List<PathNode> path = new ArrayList<PathNode>();
List<PathNode> path = new ArrayList<>();
PathNode zeroPath = new PathNode();
zeroPath.weight = 0;
zeroPath.preNode = 0;
@ -197,8 +197,8 @@ class BiSegGraph {
int preNode, lastNode;
lastNode = path.size() - 1;
current = lastNode;
List<Integer> rpath = new ArrayList<Integer>();
List<SegToken> resultPath = new ArrayList<SegToken>();
List<Integer> rpath = new ArrayList<>();
List<SegToken> resultPath = new ArrayList<>();
rpath.add(current);
while (current != 0) {
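
The addSegTokenPair hunk above uses the classic get-or-create pattern for a map of lists, and the diamond shortens both creation sites. A self-contained sketch of the pattern with plain JDK types, written in the Java 7 style the codebase targeted at the time:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MultiMapDemo {
  static void addValue(Map<Integer, List<String>> table, int key, String value) {
    List<String> list = table.get(key);
    if (list == null) {
      list = new ArrayList<>();  // create the bucket on first use
      table.put(key, list);
    }
    list.add(value);
  }

  public static void main(String[] args) {
    Map<Integer, List<String>> table = new HashMap<>();
    addValue(table, 1, "first");
    addValue(table, 1, "second");
    System.out.println(table.get(1));  // [first, second]
  }
}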

@ -34,7 +34,7 @@ class SegGraph {
/**
* Map of start offsets to ArrayList of tokens at that position
*/
private Map<Integer,ArrayList<SegToken>> tokenListTable = new HashMap<Integer,ArrayList<SegToken>>();
private Map<Integer,ArrayList<SegToken>> tokenListTable = new HashMap<>();
private int maxStart = -1;
@ -72,7 +72,7 @@ class SegGraph {
* @return a {@link List} of these ordered tokens.
*/
public List<SegToken> makeIndex() {
List<SegToken> result = new ArrayList<SegToken>();
List<SegToken> result = new ArrayList<>();
int s = -1, count = 0, size = tokenListTable.size();
List<SegToken> tokenList;
int index = 0;
@ -98,7 +98,7 @@ class SegGraph {
public void addToken(SegToken token) {
int s = token.startOffset;
if (!isStartExist(s)) {
ArrayList<SegToken> newlist = new ArrayList<SegToken>();
ArrayList<SegToken> newlist = new ArrayList<>();
newlist.add(token);
tokenListTable.put(s, newlist);
} else {
@ -115,7 +115,7 @@ class SegGraph {
* @return {@link List} of all tokens in the map.
*/
public List<SegToken> toTokenList() {
List<SegToken> result = new ArrayList<SegToken>();
List<SegToken> result = new ArrayList<>();
int s = -1, count = 0, size = tokenListTable.size();
List<SegToken> tokenList;

@ -78,7 +78,7 @@ public class Gener extends Reduce {
@Override
public Trie optimize(Trie orig) {
List<CharSequence> cmds = orig.cmds;
List<Row> rows = new ArrayList<Row>();
List<Row> rows = new ArrayList<>();
List<Row> orows = orig.rows;
int remap[] = new int[orows.size()];

@ -88,7 +88,7 @@ public class Lift extends Reduce {
@Override
public Trie optimize(Trie orig) {
List<CharSequence> cmds = orig.cmds;
List<Row> rows = new ArrayList<Row>();
List<Row> rows = new ArrayList<>();
List<Row> orows = orig.rows;
int remap[] = new int[orows.size()];

@ -70,7 +70,7 @@ public class MultiTrie extends Trie {
final char EOM = '*';
final String EOM_NODE = "" + EOM;
List<Trie> tries = new ArrayList<Trie>();
List<Trie> tries = new ArrayList<>();
int BY = 1;
@ -186,7 +186,7 @@ public class MultiTrie extends Trie {
*/
@Override
public Trie reduce(Reduce by) {
List<Trie> h = new ArrayList<Trie>();
List<Trie> h = new ArrayList<>();
for (Trie trie : tries)
h.add(trie.reduce(by));

@ -277,7 +277,7 @@ public class MultiTrie2 extends MultiTrie {
*/
@Override
public Trie reduce(Reduce by) {
List<Trie> h = new ArrayList<Trie>();
List<Trie> h = new ArrayList<>();
for (Trie trie : tries)
h.add(trie.reduce(by));

@ -81,7 +81,7 @@ public class Optimizer extends Reduce {
@Override
public Trie optimize(Trie orig) {
List<CharSequence> cmds = orig.cmds;
List<Row> rows = new ArrayList<Row>();
List<Row> rows = new ArrayList<>();
List<Row> orows = orig.rows;
int remap[] = new int[orows.size()];

Some files were not shown because too many files have changed in this diff.