Remove a few more Xlint skips

This commit is contained in:
Nik Everett 2016-01-06 23:26:57 -05:00
parent 1325c1442b
commit 0786c506dc
13 changed files with 54 additions and 60 deletions

View File

@ -35,6 +35,3 @@ dependencyLicenses {
mapping from: /asm-.*/, to: 'asm'
}
compileJava.options.compilerArgs << '-Xlint:-rawtypes'
compileTestJava.options.compilerArgs << '-Xlint:-rawtypes'

View File

@ -19,6 +19,10 @@
package org.elasticsearch.script.expression;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
@ -26,10 +30,6 @@ import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
/**
* A ValueSource to create FunctionValues to get the count of the number of values in a field for a document.
*/
@ -43,6 +43,7 @@ public class CountMethodValueSource extends ValueSource {
}
@Override
@SuppressWarnings("rawtypes") // ValueSource uses a rawtype
public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException {
AtomicFieldData leafData = fieldData.load(leaf);
assert(leafData instanceof AtomicNumericFieldData);

View File

@ -19,6 +19,10 @@
package org.elasticsearch.script.expression;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.elasticsearch.index.fielddata.AtomicFieldData;
@ -26,10 +30,6 @@ import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.search.MultiValueMode;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
class DateMethodValueSource extends FieldDataValueSource {
protected final String methodName;
@ -45,6 +45,7 @@ class DateMethodValueSource extends FieldDataValueSource {
}
@Override
@SuppressWarnings("rawtypes") // ValueSource uses a rawtype
public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException {
AtomicFieldData leafData = fieldData.load(leaf);
assert(leafData instanceof AtomicNumericFieldData);

View File

@ -19,6 +19,10 @@
package org.elasticsearch.script.expression;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
@ -27,10 +31,6 @@ import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.search.MultiValueMode;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
/**
* A {@link ValueSource} wrapper for field data.
*/
@ -67,6 +67,7 @@ class FieldDataValueSource extends ValueSource {
}
@Override
@SuppressWarnings("rawtypes") // ValueSource uses a rawtype
public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException {
AtomicFieldData leafData = fieldData.load(leaf);
assert(leafData instanceof AtomicNumericFieldData);

View File

@ -19,13 +19,13 @@
package org.elasticsearch.script.expression;
import java.io.IOException;
import java.util.Map;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import java.io.IOException;
import java.util.Map;
/**
* A {@link ValueSource} which has a stub {@link FunctionValues} that holds a dynamically replaceable constant double.
*/
@ -37,6 +37,7 @@ class ReplaceableConstValueSource extends ValueSource {
}
@Override
@SuppressWarnings("rawtypes") // ValueSource uses a rawtype
public FunctionValues getValues(Map map, LeafReaderContext atomicReaderContext) throws IOException {
return fv;
}

View File

@ -19,6 +19,12 @@
package org.elasticsearch.script.expression;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.lucene.expressions.Expression;
import org.apache.lucene.expressions.js.JavascriptCompiler;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
@ -47,12 +53,6 @@ import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.bucketScript;
@ -121,7 +121,7 @@ public class MoreExpressionTests extends ESIntegTestCase {
client().prepareIndex("test", "doc", "1").setSource("text", "hello goodbye"),
client().prepareIndex("test", "doc", "2").setSource("text", "hello hello hello goodbye"),
client().prepareIndex("test", "doc", "3").setSource("text", "hello hello goodebye"));
ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction(new Script("1 / _score", ScriptType.INLINE, "expression", null));
ScoreFunctionBuilder<?> score = ScoreFunctionBuilders.scriptFunction(new Script("1 / _score", ScriptType.INLINE, "expression", null));
SearchRequestBuilder req = client().prepareSearch().setIndices("test");
req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE));
req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent

View File

@ -30,6 +30,3 @@ dependencies {
dependencyLicenses {
mapping from: /lucene-.*/, to: 'lucene'
}
compileJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked"

View File

@ -19,6 +19,9 @@
package org.elasticsearch.index.analysis;
import java.util.Arrays;
import java.util.HashSet;
import org.apache.commons.codec.Encoder;
import org.apache.commons.codec.language.Caverphone1;
import org.apache.commons.codec.language.Caverphone2;
@ -43,9 +46,6 @@ import org.elasticsearch.index.analysis.phonetic.HaasePhonetik;
import org.elasticsearch.index.analysis.phonetic.KoelnerPhonetik;
import org.elasticsearch.index.analysis.phonetic.Nysiis;
import java.util.Arrays;
import java.util.HashSet;
/**
*
*/
@ -122,7 +122,7 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory {
if (encoder == null) {
if (ruletype != null && nametype != null) {
if (languageset != null) {
final LanguageSet languages = LanguageSet.from(new HashSet(Arrays.asList(languageset)));
final LanguageSet languages = LanguageSet.from(new HashSet<>(Arrays.asList(languageset)));
return new BeiderMorseFilter(tokenStream, new PhoneticEngine(nametype, ruletype, true), languages);
}
return new BeiderMorseFilter(tokenStream, new PhoneticEngine(nametype, ruletype, true));

View File

@ -19,9 +19,6 @@
package org.elasticsearch.index.analysis.phonetic;
import org.apache.commons.codec.EncoderException;
import org.apache.commons.codec.StringEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
@ -31,6 +28,9 @@ import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.codec.EncoderException;
import org.apache.commons.codec.StringEncoder;
/**
* K&ouml;lner Phonetik
*
@ -49,13 +49,13 @@ public class KoelnerPhonetik implements StringEncoder {
private static final String[] POSTEL_VARIATIONS_REPLACEMENTS = {"OWN", "AUN", "RW", "RB", "RSK", "WSK"};
private Pattern[] variationsPatterns;
private boolean primary = false;
private final Set<Character> csz = new HashSet(Arrays.asList(
private final Set<Character> csz = new HashSet<>(Arrays.asList(
'C', 'S', 'Z'));
private final Set<Character> ckq = new HashSet(Arrays.asList(
private final Set<Character> ckq = new HashSet<>(Arrays.asList(
'C', 'K', 'Q'));
private final Set<Character> aouhkxq = new HashSet(Arrays.asList(
private final Set<Character> aouhkxq = new HashSet<>(Arrays.asList(
'A', 'O', 'U', 'H', 'K', 'X', 'Q'));
private final Set<Character> ahkloqrux = new HashSet(Arrays.asList(
private final Set<Character> ahkloqrux = new HashSet<>(Arrays.asList(
'A', 'H', 'K', 'L', 'O', 'Q', 'R', 'U', 'X'));
/**
@ -139,10 +139,10 @@ public class KoelnerPhonetik implements StringEncoder {
private List<String> partition(String str) {
String primaryForm = str;
List<String> parts = new ArrayList();
List<String> parts = new ArrayList<>();
parts.add(primaryForm.replaceAll("[^\\p{L}\\p{N}]", ""));
if (!primary) {
List<String> tmpParts = new ArrayList();
List<String> tmpParts = new ArrayList<>();
tmpParts.addAll((Arrays.asList(str.split("[\\p{Z}\\p{C}\\p{P}]"))));
int numberOfParts = tmpParts.size();
while (tmpParts.size() > 0) {
@ -156,9 +156,9 @@ public class KoelnerPhonetik implements StringEncoder {
tmpParts.remove(0);
}
}
List<String> variations = new ArrayList();
List<String> variations = new ArrayList<>();
for (int i = 0; i < parts.size(); i++) {
List variation = getVariations(parts.get(i));
List<String> variation = getVariations(parts.get(i));
if (variation != null) {
variations.addAll(variation);
}
@ -166,9 +166,9 @@ public class KoelnerPhonetik implements StringEncoder {
return variations;
}
private List getVariations(String str) {
private List<String> getVariations(String str) {
int position = 0;
List<String> variations = new ArrayList();
List<String> variations = new ArrayList<>();
variations.add("");
while (position < str.length()) {
int i = 0;
@ -182,7 +182,7 @@ public class KoelnerPhonetik implements StringEncoder {
}
if (substPos >= position) {
i--;
List<String> varNew = new ArrayList();
List<String> varNew = new ArrayList<>();
String prevPart = str.substring(position, substPos);
for (int ii = 0; ii < variations.size(); ii++) {
String tmp = variations.get(ii);

View File

@ -25,8 +25,6 @@ esplugin {
// no unit tests
test.enabled = false
compileJava.options.compilerArgs << "-Xlint:-rawtypes"
configurations {
exampleFixture
}

View File

@ -19,6 +19,10 @@
package org.elasticsearch.plugin.example;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
@ -28,10 +32,6 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.rest.action.cat.AbstractCatAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
/**
* Example of a plugin.
*/
@ -59,6 +59,7 @@ public class JvmExamplePlugin extends Plugin {
}
@Override
@SuppressWarnings("rawtypes") // Plugin uses a rawtype
public Collection<Class<? extends LifecycleComponent>> nodeServices() {
Collection<Class<? extends LifecycleComponent>> services = new ArrayList<>();
return services;

View File

@ -21,5 +21,3 @@ esplugin {
description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.'
classname 'org.elasticsearch.plugin.mapper.MapperMurmur3Plugin'
}
compileJava.options.compilerArgs << "-Xlint:-rawtypes"

View File

@ -19,6 +19,10 @@
package org.elasticsearch.index.mapper.murmur3;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.BytesRef;
@ -35,10 +39,6 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
public class Murmur3FieldMapper extends LongFieldMapper {
@ -93,8 +93,7 @@ public class Murmur3FieldMapper extends LongFieldMapper {
public static class TypeParser implements Mapper.TypeParser {
@Override
@SuppressWarnings("unchecked")
public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder = new Builder(name);
// tweaking these settings is no longer allowed, the entire purpose of murmur3 fields is to store a hash