[7.x] Backport AggregatorTestCase.writeTestDoc() (#55318)

Christos Soulios 2020-04-16 21:10:18 +03:00 committed by GitHub
parent 4d849f0948
commit b810f0024a
1 changed file with 108 additions and 34 deletions


@@ -18,11 +18,15 @@
*/
package org.elasticsearch.search.aggregations;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.HalfFloatPoint;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.AssertingDirectoryReader;
import org.apache.lucene.index.CompositeReaderContext;
import org.apache.lucene.index.DirectoryReader;
@@ -41,12 +45,14 @@ import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.BigArrays;
@@ -54,6 +60,10 @@ import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.common.util.MockPageCacheRecycler;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalyzerScope;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache.Listener;
import org.elasticsearch.index.cache.query.DisabledQueryCache;
@@ -74,6 +84,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.ObjectMapper.Nested;
import org.elasticsearch.index.mapper.RangeFieldMapper;
@@ -105,6 +116,7 @@ import org.elasticsearch.test.InternalAggregationTestCase;
import org.junit.After;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -116,7 +128,8 @@ import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonMap;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
@@ -141,7 +154,6 @@ public abstract class AggregatorTestCase extends ESTestCase {
List<String> blacklist = new ArrayList<>();
blacklist.add(ObjectMapper.CONTENT_TYPE); // Cannot aggregate objects
blacklist.add(GeoShapeFieldMapper.CONTENT_TYPE); // Cannot aggregate geoshapes (yet)
blacklist.add(TextFieldMapper.CONTENT_TYPE); // TODO Does not support doc values, but does support FD, needs a lot of mocking
blacklist.add(ObjectMapper.NESTED_CONTENT_TYPE); // TODO support for nested
blacklist.add(CompletionFieldMapper.CONTENT_TYPE); // TODO support completion
blacklist.add(FieldAliasMapper.CONTENT_TYPE); // TODO support alias
@@ -595,6 +607,18 @@ public abstract class AggregatorTestCase extends ESTestCase {
"createAggBuilderForTypeTest() must be implemented as well.");
}
/**
* A method that allows implementors to specifically blacklist particular field types (based on their content type name).
* This is needed in some areas where the ValuesSourceType is not granular enough, for example integer values
* vs floating points, or `keyword` bytes vs `binary` bytes (which are not searchable).
*
* This is a blacklist instead of a whitelist because there are vastly more field types than ValuesSourceTypes,
* and it's expected that these unsupported cases are exceptional rather than common.
*/
protected List<String> unsupportedMappedFieldTypes() {
return Collections.emptyList();
}
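
As a usage illustration (not part of this diff), a test subclass for a numeric aggregation might combine this hook with getSupportedValuesSourceTypes() roughly as follows; the aggregator under test and the choice to exclude date fields are hypothetical:

public class MySumAggregatorTests extends AggregatorTestCase {
    @Override
    protected List<ValuesSourceType> getSupportedValuesSourceTypes() {
        // The harness will expect every NUMERIC-backed field type to work...
        return Collections.singletonList(CoreValuesSourceType.NUMERIC);
    }

    @Override
    protected List<String> unsupportedMappedFieldTypes() {
        // ...except date fields, which are numeric-backed but excluded here for illustration.
        return Collections.singletonList(DateFieldMapper.CONTENT_TYPE);
    }

    @Override
    protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) {
        return new SumAggregationBuilder("test_agg").field(fieldName);
    }
}
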
/**
* This test will validate that an aggregator succeeds or fails to run against all the field types
* that are registered in {@link IndicesModule} (e.g. all the core field types). An aggregator
@@ -604,11 +628,12 @@
*
* Exception types/messages are not currently checked, just presence/absence of an exception.
*/
public void testSupportedFieldTypes() throws IOException {
public final void testSupportedFieldTypes() throws IOException {
MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
Settings settings = Settings.builder().put("index.version.created", Version.CURRENT.id).build();
String fieldName = "typeTestFieldName";
List<ValuesSourceType> supportedVSTypes = getSupportedValuesSourceTypes();
List<String> unsupportedMappedFieldTypes = unsupportedMappedFieldTypes();
if (supportedVSTypes.isEmpty()) {
// If the test says it doesn't support any ValuesSourceTypes, it has not been converted yet, so skip
@@ -627,7 +652,11 @@
Map<String, Object> source = new HashMap<>();
source.put("type", mappedType.getKey());
source.put("doc_values", "true");
// Text is the only field type that doesn't support doc values; it relies on fielddata instead
if (mappedType.getKey().equals(TextFieldMapper.CONTENT_TYPE) == false) {
source.put("doc_values", "true");
}
Mapper.Builder builder = mappedType.getValue().parse(fieldName, source, new MockParserContext());
FieldMapper mapper = (FieldMapper) builder.build(new BuilderContext(settings, new ContentPath()));
@@ -648,15 +677,16 @@
IndexSearcher indexSearcher = newIndexSearcher(indexReader);
AggregationBuilder aggregationBuilder = createAggBuilderForTypeTest(fieldType, fieldName);
ValuesSourceType vst = fieldType.getValuesSourceType();
// TODO in the future we can make this more explicit with expectThrows(), when the exceptions are standardized
try {
searchAndReduce(indexSearcher, new MatchAllDocsQuery(), aggregationBuilder, fieldType);
if (supportedVSTypes.contains(fieldType.getValuesSourceType()) == false) {
if (supportedVSTypes.contains(vst) == false || unsupportedMappedFieldTypes.contains(fieldType.typeName())) {
fail("Aggregator [" + aggregationBuilder.getType() + "] should not support field type ["
+ fieldType.typeName() + "] but executing against the field did not throw an excetion");
+ fieldType.typeName() + "] but executing against the field did not throw an exception");
}
} catch (Exception e) {
if (supportedVSTypes.contains(fieldType.getValuesSourceType())) {
if (supportedVSTypes.contains(vst) && unsupportedMappedFieldTypes.contains(fieldType.typeName()) == false) {
fail("Aggregator [" + aggregationBuilder.getType() + "] supports field type ["
+ fieldType.typeName() + "] but executing against the field threw an exception: [" + e.getMessage() + "]");
}
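
The pass/fail bookkeeping above reduces to one predicate: a field type is expected to work if and only if its ValuesSourceType is supported and its type name is not blacklisted. Schematically (a restatement, not code from this diff):

boolean expectSuccess = supportedVSTypes.contains(vst)
        && unsupportedMappedFieldTypes.contains(fieldType.typeName()) == false;
// searchAndReduce() succeeding while expectSuccess is false fails the test,
// as does searchAndReduce() throwing while expectSuccess is true.
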
@@ -674,74 +704,118 @@
*/
private void writeTestDoc(MappedFieldType fieldType, String fieldName, RandomIndexWriter iw) throws IOException {
if (fieldType.getValuesSourceType().equals(CoreValuesSourceType.NUMERIC)) {
String typeName = fieldType.typeName();
ValuesSourceType vst = fieldType.getValuesSourceType();
Document doc = new Document();
String json;
if (vst.equals(CoreValuesSourceType.NUMERIC)) {
// TODO note: once VS refactor adds DATE/BOOLEAN, this conditional will go away
if (fieldType.typeName().equals(DateFieldMapper.CONTENT_TYPE)
|| fieldType.typeName().equals(DateFieldMapper.DATE_NANOS_CONTENT_TYPE)) {
iw.addDocument(singleton(new SortedNumericDocValuesField(fieldName, randomNonNegativeLong())));
} else if (fieldType.typeName().equals(BooleanFieldMapper.CONTENT_TYPE)) {
iw.addDocument(singleton(new SortedNumericDocValuesField(fieldName, randomBoolean() ? 0 : 1)));
long v;
if (typeName.equals(DateFieldMapper.CONTENT_TYPE) || typeName.equals(DateFieldMapper.DATE_NANOS_CONTENT_TYPE)) {
// positive integer because date_nanos gets unhappy with large longs
v = Math.abs(randomInt());
json = "{ \"" + fieldName + "\" : \"" + v + "\" }";
} else if (typeName.equals(BooleanFieldMapper.CONTENT_TYPE)) {
v = randomBoolean() ? 0 : 1;
json = "{ \"" + fieldName + "\" : \"" + (v == 0 ? "false" : "true") + "\" }";
} else if (typeName.equals(NumberFieldMapper.NumberType.DOUBLE.typeName())) {
double d = Math.abs(randomDouble());
v = NumericUtils.doubleToSortableLong(d);
json = "{ \"" + fieldName + "\" : \"" + d + "\" }";
} else if (typeName.equals(NumberFieldMapper.NumberType.FLOAT.typeName())) {
float f = Math.abs(randomFloat());
v = NumericUtils.floatToSortableInt(f);
json = "{ \"" + fieldName + "\" : \"" + f + "\" }";
} else if (typeName.equals(NumberFieldMapper.NumberType.HALF_FLOAT.typeName())) {
// Respect half float range
float f = Math.abs((randomFloat() * 2 - 1) * 70000);
v = HalfFloatPoint.halfFloatToSortableShort(f);
json = "{ \"" + fieldName + "\" : \"" + f + "\" }";
} else {
iw.addDocument(singleton(new SortedNumericDocValuesField(fieldName, randomLong())));
// byte is the smallest numeric type, so a byte-sized value is valid for every numeric field
v = Math.abs(randomByte());
json = "{ \"" + fieldName + "\" : \"" + v + "\" }";
}
} else if (fieldType.getValuesSourceType().equals(CoreValuesSourceType.BYTES)) {
if (fieldType.typeName().equals(BinaryFieldMapper.CONTENT_TYPE)) {
iw.addDocument(singleton(new BinaryFieldMapper.CustomBinaryDocValuesField(fieldName, new BytesRef("a").bytes)));
} else if (fieldType.typeName().equals(IpFieldMapper.CONTENT_TYPE)) {
doc.add(new SortedNumericDocValuesField(fieldName, v));
} else if (vst.equals(CoreValuesSourceType.BYTES)) {
if (typeName.equals(BinaryFieldMapper.CONTENT_TYPE)) {
doc.add(new BinaryFieldMapper.CustomBinaryDocValuesField(fieldName, new BytesRef("a").bytes));
json = "{ \"" + fieldName + "\" : \"a\" }";
} else if (typeName.equals(IpFieldMapper.CONTENT_TYPE)) {
// TODO note: once VS refactor adds IP, this conditional will go away
boolean v4 = randomBoolean();
iw.addDocument(singleton(new SortedSetDocValuesField(fieldName, new BytesRef(InetAddressPoint.encode(randomIp(v4))))));
InetAddress ip = randomIp(randomBoolean());
json = "{ \"" + fieldName + "\" : \"" + NetworkAddress.format(ip) + "\" }";
doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(InetAddressPoint.encode(ip))));
} else {
iw.addDocument(singleton(new SortedSetDocValuesField(fieldName, new BytesRef("a"))));
doc.add(new SortedSetDocValuesField(fieldName, new BytesRef("a")));
json = "{ \"" + fieldName + "\" : \"a\" }";
}
} else if (fieldType.getValuesSourceType().equals(CoreValuesSourceType.RANGE)) {
} else if (vst.equals(CoreValuesSourceType.RANGE)) {
Object start;
Object end;
RangeType rangeType;
if (fieldType.typeName().equals(RangeType.DOUBLE.typeName())) {
if (typeName.equals(RangeType.DOUBLE.typeName())) {
start = randomDouble();
end = RangeType.DOUBLE.nextUp(start);
rangeType = RangeType.DOUBLE;
} else if (fieldType.typeName().equals(RangeType.FLOAT.typeName())) {
} else if (typeName.equals(RangeType.FLOAT.typeName())) {
start = randomFloat();
end = RangeType.FLOAT.nextUp(start);
rangeType = RangeType.FLOAT;
} else if (fieldType.typeName().equals(RangeType.IP.typeName())) {
} else if (typeName.equals(RangeType.IP.typeName())) {
boolean v4 = randomBoolean();
start = randomIp(v4);
end = RangeType.IP.nextUp(start);
rangeType = RangeType.IP;
} else if (fieldType.typeName().equals(RangeType.LONG.typeName())) {
} else if (typeName.equals(RangeType.LONG.typeName())) {
start = randomLong();
end = RangeType.LONG.nextUp(start);
rangeType = RangeType.LONG;
} else if (fieldType.typeName().equals(RangeType.INTEGER.typeName())) {
} else if (typeName.equals(RangeType.INTEGER.typeName())) {
start = randomInt();
end = RangeType.INTEGER.nextUp(start);
rangeType = RangeType.INTEGER;
} else if (fieldType.typeName().equals(RangeType.DATE.typeName())) {
} else if (typeName.equals(RangeType.DATE.typeName())) {
start = randomNonNegativeLong();
end = RangeType.DATE.nextUp(start);
rangeType = RangeType.DATE;
} else {
throw new IllegalStateException("Unknown type of range [" + fieldType.typeName() + "]");
throw new IllegalStateException("Unknown type of range [" + typeName + "]");
}
final RangeFieldMapper.Range range = new RangeFieldMapper.Range(rangeType, start, end, true, true);
iw.addDocument(singleton(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton(range)))));
} else if (fieldType.getValuesSourceType().equals(CoreValuesSourceType.GEOPOINT)) {
iw.addDocument(singleton(new LatLonDocValuesField(fieldName, randomDouble(), randomDouble())));
doc.add(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton(range))));
json = "{ \"" + fieldName + "\" : { \n" +
" \"gte\" : \"" + start + "\",\n" +
" \"lte\" : \"" + end + "\"\n" +
" }}";
} else if (vst.equals(CoreValuesSourceType.GEOPOINT)) {
double lat = randomDouble();
double lon = randomDouble();
doc.add(new LatLonDocValuesField(fieldName, lat, lon));
json = "{ \"" + fieldName + "\" : \"[" + lon + "," + lat + "]\" }";
} else {
throw new IllegalStateException("Unknown field type [" + fieldType.typeName() + "]");
throw new IllegalStateException("Unknown field type [" + typeName + "]");
}
doc.add(new StoredField("_source", new BytesRef(json)));
iw.addDocument(doc);
}
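
The numeric branches above depend on Lucene storing floating-point doc values as sortable integers, which is why the raw random values pass through NumericUtils and HalfFloatPoint before reaching SortedNumericDocValuesField. A minimal standalone sketch of those round trips (assuming the Lucene core and sandbox jars are on the classpath):

import org.apache.lucene.document.HalfFloatPoint;
import org.apache.lucene.util.NumericUtils;

public class SortableEncodingDemo {
    public static void main(String[] args) {
        // double <-> sortable long is a lossless round trip
        long sortableDouble = NumericUtils.doubleToSortableLong(1.5d);
        System.out.println(NumericUtils.sortableLongToDouble(sortableDouble)); // 1.5

        // float <-> sortable int is likewise lossless
        int sortableFloat = NumericUtils.floatToSortableInt(2.5f);
        System.out.println(NumericUtils.sortableIntToFloat(sortableFloat)); // 2.5

        // half_float rounds to the nearest representable value, so it is lossy
        short sortableHalf = HalfFloatPoint.halfFloatToSortableShort(0.1f);
        System.out.println(HalfFloatPoint.sortableShortToHalfFloat(sortableHalf)); // ~0.09997559
    }
}
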
private class MockParserContext extends Mapper.TypeParser.ParserContext {
MockParserContext() {
super(null, null, null, null, null);
}
@Override
public IndexAnalyzers getIndexAnalyzers() {
NamedAnalyzer defaultAnalyzer = new NamedAnalyzer(AnalysisRegistry.DEFAULT_ANALYZER_NAME,
AnalyzerScope.GLOBAL, new StandardAnalyzer());
return new IndexAnalyzers(singletonMap(AnalysisRegistry.DEFAULT_ANALYZER_NAME, defaultAnalyzer), emptyMap(), emptyMap());
}
}
@After