Fixes date range query using epoch with timezone (#21542)
This change fixes the range query so that an exception is always thrown if the range query uses epoch time together with a time zone. Since epoch time is always UTC, it should not be used with a time zone. Closes #21501
This commit is contained in:
parent 00de8e07fc
commit c6c734dce1
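For orientation, a minimal sketch (not part of the commit) of the kind of request that now fails. It assumes the stock QueryBuilders/RangeQueryBuilder client API and made-up epoch values; the exception is raised when the shard parses the range, not when the builder is constructed:

    // Range bounds given as raw epoch milliseconds combined with a time_zone.
    // Epoch time is always UTC, so this combination is now always rejected
    // with a parse exception instead of being accepted inconsistently.
    QueryBuilder query = QueryBuilders.rangeQuery("date")
            .from(1388534400000L)    // 2014-01-01T00:00:00Z as epoch millis
            .to(1391212800000L)      // 2014-02-01T00:00:00Z as epoch millis
            .timeZone("+01:00");     // not allowed together with epoch bounds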
DateFieldMapper.java
@@ -321,14 +321,6 @@ public class DateFieldMapper extends FieldMapper {
             dateParser = this.dateMathParser;
         }

-        if (PointValues.size(reader, name()) == 0) {
-            // no points, so nothing matches
-            return Relation.DISJOINT;
-        }
-
-        long minValue = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0);
-        long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0);
-
         long fromInclusive = Long.MIN_VALUE;
         if (from != null) {
             fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser, context);
@@ -351,6 +343,17 @@ public class DateFieldMapper extends FieldMapper {
             }
         }

+        // This check needs to be done after fromInclusive and toInclusive
+        // are resolved so we can throw an exception if they are invalid
+        // even if there are no points in the shard
+        if (PointValues.size(reader, name()) == 0) {
+            // no points, so nothing matches
+            return Relation.DISJOINT;
+        }
+
+        long minValue = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0);
+        long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0);
+
         if (minValue >= fromInclusive && maxValue <= toInclusive) {
             return Relation.WITHIN;
         } else if (maxValue < fromInclusive || minValue > toInclusive) {
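Read together, the two hunks above move the empty-shard short circuit below the parsing of both bounds. A condensed sketch of the resulting order of operations (simplified; the exclusive-bound adjustments of the real method are omitted, and this is not the verbatim Elasticsearch source):

    long fromInclusive = Long.MIN_VALUE;
    if (from != null) {
        // throws if 'from' is invalid, e.g. an epoch value combined with a time zone
        fromInclusive = parseToMilliseconds(from, !includeLower, timeZone, dateParser, context);
    }
    long toInclusive = Long.MAX_VALUE;
    if (to != null) {
        toInclusive = parseToMilliseconds(to, includeUpper, timeZone, dateParser, context);
    }
    // Only after both bounds are resolved may an empty shard short-circuit,
    // so malformed bounds are reported even when the field has no points.
    if (PointValues.size(reader, name()) == 0) {
        return Relation.DISJOINT;
    }
    long minValue = LongPoint.decodeDimension(PointValues.getMinPackedValue(reader, name()), 0);
    long maxValue = LongPoint.decodeDimension(PointValues.getMaxPackedValue(reader, name()), 0);
    if (minValue >= fromInclusive && maxValue <= toInclusive) {
        return Relation.WITHIN;
    } else if (maxValue < fromInclusive || minValue > toInclusive) {
        return Relation.DISJOINT;
    }
    return Relation.INTERSECTS;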
DateFieldTypeTests.java
@@ -73,11 +73,12 @@ public class DateFieldTypeTests extends FieldTypeTestCase {
     }

     public void testIsFieldWithinQueryEmptyReader() throws IOException {
+        QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis);
         IndexReader reader = new MultiReader();
         DateFieldType ft = new DateFieldType();
         ft.setName("my_date");
         assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
-                randomBoolean(), randomBoolean(), null, null, null));
+                randomBoolean(), randomBoolean(), null, null, context));
     }

     private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader,
@@ -128,7 +129,9 @@ public class DateFieldTypeTests extends FieldTypeTestCase {
         // Fields with no value indexed.
         DateFieldType ft2 = new DateFieldType();
         ft2.setName("my_date2");
-        assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, null));
+
+        QueryRewriteContext context = new QueryRewriteContext(null, null, null, null, null, null, null, () -> nowInMillis);
+        assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, context));
         IOUtils.close(reader, w, dir);
     }

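A hypothetical companion assertion, not part of this diff, illustrating what the reordering buys: a malformed bound now surfaces even on an empty reader instead of being masked by the early DISJOINT return. expectThrows is the standard Lucene/Elasticsearch test-framework helper; the "not-a-date" value is made up:

    Exception e = expectThrows(Exception.class, () ->
            ft.isFieldWithinQuery(new MultiReader(), "not-a-date", "2016-04-03",
                    randomBoolean(), randomBoolean(), null, null, context));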
@ -1904,7 +1904,6 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-999999999999L)).get(), 3);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/21501")
|
||||
public void testRangeQueryWithTimeZone() throws Exception {
|
||||
assertAcked(prepareCreate("test")
|
||||
.addMapping("type1", "date", "type=date", "num", "type=integer"));
|
||||
|
|
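The body of testRangeQueryWithTimeZone is not included in this hunk; only the @AwaitsFix annotation is removed so the test runs again. As a hedged guess at the shape of the request it exercises (dates and offset are illustrative, not taken from the test):

    SearchResponse searchResponse = client().prepareSearch("test")
            .setQuery(rangeQuery("date")
                    .from("2014-01-01T00:00:00")
                    .to("2014-01-01T00:59:00")
                    .timeZone("+01:00"))
            .get();
    // the real test indexes known documents and asserts exact hit counts
    // for several time zones; only the shape of the query is shown here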