Merge branch 'master' into feature/query-refactoring
commit a1948350a5

@@ -234,7 +234,9 @@ public class AwarenessAllocationDecider extends AllocationDecider {
                 int currentNodeCount = shardPerAttribute.get(node.node().attributes().get(awarenessAttribute));
                 // if we are above with leftover, then we know we are not good, even with mod
                 if (currentNodeCount > (requiredCountPerAttribute + leftoverPerAttribute)) {
-                    return allocation.decision(Decision.NO, NAME, "too many shards on nodes for attribute: [%s]", awarenessAttribute);
+                    return allocation.decision(Decision.NO, NAME,
+                            "too many shards on node for attribute: [%s], required per attribute: [%d], node count: [%d], leftover: [%d]",
+                            awarenessAttribute, requiredCountPerAttribute, currentNodeCount, leftoverPerAttribute);
                 }
                 // all is well, we are below or same as average
                 if (currentNodeCount <= requiredCountPerAttribute) {
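
Note: the decision message is rendered with String.format-style placeholders, so the NO decision now reports the numbers behind it. A quick illustration with made-up values (plain JDK, not the decider itself):

    String msg = String.format(java.util.Locale.ROOT,
            "too many shards on node for attribute: [%s], required per attribute: [%d], node count: [%d], leftover: [%d]",
            "zone", 2, 3, 1);
    // -> too many shards on node for attribute: [zone], required per attribute: [2], node count: [3], leftover: [1]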

@@ -25,6 +25,7 @@ import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.payloads.AveragePayloadFunction;
@ -124,14 +125,23 @@ public final class AllTermQuery extends PayloadTermQuery {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Query rewrite(IndexReader reader) throws IOException {
|
public Query rewrite(IndexReader reader) throws IOException {
|
||||||
|
boolean fieldExists = false;
|
||||||
boolean hasPayloads = false;
|
boolean hasPayloads = false;
|
||||||
for (LeafReaderContext context : reader.leaves()) {
|
for (LeafReaderContext context : reader.leaves()) {
|
||||||
final Terms terms = context.reader().terms(term.field());
|
final Terms terms = context.reader().terms(term.field());
|
||||||
if (terms.hasPayloads()) {
|
if (terms != null) {
|
||||||
hasPayloads = true;
|
fieldExists = true;
|
||||||
break;
|
if (terms.hasPayloads()) {
|
||||||
|
hasPayloads = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (fieldExists == false) {
|
||||||
|
Query rewritten = new MatchNoDocsQuery();
|
||||||
|
rewritten.setBoost(getBoost());
|
||||||
|
return rewritten;
|
||||||
|
}
|
||||||
if (hasPayloads == false) {
|
if (hasPayloads == false) {
|
||||||
TermQuery rewritten = new TermQuery(term);
|
TermQuery rewritten = new TermQuery(term);
|
||||||
rewritten.setBoost(getBoost());
|
rewritten.setBoost(getBoost());
|
||||||
|
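
Note: LeafReader.terms(field) returns null for segments that do not contain the field at all, so the old loop could throw a NullPointerException before ever checking payloads; with the fix, a query against a missing field degrades to a match-nothing query instead of failing. A minimal sketch of the guard, assuming a hypothetical field name "body":

    boolean fieldExists = false;
    for (LeafReaderContext context : reader.leaves()) {
        Terms terms = context.reader().terms("body"); // null when this segment lacks the field
        if (terms != null) {
            fieldExists = true;
            break;
        }
    }
    if (fieldExists == false) {
        return new MatchNoDocsQuery(); // match nothing rather than throw
    }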

@@ -72,8 +72,8 @@ public class DateFieldMapper extends NumberFieldMapper {
     public static final String CONTENT_TYPE = "date";
 
     public static class Defaults extends NumberFieldMapper.Defaults {
-        public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("strictDateOptionalTime||epoch_millis", Locale.ROOT);
-        public static final FormatDateTimeFormatter DATE_TIME_FORMATTER_BEFORE_2_0 = Joda.forPattern("dateOptionalTime", Locale.ROOT);
+        public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("strict_date_optional_time||epoch_millis", Locale.ROOT);
+        public static final FormatDateTimeFormatter DATE_TIME_FORMATTER_BEFORE_2_0 = Joda.forPattern("date_optional_time", Locale.ROOT);
         public static final TimeUnit TIME_UNIT = TimeUnit.MILLISECONDS;
         public static final DateFieldType FIELD_TYPE = new DateFieldType();
 
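
Note: the defaults switch from the camelCase Joda names to their snake_case equivalents. The || separator composes formats, so the default still accepts both ISO-8601 strings and epoch milliseconds. A hedged sketch (the sample inputs are illustrative):

    import java.util.Locale;
    import org.elasticsearch.common.joda.FormatDateTimeFormatter;
    import org.elasticsearch.common.joda.Joda;

    FormatDateTimeFormatter fmt = Joda.forPattern("strict_date_optional_time||epoch_millis", Locale.ROOT);
    long fromIso = fmt.parser().parseMillis("2015-07-15T10:00:00Z"); // strict_date_optional_time branch
    long fromEpoch = fmt.parser().parseMillis("1436954400000");      // epoch_millis branch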

@@ -89,7 +89,10 @@ abstract class AbstractInternalHDRPercentiles extends InternalNumericMetricsAggr
             keys[i] = in.readDouble();
         }
         long minBarForHighestToLowestValueRatio = in.readLong();
-        ByteBuffer stateBuffer = ByteBuffer.wrap(in.readByteArray());
+        final int serializedLen = in.readVInt();
+        byte[] bytes = new byte[serializedLen];
+        in.readBytes(bytes, 0, serializedLen);
+        ByteBuffer stateBuffer = ByteBuffer.wrap(bytes);
         try {
             state = DoubleHistogram.decodeFromCompressedByteBuffer(stateBuffer, minBarForHighestToLowestValueRatio);
         } catch (DataFormatException e) {

@@ -107,8 +110,9 @@ abstract class AbstractInternalHDRPercentiles extends InternalNumericMetricsAggr
         }
         out.writeLong(state.getHighestToLowestValueRatio());
         ByteBuffer stateBuffer = ByteBuffer.allocate(state.getNeededByteBufferCapacity());
-        state.encodeIntoCompressedByteBuffer(stateBuffer);
-        out.writeByteArray(stateBuffer.array());
+        final int serializedLen = state.encodeIntoCompressedByteBuffer(stateBuffer);
+        out.writeVInt(serializedLen);
+        out.writeBytes(stateBuffer.array(), 0, serializedLen);
         out.writeBoolean(keyed);
     }
 
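
Note: getNeededByteBufferCapacity() sizes the buffer as an upper bound, ByteBuffer.array() exposes that entire backing array, and encodeIntoCompressedByteBuffer returns the number of bytes actually written. The old writeByteArray call therefore shipped trailing padding; the fix writes an explicit length and exactly that many bytes, mirroring the readVInt/readBytes path in the previous hunk. A hedged round-trip sketch with plain byte arrays (the recorded value is illustrative):

    import java.nio.ByteBuffer;
    import java.util.zip.DataFormatException;
    import org.HdrHistogram.DoubleHistogram;

    DoubleHistogram state = new DoubleHistogram(3);
    state.recordValue(42.0);

    // Write side: capacity is an upper bound; serializedLen is the real payload size.
    ByteBuffer stateBuffer = ByteBuffer.allocate(state.getNeededByteBufferCapacity());
    int serializedLen = state.encodeIntoCompressedByteBuffer(stateBuffer);
    byte[] wire = new byte[serializedLen];
    System.arraycopy(stateBuffer.array(), 0, wire, 0, serializedLen); // copy only what was written

    // Read side: wrap exactly serializedLen bytes, as the new readVInt/readBytes path does.
    try {
        DoubleHistogram decoded = DoubleHistogram.decodeFromCompressedByteBuffer(
                ByteBuffer.wrap(wire), state.getHighestToLowestValueRatio());
    } catch (DataFormatException e) {
        throw new IllegalStateException(e);
    }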

@@ -117,6 +117,9 @@ public class TopHitsAggregator extends MetricsAggregator {
         if (collectors == null) {
             Sort sort = subSearchContext.sort();
             int topN = subSearchContext.from() + subSearchContext.size();
+            // In the QueryPhase we don't need this protection, because it is built into the IndexSearcher,
+            // but here we create collectors ourselves and we need to prevent OOM caused by a crazy offset and size.
+            topN = Math.min(topN, subSearchContext.searcher().getIndexReader().maxDoc());
             TopDocsCollector<?> topLevelCollector = sort != null ? TopFieldCollector.create(sort, topN, true, subSearchContext.trackScores(), subSearchContext.trackScores()) : TopScoreDocCollector.create(topN);
             collectors = new TopDocsAndLeafCollector(topLevelCollector);
             collectors.leafCollector = collectors.topLevelCollector.getLeafCollector(ctx);
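
Note: top-N collectors allocate their priority queue eagerly at the requested size, so an absurd from + size could try to allocate billions of entries before a single document is collected. Clamping to maxDoc bounds the allocation by what the reader can ever return. An illustration with made-up numbers (reader is an assumed open IndexReader):

    int topN = 0 + (Integer.MAX_VALUE - 1024);        // hypothetical from + size
    topN = Math.min(topN, reader.maxDoc());           // can never exceed the doc count
    TopDocsCollector<?> collector = TopScoreDocCollector.create(topN);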

@@ -176,7 +176,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
 
     @Override
     public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
-        final boolean timeoutSet = searchContext.timeoutInMillis() != -1;
+        final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis();
         final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;
         try {
             if (timeoutSet || terminateAfterSet) {
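
Note: this is a magic-number cleanup, not a behavior change, assuming (not shown in this diff) that SearchService defines the sentinel as a TimeValue of -1 milliseconds, along the lines of:

    // Assumed definition of the sentinel this hunk references:
    public static final TimeValue NO_TIMEOUT = TimeValue.timeValueMillis(-1);
    // so `!= SearchService.NO_TIMEOUT.millis()` reads the same as the old `!= -1`.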

@@ -495,7 +495,7 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
         Version randomVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_0_90_0, Version.V_1_6_1);
         IndexService index = createIndex("test", settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build());
         client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get();
-        assertDateFormat("epoch_millis||dateOptionalTime");
+        assertDateFormat("epoch_millis||date_optional_time");
         DocumentMapper defaultMapper = index.mapperService().documentMapper("type");
 
         defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()

@@ -543,13 +543,13 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
 
     public void testThatUpgradingAnOlderIndexToStrictDateWorks() throws Exception {
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("properties").startObject("date_field").field("type", "date").field("format", "dateOptionalTime").endObject().endObject()
+                .startObject("properties").startObject("date_field").field("type", "date").field("format", "date_optional_time").endObject().endObject()
                 .endObject().endObject().string();
 
         Version randomVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_0_90_0, Version.V_1_6_1);
         createIndex("test", settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build());
         client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get();
-        assertDateFormat("epoch_millis||dateOptionalTime");
+        assertDateFormat("epoch_millis||date_optional_time");
 
         // index doc
         client().prepareIndex("test", "type", "1").setSource(XContentFactory.jsonBuilder()
|
|||||||
String newMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
String newMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||||
.startObject("properties").startObject("date_field")
|
.startObject("properties").startObject("date_field")
|
||||||
.field("type", "date")
|
.field("type", "date")
|
||||||
.field("format", "strictDateOptionalTime||epoch_millis")
|
.field("format", "strict_date_optional_time||epoch_millis")
|
||||||
.endObject().endObject().endObject().endObject().string();
|
.endObject().endObject().endObject().endObject().string();
|
||||||
PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(newMapping).get();
|
PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(newMapping).get();
|
||||||
assertThat(putMappingResponse.isAcknowledged(), is(true));
|
assertThat(putMappingResponse.isAcknowledged(), is(true));
|
||||||
|
|
||||||
assertDateFormat("strictDateOptionalTime||epoch_millis");
|
assertDateFormat("strict_date_optional_time||epoch_millis");
|
||||||
}
|
}
|
||||||
|
|
||||||
private void assertDateFormat(String expectedFormat) throws IOException {
|
private void assertDateFormat(String expectedFormat) throws IOException {
|
||||||
|

@@ -19,6 +19,7 @@
 package org.elasticsearch.search.aggregations.bucket;
 
 import org.apache.lucene.search.Explanation;
+import org.apache.lucene.util.ArrayUtil;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;

@@ -928,4 +929,20 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
             }
         }
     }
+
+    @Test
+    public void testDontExplode() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .setTypes("type")
+                .addAggregation(terms("terms")
+                        .executionHint(randomExecutionHint())
+                        .field(TERMS_AGGS_FIELD)
+                        .subAggregation(
+                                topHits("hits").setSize(ArrayUtil.MAX_ARRAY_LENGTH - 1).addSort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))
+                        )
+                )
+                .get();
+        assertNoFailures(response);
+    }
 }

@@ -1853,6 +1853,24 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
         assertHitCount(searchResponse, 1l);
     }
 
+    @Test
+    public void testAllFieldEmptyMapping() throws Exception {
+        client().prepareIndex("myindex", "mytype").setId("1").setSource("{}").setRefresh(true).get();
+        SearchResponse response = client().prepareSearch("myindex").setQuery(matchQuery("_all", "foo")).get();
+        assertNoFailures(response);
+    }
+
+    @Test
+    public void testAllDisabledButQueried() throws Exception {
+        createIndex("myindex");
+        assertAcked(client().admin().indices().preparePutMapping("myindex").setType("mytype").setSource(
+                jsonBuilder().startObject().startObject("mytype").startObject("_all").field("enabled", false)));
+        client().prepareIndex("myindex", "mytype").setId("1").setSource("bar", "foo").setRefresh(true).get();
+        SearchResponse response = client().prepareSearch("myindex").setQuery(matchQuery("_all", "foo")).get();
+        assertNoFailures(response);
+        assertHitCount(response, 0);
+    }
+
     @Test
     public void testIndicesQuery() throws Exception {
         createIndex("index1", "index2", "index3");

@@ -50,6 +50,7 @@ To use it, first create a `BulkProcessor` instance:
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
 
 BulkProcessor bulkProcessor = BulkProcessor.builder(
     client, <1>
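
Note: the added TimeValue import backs the builder's flush-interval setting; a hedged sketch of how it is consumed (client and listener come from the surrounding example):

    BulkProcessor bulkProcessor = BulkProcessor.builder(client, listener)
            .setFlushInterval(TimeValue.timeValueSeconds(5)) // flush queued requests every 5s
            .build();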

@@ -173,6 +173,66 @@ public class AzureSnapshotRestoreITest extends AbstractAzureTest {
         assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
     }
 
+    /**
+     * For issue #51: https://github.com/elasticsearch/elasticsearch-cloud-azure/issues/51
+     */
+    @Test
+    public void testMultipleSnapshots() throws URISyntaxException, StorageException {
+        final String indexName = "test-idx-1";
+        final String typeName = "doc";
+        final String repositoryName = "test-repo";
+        final String snapshot1Name = "test-snap-1";
+        final String snapshot2Name = "test-snap-2";
+
+        Client client = client();
+
+        logger.info("creating index [{}]", indexName);
+        createIndex(indexName);
+        ensureGreen();
+
+        logger.info("indexing first document");
+        index(indexName, typeName, Integer.toString(1), "foo", "bar " + Integer.toString(1));
+        refresh();
+        assertThat(client.prepareCount(indexName).get().getCount(), equalTo(1L));
+
+        logger.info("creating Azure repository with path [{}]", getRepositoryPath());
+        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(repositoryName)
+                .setType("azure").setSettings(Settings.settingsBuilder()
+                        .put(Repository.CONTAINER, getContainerName())
+                        .put(Repository.BASE_PATH, getRepositoryPath())
+                        .put(Repository.CHUNK_SIZE, randomIntBetween(1000, 10000), ByteSizeUnit.BYTES)
+                ).get();
+        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+        logger.info("creating snapshot [{}]", snapshot1Name);
+        CreateSnapshotResponse createSnapshotResponse1 = client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshot1Name).setWaitForCompletion(true).setIndices(indexName).get();
+        assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), greaterThan(0));
+        assertThat(createSnapshotResponse1.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse1.getSnapshotInfo().totalShards()));
+
+        assertThat(client.admin().cluster().prepareGetSnapshots(repositoryName).setSnapshots(snapshot1Name).get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+        logger.info("indexing second document");
+        index(indexName, typeName, Integer.toString(2), "foo", "bar " + Integer.toString(2));
+        refresh();
+        assertThat(client.prepareCount(indexName).get().getCount(), equalTo(2L));
+
+        logger.info("creating snapshot [{}]", snapshot2Name);
+        CreateSnapshotResponse createSnapshotResponse2 = client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshot2Name).setWaitForCompletion(true).setIndices(indexName).get();
+        assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0));
+        assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()));
+
+        assertThat(client.admin().cluster().prepareGetSnapshots(repositoryName).setSnapshots(snapshot2Name).get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+        logger.info("closing index [{}]", indexName);
+        client.admin().indices().prepareClose(indexName).get();
+
+        logger.info("attempting restore from snapshot [{}]", snapshot1Name);
+        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repositoryName, snapshot1Name).setWaitForCompletion(true).execute().actionGet();
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+        ensureGreen();
+        assertThat(client.prepareCount(indexName).get().getCount(), equalTo(1L));
+    }
+
     @Test
     public void testMultipleRepositories() {
         Client client = client();