Merge branch 'master' into feature/query-refactoring

javanna 2015-08-07 16:45:36 +02:00 committed by Luca Cavanna
commit d920c9d6bf
6 changed files with 12 additions and 146 deletions

View File

@@ -1903,40 +1903,6 @@ public class SearchQueryIT extends ESIntegTestCase {
assertFirstHit(searchResponse, hasId("1"));
}
@Test
public void testIndicesFilter() throws Exception {
createIndex("index1", "index2", "index3");
client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get();
client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get();
client().prepareIndex("index3", "type3").setId("3").setSource("text", "value3").get();
refresh();
SearchResponse searchResponse = client().prepareSearch("index1", "index2", "index3")
.setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")
.noMatchQuery(termQuery("text", "value2"))).get();
assertHitCount(searchResponse, 2L);
assertSearchHits(searchResponse, "1", "2");
//default no match filter is "all"
searchResponse = client().prepareSearch("index1", "index2", "index3")
.setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")).get();
assertHitCount(searchResponse, 3L);
assertSearchHits(searchResponse, "1", "2", "3");
searchResponse = client().prepareSearch("index1", "index2", "index3")
.setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")
.noMatchQuery("all")).get();
assertHitCount(searchResponse, 3L);
assertSearchHits(searchResponse, "1", "2", "3");
searchResponse = client().prepareSearch("index1", "index2", "index3")
.setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")
.noMatchQuery("none")).get();
assertHitCount(searchResponse, 1L);
assertFirstHit(searchResponse, hasId("1"));
}
@Test // https://github.com/elasticsearch/elasticsearch/issues/2416
public void testIndicesQuerySkipParsing() throws Exception {
createIndex("simple");
@@ -1969,37 +1935,6 @@ public class SearchQueryIT extends ESIntegTestCase {
assertSearchHits(searchResponse, "1", "2");
}
@Test // https://github.com/elasticsearch/elasticsearch/issues/2416
public void testIndicesFilterSkipParsing() throws Exception {
createIndex("simple");
assertAcked(prepareCreate("related")
.addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent")
.endObject().endObject().endObject()));
indexRandom(true,
client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1"),
client().prepareIndex("related", "parent").setId("2").setSource("text", "parent"),
client().prepareIndex("related", "child").setId("3").setParent("2").setSource("text", "value2"));
//has_child fails if executed on "simple" index
try {
client().prepareSearch("simple")
.setPostFilter(hasChildQuery("child", termQuery("text", "value1"))).get();
fail("Should have failed as has_child query can only be executed against parent-child types");
} catch (SearchPhaseExecutionException e) {
assertThat(e.shardFailures().length, greaterThan(0));
for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
assertThat(shardSearchFailure.reason(), containsString("No mapping for for type [child]"));
}
}
SearchResponse searchResponse = client().prepareSearch("related", "simple")
.setPostFilter(indicesQuery(hasChildQuery("child", termQuery("text", "value2")), "related")
.noMatchQuery(termQuery("text", "value1"))).get();
assertHitCount(searchResponse, 2L);
assertSearchHits(searchResponse, "1", "2");
}
@Test
public void testIndicesQueryMissingIndices() throws IOException, ExecutionException, InterruptedException {
createIndex("index1");
@@ -2069,79 +2004,6 @@ public class SearchQueryIT extends ESIntegTestCase {
}
}
@Test
public void testIndicesFilterMissingIndices() throws IOException, ExecutionException, InterruptedException {
createIndex("index1");
createIndex("index2");
createIndex("index3");
indexRandom(true,
client().prepareIndex("index1", "type1", "1").setSource("field", "match"),
client().prepareIndex("index1", "type1", "2").setSource("field", "no_match"),
client().prepareIndex("index2", "type1", "10").setSource("field", "match"),
client().prepareIndex("index2", "type1", "20").setSource("field", "no_match"),
client().prepareIndex("index3", "type1", "100").setSource("field", "match"),
client().prepareIndex("index3", "type1", "200").setSource("field", "no_match"));
//all indices are missing
SearchResponse searchResponse = client().prepareSearch().setQuery(
filteredQuery(matchAllQuery(),
indicesQuery(termQuery("field", "missing"), "test1", "test2", "test3")
.noMatchQuery(termQuery("field", "match")))).get();
assertHitCount(searchResponse, 3L);
for (SearchHit hit : searchResponse.getHits().getHits()) {
if ("index1".equals(hit.index())) {
assertThat(hit, hasId("1"));
} else if ("index2".equals(hit.index())) {
assertThat(hit, hasId("10"));
} else if ("index3".equals(hit.index())) {
assertThat(hit, hasId("100"));
} else {
fail("Returned documents should belong to either index1, index2 or index3");
}
}
//only one index specified, which is missing
searchResponse = client().prepareSearch().setQuery(
filteredQuery(matchAllQuery(),
indicesQuery(termQuery("field", "missing"), "test1")
.noMatchQuery(termQuery("field", "match")))).get();
assertHitCount(searchResponse, 3L);
for (SearchHit hit : searchResponse.getHits().getHits()) {
if ("index1".equals(hit.index())) {
assertThat(hit, hasId("1"));
} else if ("index2".equals(hit.index())) {
assertThat(hit, hasId("10"));
} else if ("index3".equals(hit.index())) {
assertThat(hit, hasId("100"));
} else {
fail("Returned documents should belong to either index1, index2 or index3");
}
}
//more than one index specified, one of them is missing
searchResponse = client().prepareSearch().setQuery(
filteredQuery(matchAllQuery(),
indicesQuery(termQuery("field", "missing"), "index1", "test1")
.noMatchQuery(termQuery("field", "match")))).get();
assertHitCount(searchResponse, 2L);
for (SearchHit hit : searchResponse.getHits().getHits()) {
if ("index2".equals(hit.index())) {
assertThat(hit, hasId("10"));
} else if ("index3".equals(hit.index())) {
assertThat(hit, hasId("100"));
} else {
fail("Returned documents should belong to either index2 or index3");
}
}
}
@Test
public void testMinScore() throws ExecutionException, InterruptedException {
createIndex("test");

View File

@@ -111,8 +111,7 @@
<available file="${basedir}/../licenses" type="dir"/>
</condition>
<echo taskName="license check">Running license check</echo>
<!-- don't run on windows, because everyone hates it -->
<exec failonerror="${licenses.exists}" executable="perl" osfamily="unix"
<exec failonerror="${licenses.exists}" executable="perl"
dir="${elasticsearch.tools.directory}/license-check">
<arg value="check_license_and_sha.pl"/>
<arg value="--check"/>

View File

@@ -68,7 +68,7 @@ fi
if [ "$REMOVE_SERVICE" = "true" ]; then
if command -v systemctl >/dev/null; then
systemctl --no-reload disable elasticsearch.service > /dev/null 2>&1 || true
systemctl disable elasticsearch.service > /dev/null 2>&1 || true
fi
if command -v chkconfig >/dev/null; then

View File

@@ -116,11 +116,6 @@ setup() {
# The removal must disable the service
# see prerm file
if is_systemd; then
# Redhat-based systemd distros usually return exit code 1
# OpenSUSE13 returns 0
run systemctl status elasticsearch.service
[ "$status" -eq 1 ] || [ "$status" -eq 0 ]
run systemctl is-enabled elasticsearch.service
[ "$status" -eq 1 ]
fi

View File

@@ -20,6 +20,11 @@ bucket_key = value - rem
From the rounding function above it can be seen that the intervals themselves **must** be integers.
WARNING: Currently, values are cast to integers before being bucketed, which
might cause negative floating-point values to fall into the wrong bucket. For
instance, `-4.5` with an interval of `2` would be cast to `-4`, and so would
end up in the `-4 <= val < -2` bucket instead of the `-6 <= val < -4` bucket.
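To make this concrete, here is a minimal Java sketch of the rounding above applied after such a cast (an illustration only, not the aggregation's actual implementation):

[source,java]
--------------------------------------------------
public class HistogramCastPitfall {
    public static void main(String[] args) {
        long interval = 2;
        double value = -4.5;
        long cast = (long) value;     // truncates toward zero: -4, not -5
        long rem = cast % interval;   // 0
        if (rem < 0) {
            rem += interval;          // keep the remainder non-negative
        }
        long bucketKey = cast - rem;  // -4
        // prints "bucket_key = -4": the -4 <= val < -2 bucket,
        // although -4.5 really belongs in -6 <= val < -4
        System.out.println("bucket_key = " + bucketKey);
    }
}
--------------------------------------------------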
The following snippet "buckets" the products based on their `price` by interval of `50`:
[source,js]
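--------------------------------------------------
{
    "aggs" : {
        "prices" : {
            "histogram" : {
                "field" : "price",
                "interval" : 50
            }
        }
    }
}
--------------------------------------------------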

View File

@@ -70,3 +70,8 @@ The commands supported are:
allocate the shard to. It also accepts an `allow_primary` flag to
explicitly allow allocating a primary shard (which might result in data
loss).
WARNING: The `allow_primary` parameter will force a new _empty_ primary shard
to be allocated *without any data*. If a node which has a copy of the original
primary shard (including data) rejoins the cluster later on, that data will be
deleted: the old shard copy will be replaced by the new live shard copy.
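For illustration, a minimal sketch of issuing such a command through the Java client of this era follows; the `AllocateAllocationCommand` class and its `(ShardId, node, allowPrimary)` constructor are assumptions based on the pre-5.0 codebase, and the index and node names are hypothetical:

[source,java]
--------------------------------------------------
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCommand;
import org.elasticsearch.index.shard.ShardId;

public class RerouteSketch {
    // Force-allocates shard 0 of "index1" on "node1" (hypothetical names).
    // allowPrimary=true creates a new EMPTY primary: as the warning above
    // explains, any old copy of that shard rejoining later will be deleted.
    static void allocateEmptyPrimary(Client client) {
        client.admin().cluster().prepareReroute()
                .add(new AllocateAllocationCommand(new ShardId("index1", 0), "node1", true))
                .get();
    }
}
--------------------------------------------------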