Merge branch 'master' into close-index-api-refactoring

Tanguy Leroux 2018-12-17 10:14:38 +01:00
commit 79999d37d4
404 changed files with 10125 additions and 5622 deletions

View File

@@ -70,6 +70,59 @@ public class TestingConventionsTasks extends DefaultTask {
// Run only after everything is compiled
Boilerplate.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getClassesTaskName()));
}
@Input
public Map<String, Set<File>> classFilesPerTask(FileTree testClassFiles) {
Map<String, Set<File>> collector = new HashMap<>();
// RandomizedTestingTask
collector.putAll(
Stream.concat(
getProject().getTasks().withType(getRandomizedTestingTask()).stream(),
// Look at sub-projects too, as tests are sometimes implemented in a parent project but run in sub-projects
// against different configurations.
getProject().getSubprojects().stream().flatMap(subproject ->
subproject.getTasks().withType(getRandomizedTestingTask()).stream()
)
)
.filter(Task::getEnabled)
.collect(Collectors.toMap(
Task::getPath,
task -> testClassFiles.matching(getRandomizedTestingPatternSet(task)).getFiles()
))
);
// Gradle Test
collector.putAll(
Stream.concat(
getProject().getTasks().withType(Test.class).stream(),
getProject().getSubprojects().stream().flatMap(subproject ->
subproject.getTasks().withType(Test.class).stream()
)
)
.filter(Task::getEnabled)
.collect(Collectors.toMap(
Task::getPath,
task -> task.getCandidateClassFiles().getFiles()
))
);
return Collections.unmodifiableMap(collector);
}
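// Illustrative sketch only (not part of this commit): the returned map keys each
// enabled test task by its Gradle path and lists the class files it would execute,
// e.g. (hypothetical task paths and files):
//   ":example:test"      -> [ .../build/classes/java/test/org/example/FooTests.class, ... ]
//   ":example:integTest" -> [ .../build/classes/java/test/org/example/BarIT.class, ... ]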
@Input
public Map<String, File> getTestClassNames() {
if (testClassNames == null) {
testClassNames = Boilerplate.getJavaSourceSets(getProject()).getByName("test").getOutput().getClassesDirs()
.getFiles().stream()
.filter(File::exists)
.flatMap(testRoot -> walkPathAndLoadClasses(testRoot).entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
return testClassNames;
}
@OutputFile
public File getSuccessMarker() {
return new File(getProject().getBuildDir(), "markers/" + getName());
}
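// Illustrative note (not part of the diff): for a task named, say, "testingConventions"
// (hypothetical name), the marker file would be build/markers/testingConventions.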
@TaskAction
public void doCheck() throws IOException {
@@ -112,7 +165,7 @@ public class TestingConventionsTasks extends DefaultTask {
.collect(Collectors.toSet())
)
);
problems = collectProblems(
checkNoneExists(
"Test classes implemented by inner classes will not run",
@@ -130,13 +183,13 @@ public class TestingConventionsTasks extends DefaultTask {
),
collectProblems(
testClassesPerTask.entrySet().stream()
.map( entry ->
checkAtLeastOneExists(
"test class in " + entry.getKey(),
entry.getValue().stream()
.map( entry ->
checkAtLeastOneExists(
"test class in " + entry.getKey(),
entry.getValue().stream()
)
)
)
.collect(Collectors.joining())
.collect(Collectors.joining())
),
checkNoneExists(
"Test classes are not included in any enabled task (" +
@@ -161,7 +214,6 @@ public class TestingConventionsTasks extends DefaultTask {
}
}
private String collectProblems(String... problems) {
return Stream.of(problems)
.map(String::trim)
@@ -170,42 +222,6 @@ public class TestingConventionsTasks extends DefaultTask {
.collect(Collectors.joining());
}
@Input
public Map<String, Set<File>> classFilesPerTask(FileTree testClassFiles) {
Map<String, Set<File>> collector = new HashMap<>();
// RandomizedTestingTask
collector.putAll(
Stream.concat(
getProject().getTasks().withType(getRandomizedTestingTask()).stream(),
// Look at sub-projects too, as tests are sometimes implemented in a parent project but run in sub-projects
// against different configurations.
getProject().getSubprojects().stream().flatMap(subproject ->
subproject.getTasks().withType(getRandomizedTestingTask()).stream()
)
)
.filter(Task::getEnabled)
.collect(Collectors.toMap(
Task::getPath,
task -> testClassFiles.matching(getRandomizedTestingPatternSet(task)).getFiles()
))
);
// Gradle Test
collector.putAll(
Stream.concat(
getProject().getTasks().withType(Test.class).stream(),
getProject().getSubprojects().stream().flatMap(subproject ->
subproject.getTasks().withType(Test.class).stream()
)
)
.filter(Task::getEnabled)
.collect(Collectors.toMap(
Task::getPath,
task -> task.getCandidateClassFiles().getFiles()
))
);
return Collections.unmodifiableMap(collector);
}
@SuppressWarnings("unchecked")
private PatternFilterable getRandomizedTestingPatternSet(Task task) {
try {
@@ -232,23 +248,6 @@ public class TestingConventionsTasks extends DefaultTask {
}
}
@Input
public Map<String, File> getTestClassNames() {
if (testClassNames == null) {
testClassNames = Boilerplate.getJavaSourceSets(getProject()).getByName("test").getOutput().getClassesDirs()
.getFiles().stream()
.filter(File::exists)
.flatMap(testRoot -> walkPathAndLoadClasses(testRoot).entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
return testClassNames;
}
@OutputFile
public File getSuccessMarker() {
return new File(getProject().getBuildDir(), "markers/" + getName());
}
private String checkNoneExists(String message, Stream<? extends Class<?>> stream) {
String problem = stream
.map(each -> " * " + each.getName())

View File

@@ -62,7 +62,6 @@
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotsService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPool.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]VersionTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]aliases[/\\]IndexAliasesIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]deps[/\\]joda[/\\]SimpleJodaTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]EnvironmentTests.java" checks="LineLength" />
@@ -70,7 +69,6 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]explain[/\\]ExplainActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indexing[/\\]IndexActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indexlifecycle[/\\]IndexLifecycleActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorServiceSettingsTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />

View File

@@ -1,5 +1,5 @@
elasticsearch = 7.0.0
lucene = 8.0.0-snapshot-7e4555a2fd
lucene = 8.0.0-snapshot-774e9aefbc
# optional dependencies
spatial4j = 0.7

View File

@@ -19,8 +19,10 @@
package org.elasticsearch.client.benchmark.transport;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.benchmark.AbstractBenchmark;
import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
@@ -32,10 +34,9 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugin.noop.NoopPlugin;
import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.NoopBulkRequestBuilder;
import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.NoopSearchRequestBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import java.net.InetAddress;
@@ -79,13 +80,13 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
@Override
public boolean bulkIndex(List<String> bulkData) {
NoopBulkRequestBuilder builder = new NoopBulkRequestBuilder(client, NoopBulkAction.INSTANCE);
BulkRequest bulkRequest = new BulkRequest();
for (String bulkItem : bulkData) {
builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8), XContentType.JSON));
bulkRequest.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8), XContentType.JSON));
}
BulkResponse bulkResponse;
try {
bulkResponse = builder.execute().get();
bulkResponse = client.execute(NoopBulkAction.INSTANCE, bulkRequest).get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
@@ -108,11 +109,12 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
@Override
public boolean search(String source) {
final SearchResponse response;
NoopSearchRequestBuilder builder = new NoopSearchRequestBuilder(client, NoopSearchAction.INSTANCE);
try {
builder.setIndices(indexName);
builder.setQuery(QueryBuilders.wrapperQuery(source));
response = client.execute(NoopSearchAction.INSTANCE, builder.request()).get();
final SearchRequest searchRequest = new SearchRequest(indexName);
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
searchRequest.source(searchSourceBuilder);
searchSourceBuilder.query(QueryBuilders.wrapperQuery(source));
response = client.execute(NoopSearchAction.INSTANCE, searchRequest).get();
return response.status() == RestStatus.OK;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();

View File

@@ -1,155 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse>
implements WriteRequestBuilder<NoopBulkRequestBuilder> {
public NoopBulkRequestBuilder(ElasticsearchClient client, NoopBulkAction action) {
super(client, action, new BulkRequest());
}
/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
* (for example, if no id is provided one will be generated, and the create flag is honored).
*/
public NoopBulkRequestBuilder add(IndexRequest request) {
super.request.add(request);
return this;
}
/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior as {@link IndexRequest}
* (for example, if no id is provided one will be generated, and the create flag is honored).
*/
public NoopBulkRequestBuilder add(IndexRequestBuilder request) {
super.request.add(request.request());
return this;
}
/**
* Adds a {@link DeleteRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(DeleteRequest request) {
super.request.add(request);
return this;
}
/**
* Adds a {@link DeleteRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(DeleteRequestBuilder request) {
super.request.add(request.request());
return this;
}
/**
* Adds an {@link UpdateRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(UpdateRequest request) {
super.request.add(request);
return this;
}
/**
* Adds an {@link UpdateRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(UpdateRequestBuilder request) {
super.request.add(request.request());
return this;
}
/**
* Adds framed data in binary format.
*/
public NoopBulkRequestBuilder add(byte[] data, int from, int length, XContentType xContentType) throws Exception {
request.add(data, from, length, null, null, xContentType);
return this;
}
/**
* Adds framed data in binary format.
*/
public NoopBulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType,
XContentType xContentType) throws Exception {
request.add(data, from, length, defaultIndex, defaultType, xContentType);
return this;
}
/**
* Sets the number of shard copies that must be active before proceeding with the write.
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public NoopBulkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
}
/**
* A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public NoopBulkRequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
/**
* A timeout to wait if the index operation can't be performed immediately.
* Defaults to {@code 1m}.
*/
public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
request.timeout(timeout);
return this;
}
/**
* A timeout to wait if the index operation can't be performed immediately.
* Defaults to {@code 1m}.
*/
public final NoopBulkRequestBuilder setTimeout(String timeout) {
request.timeout(timeout);
return this;
}
/**
* The number of actions currently in the bulk.
*/
public int numberOfActions() {
return request.numberOfActions();
}
}

View File

@@ -1,496 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.rescore.RescorerBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import java.util.Arrays;
import java.util.List;
public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse> {
public NoopSearchRequestBuilder(ElasticsearchClient client, NoopSearchAction action) {
super(client, action, new SearchRequest());
}
/**
* Sets the indices the search will be executed on.
*/
public NoopSearchRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The document types to execute the search against. Defaults to executing against
* all types.
*/
public NoopSearchRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}
/**
* The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
*/
public NoopSearchRequestBuilder setSearchType(SearchType searchType) {
request.searchType(searchType);
return this;
}
/**
* A string representation of the search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}. Can be
* one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
* "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
*/
public NoopSearchRequestBuilder setSearchType(String searchType) {
request.searchType(searchType);
return this;
}
/**
* If set, will enable scrolling of the search request.
*/
public NoopSearchRequestBuilder setScroll(Scroll scroll) {
request.scroll(scroll);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public NoopSearchRequestBuilder setScroll(TimeValue keepAlive) {
request.scroll(keepAlive);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public NoopSearchRequestBuilder setScroll(String keepAlive) {
request.scroll(keepAlive);
return this;
}
/**
* An optional timeout to control how long search is allowed to take.
*/
public NoopSearchRequestBuilder setTimeout(TimeValue timeout) {
sourceBuilder().timeout(timeout);
return this;
}
/**
* An optional document count; once that many documents have been collected,
* the search query terminates early.
*/
public NoopSearchRequestBuilder setTerminateAfter(int terminateAfter) {
sourceBuilder().terminateAfter(terminateAfter);
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public NoopSearchRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public NoopSearchRequestBuilder setRouting(String... routing) {
request.routing(routing);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomizing across shards. Can be set to
* {@code _local} to prefer local shards or a custom value, which guarantees that the same order
* will be used across different requests.
*/
public NoopSearchRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* Specifies how to handle unavailable requested indices and how to expand wildcard indices expressions.
* <p>
* For example, indices that don't exist.
*/
public NoopSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}
/**
* Constructs a new search source builder with a search query.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public NoopSearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
sourceBuilder().query(queryBuilder);
return this;
}
/**
* Sets a filter that will be executed after the query has been executed and only affects the search hits
* (not aggregations). This filter is always executed as the last filtering mechanism.
*/
public NoopSearchRequestBuilder setPostFilter(QueryBuilder postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets the minimum score below which docs will be filtered out.
*/
public NoopSearchRequestBuilder setMinScore(float minScore) {
sourceBuilder().minScore(minScore);
return this;
}
/**
* From index to start the search from. Defaults to {@code 0}.
*/
public NoopSearchRequestBuilder setFrom(int from) {
sourceBuilder().from(from);
return this;
}
/**
* The number of search hits to return. Defaults to {@code 10}.
*/
public NoopSearchRequestBuilder setSize(int size) {
sourceBuilder().size(size);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with an
* explanation of the hit (ranking).
*/
public NoopSearchRequestBuilder setExplain(boolean explain) {
sourceBuilder().explain(explain);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with its
* version.
*/
public NoopSearchRequestBuilder setVersion(boolean version) {
sourceBuilder().version(version);
return this;
}
/**
* Sets the boost a specific index will receive when the query is executed against it.
*
* @param index The index to apply the boost against
* @param indexBoost The boost to apply to the index
*/
public NoopSearchRequestBuilder addIndexBoost(String index, float indexBoost) {
sourceBuilder().indexBoost(index, indexBoost);
return this;
}
/**
* The stats groups this request will be aggregated under.
*/
public NoopSearchRequestBuilder setStats(String... statsGroups) {
sourceBuilder().stats(Arrays.asList(statsGroups));
return this;
}
/**
* The stats groups this request will be aggregated under.
*/
public NoopSearchRequestBuilder setStats(List<String> statsGroups) {
sourceBuilder().stats(statsGroups);
return this;
}
/**
* Indicates whether the response should contain the stored _source for every hit
*/
public NoopSearchRequestBuilder setFetchSource(boolean fetch) {
sourceBuilder().fetchSource(fetch);
return this;
}
/**
* Indicates that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include An optional include (optionally wildcarded) pattern to filter the returned _source
* @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
*/
public NoopSearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
sourceBuilder().fetchSource(include, exclude);
return this;
}
/**
* Indicates that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source
* @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source
*/
public NoopSearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
sourceBuilder().fetchSource(includes, excludes);
return this;
}
/**
* Adds a docvalue based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The field to get from the docvalue
*/
public NoopSearchRequestBuilder addDocValueField(String name) {
sourceBuilder().docValueField(name);
return this;
}
/**
* Adds a stored field to load and return (note, it must be stored) as part of the search request.
* If none are specified, the source of the document will be returned.
*/
public NoopSearchRequestBuilder addStoredField(String field) {
sourceBuilder().storedField(field);
return this;
}
/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param script The script to use
*/
public NoopSearchRequestBuilder addScriptField(String name, Script script) {
sourceBuilder().scriptField(name, script);
return this;
}
/**
* Adds a sort against the given field name and the sort ordering.
*
* @param field The name of the field
* @param order The sort ordering
*/
public NoopSearchRequestBuilder addSort(String field, SortOrder order) {
sourceBuilder().sort(field, order);
return this;
}
/**
* Adds a generic sort builder.
*
* @see org.elasticsearch.search.sort.SortBuilders
*/
public NoopSearchRequestBuilder addSort(SortBuilder<?> sort) {
sourceBuilder().sort(sort);
return this;
}
/**
* Sets the sort values that indicate which docs this request should "search after".
*/
public NoopSearchRequestBuilder searchAfter(Object[] values) {
sourceBuilder().searchAfter(values);
return this;
}
public NoopSearchRequestBuilder slice(SliceBuilder builder) {
sourceBuilder().slice(builder);
return this;
}
/**
* Applies when sorting, and controls if scores will be tracked as well. Defaults to
* {@code false}.
*/
public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
sourceBuilder().trackScores(trackScores);
return this;
}
/**
* Sets the fields to load and return as part of the search request. If none
* are specified, the source of the document will be returned.
*/
public NoopSearchRequestBuilder storedFields(String... fields) {
sourceBuilder().storedFields(Arrays.asList(fields));
return this;
}
/**
* Adds an aggregation to the search operation.
*/
public NoopSearchRequestBuilder addAggregation(AggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
/**
* Adds an aggregation to the search operation.
*/
public NoopSearchRequestBuilder addAggregation(PipelineAggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
public NoopSearchRequestBuilder highlighter(HighlightBuilder highlightBuilder) {
sourceBuilder().highlighter(highlightBuilder);
return this;
}
/**
* Delegates to {@link org.elasticsearch.search.builder.SearchSourceBuilder#suggest(SuggestBuilder)}
*/
public NoopSearchRequestBuilder suggest(SuggestBuilder suggestBuilder) {
sourceBuilder().suggest(suggestBuilder);
return this;
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescorerBuilder, int)}.
*
* @param rescorer rescorer configuration
* @return this for chaining
*/
public NoopSearchRequestBuilder setRescorer(RescorerBuilder<?> rescorer) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer);
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescorerBuilder, int)}.
*
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public NoopSearchRequestBuilder setRescorer(RescorerBuilder<?> rescorer, int window) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer.windowSize(window));
}
/**
* Adds a new rescorer.
*
* @param rescorer rescorer configuration
* @return this for chaining
*/
public NoopSearchRequestBuilder addRescorer(RescorerBuilder<?> rescorer) {
sourceBuilder().addRescorer(rescorer);
return this;
}
/**
* Adds a new rescorer.
*
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public NoopSearchRequestBuilder addRescorer(RescorerBuilder<?> rescorer, int window) {
sourceBuilder().addRescorer(rescorer.windowSize(window));
return this;
}
/**
* Clears all rescorers from the builder.
*
* @return this for chaining
*/
public NoopSearchRequestBuilder clearRescorers() {
sourceBuilder().clearRescorers();
return this;
}
/**
* Sets the source of the request as a SearchSourceBuilder.
*/
public NoopSearchRequestBuilder setSource(SearchSourceBuilder source) {
request.source(source);
return this;
}
/**
* Sets if this request should use the request cache or not, assuming that it can (for
* example, if "now" is used, it will never be cached). By default (not set, or null,
* will default to the index level setting if request cache is enabled or not).
*/
public NoopSearchRequestBuilder setRequestCache(Boolean requestCache) {
request.requestCache(requestCache);
return this;
}
/**
* Should the query be profiled. Defaults to <code>false</code>
*/
public NoopSearchRequestBuilder setProfile(boolean profile) {
sourceBuilder().profile(profile);
return this;
}
@Override
public String toString() {
if (request.source() != null) {
return request.source().toString();
}
return new SearchSourceBuilder().toString();
}
private SearchSourceBuilder sourceBuilder() {
if (request.source() == null) {
request.source(new SearchSourceBuilder());
}
return request.source();
}
}

View File

@@ -70,6 +70,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
@@ -316,7 +317,9 @@ final class RequestConverters {
}
static Request update(UpdateRequest updateRequest) throws IOException {
String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update");
String endpoint = updateRequest.type().equals(MapperService.SINGLE_MAPPING_NAME)
? endpoint(updateRequest.index(), "_update", updateRequest.id())
: endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update");
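// Hedged illustration (consistent with RequestConvertersTests further below): with the
// default "_doc" type the converter now emits the typeless endpoint, otherwise the legacy one:
//   new UpdateRequest("index", "id")          -> POST /index/_update/id
//   new UpdateRequest("index", "type", "id")  -> POST /index/type/id/_update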
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
Params parameters = new Params(request);

View File

@@ -50,6 +50,8 @@ import org.elasticsearch.client.security.GetSslCertificatesRequest;
import org.elasticsearch.client.security.GetSslCertificatesResponse;
import org.elasticsearch.client.security.GetUserPrivilegesRequest;
import org.elasticsearch.client.security.GetUserPrivilegesResponse;
import org.elasticsearch.client.security.GetUsersRequest;
import org.elasticsearch.client.security.GetUsersResponse;
import org.elasticsearch.client.security.HasPrivilegesRequest;
import org.elasticsearch.client.security.HasPrivilegesResponse;
import org.elasticsearch.client.security.InvalidateTokenRequest;
@@ -81,6 +83,33 @@ public final class SecurityClient {
this.restHighLevelClient = restHighLevelClient;
}
/**
* Get a user, or list of users, in the native realm synchronously.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html">
* the docs</a> for more information.
* @param request the request with the usernames to look up (an empty request retrieves all users)
* @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response from the get users call
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public GetUsersResponse getUsers(GetUsersRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::getUsers, options,
GetUsersResponse::fromXContent, emptySet());
}
/**
* Get a user, or list of users, in the native realm asynchronously.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html">
* the docs</a> for more information.
* @param request the request with the usernames to look up (an empty request retrieves all users)
* @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
*/
public void getUsersAsync(GetUsersRequest request, RequestOptions options, ActionListener<GetUsersResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getUsers, options,
GetUsersResponse::fromXContent, listener, emptySet());
}
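// Hedged usage sketch (not part of this commit; `client` is assumed to be an
// already-constructed RestHighLevelClient):
//   GetUsersRequest request = new GetUsersRequest("jacknich"); // no names -> all users
//   GetUsersResponse response = client.security().getUsers(request, RequestOptions.DEFAULT);
//   for (User user : response.getUsers()) {
//       System.out.println(user.getUsername() + " " + user.getRoles());
//   }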
/**
* Create/update a user in the native realm synchronously.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html">

View File

@@ -36,6 +36,7 @@ import org.elasticsearch.client.security.EnableUserRequest;
import org.elasticsearch.client.security.GetPrivilegesRequest;
import org.elasticsearch.client.security.GetRoleMappingsRequest;
import org.elasticsearch.client.security.GetRolesRequest;
import org.elasticsearch.client.security.GetUsersRequest;
import org.elasticsearch.client.security.HasPrivilegesRequest;
import org.elasticsearch.client.security.InvalidateTokenRequest;
import org.elasticsearch.client.security.PutPrivilegesRequest;
@@ -67,6 +68,15 @@ final class SecurityRequestConverters {
return request;
}
static Request getUsers(GetUsersRequest getUsersRequest) {
RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_security/user");
if (getUsersRequest.getUsernames().size() > 0) {
builder.addPathPart(Strings.collectionToCommaDelimitedString(getUsersRequest.getUsernames()));
}
return new Request(HttpGet.METHOD_NAME, builder.build());
}
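// Hedged illustration (mirrors SecurityRequestConvertersTests further below):
//   new GetUsersRequest()           -> GET /_security/user
//   new GetUsersRequest("u1", "u2") -> GET /_security/user/u1,u2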
static Request putUser(PutUserRequest putUserRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_security/user")

View File

@@ -0,0 +1,58 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.security;
import org.elasticsearch.client.Validatable;
import org.elasticsearch.common.util.set.Sets;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
/**
* Request object to retrieve users from the native realm
*/
public class GetUsersRequest implements Validatable {
private final Set<String> usernames;
public GetUsersRequest(final String... usernames) {
if (usernames != null) {
this.usernames = Collections.unmodifiableSet(Sets.newHashSet(usernames));
} else {
this.usernames = Collections.emptySet();
}
}
public Set<String> getUsernames() {
return usernames;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof GetUsersRequest)) return false;
GetUsersRequest that = (GetUsersRequest) o;
return Objects.equals(usernames, that.usernames);
}
@Override
public int hashCode() {
return Objects.hash(usernames);
}
}

View File

@@ -0,0 +1,134 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.security;
import org.elasticsearch.client.security.user.User;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.common.xcontent.XContentParserUtils;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* Response when requesting zero or more users.
* Holds a set of {@link User} objects.
*/
public class GetUsersResponse {
private final Set<User> users;
private final Set<User> enabledUsers;
public GetUsersResponse(Set<User> users, Set<User> enabledUsers) {
this.users = Collections.unmodifiableSet(users);
this.enabledUsers = Collections.unmodifiableSet(enabledUsers);
}
public Set<User> getUsers() {
return users;
}
public Set<User> getEnabledUsers() {
return enabledUsers;
}
public static GetUsersResponse fromXContent(XContentParser parser) throws IOException {
XContentParserUtils.ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
final Set<User> users = new HashSet<>();
final Set<User> enabledUsers = new HashSet<>();
Token token;
while ((token = parser.nextToken()) != Token.END_OBJECT) {
XContentParserUtils.ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation);
ParsedUser parsedUser = USER_PARSER.parse(parser, parser.currentName());
users.add(parsedUser.user);
if (parsedUser.enabled) {
enabledUsers.add(parsedUser.user);
}
}
return new GetUsersResponse(users, enabledUsers);
}
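// Hedged sketch of the body this method parses (one top-level field per username,
// matching the ParseFields declared below; the sample values are assumptions):
//   {
//     "jacknich": { "username": "jacknich", "roles": ["admin"], "full_name": "Jack Nicholson",
//                   "email": "jack@example.com", "metadata": {}, "enabled": true }
//   }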
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof GetUsersResponse)) return false;
GetUsersResponse that = (GetUsersResponse) o;
return Objects.equals(users, that.users);
}
@Override
public int hashCode() {
return Objects.hash(users);
}
public static final ParseField USERNAME = new ParseField("username");
public static final ParseField ROLES = new ParseField("roles");
public static final ParseField FULL_NAME = new ParseField("full_name");
public static final ParseField EMAIL = new ParseField("email");
public static final ParseField METADATA = new ParseField("metadata");
public static final ParseField ENABLED = new ParseField("enabled");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<ParsedUser, String> USER_PARSER = new ConstructingObjectParser<>("user_info",
(constructorObjects) -> {
int i = 0;
final String username = (String) constructorObjects[i++];
final Collection<String> roles = (Collection<String>) constructorObjects[i++];
final Map<String, Object> metadata = (Map<String, Object>) constructorObjects[i++];
final Boolean enabled = (Boolean) constructorObjects[i++];
final String fullName = (String) constructorObjects[i++];
final String email = (String) constructorObjects[i++];
return new ParsedUser(username, roles, metadata, enabled, fullName, email);
});
static {
USER_PARSER.declareString(constructorArg(), USERNAME);
USER_PARSER.declareStringArray(constructorArg(), ROLES);
USER_PARSER.declareObject(constructorArg(), (parser, c) -> parser.map(), METADATA);
USER_PARSER.declareBoolean(constructorArg(), ENABLED);
USER_PARSER.declareStringOrNull(optionalConstructorArg(), FULL_NAME);
USER_PARSER.declareStringOrNull(optionalConstructorArg(), EMAIL);
}
protected static final class ParsedUser {
protected User user;
protected boolean enabled;
public ParsedUser(String username, Collection<String> roles, Map<String, Object> metadata, Boolean enabled,
@Nullable String fullName, @Nullable String email) {
String checkedUsername = Objects.requireNonNull(username, "`username` is required, cannot be null");
Collection<String> checkedRoles = Collections.unmodifiableSet(new HashSet<>(
Objects.requireNonNull(roles, "`roles` is required, cannot be null. Pass an empty Collection instead.")));
Map<String, Object> checkedMetadata = Collections
.unmodifiableMap(Objects.requireNonNull(metadata, "`metadata` is required, cannot be null. Pass an empty map instead."));
this.user = new User(checkedUsername, checkedRoles, checkedMetadata, fullName, email);
this.enabled = enabled;
}
}
}

View File

@@ -29,7 +29,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;
/**
* A user to be utilized with security APIs.
* Can be an existing authenticated user or a new user to be enrolled in the native realm.

View File

@@ -65,6 +65,10 @@ import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.index.reindex.UpdateByQueryAction;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.document.RestDeleteAction;
import org.elasticsearch.rest.action.document.RestGetAction;
import org.elasticsearch.rest.action.document.RestMultiGetAction;
import org.elasticsearch.rest.action.document.RestUpdateAction;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
@@ -173,6 +177,23 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
}
public void testDeleteWithTypes() throws IOException {
String docId = "id";
highLevelClient().index(new IndexRequest("index", "type", docId)
.source(Collections.singletonMap("foo", "bar")), RequestOptions.DEFAULT);
DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId);
DeleteResponse deleteResponse = execute(deleteRequest,
highLevelClient()::delete,
highLevelClient()::deleteAsync,
expectWarnings(RestDeleteAction.TYPES_DEPRECATION_MESSAGE));
assertEquals("index", deleteResponse.getIndex());
assertEquals("type", deleteResponse.getType());
assertEquals(docId, deleteResponse.getId());
assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
}
public void testExists() throws IOException {
{
GetRequest getRequest = new GetRequest("index", "id");
@@ -331,6 +352,29 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
}
public void testGetWithTypes() throws IOException {
String document = "{\"field\":\"value\"}";
IndexRequest index = new IndexRequest("index", "type", "id");
index.source(document, XContentType.JSON);
index.setRefreshPolicy(RefreshPolicy.IMMEDIATE);
highLevelClient().index(index, RequestOptions.DEFAULT);
GetRequest getRequest = new GetRequest("index", "type", "id");
GetResponse getResponse = execute(getRequest,
highLevelClient()::get,
highLevelClient()::getAsync,
expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
assertEquals("index", getResponse.getIndex());
assertEquals("type", getResponse.getType());
assertEquals("id", getResponse.getId());
assertTrue(getResponse.isExists());
assertFalse(getResponse.isSourceEmpty());
assertEquals(1L, getResponse.getVersion());
assertEquals(document, getResponse.getSourceAsString());
}
public void testMultiGet() throws IOException {
{
MultiGetRequest multiGetRequest = new MultiGetRequest();
@@ -387,6 +431,36 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
}
public void testMultiGetWithTypes() throws IOException {
BulkRequest bulk = new BulkRequest();
bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE);
bulk.add(new IndexRequest("index", "type", "id1")
.source("{\"field\":\"value1\"}", XContentType.JSON));
bulk.add(new IndexRequest("index", "type", "id2")
.source("{\"field\":\"value2\"}", XContentType.JSON));
highLevelClient().bulk(bulk, RequestOptions.DEFAULT);
MultiGetRequest multiGetRequest = new MultiGetRequest();
multiGetRequest.add("index", "id1");
multiGetRequest.add("index", "type", "id2");
MultiGetResponse response = execute(multiGetRequest,
highLevelClient()::mget,
highLevelClient()::mgetAsync,
expectWarnings(RestMultiGetAction.TYPES_DEPRECATION_MESSAGE));
assertEquals(2, response.getResponses().length);
GetResponse firstResponse = response.getResponses()[0].getResponse();
assertEquals("index", firstResponse.getIndex());
assertEquals("type", firstResponse.getType());
assertEquals("id1", firstResponse.getId());
GetResponse secondResponse = response.getResponses()[1].getResponse();
assertEquals("index", secondResponse.getIndex());
assertEquals("type", secondResponse.getType());
assertEquals("id2", secondResponse.getId());
}
public void testIndex() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
{
@@ -492,7 +566,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
public void testUpdate() throws IOException {
{
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "does_not_exist");
UpdateRequest updateRequest = new UpdateRequest("index", "does_not_exist");
updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () ->
@@ -507,14 +581,14 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT);
assertEquals(RestStatus.CREATED, indexResponse.status());
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id");
UpdateRequest updateRequest = new UpdateRequest("index", "id");
updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values()));
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
assertEquals(RestStatus.OK, updateResponse.status());
assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion());
UpdateRequest updateRequestConflict = new UpdateRequest("index", "_doc", "id");
UpdateRequest updateRequestConflict = new UpdateRequest("index", "id");
updateRequestConflict.doc(singletonMap("field", "with_version_conflict"), randomFrom(XContentType.values()));
updateRequestConflict.version(indexResponse.getVersion());
@@ -530,7 +604,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT);
assertEquals(RestStatus.CREATED, indexResponse.status());
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "with_script");
UpdateRequest updateRequest = new UpdateRequest("index", "with_script");
Script script = new Script(ScriptType.INLINE, "painless", "ctx._source.counter += params.count", singletonMap("count", 8));
updateRequest.script(script);
updateRequest.fetchSource(true);
@@ -551,7 +625,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
assertEquals(RestStatus.CREATED, indexResponse.status());
assertEquals(12L, indexResponse.getVersion());
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "with_doc");
UpdateRequest updateRequest = new UpdateRequest("index", "with_doc");
updateRequest.doc(singletonMap("field_2", "two"), randomFrom(XContentType.values()));
updateRequest.fetchSource("field_*", "field_3");
@@ -573,7 +647,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
assertEquals(RestStatus.CREATED, indexResponse.status());
assertEquals(1L, indexResponse.getVersion());
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "noop");
UpdateRequest updateRequest = new UpdateRequest("index", "noop");
updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values()));
UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
@@ -589,7 +663,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
assertEquals(2L, updateResponse.getVersion());
}
{
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "with_upsert");
UpdateRequest updateRequest = new UpdateRequest("index", "with_upsert");
updateRequest.upsert(singletonMap("doc_status", "created"));
updateRequest.doc(singletonMap("doc_status", "updated"));
updateRequest.fetchSource(true);
@@ -604,7 +678,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
assertEquals("created", getResult.sourceAsMap().get("doc_status"));
}
{
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "with_doc_as_upsert");
UpdateRequest updateRequest = new UpdateRequest("index", "with_doc_as_upsert");
updateRequest.doc(singletonMap("field", "initialized"));
updateRequest.fetchSource(true);
updateRequest.docAsUpsert(true);
@@ -619,7 +693,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
assertEquals("initialized", getResult.sourceAsMap().get("field"));
}
{
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "with_scripted_upsert");
UpdateRequest updateRequest = new UpdateRequest("index", "with_scripted_upsert");
updateRequest.fetchSource(true);
updateRequest.script(new Script(ScriptType.INLINE, "painless", "ctx._source.level = params.test", singletonMap("test", "C")));
updateRequest.scriptedUpsert(true);
@@ -637,7 +711,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
{
IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", "id");
UpdateRequest updateRequest = new UpdateRequest("index", "id");
updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON));
updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML));
execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync);
@@ -647,6 +721,22 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
}
}
public void testUpdateWithTypes() throws IOException {
IndexRequest indexRequest = new IndexRequest("index", "type", "id");
indexRequest.source(singletonMap("field", "value"));
IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT);
UpdateRequest updateRequest = new UpdateRequest("index", "type", "id");
updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values()));
UpdateResponse updateResponse = execute(updateRequest,
highLevelClient()::update,
highLevelClient()::updateAsync,
expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE));
assertEquals(RestStatus.OK, updateResponse.status());
assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion());
}
public void testBulk() throws IOException {
int nbItems = randomIntBetween(10, 100);
boolean[] errors = new boolean[nbItems];
@@ -687,7 +777,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
bulkRequest.add(createRequest);
} else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", id)
UpdateRequest updateRequest = new UpdateRequest("index", id)
.doc(new IndexRequest().source(source, xContentType));
if (erroneous == false) {
assertEquals(RestStatus.CREATED,
@@ -996,7 +1086,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
processor.add(createRequest);
} else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = new UpdateRequest("index", "_doc", id)
UpdateRequest updateRequest = new UpdateRequest("index", id)
.doc(new IndexRequest().source(xContentType, "id", i));
if (erroneous == false) {
assertEquals(RestStatus.CREATED,

View File

@@ -630,10 +630,9 @@ public class RequestConvertersTests extends ESTestCase {
Map<String, String> expectedParams = new HashMap<>();
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);
UpdateRequest updateRequest = new UpdateRequest(index, type, id);
UpdateRequest updateRequest = new UpdateRequest(index, id);
updateRequest.detectNoop(randomBoolean());
if (randomBoolean()) {
@@ -687,7 +686,7 @@ public class RequestConvertersTests extends ESTestCase {
}
Request request = RequestConverters.update(updateRequest);
assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.getEndpoint());
assertEquals("/" + index + "/_update/" + id, request.getEndpoint());
assertEquals(expectedParams, request.getParameters());
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
@@ -718,6 +717,23 @@ public class RequestConvertersTests extends ESTestCase {
}
}
public void testUpdateWithType() throws IOException {
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);
UpdateRequest updateRequest = new UpdateRequest(index, type, id);
XContentType xContentType = XContentType.JSON;
BytesReference source = RandomObjects.randomSource(random(), xContentType);
updateRequest.doc(new IndexRequest().source(source, xContentType));
Request request = RequestConverters.update(updateRequest);
assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.getEndpoint());
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertToXContentBody(updateRequest, request.getEntity());
}
public void testUpdateWithDifferentContentTypes() {
IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
UpdateRequest updateRequest = new UpdateRequest();

View File

@@ -28,6 +28,8 @@ import org.elasticsearch.client.security.DeleteUserRequest;
import org.elasticsearch.client.security.DeleteUserResponse;
import org.elasticsearch.client.security.GetRolesRequest;
import org.elasticsearch.client.security.GetRolesResponse;
import org.elasticsearch.client.security.GetUsersRequest;
import org.elasticsearch.client.security.GetUsersResponse;
import org.elasticsearch.client.security.PutRoleRequest;
import org.elasticsearch.client.security.PutRoleResponse;
import org.elasticsearch.client.security.PutUserRequest;
@@ -42,6 +44,7 @@ import org.elasticsearch.client.security.user.privileges.IndicesPrivilegesTests;
import org.elasticsearch.client.security.user.privileges.Role;
import org.elasticsearch.common.CharArrays;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashMap;
@@ -74,6 +77,22 @@ public class SecurityIT extends ESRestHighLevelClientTestCase {
highLevelClient().getLowLevelClient().performRequest(deleteUserRequest);
}
public void testGetUser() throws Exception {
final SecurityClient securityClient = highLevelClient().security();
// create user
final PutUserRequest putUserRequest = randomPutUserRequest(randomBoolean());
final PutUserResponse putUserResponse = execute(putUserRequest, securityClient::putUser, securityClient::putUserAsync);
// assert user created
assertThat(putUserResponse.isCreated(), is(true));
// get user
final GetUsersRequest getUsersRequest = new GetUsersRequest(putUserRequest.getUser().getUsername());
final GetUsersResponse getUsersResponse = execute(getUsersRequest, securityClient::getUsers, securityClient::getUsersAsync);
// assert user was correctly retrieved
ArrayList<User> users = new ArrayList<>();
users.addAll(getUsersResponse.getUsers());
assertThat(users.get(0), is(putUserRequest.getUser()));
}
public void testAuthenticate() throws Exception {
final SecurityClient securityClient = highLevelClient().security();
// test fixture: put enabled user
@@ -89,6 +108,15 @@ public class SecurityIT extends ESRestHighLevelClientTestCase {
assertThat(authenticateResponse.getUser(), is(putUserRequest.getUser()));
assertThat(authenticateResponse.enabled(), is(true));
// get user
final GetUsersRequest getUsersRequest =
new GetUsersRequest(putUserRequest.getUser().getUsername());
final GetUsersResponse getUsersResponse =
execute(getUsersRequest, securityClient::getUsers, securityClient::getUsersAsync);
ArrayList<User> users = new ArrayList<>();
users.addAll(getUsersResponse.getUsers());
assertThat(users.get(0), is(putUserRequest.getUser()));
// delete user
final DeleteUserRequest deleteUserRequest =
new DeleteUserRequest(putUserRequest.getUser().getUsername(), putUserRequest.getRefreshPolicy());

View File

@ -34,6 +34,7 @@ import org.elasticsearch.client.security.EnableUserRequest;
import org.elasticsearch.client.security.GetPrivilegesRequest;
import org.elasticsearch.client.security.GetRoleMappingsRequest;
import org.elasticsearch.client.security.GetRolesRequest;
import org.elasticsearch.client.security.GetUsersRequest;
import org.elasticsearch.client.security.PutPrivilegesRequest;
import org.elasticsearch.client.security.PutRoleMappingRequest;
import org.elasticsearch.client.security.PutRoleRequest;
@ -101,6 +102,21 @@ public class SecurityRequestConvertersTests extends ESTestCase {
assertNull(request.getEntity());
}
public void testGetUsers() {
final String[] users = randomArray(0, 5, String[]::new, () -> randomAlphaOfLength(5));
GetUsersRequest getUsersRequest = new GetUsersRequest(users);
Request request = SecurityRequestConverters.getUsers(getUsersRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
if (users.length == 0) {
assertEquals("/_security/user", request.getEndpoint());
} else {
assertEquals("/_security/user/" + Strings.collectionToCommaDelimitedString(getUsersRequest.getUsernames()),
request.getEndpoint());
}
assertNull(request.getEntity());
assertEquals(Collections.emptyMap(), request.getParameters());
}
public void testPutRoleMapping() throws IOException {
final String username = randomAlphaOfLengthBetween(4, 7);
final String rolename = randomAlphaOfLengthBetween(4, 7);

View File

@ -296,8 +296,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
//tag::update-request
UpdateRequest request = new UpdateRequest(
"posts", // <1>
"_doc", // <2>
"1"); // <3>
"1"); // <2>
//end::update-request
request.fetchSource(true);
//tag::update-request-with-inline-script
@ -311,7 +310,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
assertEquals(4, updateResponse.getGetResult().getSource().get("field"));
request = new UpdateRequest("posts", "_doc", "1").fetchSource(true);
request = new UpdateRequest("posts", "1").fetchSource(true);
//tag::update-request-with-stored-script
Script stored = new Script(
ScriptType.STORED, null, "increment-field", parameters); // <1>
@ -326,7 +325,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
Map<String, Object> jsonMap = new HashMap<>();
jsonMap.put("updated", new Date());
jsonMap.put("reason", "daily update");
UpdateRequest request = new UpdateRequest("posts", "_doc", "1")
UpdateRequest request = new UpdateRequest("posts", "1")
.doc(jsonMap); // <1>
//end::update-request-with-doc-as-map
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
@ -341,7 +340,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
builder.field("reason", "daily update");
}
builder.endObject();
UpdateRequest request = new UpdateRequest("posts", "_doc", "1")
UpdateRequest request = new UpdateRequest("posts", "1")
.doc(builder); // <1>
//end::update-request-with-doc-as-xcontent
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
@ -349,7 +348,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
}
{
//tag::update-request-shortcut
UpdateRequest request = new UpdateRequest("posts", "_doc", "1")
UpdateRequest request = new UpdateRequest("posts", "1")
.doc("updated", new Date(),
"reason", "daily update"); // <1>
//end::update-request-shortcut
@ -358,7 +357,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
}
{
//tag::update-request-with-doc-as-string
UpdateRequest request = new UpdateRequest("posts", "_doc", "1");
UpdateRequest request = new UpdateRequest("posts", "1");
String jsonString = "{" +
"\"updated\":\"2017-01-01\"," +
"\"reason\":\"daily update\"" +
@ -374,7 +373,6 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::update-response
String index = updateResponse.getIndex();
String type = updateResponse.getType();
String id = updateResponse.getId();
long version = updateResponse.getVersion();
if (updateResponse.getResult() == DocWriteResponse.Result.CREATED) {
@ -415,7 +413,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
}
{
//tag::update-docnotfound
UpdateRequest request = new UpdateRequest("posts", "_doc", "does_not_exist")
UpdateRequest request = new UpdateRequest("posts", "does_not_exist")
.doc("field", "value");
try {
UpdateResponse updateResponse = client.update(
@ -429,7 +427,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
}
{
// tag::update-conflict
UpdateRequest request = new UpdateRequest("posts", "_doc", "1")
UpdateRequest request = new UpdateRequest("posts", "1")
.doc("field", "value")
.version(1);
try {
@ -443,7 +441,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
// end::update-conflict
}
{
UpdateRequest request = new UpdateRequest("posts", "_doc", "1").doc("reason", "no source");
UpdateRequest request = new UpdateRequest("posts", "1").doc("reason", "no source");
//tag::update-request-no-source
request.fetchSource(true); // <1>
//end::update-request-no-source
@ -453,7 +451,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
assertEquals(3, updateResponse.getGetResult().sourceAsMap().size());
}
{
UpdateRequest request = new UpdateRequest("posts", "_doc", "1").doc("reason", "source includes");
UpdateRequest request = new UpdateRequest("posts", "1").doc("reason", "source includes");
//tag::update-request-source-include
String[] includes = new String[]{"updated", "r*"};
String[] excludes = Strings.EMPTY_ARRAY;
@ -468,7 +466,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(sourceAsMap.containsKey("updated"));
}
{
UpdateRequest request = new UpdateRequest("posts", "_doc", "1").doc("reason", "source excludes");
UpdateRequest request = new UpdateRequest("posts", "1").doc("reason", "source excludes");
//tag::update-request-source-exclude
String[] includes = Strings.EMPTY_ARRAY;
String[] excludes = new String[]{"updated"};
@ -483,7 +481,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(sourceAsMap.containsKey("field"));
}
{
UpdateRequest request = new UpdateRequest("posts", "_doc", "id");
UpdateRequest request = new UpdateRequest("posts", "id");
// tag::update-request-routing
request.routing("routing"); // <1>
// end::update-request-routing
@ -520,7 +518,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
// end::update-request-active-shards
}
{
UpdateRequest request = new UpdateRequest("posts", "_doc", "async").doc("reason", "async update").docAsUpsert(true);
UpdateRequest request = new UpdateRequest("posts", "async").doc("reason", "async update").docAsUpsert(true);
ActionListener<UpdateResponse> listener;
// tag::update-execute-listener
@ -695,7 +693,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::bulk-request-with-mixed-operations
BulkRequest request = new BulkRequest();
request.add(new DeleteRequest("posts", "3")); // <1>
request.add(new UpdateRequest("posts", "_doc", "2") // <2>
request.add(new UpdateRequest("posts", "2") // <2>
.doc(XContentType.JSON,"other", "test"));
request.add(new IndexRequest("posts", "_doc", "4") // <3>
.source(XContentType.JSON,"field", "baz"));

View File

@ -54,6 +54,8 @@ import org.elasticsearch.client.security.GetRolesRequest;
import org.elasticsearch.client.security.GetRolesResponse;
import org.elasticsearch.client.security.GetSslCertificatesResponse;
import org.elasticsearch.client.security.GetUserPrivilegesResponse;
import org.elasticsearch.client.security.GetUsersRequest;
import org.elasticsearch.client.security.GetUsersResponse;
import org.elasticsearch.client.security.HasPrivilegesRequest;
import org.elasticsearch.client.security.HasPrivilegesResponse;
import org.elasticsearch.client.security.InvalidateTokenRequest;
@ -109,6 +111,98 @@ import static org.hamcrest.Matchers.nullValue;
public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
public void testGetUsers() throws Exception {
final RestHighLevelClient client = highLevelClient();
String[] usernames = new String[] {"user1", "user2", "user3"};
addUser(client, usernames[0], randomAlphaOfLengthBetween(6, 10));
addUser(client, usernames[1], randomAlphaOfLengthBetween(6, 10));
addUser(client, usernames[2], randomAlphaOfLengthBetween(6, 10));
{
//tag::get-users-request
GetUsersRequest request = new GetUsersRequest(usernames[0]);
//end::get-users-request
//tag::get-users-execute
GetUsersResponse response = client.security().getUsers(request, RequestOptions.DEFAULT);
//end::get-users-execute
//tag::get-users-response
List<User> users = new ArrayList<>(1);
users.addAll(response.getUsers());
//end::get-users-response
assertNotNull(response);
assertThat(users.size(), equalTo(1));
assertThat(users.get(0).getUsername(), is(usernames[0]));
}
{
//tag::get-users-list-request
GetUsersRequest request = new GetUsersRequest(usernames);
GetUsersResponse response = client.security().getUsers(request, RequestOptions.DEFAULT);
//end::get-users-list-request
List<User> users = new ArrayList<>(3);
users.addAll(response.getUsers());
assertNotNull(response);
assertThat(users.size(), equalTo(3));
assertThat(users.get(0).getUsername(), equalTo(usernames[0]));
assertThat(users.get(1).getUsername(), equalTo(usernames[1]));
assertThat(users.get(2).getUsername(), equalTo(usernames[2]));
}
{
//tag::get-users-all-request
GetUsersRequest request = new GetUsersRequest();
GetUsersResponse response = client.security().getUsers(request, RequestOptions.DEFAULT);
//end::get-users-all-request
List<User> users = new ArrayList<>(3);
users.addAll(response.getUsers());
assertNotNull(response);
// 9 users are expected to be returned
// test_users (3): user1, user2, user3
// system_users (6): elastic, beats_system, apm_system, logstash_system, kibana, remote_monitoring_user
assertThat(users.size(), equalTo(9));
}
{
GetUsersRequest request = new GetUsersRequest(usernames[0]);
ActionListener<GetUsersResponse> listener;
//tag::get-users-execute-listener
listener = new ActionListener<GetUsersResponse>() {
@Override
public void onResponse(GetUsersResponse getRolesResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
//end::get-users-execute-listener
assertNotNull(listener);
// Replace the empty listener by a blocking listener in test
final PlainActionFuture<GetUsersResponse> future = new PlainActionFuture<>();
listener = future;
//tag::get-users-execute-async
client.security().getUsersAsync(request, RequestOptions.DEFAULT, listener); // <1>
//end::get-users-execute-async
final GetUsersResponse response = future.get(30, TimeUnit.SECONDS);
List<User> users = new ArrayList<>(1);
users.addAll(response.getUsers());
assertNotNull(response);
assertThat(users.size(), equalTo(1));
assertThat(users.get(0).getUsername(), equalTo(usernames[0]));
}
}
public void testPutUser() throws Exception {
RestHighLevelClient client = highLevelClient();

View File

@ -38,12 +38,12 @@ public class AuthenticateResponseTests extends ESTestCase {
public void testFromXContent() throws IOException {
xContentTester(
this::createParser,
this::createTestInstance,
this::toXContent,
AuthenticateResponse::fromXContent)
.supportsUnknownFields(false)
.test();
this::createParser,
this::createTestInstance,
this::toXContent,
AuthenticateResponse::fromXContent)
.supportsUnknownFields(false)
.test();
}
public void testEqualsAndHashCode() {
@ -108,7 +108,7 @@ public class AuthenticateResponseTests extends ESTestCase {
private AuthenticateResponse copy(AuthenticateResponse response) {
final User originalUser = response.getUser();
final User copyUser = new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(),
originalUser.getFullName(), originalUser.getEmail());
originalUser.getFullName(), originalUser.getEmail());
return new AuthenticateResponse(copyUser, response.enabled(), response.getAuthenticationRealm(),
response.getLookupRealm());
}
@ -117,9 +117,9 @@ public class AuthenticateResponseTests extends ESTestCase {
final User originalUser = response.getUser();
switch (randomIntBetween(1, 8)) {
case 1:
return new AuthenticateResponse(new User(originalUser.getUsername() + "wrong", originalUser.getRoles(),
return new AuthenticateResponse(new User(originalUser.getUsername() + "wrong", originalUser.getRoles(),
originalUser.getMetadata(), originalUser.getFullName(), originalUser.getEmail()), response.enabled(),
response.getAuthenticationRealm(), response.getLookupRealm());
response.getAuthenticationRealm(), response.getLookupRealm());
case 2:
final Collection<String> wrongRoles = new ArrayList<>(originalUser.getRoles());
wrongRoles.add(randomAlphaOfLengthBetween(1, 4));
@ -134,11 +134,11 @@ public class AuthenticateResponseTests extends ESTestCase {
response.getLookupRealm());
case 4:
return new AuthenticateResponse(new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(),
originalUser.getFullName() + "wrong", originalUser.getEmail()), response.enabled(),
originalUser.getFullName() + "wrong", originalUser.getEmail()), response.enabled(),
response.getAuthenticationRealm(), response.getLookupRealm());
case 5:
return new AuthenticateResponse(new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(),
originalUser.getFullName(), originalUser.getEmail() + "wrong"), response.enabled(),
originalUser.getFullName(), originalUser.getEmail() + "wrong"), response.enabled(),
response.getAuthenticationRealm(), response.getLookupRealm());
case 6:
return new AuthenticateResponse(new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(),

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.security;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.EqualsHashCodeTestUtils;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
public class GetUsersRequestTests extends ESTestCase {
public void testGetUsersRequest() {
final String[] users = randomArray(0, 5, String[]::new, () -> randomAlphaOfLength(5));
GetUsersRequest getUsersRequest = new GetUsersRequest(users);
assertThat(getUsersRequest.getUsernames().size(), equalTo(users.length));
assertThat(getUsersRequest.getUsernames(), containsInAnyOrder(users));
}
public void testEqualsHashCode() {
final String[] users = randomArray(0, 5, String[]::new, () -> randomAlphaOfLength(5));
final GetUsersRequest getUsersRequest = new GetUsersRequest(users);
assertNotNull(getUsersRequest);
EqualsHashCodeTestUtils.checkEqualsAndHashCode(getUsersRequest, (original) -> {
return new GetUsersRequest(original.getUsernames().toArray(new String[0]));
});
EqualsHashCodeTestUtils.checkEqualsAndHashCode(getUsersRequest, (original) -> {
return new GetUsersRequest(original.getUsernames().toArray(new String[0]));
}, GetUsersRequestTests::mutateTestItem);
}
private static GetUsersRequest mutateTestItem(GetUsersRequest original) {
final int minUsers = original.getUsernames().isEmpty() ? 1 : 0;
return new GetUsersRequest(randomArray(minUsers, 5, String[]::new, () -> randomAlphaOfLength(6)));
}
}

View File

@ -0,0 +1,126 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.security;
import org.elasticsearch.client.security.user.User;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.EqualsHashCodeTestUtils;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import static org.hamcrest.Matchers.equalTo;
/** Tests the response for getting users from the security HLRC. */
public class GetUsersResponseTests extends ESTestCase {
public void testFromXContent() throws IOException {
String json =
"{\n" +
" \"jacknich\": {\n" +
" \"username\": \"jacknich\",\n" +
" \"roles\": [\n" +
" \"admin\", \"other_role1\"\n" +
" ],\n" +
" \"full_name\": \"Jack Nicholson\",\n" +
" \"email\": \"jacknich@example.com\",\n" +
" \"metadata\": { \"intelligence\" : 7 },\n" +
" \"enabled\": true\n" +
" }\n" +
"}";
final GetUsersResponse response = GetUsersResponse.fromXContent((XContentType.JSON.xContent().createParser(
new NamedXContentRegistry(Collections.emptyList()), new DeprecationHandler() {
@Override
public void usedDeprecatedName(String usedName, String modernName) {
}
@Override
public void usedDeprecatedField(String usedName, String replacedWith) {
}
}, json)));
assertThat(response.getUsers().size(), equalTo(1));
final User user = response.getUsers().iterator().next();
assertThat(user.getUsername(), equalTo("jacknich"));
assertThat(user.getRoles().size(), equalTo(2));
assertThat(user.getFullName(), equalTo("Jack Nicholson"));
assertThat(user.getEmail(), equalTo("jacknich@example.com"));
final Map<String, Object> metadata = new HashMap<>();
metadata.put("intelligence", 7);
assertThat(metadata, equalTo(user.getMetadata()));
}
public void testEqualsHashCode() {
final Set<User> users = new HashSet<>();
final Set<User> enabledUsers = new HashSet<>();
Map<String, Object> metadata = new HashMap<>();
metadata.put("intelligence", 1);
final User user1 = new User("testUser1", Arrays.asList(new String[] {"admin", "other_role1"}),
metadata, "Test User 1", null);
users.add(user1);
enabledUsers.add(user1);
Map<String, Object> metadata2 = new HashMap<>();
metadata2.put("intelligence", 9);
metadata2.put("specialty", "geo");
final User user2 = new User("testUser2", Arrays.asList(new String[] {"admin"}),
metadata, "Test User 2", "testuser2@example.com");
users.add(user2);
enabledUsers.add(user2);
final GetUsersResponse getUsersResponse = new GetUsersResponse(users, enabledUsers);
assertNotNull(getUsersResponse);
EqualsHashCodeTestUtils.checkEqualsAndHashCode(getUsersResponse, (original) -> {
return new GetUsersResponse(original.getUsers(), original.getEnabledUsers());
});
EqualsHashCodeTestUtils.checkEqualsAndHashCode(getUsersResponse, (original) -> {
return new GetUsersResponse(original.getUsers(), original.getEnabledUsers());
}, GetUsersResponseTests::mutateTestItem);
}
private static GetUsersResponse mutateTestItem(GetUsersResponse original) {
if (randomBoolean()) {
final Set<User> users = new HashSet<>();
final Set<User> enabledUsers = new HashSet<>();
Map<String, Object> metadata = new HashMap<>();
metadata.put("intelligence", 1);
final User user1 = new User("testUser1", Arrays.asList(new String[] {"admin", "other_role1"}),
metadata, "Test User 1", null);
users.add(user1);
enabledUsers.add(user1);
return new GetUsersResponse(users, enabledUsers);
}
Map<String, Object> metadata = new HashMap<>();
metadata.put("intelligence", 5); // change intelligence
final User user1 = new User("testUser1", Arrays.asList(new String[] {"admin", "other_role1"}),
metadata, "Test User 1", null);
// fresh sets suffice here; copying the original sets only to clear them was redundant
Set<User> newUsers = new HashSet<>();
Set<User> enabledUsers = new HashSet<>();
newUsers.add(user1);
enabledUsers.add(user1);
return new GetUsersResponse(newUsers, enabledUsers);
}
}

View File

@ -46,7 +46,7 @@ public class CreatedLocationHeaderIT extends ESRestTestCase {
}
public void testUpsert() throws IOException {
Request request = new Request("POST", "test/_doc/1/_update");
Request request = new Request("POST", "test/_update/1");
request.setJsonEntity("{"
+ "\"doc\": {\"test\": \"test\"},"
+ "\"doc_as_upsert\": true}");

View File

@ -69,7 +69,7 @@ public class WaitForRefreshAndCloseIT extends ESRestTestCase {
Request createDoc = new Request("PUT", docPath());
createDoc.setJsonEntity("{\"test\":\"test\"}");
client().performRequest(createDoc);
Request updateDoc = new Request("POST", docPath() + "/_update");
Request updateDoc = new Request("POST", "test/_update/1");
updateDoc.setJsonEntity("{\"doc\":{\"name\":\"test\"}}");
closeWhileListenerEngaged(start(updateDoc));
}

View File

@ -67,11 +67,11 @@ ${path.logs}
#
#discovery.zen.ping.unicast.hosts: ["host1", "host2"]
#
# Prevent the "split brain" by configuring the majority of nodes (total number of master-eligible nodes / 2 + 1):
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#discovery.zen.minimum_master_nodes:
#cluster.initial_master_nodes: ["node-1", "node-2"]
#
# For more information, consult the zen discovery module documentation.
# For more information, consult the discovery and cluster formation module documentation.
#
# ---------------------------------- Gateway -----------------------------------
#

View File

@ -17,8 +17,7 @@ An +{request}+ requires the following arguments:
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Index
<2> Type
<3> Document id
<2> Document id
The Update API allows you to update an existing document by using a script
or by passing a partial document.
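For orientation, a minimal sketch of the typeless request (assuming a connected `RestHighLevelClient` named `client`; the index `posts`, id `1`, and field values are illustrative):
["source","java"]
--------------------------------------------------
// index and document id only; the type parameter is gone
UpdateRequest request = new UpdateRequest("posts", "1")
    .doc("reason", "daily update"); // partial document as field/value pairs
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
--------------------------------------------------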

View File

@ -0,0 +1,48 @@
--
:api: get-users
:request: GetUsersRequest
:response: GetUsersResponse
--
[id="{upid}-{api}"]
=== Get Users API
[id="{upid}-{api}-request"]
==== Get Users Request
Retrieving a user can be performed using the `security().getUsers()`
method, setting the username on +{request}+:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
Retrieving multiple users can be performed using the `security().getUsers()`
method, setting multiple usernames on +{request}+:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-list-request]
--------------------------------------------------
Retrieving all users can be performed using the `security().getUsers()`
method without specifying any usernames on +{request}+:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-all-request]
--------------------------------------------------
include::../execution.asciidoc[]
[id="{upid}-{api}-response"]
==== Get Users Response
The returned +{response}+ contains the retrieved users, which can be inspected as follows.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
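Tying the pieces together, a minimal hedged sketch (assuming a connected `RestHighLevelClient` named `client`; the username is illustrative):
["source","java"]
--------------------------------------------------
GetUsersRequest request = new GetUsersRequest("jacknich"); // no arguments would retrieve all users
GetUsersResponse response = client.security().getUsers(request, RequestOptions.DEFAULT);
for (User user : response.getUsers()) { // iterate over the retrieved users
    System.out.println(user.getUsername() + " roles=" + user.getRoles());
}
--------------------------------------------------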

View File

@ -387,6 +387,7 @@ include::rollup/get_rollup_index_caps.asciidoc[]
The Java High Level REST Client supports the following Security APIs:
* <<java-rest-high-security-put-user>>
* <<{upid}-get-users>>
* <<{upid}-delete-user>>
* <<java-rest-high-security-enable-user>>
* <<java-rest-high-security-disable-user>>
@ -410,6 +411,7 @@ The Java High Level REST Client supports the following Security APIs:
* <<{upid}-delete-privileges>>
include::security/put-user.asciidoc[]
include::security/get-users.asciidoc[]
include::security/delete-user.asciidoc[]
include::security/enable-user.asciidoc[]
include::security/disable-user.asciidoc[]

View File

@ -11,11 +11,12 @@ include::install_remove.asciidoc[]
[[discovery-ec2-usage]]
==== Getting started with AWS
The plugin provides a hosts provider for zen discovery named `ec2`. This hosts provider
finds other Elasticsearch instances in EC2 through AWS metadata. Authentication is done using
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[IAM Role]
credentials by default. The only necessary configuration change to enable the plugin
is setting the unicast host provider for zen discovery:
The plugin provides a hosts provider for zen discovery named `ec2`. This hosts
provider finds other Elasticsearch instances in EC2 through AWS metadata.
Authentication is done using
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html[IAM
Role] credentials by default. To enable the plugin, set the unicast host
provider for Zen discovery to `ec2`:
[source,yaml]
----
@ -51,9 +52,9 @@ Those that must be stored in the keystore are marked as `Secure`.
`endpoint`::
The ec2 service endpoint to connect to. This will be automatically
figured out by the ec2 client based on the instance location, but
can be specified explicitly. See http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region.
The ec2 service endpoint to connect to. See
http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region. This
defaults to `ec2.us-east-1.amazonaws.com`.
`protocol`::

View File

@ -27,6 +27,7 @@ A `percentiles_bucket` aggregation looks like this in isolation:
details)|Optional | `skip`
|`format` |format to apply to the output value of this aggregation |Optional | `null`
|`percents` |The list of percentiles to calculate |Optional | `[ 1, 5, 25, 50, 75, 95, 99 ]`
|`keyed` |Flag which returns the range as a hash instead of an array of key-value pairs |Optional | `true`
|===
The following snippet calculates the percentiles for the total monthly `sales` buckets:

View File

@ -49,8 +49,8 @@ For more information about index settings, see {ref}/index-modules.html[Index mo
If you want to replicate indices created by APM Server or Beats, and are
allowing APM Server or Beats to manage index templates, you need to configure
soft deletes on the underlying index templates. To configure soft deletes on the
underlying index templates, add the following changes to the relevant APM Server
or Beats configuration file.
underlying index templates, incorporate the following changes to the relevant
APM Server or Beats configuration file.
["source","yaml"]
----------------------------------------------------------------------
@ -62,3 +62,37 @@ setup.template.settings:
For additional information on controlling the index templates managed by APM
Server or Beats, see the relevant documentation on loading the Elasticsearch
index template.
[float]
[[ccr-overview-logstash]]
==== Setting soft deletes on indices created by Logstash
If you want to replicate indices created by Logstash, and are using Logstash to
manage index templates, you need to configure soft deletes on a custom Logstash
index template. To configure soft deletes on the underlying index template,
incorporate the following change to a custom Logstash template.
["source","js"]
----------------------------------------------------------------------
{
"settings" : {
"index.soft_deletes.retention.operations" : 1024
}
}
----------------------------------------------------------------------
// NOTCONSOLE
Additionally, you will need to configure the Elasticsearch output plugin to use
this custom template.
["source","ruby"]
----------------------------------------------------------------------
output {
elasticsearch {
template => "/path/to/custom/logstash/template.json"
}
}
----------------------------------------------------------------------
For additional information on controlling the index templates managed by
Logstash, see the relevant documentation on the Elasticsearch output plugin.

View File

@ -42,6 +42,7 @@ PUT _ilm/policy/my_policy
settings for a particular index {ilm} will not manage that index.
To set the policy for an index there are two options:
1. Apply the policy to an index template and bootstrap creating the first index
2. Apply the policy to a new index in a create index request
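As a hedged illustration of the second option, the policy can be named in the index settings at creation time (sketch via the low-level REST client; the index name, policy name, and `client` variable are assumptions):
["source","java"]
--------------------------------------------------
Request createIndex = new Request("PUT", "/my_index-000001");
createIndex.setJsonEntity(
    "{ \"settings\": { \"index.lifecycle.name\": \"my_policy\" } }"); // attach the policy when the index is created
client.performRequest(createIndex);
--------------------------------------------------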

View File

@ -90,7 +90,8 @@ Response:
"repository" : "my_repository",
"snapshot" : "my_snapshot",
"index" : "index1",
"version" : "{version}"
"version" : "{version}",
"restoreUUID": "PDh1ZAOaRbiGIVtCvZOMww"
},
"target" : {
"id" : "ryqJ5lO5S4-lSFbGntkEkg",

View File

@ -33,7 +33,7 @@ PUT _template/template_1
},
"created_at": {
"type": "date",
"format": "EEE MMM dd HH:mm:ss Z YYYY"
"format": "EEE MMM dd HH:mm:ss Z yyyy"
}
}
}

View File

@ -29,3 +29,9 @@ To check if a document is missing a value, you can use
Malformed scripts, either in search templates, ingest pipelines or search
requests, return `400 - Bad request` while they would previously return
`500 - Internal Server Error`. This also applies for stored scripts.
[float]
==== getValues() removed
The `ScriptDocValues#getValues()` method is deprecated in 6.6 and will
be removed in 7.0. Use `doc["foo"]` in place of `doc["foo"].values`.

View File

@ -40,6 +40,11 @@ The queries in this group are:
A simpler, more robust version of the `query_string` syntax suitable
for exposing directly to users.
<<query-dsl-intervals-query,`intervals` query>>::
A full text query that allows fine-grained control of the ordering and
proximity of matching terms.
include::match-query.asciidoc[]
include::match-phrase-query.asciidoc[]
@ -53,3 +58,5 @@ include::common-terms-query.asciidoc[]
include::query-string-query.asciidoc[]
include::simple-query-string-query.asciidoc[]
include::intervals-query.asciidoc[]

View File

@ -0,0 +1,260 @@
[[query-dsl-intervals-query]]
=== Intervals query
An `intervals` query allows fine-grained control over the order and proximity of
matching terms. Matching rules are constructed from a small set of definitions,
and the rules are then applied to terms from a particular `field`.
The definitions produce sequences of minimal intervals that span terms in a
body of text. These intervals can be further combined and filtered by
parent sources.
The example below will search for the phrase `my favourite food` appearing
before the terms `hot` and `water` or `cold` and `porridge` in any order, in
the field `my_text`:
[source,js]
--------------------------------------------------
POST _search
{
"query": {
"intervals" : {
"my_text" : {
"all_of" : {
"ordered" : true,
"intervals" : [
{
"match" : {
"query" : "my favourite food",
"max_gaps" : 0,
"ordered" : true
}
},
{
"any_of" : {
"intervals" : [
{ "match" : { "query" : "hot water" } },
{ "match" : { "query" : "cold porridge" } }
]
}
}
]
},
"boost" : 2.0,
"_name" : "favourite_food"
}
}
}
}
--------------------------------------------------
// CONSOLE
In the above example, the text `my favourite food is cold porridge` would
match because the two intervals matching `my favourite food` and `cold
porridge` appear in the correct order, but the text `when it's cold my
favourite food is porridge` would not match, because the interval matching
`cold porridge` starts before the interval matching `my favourite food`.
[[intervals-match]]
==== `match`
The `match` rule matches analyzed text, and takes the following parameters:
[horizontal]
`query`::
The text to match.
`max_gaps`::
Specify a maximum number of gaps between the terms in the text. Terms that
appear further apart than this will not match. If unspecified, or set to -1,
then there is no width restriction on the match. If set to 0 then the terms
must appear next to each other.
`ordered`::
Whether or not the terms must appear in their specified order. Defaults to
`false`.
`analyzer`::
Which analyzer should be used to analyze terms in the `query`. By
default, the search analyzer of the top-level field will be used.
`filter`::
An optional <<interval_filter,interval filter>>
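To make the request shape concrete, a hedged sketch issuing a standalone `match` rule through the low-level REST client (a connected `RestClient` named `restClient`, the index name, and the field are assumptions):
["source","java"]
--------------------------------------------------
Request search = new Request("POST", "/my_index/_search");
search.setJsonEntity(
    "{ \"query\": { \"intervals\": { \"my_text\": {" +
    "  \"match\": { \"query\": \"hot porridge\", \"max_gaps\": 0, \"ordered\": true }" +
    "} } } }"); // terms must be adjacent and in order
Response searchResponse = restClient.performRequest(search);
--------------------------------------------------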
[[intervals-all_of]]
==== `all_of`
`all_of` returns matches that span a combination of other rules.
[horizontal]
`intervals`::
An array of rules to combine. All rules must produce a match in a
document for the overall source to match.
`max_gaps`::
Specify a maximum number of gaps between the rules. Combinations that match
across a distance greater than this will not match. If set to -1 or
unspecified, there is no restriction on this distance. If set to 0, then the
matches produced by the rules must all appear immediately next to each other.
`ordered`::
Whether the intervals produced by the rules should appear in the order in
which they are specified. Defaults to `false`.
`filter`::
An optional <<interval_filter,interval filter>>
[[intervals-any_of]]
==== `any_of`
The `any_of` rule emits intervals produced by any of its sub-rules.
[horizontal]
`intervals`::
An array of rules to match
`filter`::
An optional <<interval_filter,interval filter>>
[[interval_filter]]
==== filters
You can filter intervals produced by any rules by their relation to the
intervals produced by another rule. The following example will return
documents that have the words `hot` and `porridge` within 10 positions
of each other, without the word `salty` in between:
[source,js]
--------------------------------------------------
POST _search
{
"query": {
"intervals" : {
"my_text" : {
"match" : {
"query" : "hot porridge",
"max_gaps" : 10,
"filter" : {
"not_containing" : {
"match" : {
"query" : "salty"
}
}
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
The following filters are available:
[horizontal]
`containing`::
Produces intervals that contain an interval from the filter rule
`contained_by`::
Produces intervals that are contained by an interval from the filter rule
`not_containing`::
Produces intervals that do not contain an interval from the filter rule
`not_contained_by`::
Produces intervals that are not contained by an interval from the filter rule
`not_overlapping`::
Produces intervals that do not overlap with an interval from the filter rule
[[interval-minimization]]
==== Minimization
The intervals query always minimizes intervals, to ensure that queries can
run in linear time. This can sometimes cause surprising results, particularly
when using `max_gaps` restrictions or filters. For example, take the
following query, searching for `salty` contained within the phrase `hot
porridge`:
[source,js]
--------------------------------------------------
POST _search
{
"query": {
"intervals" : {
"my_text" : {
"match" : {
"query" : "salty",
"filter" : {
"contained_by" : {
"match" : {
"query" : "hot porridge"
}
}
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
This query will *not* match a document containing the phrase `hot porridge is
salty porridge`, because the intervals returned by the match query for `hot
porridge` only cover the initial two terms in this document, and these do not
overlap the intervals covering `salty`.
Another restriction to be aware of is the case of `any_of` rules that contain
sub-rules which overlap. In particular, if one of the rules is a strict
prefix of the other, then the longer rule will never be matched, which can
cause surprises when used in combination with `max_gaps`. Consider the
following query, searching for `the` immediately followed by `big` or `big bad`,
immediately followed by `wolf`:
[source,js]
--------------------------------------------------
POST _search
{
"query": {
"intervals" : {
"my_text" : {
"all_of" : {
"intervals" : [
{ "match" : { "query" : "the" } },
{ "any_of" : {
"intervals" : [
{ "match" : { "query" : "big" } },
{ "match" : { "query" : "big bad" } }
] } },
{ "match" : { "query" : "wolf" } }
],
"max_gaps" : 0,
"ordered" : true
}
}
}
}
}
--------------------------------------------------
// CONSOLE
Counter-intuitively, this query *will not* match the document `the big bad
wolf`, because the `any_of` rule in the middle will only produce intervals
for `big`: the intervals for `big bad` are longer than those for `big`, start
at the same position, and so are minimized away. In these cases, it's better
to rewrite the query so that all of the options are explicitly laid out at
the top level:
[source,js]
--------------------------------------------------
POST _search
{
"query": {
"intervals" : {
"my_text" : {
"any_of" : {
"intervals" : [
{ "match" : {
"query" : "the big bad wolf",
"ordered" : true,
"max_gaps" : 0 } },
{ "match" : {
"query" : "the big wolf",
"ordered" : true,
"max_gaps" : 0 } }
]
}
}
}
}
}
--------------------------------------------------
// CONSOLE

View File

@ -64,4 +64,4 @@ Multiple clusters, each with its own namespace, connected to each other in a fed
|===
As one can see while the mapping between the concepts are not exactly one to one and the semantics somewhat different, there are more things in common than differences. In fact, thanks to SQL declarative nature, many concepts can move across {es} transparently and the terminology of the two likely to be used interchangeably through-out the rest of the material.
As one can see, while the mapping between the concepts is not exactly one to one and the semantics are somewhat different, there are more things in common than differences. In fact, thanks to SQL's declarative nature, many concepts can move across {es} transparently, and the terminology of the two is likely to be used interchangeably throughout the rest of the material.

View File

@ -1,10 +1,151 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-datetime]]
=== Date and Time Functions
=== Date/Time and Interval Functions and Operators
beta[]
{es-sql} offers a wide range of facilities for performing date/time manipulations.
[[sql-functions-datetime-interval]]
==== Intervals
A common requirement when dealing with date/time in general revolves around
the notion of ``interval``s, a topic that is worth exploring in the context of {es} and {es-sql}.
{es} has comprehensive support for <<date-math, date math>> both inside <<date-math-index-names, index names>> and <<mapping-date-format, queries>>.
Inside {es-sql} the former is supported as is by passing the expression in the table name, while the latter is supported through the standard SQL `INTERVAL`.
The table below shows the mapping between {es} and {es-sql}:
[cols="^m,^m",options="header"]
|===
| {es} | {es-sql}
2+h| Index/Table date math
2+|<index-{now/M{YYYY.MM}}>
2+h| Query date math
| 1y | INTERVAL 1 YEAR
| 2M | INTERVAL 2 MONTH
| 3w | INTERVAL 21 DAY
| 4d | INTERVAL 4 DAY
| 5h | INTERVAL 5 HOUR
| 6m | INTERVAL 6 MINUTE
| 7s | INTERVAL 7 SECOND
|===
`INTERVAL` allows either `YEAR` and `MONTH` to be mixed together _or_ `DAY`, `HOUR`, `MINUTE` and `SECOND`.
TIP: {es-sql} also accepts the plural for each time unit (e.g. both `YEAR` and `YEARS` are valid).
Examples of possible combinations are shown below:
[cols="^,^",options="header"]
|===
| Interval | Description
| `INTERVAL '1-2' YEAR TO MONTH` | 1 year and 2 months
| `INTERVAL '3 4' DAYS TO HOURS` | 3 days and 4 hours
| `INTERVAL '5 6:12' DAYS TO MINUTES` | 5 days, 6 hours and 12 minutes
| `INTERVAL '3 4:56:01' DAY TO SECOND` | 3 days, 4 hours, 56 minutes and 1 second
| `INTERVAL '2 3:45:01.23456789' DAY TO SECOND` | 2 days, 3 hours, 45 minutes, 1 second and 234567890 nanoseconds
| `INTERVAL '123:45' HOUR TO MINUTES` | 123 hours and 45 minutes
| `INTERVAL '65:43:21.0123' HOUR TO SECONDS` | 65 hours, 43 minutes, 21 seconds and 12300000 nanoseconds
| `INTERVAL '45:01.23' MINUTES TO SECONDS` | 45 minutes, 1 second and 230000000 nanoseconds
|===
==== Operators
Basic arithmetic operators (`+`, `-`, etc) support date-time parameters as indicated below:
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[dtIntervalPlusInterval]
--------------------------------------------------
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[dtDatePlusInterval]
--------------------------------------------------
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[dtMinusInterval]
--------------------------------------------------
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[dtIntervalMinusInterval]
--------------------------------------------------
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[dtDateMinusInterval]
--------------------------------------------------
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[dtIntervalMul]
--------------------------------------------------
==== Functions
beta[]
[[sql-functions-current-timestamp]]
==== `CURRENT_TIMESTAMP`/`NOW`
.Synopsis:
[source, sql]
--------------------------------------------------
CURRENT_TIMESTAMP
CURRENT_TIMESTAMP(precision <1>)
NOW()
--------------------------------------------------
*Input*:
<1> fractional digits - optional
*Output*: date/time
.Description:
Returns the date/time when the current query reached the server.
As a function, `CURRENT_TIMESTAMP()` accepts _precision_ as an optional
parameter for rounding the fractional seconds (nanoseconds).
This method always returns the same value within a query.
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[curTs]
--------------------------------------------------
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[curTsFunction]
--------------------------------------------------
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[curTsFunctionPrecision]
--------------------------------------------------
Typically this function is used for relative date/time filtering:
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[filterNow]
--------------------------------------------------
[[sql-functions-datetime-day]]
==== `DAY_OF_MONTH`/`DOM`/`DAY`

View File

@ -0,0 +1,54 @@
[role="xpack"]
[testenv="basic"]
[[sql-functions-grouping]]
=== Grouping Functions
beta[]
Functions for creating special __grouping__s (also known as _bucketing_); as such, these need to be used
as part of the <<sql-syntax-group-by, grouping>>.
[[sql-functions-grouping-histogram]]
==== `HISTOGRAM`
.Synopsis
[source, sql]
----
HISTOGRAM ( numeric_exp<1>, numeric_interval<2>)
HISTOGRAM ( date_exp<3>, date_time_interval<4>)
----
*Input*:
<1> numeric expression (typically a field)
<2> numeric interval
<3> date/time expression (typically a field)
<4> date/time <<sql-functions-datetime-interval, interval>>
*Output*: non-empty buckets or groups of the given expression divided according to the given interval
.Description
The histogram function takes all matching values and divides them into buckets with fixed size matching the given interval, using (roughly) the following formula:
[source, sql]
----
bucket_key = Math.floor(value / interval) * interval
----
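As a quick worked check of the formula (plain Java with illustrative values):
["source","java"]
----
double interval = 5;
for (double value : new double[] {3, 7, 12}) {
    // 3 -> 0, 7 -> 5, 12 -> 10
    double bucketKey = Math.floor(value / interval) * interval;
    System.out.println(value + " -> bucket " + bucketKey);
}
----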
`Histogram` can be applied on either numeric fields:
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[histogramNumeric]
----
or date/time fields:
["source","sql",subs="attributes,callouts,macros"]
----
include-tagged::{sql-specs}/docs.csv-spec[histogramDate]
----

View File

@ -9,6 +9,7 @@ beta[]
* <<sql-operators, Operators>>
* <<sql-functions-aggs, Aggregate>>
* <<sql-functions-grouping, Grouping>>
* <<sql-functions-datetime, Date-Time>>
* <<sql-functions-search, Full-Text Search>>
* <<sql-functions-math, Mathematical>>
@ -19,6 +20,7 @@ beta[]
include::operators.asciidoc[]
include::aggs.asciidoc[]
include::grouping.asciidoc[]
include::date-time.asciidoc[]
include::search.asciidoc[]
include::math.asciidoc[]

View File

@ -7,42 +7,71 @@ beta[]
Most of {es} <<mapping-types, data types>> are available in {es-sql}, as indicated below:
[cols="^,^,^",options="header"]
[cols="^,^m,^",options="header"]
|===
| {es} type | SQL type | SQL precision
| {es} type | SQL type | SQL precision
3+h| Core types
| <<null-value, `null`>> | `null` | 0
| <<boolean, `boolean`>> | `boolean` | 1
| <<number, `byte`>> | `tinyint` | 3
| <<number, `short`>> | `smallint` | 5
| <<number, `integer`>> | `integer` | 10
| <<number, `long`>> | `bigint` | 19
| <<number, `double`>> | `double` | 15
| <<number, `float`>> | `real` | 7
| <<number, `half_float`>> | `float` | 16
| <<number, `scaled_float`>> | `float` | 19
| <<keyword, `keyword`>> | `varchar` | based on <<ignore-above>>
| <<text, `text`>> | `varchar` | 2,147,483,647
| <<binary, `binary`>> | `varbinary` | 2,147,483,647
| <<date, `date`>> | `timestamp` | 24
3+h| Complex types
| <<object, `object`>> | `struct` | 0
| <<nested, `nested`>> | `struct` | 0
| <<null-value, `null`>> | null | 0
| <<boolean, `boolean`>> | boolean | 1
| <<number, `byte`>> | tinyint | 3
| <<number, `short`>> | smallint | 5
| <<number, `integer`>> | integer | 10
| <<number, `long`>> | bigint | 19
| <<number, `double`>> | double | 15
| <<number, `float`>> | real | 7
| <<number, `half_float`>> | float | 16
| <<number, `scaled_float`>> | float | 19
| <<keyword, `keyword`>> | varchar | based on <<ignore-above>>
| <<text, `text`>> | varchar | 2,147,483,647
| <<binary, `binary`>> | varbinary | 2,147,483,647
| <<date, `date`>> | timestamp | 24
| <<ip, `ip`>> | varchar | 39
3+h| Complex types
| <<object, `object`>> | struct | 0
| <<nested, `nested`>> | struct | 0
3+h| Unsupported types
| _types not mentioned above_ | `unsupported`| 0
| _types not mentioned above_ | unsupported | 0
|===
Obviously, not all types in {es} have an equivalent in SQL and vice-versa; hence, {es-sql}
uses the data type _particularities_ of the former over the latter, as ultimately {es} is the backing store.
In addition to the types above, {es-sql} also supports, at _runtime_, SQL-specific types that do not have an equivalent in {es}.
Such types cannot be loaded from {es} (as it does not know about them); however, they can be used inside {es-sql} in queries or their results.
The table below indicates these types:
[cols="^m,^",options="header"]
|===
| SQL type | SQL precision
| interval_year | 7
| interval_month | 7
| interval_day | 23
| interval_hour | 23
| interval_minute | 23
| interval_second | 23
| interval_year_to_month | 7
| interval_day_to_hour | 23
| interval_day_to_minute | 23
| interval_day_to_second | 23
| interval_hour_to_minute | 23
| interval_hour_to_second | 23
| interval_minute_to_second | 23
|===
[[sql-multi-field]]
[float]

View File

@ -0,0 +1 @@
69279f27885c43662ca7216a6939dacbdf9b4795

View File

@ -1 +0,0 @@
70b328502ac1cc2d27c899a642ffb2f4f1d2b9f3

View File

@ -67,13 +67,11 @@ class org.elasticsearch.common.geo.GeoPoint {
class org.elasticsearch.index.fielddata.ScriptDocValues$Strings {
String get(int)
String getValue()
List getValues()
}
class org.elasticsearch.index.fielddata.ScriptDocValues$Longs {
Long get(int)
long getValue()
List getValues()
}
class org.elasticsearch.script.JodaCompatibleZonedDateTime {
@ -131,9 +129,9 @@ class org.elasticsearch.script.JodaCompatibleZonedDateTime {
#### Joda methods that exist in java time
boolean equals(Object)
int hashCode()
boolean isAfter(ZonedDateTime)
boolean isBefore(ZonedDateTime)
boolean isEqual(ZonedDateTime)
boolean isAfter(JodaCompatibleZonedDateTime)
boolean isBefore(JodaCompatibleZonedDateTime)
boolean isEqual(JodaCompatibleZonedDateTime)
String toString()
#### Joda time methods
@ -163,19 +161,16 @@ class org.elasticsearch.script.JodaCompatibleZonedDateTime {
class org.elasticsearch.index.fielddata.ScriptDocValues$Dates {
JodaCompatibleZonedDateTime get(int)
JodaCompatibleZonedDateTime getValue()
List getValues()
}
class org.elasticsearch.index.fielddata.ScriptDocValues$Doubles {
Double get(int)
double getValue()
List getValues()
}
class org.elasticsearch.index.fielddata.ScriptDocValues$GeoPoints {
org.elasticsearch.common.geo.GeoPoint get(int)
org.elasticsearch.common.geo.GeoPoint getValue()
List getValues()
double getLat()
double getLon()
double[] getLats()
@ -193,13 +188,11 @@ class org.elasticsearch.index.fielddata.ScriptDocValues$GeoPoints {
class org.elasticsearch.index.fielddata.ScriptDocValues$Booleans {
Boolean get(int)
boolean getValue()
List getValues()
}
class org.elasticsearch.index.fielddata.ScriptDocValues$BytesRefs {
BytesRef get(int)
BytesRef getValue()
List getValues()
}
class org.apache.lucene.util.BytesRef {
@ -213,7 +206,6 @@ class org.apache.lucene.util.BytesRef {
class org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues {
String get(int)
String getValue()
List getValues()
}
class org.elasticsearch.search.lookup.FieldLookup {
@ -268,4 +260,4 @@ static_import {
int staticAddIntsTest(int, int) from_class org.elasticsearch.painless.StaticTest
float staticAddFloatsTest(float, float) from_class org.elasticsearch.painless.FeatureTest
int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest
}
}

View File

@ -154,19 +154,9 @@ class BulkByScrollParallelizationHelper {
}
slicedSource = request.source().copyWithNewSlice(sliceBuilder);
}
slices[slice] = new SearchRequest()
.source(slicedSource)
.searchType(request.searchType())
.indices(request.indices())
.types(request.types())
.routing(request.routing())
.preference(request.preference())
.requestCache(request.requestCache())
.scroll(request.scroll())
.indicesOptions(request.indicesOptions());
if (request.allowPartialSearchResults() != null) {
slices[slice].allowPartialSearchResults(request.allowPartialSearchResults());
}
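// The SearchRequest copy constructor carries over every property of the
// original request (indices, types, routing, preference, scroll, etc.);
// only the source needs to be swapped for the sliced one.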
SearchRequest searchRequest = new SearchRequest(request);
searchRequest.source(slicedSource);
slices[slice] = searchRequest;
}
return slices;
}

View File

@ -56,6 +56,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException;
import java.net.InetSocketAddress;
@ -149,22 +150,22 @@ public class Netty4Transport extends TcpTransport {
bootstrap.group(eventLoopGroup);
bootstrap.channel(NioSocketChannel.class);
bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings));
bootstrap.option(ChannelOption.SO_KEEPALIVE, TCP_KEEP_ALIVE.get(settings));
bootstrap.option(ChannelOption.TCP_NODELAY, TransportSettings.TCP_NO_DELAY.get(settings));
bootstrap.option(ChannelOption.SO_KEEPALIVE, TransportSettings.TCP_KEEP_ALIVE.get(settings));
final ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings);
final ByteSizeValue tcpSendBufferSize = TransportSettings.TCP_SEND_BUFFER_SIZE.get(settings);
if (tcpSendBufferSize.getBytes() > 0) {
bootstrap.option(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
}
final ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
final ByteSizeValue tcpReceiveBufferSize = TransportSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings);
if (tcpReceiveBufferSize.getBytes() > 0) {
bootstrap.option(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
}
bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
final boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings);
final boolean reuseAddress = TransportSettings.TCP_REUSE_ADDRESS.get(settings);
bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
return bootstrap;

View File

@ -30,7 +30,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.mocksocket.MockSocket;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportSettings;
import org.junit.After;
import org.junit.Before;
@ -51,8 +51,8 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase {
private final Settings settings = Settings.builder()
.put("node.name", "NettySizeHeaderFrameDecoderTests")
.put(TcpTransport.BIND_HOST.getKey(), "127.0.0.1")
.put(TcpTransport.PORT.getKey(), "0")
.put(TransportSettings.BIND_HOST.getKey(), "127.0.0.1")
.put(TransportSettings.PORT.getKey(), "0")
.build();
private ThreadPool threadPool;

View File

@ -37,8 +37,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException;
import java.net.InetSocketAddress;
@ -80,7 +80,7 @@ public class Netty4TransportIT extends ESNetty4IntegTestCase {
fail("Expected exception, but didn't happen");
} catch (ElasticsearchException e) {
assertThat(e.getMessage(), containsString("MY MESSAGE"));
assertThat(channelProfileName, is(TcpTransport.DEFAULT_PROFILE));
assertThat(channelProfileName, is(TransportSettings.DEFAULT_PROFILE));
}
}
@ -116,7 +116,7 @@ public class Netty4TransportIT extends ESNetty4IntegTestCase {
InetSocketAddress remoteAddress, byte status) throws IOException {
String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version,
remoteAddress, status);
channelProfileName = TcpTransport.DEFAULT_PROFILE;
channelProfileName = TransportSettings.DEFAULT_PROFILE;
return action;
}

View File

@ -31,6 +31,7 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportSettings;
import org.junit.Before;
import java.util.Collections;
@ -53,7 +54,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
public void testThatNettyCanBindToMultiplePorts() throws Exception {
Settings settings = Settings.builder()
.put("network.host", host)
.put(TcpTransport.PORT.getKey(), 22) // will not actually bind to this
.put(TransportSettings.PORT.getKey(), 22) // will not actually bind to this
.put("transport.profiles.default.port", 0)
.put("transport.profiles.client1.port", 0)
.build();
@ -70,7 +71,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
public void testThatDefaultProfileInheritsFromStandardSettings() throws Exception {
Settings settings = Settings.builder()
.put("network.host", host)
.put(TcpTransport.PORT.getKey(), 0)
.put(TransportSettings.PORT.getKey(), 0)
.put("transport.profiles.client1.port", 0)
.build();
@ -87,7 +88,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
Settings settings = Settings.builder()
.put("network.host", host)
.put(TcpTransport.PORT.getKey(), 0)
.put(TransportSettings.PORT.getKey(), 0)
.put("transport.profiles.client1.whatever", "foo")
.build();
@ -103,7 +104,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
public void testThatDefaultProfilePortOverridesGeneralConfiguration() throws Exception {
Settings settings = Settings.builder()
.put("network.host", host)
.put(TcpTransport.PORT.getKey(), 22) // will not actually bind to this
.put(TransportSettings.PORT.getKey(), 22) // will not actually bind to this
.put("transport.profiles.default.port", 0)
.build();

View File

@ -37,9 +37,8 @@ import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import java.net.InetAddress;
import java.net.UnknownHostException;
@ -75,7 +74,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
@Override
protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) {
settings = Settings.builder().put(settings).put(TcpTransport.PORT.getKey(), "0").build();
settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build();
MockTransportService transportService = nettyFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake);
transportService.start();
return transportService;
@ -97,9 +96,9 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
int port = serviceA.boundAddress().publishAddress().getPort();
Settings settings = Settings.builder()
.put(Node.NODE_NAME_SETTING.getKey(), "foobar")
.put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
.put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
.put("transport.tcp.port", port)
.put(TransportSettings.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
.put(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
.put(TransportSettings.PORT.getKey(), port)
.build();
ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> {
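Beyond the bind/port keys, the trace-log settings also migrate from TransportService to TransportSettings, and the literal "transport.tcp.port" string gives way to the typed constant. A hedged sketch of the updated builder, assuming a port value in scope as in this test:

// Typed constants replace both the TransportService fields and the raw key.
Settings settings = Settings.builder()
    .put(TransportSettings.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
    .put(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
    .put(TransportSettings.PORT.getKey(), port) // was the raw "transport.tcp.port"
    .build();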

View File

@ -0,0 +1 @@
143e925924dcc9cb8ad1b584727c2c3b6c9e5633

View File

@ -1 +0,0 @@
643eede8327f69cf0332cecd13100536daa5f04a

View File

@ -0,0 +1 @@
53281a354724cf52babb0460e51d5b6ec99ecad4

View File

@ -1 +0,0 @@
191f07773cd9e8fa76914d7ba1a79292fd465230

View File

@ -0,0 +1 @@
d12356cdbcf4ed17586fef5e6fd1a6ea068821b5

View File

@ -1 +0,0 @@
f981e4522f24af287a47e582080e6f4eae3bfbd9

View File

@ -0,0 +1 @@
b0f3b0409db20717a5229bc639f703eca97ebd4c

View File

@ -1 +0,0 @@
4e79b61404330b938aee53c19a7c1628b23c06e8

View File

@ -0,0 +1 @@
79f18e781a83062919eb60e06a96184ffda4a0c3

View File

@ -1 +0,0 @@
dbd77958185f15d27510ae554b4f91366f477e41

View File

@ -0,0 +1 @@
3c78ca17dd641a3efe1bea980e5290159867b85d

View File

@ -1 +0,0 @@
0b7bcb14961ad1ff22157dff78497b409fd76050

View File

@ -0,0 +1 @@
019b424ea61617788f460218fbdd9c2107a7ff5a

View File

@ -1 +0,0 @@
4e2d601547ffe3e8ccc814a25ce35e3ba7e369b3

View File

@ -166,7 +166,7 @@ public class AzureUnicastHostsProvider implements UnicastHostsProvider {
InetAddress ipAddress = null;
try {
ipAddress = networkService.resolvePublishHostAddresses(
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY));
NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY));
logger.trace("ip of current node: [{}]", ipAddress);
} catch (IOException e) {
// We can't find the publish host address... Hmmm. Too bad :-(
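The NetworkService constant gains a word break: GLOBAL_NETWORK_PUBLISHHOST_SETTING becomes GLOBAL_NETWORK_PUBLISH_HOST_SETTING. A sketch of the renamed lookup, assuming a NetworkService and Settings in scope as in this provider:

import java.net.InetAddress;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.network.NetworkService;

// Resolve the node's publish address through the renamed setting.
InetAddress publishHost = networkService.resolvePublishHostAddresses(
    NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY));

The bind-side constant is renamed the same way (GLOBAL_NETWORK_BINDHOST_SETTING to GLOBAL_NETWORK_BIND_HOST_SETTING) in the hunks below.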

View File

@ -37,7 +37,7 @@ import org.elasticsearch.node.Node;
import org.elasticsearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportSettings;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@ -118,7 +118,7 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase {
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
.put(DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING.getKey(), AzureDiscoveryPlugin.AZURE)
.put(Environment.PATH_LOGS_SETTING.getKey(), resolve)
.put(TcpTransport.PORT.getKey(), 0)
.put(TransportSettings.PORT.getKey(), 0)
.put(Node.WRITE_PORTS_FILE_SETTING.getKey(), "true")
.put(AzureComputeService.Management.ENDPOINT_SETTING.getKey(), "https://" + InetAddress.getLoopbackAddress().getHostAddress() +
":" + httpsServer.getAddress().getPort())

View File

@ -20,19 +20,20 @@
package org.elasticsearch.discovery.ec2;
import com.amazonaws.services.ec2.model.Tag;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.MockTcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.nio.MockNioTransport;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
@ -72,8 +73,9 @@ public class Ec2DiscoveryTests extends ESTestCase {
@Before
public void createTransportService() {
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
final Transport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList())) {
final Transport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool,
new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry,
new NoneCircuitBreakerService()) {
@Override
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
// we just need to ensure we don't resolve DNS here
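The test transport swaps MockTcpTransport for MockNioTransport, whose constructor takes an explicit Version and a PageCacheRecycler in place of BigArrays, and moves the circuit-breaker service to the last position. A sketch of the new construction, mirroring the hunk above (the anonymous-subclass body is omitted; only the constructor shape is shown):

final Transport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool,
    new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE,
    namedWriteableRegistry, new NoneCircuitBreakerService());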

View File

@ -164,7 +164,7 @@ public class Ec2NetworkTests extends ESTestCase {
NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver()));
InetAddress[] addresses = networkService.resolveBindHostAddresses(
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY));
NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY));
if (expected == null) {
fail("We should get an IOException, resolved addressed:" + Arrays.toString(addresses));
}

View File

@ -120,7 +120,7 @@ public class GceUnicastHostsProvider implements UnicastHostsProvider {
String ipAddress = null;
try {
InetAddress inetAddress = networkService.resolvePublishHostAddresses(
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY));
NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY));
if (inetAddress != null) {
ipAddress = NetworkAddress.format(inetAddress);
}

View File

@ -110,7 +110,7 @@ public class GceNetworkTests extends ESTestCase {
NetworkService networkService = new NetworkService(Collections.singletonList(new GceNameResolver(mock)));
try {
InetAddress[] addresses = networkService.resolveBindHostAddresses(
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY));
NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY));
if (expected == null) {
fail("We should get a IllegalArgumentException when setting network.host: _gce:doesnotexist_");
}

View File

@ -51,6 +51,7 @@ import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesService;
@ -130,7 +131,7 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
IndexShard shard = indexService.getShard(0);
shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
@ -185,7 +186,7 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
IndexShard shard = indexService.getShard(0);
shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
@ -384,7 +385,7 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
IndexShard shard = indexService.getShard(0);
shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
@ -426,7 +427,7 @@ public class AnnotatedTextFieldMapperTests extends ESSingleNodeTestCase {
IndexShard shard = indexService.getShard(0);
shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
sourceToParse, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
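Each call to applyIndexOperationOnPrimary gains two arguments, a sequence number and a primary term, inserted ahead of the auto-generated timestamp; from context these appear to be the if-seq-no/if-primary-term pair for compare-and-set indexing (an assumption, not stated in this diff). A hedged sketch of the updated call, with shard and sourceToParse in scope as in these tests:

import org.elasticsearch.index.seqno.SequenceNumbers;

shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL,
    sourceToParse,
    SequenceNumbers.UNASSIGNED_SEQ_NO, 0, // new: seq no + primary term (fresh, non-conditional write)
    IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);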

View File

@ -23,8 +23,8 @@ esplugin {
}
dependencies {
compile 'com.google.cloud:google-cloud-storage:1.40.0'
compile 'com.google.cloud:google-cloud-core:1.40.0'
compile 'com.google.cloud:google-cloud-storage:1.55.0'
compile 'com.google.cloud:google-cloud-core:1.55.0'
compile 'com.google.guava:guava:20.0'
compile "joda-time:joda-time:${versions.joda}"
compile 'com.google.http-client:google-http-client:1.24.1'
@ -40,7 +40,7 @@ dependencies {
compile 'com.google.code.gson:gson:2.7'
compile 'com.google.api.grpc:proto-google-common-protos:1.12.0'
compile 'com.google.api.grpc:proto-google-iam-v1:0.12.0'
compile 'com.google.cloud:google-cloud-core-http:1.40.0'
compile 'com.google.cloud:google-cloud-core-http:1.55.0'
compile 'com.google.auth:google-auth-library-credentials:0.10.0'
compile 'com.google.auth:google-auth-library-oauth2-http:0.10.0'
compile 'com.google.oauth-client:google-oauth-client:1.24.1'

View File

@ -1 +0,0 @@
4985701f989030e262cf8f4e38cc954115f5b082

View File

@ -0,0 +1 @@
9e50a2a559128b7938cfd6598753d4c7383472dc

View File

@ -1 +0,0 @@
67f5806beda32894f1e6c9527925b64199fd2e4f

View File

@ -0,0 +1 @@
f26862445efffd8cb3a7f4b1f2a91b7c5143ee1f

View File

@ -1 +0,0 @@
fabefef46f07d1e334123f0de17702708b4dfbd1

View File

@ -0,0 +1 @@
ca19f55eeb96609243bf3a15fdafd497432f6673

View File

@ -80,6 +80,11 @@ class MockStorage implements Storage {
}
}
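// Added for the google-cloud-storage 1.55.0 upgrade elsewhere in this commit:
// the Storage interface now declares lockRetentionPolicy, so the mock must
// supply a (no-op) override.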
@Override
public Bucket lockRetentionPolicy(final BucketInfo bucket, final BucketTargetOption... options) {
return null;
}
@Override
public Blob get(BlobId blob) {
if (bucketName.equals(blob.getBucket())) {

View File

@ -37,8 +37,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException;
import java.net.InetSocketAddress;
@ -80,7 +80,7 @@ public class NioTransportIT extends NioIntegTestCase {
fail("Expected exception, but didn't happen");
} catch (ElasticsearchException e) {
assertThat(e.getMessage(), containsString("MY MESSAGE"));
assertThat(channelProfileName, is(TcpTransport.DEFAULT_PROFILE));
assertThat(channelProfileName, is(TransportSettings.DEFAULT_PROFILE));
}
}
@ -112,7 +112,7 @@ public class NioTransportIT extends NioIntegTestCase {
InetSocketAddress remoteAddress, byte status) throws IOException {
String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version,
remoteAddress, status);
channelProfileName = TcpTransport.DEFAULT_PROFILE;
channelProfileName = TransportSettings.DEFAULT_PROFILE;
return action;
}

View File

@ -37,9 +37,8 @@ import org.elasticsearch.transport.BindTransportException;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException;
import java.net.InetAddress;
@ -79,7 +78,7 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase {
@Override
protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) {
settings = Settings.builder().put(settings)
.put(TcpTransport.PORT.getKey(), "0")
.put(TransportSettings.PORT.getKey(), "0")
.build();
MockTransportService transportService = nioFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake);
transportService.start();
@ -104,9 +103,9 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase {
int port = serviceA.boundAddress().publishAddress().getPort();
Settings settings = Settings.builder()
.put(Node.NODE_NAME_SETTING.getKey(), "foobar")
.put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
.put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
.put("transport.tcp.port", port)
.put(TransportSettings.TRACE_LOG_INCLUDE_SETTING.getKey(), "")
.put(TransportSettings.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
.put(TransportSettings.PORT.getKey(), port)
.build();
ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> {

View File

@ -27,6 +27,7 @@ import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.rest.action.document.RestGetAction;
import org.elasticsearch.rest.action.document.RestUpdateAction;
import org.elasticsearch.rest.action.search.RestExplainAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Booleans;
@ -626,6 +627,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
String docId = (String) hit.get("_id");
Request updateRequest = new Request("POST", "/" + index + "/doc/" + docId + "/_update");
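// The typed _update URL is deprecated as of this change; the test must expect
// the warning header or the request fails (assumption: expectWarnings asserts
// exactly the listed warnings).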
updateRequest.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE));
updateRequest.setJsonEntity("{ \"doc\" : { \"foo\": \"bar\"}}");
client().performRequest(updateRequest);

View File

@ -3,8 +3,8 @@
"documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html",
"methods": ["POST"],
"url": {
"path": "/{index}/{type}/{id}/_update",
"paths": ["/{index}/{type}/{id}/_update", "/{index}/_doc/{id}/_update"],
"path": "/{index}/_update/{id}",
"paths": ["/{index}/_update/{id}", "/{index}/{type}/{id}/_update"],
"parts": {
"id": {
"type": "string",

View File

@ -0,0 +1,327 @@
setup:
- skip:
version: " - 6.99.99"
reason: "Implemented in 7.0"
- do:
indices.create:
index: test
body:
mappings:
test:
properties:
text:
type: text
analyzer: standard
- do:
bulk:
refresh: true
body:
- '{"index": {"_index": "test", "_type": "test", "_id": "1"}}'
- '{"text" : "Some like it hot, some like it cold"}'
- '{"index": {"_index": "test", "_type": "test", "_id": "2"}}'
- '{"text" : "Its cold outside, theres no kind of atmosphere"}'
- '{"index": {"_index": "test", "_type": "test", "_id": "3"}}'
- '{"text" : "Baby its cold there outside"}'
- '{"index": {"_index": "test", "_type": "test", "_id": "4"}}'
- '{"text" : "Outside it is cold and wet"}'
---
"Test ordered matching":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "cold outside"
ordered: true
- match: { hits.total.value: 2 }
---
"Test default unordered matching":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "cold outside"
- match: { hits.total.value: 3 }
---
"Test explicit unordered matching":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "cold outside"
ordered: false
- match: { hits.total.value: 3 }
---
"Test phrase matching":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "cold outside"
ordered: true
max_gaps: 0
- match: { hits.total.value: 1 }
---
"Test unordered max_gaps matching":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "cold outside"
max_gaps: 1
- match: { hits.total.value: 2 }
---
"Test ordered max_gaps matching":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "cold outside"
max_gaps: 0
ordered: true
- match: { hits.total.value: 1 }
---
"Test ordered combination with disjunction":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- any_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
- match:
query: "atmosphere"
ordered: true
- match: { hits.total.value: 1 }
---
"Test ordered combination with max_gaps":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
max_gaps: 0
ordered: true
- match: { hits.total.value: 1 }
---
"Test ordered combination":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
ordered: true
- match: { hits.total.value: 2 }
---
"Test unordered combination":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
max_gaps: 1
ordered: false
- match: { hits.total.value: 2 }
---
"Test block combination":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
ordered: true
max_gaps: 0
- match: { hits.total.value: 1 }
---
"Test containing":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
ordered: false
filter:
containing:
match:
query: "is"
- match: { hits.total.value: 1 }
---
"Test not containing":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
ordered: false
filter:
not_containing:
match:
query: "is"
- match: { hits.total.value: 2 }
---
"Test contained_by":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "is"
filter:
contained_by:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
ordered: false
- match: { hits.total.value: 1 }
---
"Test not_contained_by":
- do:
search:
index: test
body:
query:
intervals:
text:
match:
query: "it"
filter:
not_contained_by:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
- match: { hits.total.value: 1 }
---
"Test not_overlapping":
- do:
search:
index: test
body:
query:
intervals:
text:
all_of:
intervals:
- match:
query: "cold"
- match:
query: "outside"
ordered: true
filter:
not_overlapping:
all_of:
intervals:
- match:
query: "baby"
- match:
query: "there"
ordered: false
- match: { hits.total.value: 1 }
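All of the cases above exercise the new intervals query. As a rough illustration, the first case ("Test ordered matching") corresponds to a request like the following via the low-level REST client; client() and the surrounding test wiring are assumptions:

Request search = new Request("POST", "/test/_search");
search.setJsonEntity("{ \"query\": { \"intervals\": { \"text\": {"
    + " \"match\": { \"query\": \"cold outside\", \"ordered\": true } } } } }");
Response response = client().performRequest(search);
// Expect hits.total.value == 2, as asserted in the YAML above.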

View File

@ -0,0 +1 @@
729c6a031e3849874028020301e1f45a05d5a0bb

View File

@ -1 +0,0 @@
37be26a0881a2ae009a7057d6f384b75136d98f7

View File

@ -0,0 +1 @@
5f831dea7c0bafd6306653144388a8ecd1186158

View File

@ -1 +0,0 @@
a5b18174ee3936b29218a0320b2a8b94e7150871

View File

@ -0,0 +1 @@
49b3ac44b6749a7ebf0c2e41a81e7910133d2fcc

View File

@ -1 +0,0 @@
47253358ac340c35845c2a1007849db4234740da

View File

@ -0,0 +1 @@
0396dff0af03463e784b86fd1a24008e2f07daa2

View File

@ -1 +0,0 @@
98799c869205e22d903a797dcb495c31954699e0

View File

@ -0,0 +1 @@
d552b941fef2a64ab4c9b2509906950257f92262

View File

@ -1 +0,0 @@
7bb476d98f9e9caf7ba62ac1b0feb791979c36c9

View File

@ -0,0 +1 @@
192e9374124c14c7cd594a6f87aed61806e6e402

View File

@ -1 +0,0 @@
a7a3562acc7f0c20ad9d24bc21f140d920de973b

View File

@ -0,0 +1 @@
b9345c0321a3f4c7aa69ecfaf15cdee74180e409

View File

@ -1 +0,0 @@
0b76d08438b959417d2372512ce6f43347085f51

View File

@ -0,0 +1 @@
ace540746369ded8b2f354d35002f5ccf6a58aab

View File

@ -1 +0,0 @@
0ed089a34dbd66f8b153d292d6dd2a04f99ce8af

Some files were not shown because too many files have changed in this diff.