Merge branch 'master' into feature/client_aggs_parsing
This commit is contained in:
commit
f538d7b8d6
|
@ -37,16 +37,21 @@ public class NoticeTask extends DefaultTask {
|
|||
@OutputFile
|
||||
File outputFile = new File(project.buildDir, "notices/${name}/NOTICE.txt")
|
||||
|
||||
/** Configurations to inspect dependencies*/
|
||||
private List<Project> dependencies = new ArrayList<>()
|
||||
/** Directories to include notices from */
|
||||
private List<File> licensesDirs = new ArrayList<>()
|
||||
|
||||
public NoticeTask() {
|
||||
description = 'Create a notice file from dependencies'
|
||||
// Default licenses directory is ${projectDir}/licenses (if it exists)
|
||||
File licensesDir = new File(project.projectDir, 'licenses')
|
||||
if (licensesDir.exists()) {
|
||||
licensesDirs.add(licensesDir)
|
||||
}
|
||||
}
|
||||
|
||||
/** Add notices from licenses found in the given project. */
|
||||
public void dependencies(Project project) {
|
||||
dependencies.add(project)
|
||||
/** Add notices from the specified directory. */
|
||||
public void licensesDir(File licensesDir) {
|
||||
licensesDirs.add(licensesDir)
|
||||
}
|
||||
|
||||
@TaskAction
|
||||
|
@ -54,17 +59,29 @@ public class NoticeTask extends DefaultTask {
|
|||
StringBuilder output = new StringBuilder()
|
||||
output.append(inputFile.getText('UTF-8'))
|
||||
output.append('\n\n')
|
||||
Set<String> seen = new HashSet<>()
|
||||
for (Project dep : dependencies) {
|
||||
File licensesDir = new File(dep.projectDir, 'licenses')
|
||||
if (licensesDir.exists() == false) continue
|
||||
licensesDir.eachFileMatch({ it ==~ /.*-NOTICE\.txt/ && seen.contains(it) == false}) { File file ->
|
||||
// This is a map rather than a set so that the sort order is the 3rd
|
||||
// party component names, unaffected by the full path to the various files
|
||||
Map<String, File> seen = new TreeMap<>()
|
||||
for (File licensesDir : licensesDirs) {
|
||||
licensesDir.eachFileMatch({ it ==~ /.*-NOTICE\.txt/ }) { File file ->
|
||||
String name = file.name.substring(0, file.name.length() - '-NOTICE.txt'.length())
|
||||
appendFile(file, name, 'NOTICE', output)
|
||||
appendFile(new File(file.parentFile, "${name}-LICENSE.txt"), name, 'LICENSE', output)
|
||||
seen.add(file.name)
|
||||
if (seen.containsKey(name)) {
|
||||
File prevFile = seen.get(name)
|
||||
if (prevFile.text != file.text) {
|
||||
throw new RuntimeException("Two different notices exist for dependency '" +
|
||||
name + "': " + prevFile + " and " + file)
|
||||
}
|
||||
} else {
|
||||
seen.put(name, file)
|
||||
}
|
||||
}
|
||||
}
|
||||
for (Map.Entry<String, File> entry : seen.entrySet()) {
|
||||
String name = entry.getKey()
|
||||
File file = entry.getValue()
|
||||
appendFile(file, name, 'NOTICE', output)
|
||||
appendFile(new File(file.parentFile, "${name}-LICENSE.txt"), name, 'LICENSE', output)
|
||||
}
|
||||
outputFile.setText(output.toString(), 'UTF-8')
|
||||
}
|
||||
|
||||
|
|
|
@ -260,7 +260,6 @@ public class PluginBuildPlugin extends BuildPlugin {
|
|||
File noticeFile = project.pluginProperties.extension.noticeFile
|
||||
if (noticeFile != null) {
|
||||
NoticeTask generateNotice = project.tasks.create('generateNotice', NoticeTask.class)
|
||||
generateNotice.dependencies(project)
|
||||
generateNotice.inputFile = noticeFile
|
||||
project.bundlePlugin.from(generateNotice)
|
||||
}
|
||||
|
|
|
@ -42,6 +42,7 @@
|
|||
<suppress files="client[/\\]test[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]RestClientTestUtil.java" checks="LineLength" />
|
||||
<suppress files="client[/\\]rest[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]RestClientTests.java" checks="LineLength" />
|
||||
<suppress files="client[/\\]rest[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]SyncResponseListenerTests.java" checks="LineLength" />
|
||||
<suppress files="client[/\\]rest[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]HeapBufferedAsyncResponseConsumerTests.java" checks="LineLength" />
|
||||
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]Request.java" checks="LineLength" />
|
||||
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]RestHighLevelClient.java" checks="LineLength" />
|
||||
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]CrudIT.java" checks="LineLength" />
|
||||
|
@ -3894,15 +3895,15 @@
|
|||
<suppress files="plugins[/\\]repository-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]azure[/\\]AzureSnapshotRestoreListSnapshotsTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]azure[/\\]AzureSnapshotRestoreTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]azure[/\\]RepositoryAzureClientYamlTestSuiteIT.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]gcs[/\\]GoogleCloudStorageBlobContainer.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]gcs[/\\]GoogleCloudStorageBlobStore.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]gcs[/\\]util[/\\]SocketAccess.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugin[/\\]repository[/\\]gcs[/\\]GoogleCloudStoragePlugin.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStorageBlobContainer.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStorageBlobStore.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]SocketAccess.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStoragePlugin.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStorageRepository.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStorageService.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]gcs[/\\]GoogleCloudStorageBlobStoreContainerTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]gcs[/\\]GoogleCloudStorageBlobStoreTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]gcs[/\\]MockHttpTransport.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStorageBlobStoreContainerTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStorageBlobStoreTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]MockHttpTransport.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-gcs[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]gcs[/\\]GoogleCloudStorageBlobStoreRepositoryTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-hdfs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]hdfs[/\\]HdfsBlobContainer.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-hdfs[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]hdfs[/\\]HdfsBlobStore.java" checks="LineLength" />
|
||||
|
|
|
@ -8,7 +8,7 @@ jts = 1.13
|
|||
jackson = 2.8.6
|
||||
snakeyaml = 1.15
|
||||
# When updating log4j, please update also docs/java-api/index.asciidoc
|
||||
log4j = 2.7
|
||||
log4j = 2.8.2
|
||||
slf4j = 1.6.2
|
||||
jna = 4.4.0
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ import static org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBuff
|
|||
* consumer object. Users can implement this interface and pass their own instance to the specialized
|
||||
* performRequest methods that accept an {@link HttpAsyncResponseConsumerFactory} instance as argument.
|
||||
*/
|
||||
interface HttpAsyncResponseConsumerFactory {
|
||||
public interface HttpAsyncResponseConsumerFactory {
|
||||
|
||||
/**
|
||||
* Creates the default type of {@link HttpAsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB.
|
||||
|
@ -53,7 +53,7 @@ interface HttpAsyncResponseConsumerFactory {
|
|||
|
||||
private final int bufferLimit;
|
||||
|
||||
HeapBufferedResponseConsumerFactory(int bufferLimitBytes) {
|
||||
public HeapBufferedResponseConsumerFactory(int bufferLimitBytes) {
|
||||
this.bufferLimit = bufferLimitBytes;
|
||||
}
|
||||
|
||||
|
|
|
@ -24,19 +24,24 @@ import org.apache.http.HttpEntity;
|
|||
import org.apache.http.HttpResponse;
|
||||
import org.apache.http.ProtocolVersion;
|
||||
import org.apache.http.StatusLine;
|
||||
import org.apache.http.entity.BasicHttpEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.http.message.BasicHttpResponse;
|
||||
import org.apache.http.message.BasicStatusLine;
|
||||
import org.apache.http.nio.ContentDecoder;
|
||||
import org.apache.http.nio.IOControl;
|
||||
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
|
||||
import org.apache.http.protocol.HttpContext;
|
||||
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.lang.reflect.Modifier;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.spy;
|
||||
|
@ -97,6 +102,26 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase {
|
|||
bufferLimitTest(consumer, bufferLimit);
|
||||
}
|
||||
|
||||
public void testCanConfigureHeapBufferLimitFromOutsidePackage() throws ClassNotFoundException, NoSuchMethodException,
|
||||
IllegalAccessException, InvocationTargetException, InstantiationException {
|
||||
int bufferLimit = randomIntBetween(1, Integer.MAX_VALUE);
|
||||
//we use reflection to make sure that the class can be instantiated from the outside, and the constructor is public
|
||||
Constructor<?> constructor = HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.class.getConstructor(Integer.TYPE);
|
||||
assertEquals(Modifier.PUBLIC, constructor.getModifiers() & Modifier.PUBLIC);
|
||||
Object object = constructor.newInstance(bufferLimit);
|
||||
assertThat(object, instanceOf(HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.class));
|
||||
HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory =
|
||||
(HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory) object;
|
||||
HttpAsyncResponseConsumer<HttpResponse> consumer = consumerFactory.createHttpAsyncResponseConsumer();
|
||||
assertThat(consumer, instanceOf(HeapBufferedAsyncResponseConsumer.class));
|
||||
HeapBufferedAsyncResponseConsumer bufferedAsyncResponseConsumer = (HeapBufferedAsyncResponseConsumer) consumer;
|
||||
assertEquals(bufferLimit, bufferedAsyncResponseConsumer.getBufferLimit());
|
||||
}
|
||||
|
||||
public void testHttpAsyncResponseConsumerFactoryVisibility() throws ClassNotFoundException {
|
||||
assertEquals(Modifier.PUBLIC, HttpAsyncResponseConsumerFactory.class.getModifiers() & Modifier.PUBLIC);
|
||||
}
|
||||
|
||||
private static void bufferLimitTest(HeapBufferedAsyncResponseConsumer consumer, int bufferLimit) throws Exception {
|
||||
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
|
||||
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
|
||||
|
|
|
@ -231,9 +231,11 @@ thirdPartyAudit.excludes = [
|
|||
'org.apache.commons.compress.utils.IOUtils',
|
||||
'org.apache.commons.csv.CSVFormat',
|
||||
'org.apache.commons.csv.QuoteMode',
|
||||
'org.apache.kafka.clients.producer.Callback',
|
||||
'org.apache.kafka.clients.producer.KafkaProducer',
|
||||
'org.apache.kafka.clients.producer.Producer',
|
||||
'org.apache.kafka.clients.producer.ProducerRecord',
|
||||
'org.apache.kafka.clients.producer.RecordMetadata',
|
||||
'org.codehaus.stax2.XMLStreamWriter2',
|
||||
'org.jctools.queues.MessagePassingQueue$Consumer',
|
||||
'org.jctools.queues.MpscArrayQueue',
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
39f4e6c2d68d4ef8fd4b0883d165682dedd5be52
|
|
@ -0,0 +1 @@
|
|||
f1543534b8413aac91fa54d1fff65dfff76818cd
|
|
@ -1 +0,0 @@
|
|||
8de00e382a817981b737be84cb8def687d392963
|
|
@ -0,0 +1 @@
|
|||
e590eeb783348ce8ddef205b82127f9084d82bf3
|
|
@ -1 +0,0 @@
|
|||
a3f2b4e64c61a7fc1ed8f1e5ba371933404ed98a
|
|
@ -0,0 +1 @@
|
|||
979fc0cf8460302e4ffbfe38c1b66a99450b0bb7
|
|
@ -98,27 +98,26 @@ final class FetchSearchPhase extends SearchPhase {
|
|||
final int numShards = context.getNumShards();
|
||||
final boolean isScrollSearch = context.getRequest().scroll() != null;
|
||||
List<SearchPhaseResult> phaseResults = queryResults.asList();
|
||||
ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, phaseResults);
|
||||
String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null;
|
||||
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce();
|
||||
final boolean queryAndFetchOptimization = queryResults.length() == 1;
|
||||
final Runnable finishPhase = ()
|
||||
-> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase, queryAndFetchOptimization ?
|
||||
-> moveToNextPhase(searchPhaseController, scrollId, reducedQueryPhase, queryAndFetchOptimization ?
|
||||
queryResults : fetchResults);
|
||||
if (queryAndFetchOptimization) {
|
||||
assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null;
|
||||
// query AND fetch optimization
|
||||
finishPhase.run();
|
||||
} else {
|
||||
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, sortedShardDocs);
|
||||
if (sortedShardDocs.length == 0) { // no docs to fetch -- sidestep everything and return
|
||||
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, reducedQueryPhase.scoreDocs);
|
||||
if (reducedQueryPhase.scoreDocs.length == 0) { // no docs to fetch -- sidestep everything and return
|
||||
phaseResults.stream()
|
||||
.map(e -> e.queryResult())
|
||||
.forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources
|
||||
finishPhase.run();
|
||||
} else {
|
||||
final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ?
|
||||
searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards)
|
||||
searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, numShards)
|
||||
: null;
|
||||
final CountedCollector<FetchSearchResult> counter = new CountedCollector<>(r -> fetchResults.set(r.getShardIndex(), r),
|
||||
docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not
|
||||
|
@ -188,7 +187,7 @@ final class FetchSearchPhase extends SearchPhase {
|
|||
private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) {
|
||||
// we only release search context that we did not fetch from if we are not scrolling
|
||||
// and if it has at lease one hit that didn't make it to the global topDocs
|
||||
if (context.getRequest().scroll() == null && queryResult.hasHits()) {
|
||||
if (context.getRequest().scroll() == null && queryResult.hasSearchContext()) {
|
||||
try {
|
||||
Transport.Connection connection = context.getConnection(queryResult.getSearchShardTarget().getNodeId());
|
||||
context.sendReleaseSearchContext(queryResult.getRequestId(), connection);
|
||||
|
@ -198,11 +197,11 @@ final class FetchSearchPhase extends SearchPhase {
|
|||
}
|
||||
}
|
||||
|
||||
private void moveToNextPhase(SearchPhaseController searchPhaseController, ScoreDoc[] sortedDocs,
|
||||
private void moveToNextPhase(SearchPhaseController searchPhaseController,
|
||||
String scrollId, SearchPhaseController.ReducedQueryPhase reducedQueryPhase,
|
||||
AtomicArray<? extends SearchPhaseResult> fetchResultsArr) {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(context.getRequest().scroll() != null,
|
||||
sortedDocs, reducedQueryPhase, fetchResultsArr.asList(), fetchResultsArr::get);
|
||||
reducedQueryPhase, fetchResultsArr.asList(), fetchResultsArr::get);
|
||||
context.executeNextPhase(this, nextPhaseFactory.apply(context.buildSearchResponse(internalResponse, scrollId)));
|
||||
}
|
||||
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.SearchHits;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
|
@ -56,7 +57,6 @@ import org.elasticsearch.search.suggest.Suggest.Suggestion;
|
|||
import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
|
||||
import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
|
@ -147,42 +147,47 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
* @param ignoreFrom Whether to ignore the from and sort all hits in each shard result.
|
||||
* Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase.
|
||||
* @param results the search phase results to obtain the sort docs from
|
||||
* @param bufferedTopDocs the pre-consumed buffered top docs
|
||||
* @param topDocsStats the top docs stats to fill
|
||||
* @param from the offset into the search results top docs
|
||||
* @param size the number of hits to return from the merged top docs
|
||||
*/
|
||||
public ScoreDoc[] sortDocs(boolean ignoreFrom, Collection<? extends SearchPhaseResult> results) throws IOException {
|
||||
public SortedTopDocs sortDocs(boolean ignoreFrom, Collection<? extends SearchPhaseResult> results,
|
||||
final Collection<TopDocs> bufferedTopDocs, final TopDocsStats topDocsStats, int from, int size) {
|
||||
if (results.isEmpty()) {
|
||||
return EMPTY_DOCS;
|
||||
return SortedTopDocs.EMPTY;
|
||||
}
|
||||
final Collection<TopDocs> topDocs = new ArrayList<>();
|
||||
final Collection<TopDocs> topDocs = bufferedTopDocs == null ? new ArrayList<>() : bufferedTopDocs;
|
||||
final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>();
|
||||
int from = -1;
|
||||
int size = -1;
|
||||
for (SearchPhaseResult sortedResult : results) {
|
||||
for (SearchPhaseResult sortedResult : results) { // TODO we can move this loop into the reduce call to only loop over this once
|
||||
/* We loop over all results once, group together the completion suggestions if there are any and collect relevant
|
||||
* top docs results. Each top docs gets it's shard index set on all top docs to simplify top docs merging down the road
|
||||
* this allowed to remove a single shared optimization code here since now we don't materialized a dense array of
|
||||
* top docs anymore but instead only pass relevant results / top docs to the merge method*/
|
||||
QuerySearchResult queryResult = sortedResult.queryResult();
|
||||
if (queryResult.hasHits()) {
|
||||
from = queryResult.from();
|
||||
size = queryResult.size();
|
||||
TopDocs td = queryResult.topDocs();
|
||||
if (td != null && td.scoreDocs.length > 0) {
|
||||
if (queryResult.hasConsumedTopDocs() == false) { // already consumed?
|
||||
final TopDocs td = queryResult.consumeTopDocs();
|
||||
assert td != null;
|
||||
topDocsStats.add(td);
|
||||
if (td.scoreDocs.length > 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet
|
||||
setShardIndex(td, queryResult.getShardIndex());
|
||||
topDocs.add(td);
|
||||
}
|
||||
}
|
||||
if (queryResult.hasSuggestHits()) {
|
||||
Suggest shardSuggest = queryResult.suggest();
|
||||
if (shardSuggest != null) {
|
||||
for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
|
||||
suggestion.setShardIndex(sortedResult.getShardIndex());
|
||||
List<Suggestion<CompletionSuggestion.Entry>> suggestions =
|
||||
groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
|
||||
suggestions.add(suggestion);
|
||||
}
|
||||
for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
|
||||
suggestion.setShardIndex(sortedResult.getShardIndex());
|
||||
List<Suggestion<CompletionSuggestion.Entry>> suggestions =
|
||||
groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
|
||||
suggestions.add(suggestion);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (size != -1) {
|
||||
final ScoreDoc[] mergedScoreDocs = mergeTopDocs(topDocs, size, ignoreFrom ? 0 : from);
|
||||
final boolean hasHits = (groupedCompletionSuggestions.isEmpty() && topDocs.isEmpty()) == false;
|
||||
if (hasHits) {
|
||||
final TopDocs mergedTopDocs = mergeTopDocs(topDocs, size, ignoreFrom ? 0 : from);
|
||||
final ScoreDoc[] mergedScoreDocs = mergedTopDocs == null ? EMPTY_DOCS : mergedTopDocs.scoreDocs;
|
||||
ScoreDoc[] scoreDocs = mergedScoreDocs;
|
||||
if (groupedCompletionSuggestions.isEmpty() == false) {
|
||||
int numSuggestDocs = 0;
|
||||
|
@ -204,23 +209,35 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
}
|
||||
return scoreDocs;
|
||||
final boolean isSortedByField;
|
||||
final SortField[] sortFields;
|
||||
if (mergedTopDocs != null && mergedTopDocs instanceof TopFieldDocs) {
|
||||
TopFieldDocs fieldDocs = (TopFieldDocs) mergedTopDocs;
|
||||
isSortedByField = (fieldDocs instanceof CollapseTopFieldDocs &&
|
||||
fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) == false;
|
||||
sortFields = fieldDocs.fields;
|
||||
} else {
|
||||
isSortedByField = false;
|
||||
sortFields = null;
|
||||
}
|
||||
return new SortedTopDocs(scoreDocs, isSortedByField, sortFields);
|
||||
} else {
|
||||
// no relevant docs - just return an empty array
|
||||
return EMPTY_DOCS;
|
||||
// no relevant docs
|
||||
return SortedTopDocs.EMPTY;
|
||||
}
|
||||
}
|
||||
|
||||
private ScoreDoc[] mergeTopDocs(Collection<TopDocs> results, int topN, int from) {
|
||||
TopDocs mergeTopDocs(Collection<TopDocs> results, int topN, int from) {
|
||||
if (results.isEmpty()) {
|
||||
return EMPTY_DOCS;
|
||||
return null;
|
||||
}
|
||||
assert results.isEmpty() == false;
|
||||
final boolean setShardIndex = false;
|
||||
final TopDocs topDocs = results.stream().findFirst().get();
|
||||
final TopDocs mergedTopDocs;
|
||||
final int numShards = results.size();
|
||||
if (numShards == 1 && from == 0) { // only one shard and no pagination we can just return the topDocs as we got them.
|
||||
return topDocs.scoreDocs;
|
||||
return topDocs;
|
||||
} else if (topDocs instanceof CollapseTopFieldDocs) {
|
||||
CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) topDocs;
|
||||
final Sort sort = new Sort(firstTopDocs.fields);
|
||||
|
@ -235,7 +252,7 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
final TopDocs[] shardTopDocs = results.toArray(new TopDocs[numShards]);
|
||||
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, setShardIndex);
|
||||
}
|
||||
return mergedTopDocs.scoreDocs;
|
||||
return mergedTopDocs;
|
||||
}
|
||||
|
||||
private static void setShardIndex(TopDocs topDocs, int shardIndex) {
|
||||
|
@ -249,12 +266,12 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
|
||||
public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase,
|
||||
ScoreDoc[] sortedScoreDocs, int numShards) {
|
||||
ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
|
||||
if (reducedQueryPhase.isEmpty() == false) {
|
||||
public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase, int numShards) {
|
||||
final ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
|
||||
if (reducedQueryPhase.isEmptyResult == false) {
|
||||
final ScoreDoc[] sortedScoreDocs = reducedQueryPhase.scoreDocs;
|
||||
// from is always zero as when we use scroll, we ignore from
|
||||
long size = Math.min(reducedQueryPhase.fetchHits, reducedQueryPhase.oneResult.size());
|
||||
long size = Math.min(reducedQueryPhase.fetchHits, reducedQueryPhase.size);
|
||||
// with collapsing we can have more hits than sorted docs
|
||||
size = Math.min(sortedScoreDocs.length, size);
|
||||
for (int sortedDocsIndex = 0; sortedDocsIndex < size; sortedDocsIndex++) {
|
||||
|
@ -288,13 +305,13 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
* Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named
|
||||
* completion suggestion ordered by suggestion name
|
||||
*/
|
||||
public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs,
|
||||
ReducedQueryPhase reducedQueryPhase,
|
||||
public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase,
|
||||
Collection<? extends SearchPhaseResult> fetchResults, IntFunction<SearchPhaseResult> resultsLookup) {
|
||||
if (reducedQueryPhase.isEmpty()) {
|
||||
if (reducedQueryPhase.isEmptyResult) {
|
||||
return InternalSearchResponse.empty();
|
||||
}
|
||||
SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, sortedDocs, fetchResults, resultsLookup);
|
||||
ScoreDoc[] sortedDocs = reducedQueryPhase.scoreDocs;
|
||||
SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResults, resultsLookup);
|
||||
if (reducedQueryPhase.suggest != null) {
|
||||
if (!fetchResults.isEmpty()) {
|
||||
int currentOffset = hits.getHits().length;
|
||||
|
@ -304,6 +321,10 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
ScoreDoc shardDoc = sortedDocs[scoreDocIndex];
|
||||
SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
|
||||
if (searchResultProvider == null) {
|
||||
// this can happen if we are hitting a shard failure during the fetch phase
|
||||
// in this case we referenced the shard result via teh ScoreDoc but never got a
|
||||
// result from fetch.
|
||||
// TODO it would be nice to assert this in the future
|
||||
continue;
|
||||
}
|
||||
FetchSearchResult fetchResult = searchResultProvider.fetchResult();
|
||||
|
@ -325,21 +346,15 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
return reducedQueryPhase.buildResponse(hits);
|
||||
}
|
||||
|
||||
private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, ScoreDoc[] sortedDocs,
|
||||
private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom,
|
||||
Collection<? extends SearchPhaseResult> fetchResults, IntFunction<SearchPhaseResult> resultsLookup) {
|
||||
boolean sorted = false;
|
||||
final boolean sorted = reducedQueryPhase.isSortedByField;
|
||||
ScoreDoc[] sortedDocs = reducedQueryPhase.scoreDocs;
|
||||
int sortScoreIndex = -1;
|
||||
if (reducedQueryPhase.oneResult.topDocs() instanceof TopFieldDocs) {
|
||||
TopFieldDocs fieldDocs = (TopFieldDocs) reducedQueryPhase.oneResult.queryResult().topDocs();
|
||||
if (fieldDocs instanceof CollapseTopFieldDocs &&
|
||||
fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) {
|
||||
sorted = false;
|
||||
} else {
|
||||
sorted = true;
|
||||
for (int i = 0; i < fieldDocs.fields.length; i++) {
|
||||
if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) {
|
||||
sortScoreIndex = i;
|
||||
}
|
||||
if (sorted) {
|
||||
for (int i = 0; i < reducedQueryPhase.sortField.length; i++) {
|
||||
if (reducedQueryPhase.sortField[i].getType() == SortField.Type.SCORE) {
|
||||
sortScoreIndex = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -347,8 +362,8 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
for (SearchPhaseResult entry : fetchResults) {
|
||||
entry.fetchResult().initCounter();
|
||||
}
|
||||
int from = ignoreFrom ? 0 : reducedQueryPhase.oneResult.queryResult().from();
|
||||
int numSearchHits = (int) Math.min(reducedQueryPhase.fetchHits - from, reducedQueryPhase.oneResult.size());
|
||||
int from = ignoreFrom ? 0 : reducedQueryPhase.from;
|
||||
int numSearchHits = (int) Math.min(reducedQueryPhase.fetchHits - from, reducedQueryPhase.size);
|
||||
// with collapsing we can have more fetch hits than sorted docs
|
||||
numSearchHits = Math.min(sortedDocs.length, numSearchHits);
|
||||
// merge hits
|
||||
|
@ -358,6 +373,10 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
ScoreDoc shardDoc = sortedDocs[i];
|
||||
SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex);
|
||||
if (fetchResultProvider == null) {
|
||||
// this can happen if we are hitting a shard failure during the fetch phase
|
||||
// in this case we referenced the shard result via teh ScoreDoc but never got a
|
||||
// result from fetch.
|
||||
// TODO it would be nice to assert this in the future
|
||||
continue;
|
||||
}
|
||||
FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
|
||||
|
@ -368,7 +387,7 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
searchHit.shard(fetchResult.getSearchShardTarget());
|
||||
if (sorted) {
|
||||
FieldDoc fieldDoc = (FieldDoc) shardDoc;
|
||||
searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.oneResult.sortValueFormats());
|
||||
searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.sortValueFormats);
|
||||
if (sortScoreIndex != -1) {
|
||||
searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
|
||||
}
|
||||
|
@ -385,42 +404,42 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
* Reduces the given query results and consumes all aggregations and profile results.
|
||||
* @param queryResults a list of non-null query shard results
|
||||
*/
|
||||
public ReducedQueryPhase reducedQueryPhase(List<? extends SearchPhaseResult> queryResults) {
|
||||
return reducedQueryPhase(queryResults, null, 0);
|
||||
public ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, boolean isScrollRequest) {
|
||||
return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(), 0, isScrollRequest);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reduces the given query results and consumes all aggregations and profile results.
|
||||
* @param queryResults a list of non-null query shard results
|
||||
* @param bufferdAggs a list of pre-collected / buffered aggregations. if this list is non-null all aggregations have been consumed
|
||||
* @param bufferedAggs a list of pre-collected / buffered aggregations. if this list is non-null all aggregations have been consumed
|
||||
* from all non-null query results.
|
||||
* @param bufferedTopDocs a list of pre-collected / buffered top docs. if this list is non-null all top docs have been consumed
|
||||
* from all non-null query results.
|
||||
* @param numReducePhases the number of non-final reduce phases applied to the query results.
|
||||
* @see QuerySearchResult#consumeAggs()
|
||||
* @see QuerySearchResult#consumeProfileResult()
|
||||
*/
|
||||
private ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults,
|
||||
List<InternalAggregations> bufferdAggs, int numReducePhases) {
|
||||
List<InternalAggregations> bufferedAggs, List<TopDocs> bufferedTopDocs,
|
||||
TopDocsStats topDocsStats, int numReducePhases, boolean isScrollRequest) {
|
||||
assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases;
|
||||
numReducePhases++; // increment for this phase
|
||||
long totalHits = 0;
|
||||
long fetchHits = 0;
|
||||
float maxScore = Float.NEGATIVE_INFINITY;
|
||||
boolean timedOut = false;
|
||||
Boolean terminatedEarly = null;
|
||||
if (queryResults.isEmpty()) { // early terminate we have nothing to reduce
|
||||
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, null, null, null, null,
|
||||
numReducePhases);
|
||||
return new ReducedQueryPhase(topDocsStats.totalHits, topDocsStats.fetchHits, topDocsStats.maxScore,
|
||||
timedOut, terminatedEarly, null, null, null, EMPTY_DOCS, null, null, numReducePhases, false, 0, 0, true);
|
||||
}
|
||||
final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult();
|
||||
final boolean hasSuggest = firstResult.suggest() != null;
|
||||
final boolean hasProfileResults = firstResult.hasProfileResults();
|
||||
final boolean consumeAggs;
|
||||
final List<InternalAggregations> aggregationsList;
|
||||
if (bufferdAggs != null) {
|
||||
if (bufferedAggs != null) {
|
||||
consumeAggs = false;
|
||||
// we already have results from intermediate reduces and just need to perform the final reduce
|
||||
assert firstResult.hasAggs() : "firstResult has no aggs but we got non null buffered aggs?";
|
||||
aggregationsList = bufferdAggs;
|
||||
aggregationsList = bufferedAggs;
|
||||
} else if (firstResult.hasAggs()) {
|
||||
// the number of shards was less than the buffer size so we reduce agg results directly
|
||||
aggregationsList = new ArrayList<>(queryResults.size());
|
||||
|
@ -435,8 +454,12 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
final Map<String, List<Suggestion>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
|
||||
final Map<String, ProfileShardResult> profileResults = hasProfileResults ? new HashMap<>(queryResults.size())
|
||||
: Collections.emptyMap();
|
||||
int from = 0;
|
||||
int size = 0;
|
||||
for (SearchPhaseResult entry : queryResults) {
|
||||
QuerySearchResult result = entry.queryResult();
|
||||
from = result.from();
|
||||
size = result.size();
|
||||
if (result.searchTimedOut()) {
|
||||
timedOut = true;
|
||||
}
|
||||
|
@ -447,11 +470,6 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
terminatedEarly = true;
|
||||
}
|
||||
}
|
||||
totalHits += result.topDocs().totalHits;
|
||||
fetchHits += result.topDocs().scoreDocs.length;
|
||||
if (!Float.isNaN(result.topDocs().getMaxScore())) {
|
||||
maxScore = Math.max(maxScore, result.topDocs().getMaxScore());
|
||||
}
|
||||
if (hasSuggest) {
|
||||
assert result.suggest() != null;
|
||||
for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : result.suggest()) {
|
||||
|
@ -472,8 +490,11 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
final InternalAggregations aggregations = aggregationsList.isEmpty() ? null : reduceAggs(aggregationsList,
|
||||
firstResult.pipelineAggregators(), reduceContext);
|
||||
final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
|
||||
return new ReducedQueryPhase(totalHits, fetchHits, maxScore, timedOut, terminatedEarly, firstResult, suggest, aggregations,
|
||||
shardResults, numReducePhases);
|
||||
final SortedTopDocs scoreDocs = this.sortDocs(isScrollRequest, queryResults, bufferedTopDocs, topDocsStats, from, size);
|
||||
return new ReducedQueryPhase(topDocsStats.totalHits, topDocsStats.fetchHits, topDocsStats.maxScore,
|
||||
timedOut, terminatedEarly, suggest, aggregations, shardResults, scoreDocs.scoreDocs, scoreDocs.sortFields,
|
||||
firstResult != null ? firstResult.sortValueFormats() : null,
|
||||
numReducePhases, scoreDocs.isSortedByField, size, from, firstResult == null);
|
||||
}
|
||||
|
||||
|
||||
|
@ -514,8 +535,6 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
final boolean timedOut;
|
||||
// non null and true if at least one reduced result was terminated early
|
||||
final Boolean terminatedEarly;
|
||||
// an non-null arbitrary query result if was at least one reduced result
|
||||
final QuerySearchResult oneResult;
|
||||
// the reduced suggest results
|
||||
final Suggest suggest;
|
||||
// the reduced internal aggregations
|
||||
|
@ -524,10 +543,25 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
final SearchProfileShardResults shardResults;
|
||||
// the number of reduces phases
|
||||
final int numReducePhases;
|
||||
// the searches merged top docs
|
||||
final ScoreDoc[] scoreDocs;
|
||||
// the top docs sort fields used to sort the score docs, <code>null</code> if the results are not sorted
|
||||
final SortField[] sortField;
|
||||
// <code>true</code> iff the result score docs is sorted by a field (not score), this implies that <code>sortField</code> is set.
|
||||
final boolean isSortedByField;
|
||||
// the size of the top hits to return
|
||||
final int size;
|
||||
// <code>true</code> iff the query phase had no results. Otherwise <code>false</code>
|
||||
final boolean isEmptyResult;
|
||||
// the offset into the merged top hits
|
||||
final int from;
|
||||
// sort value formats used to sort / format the result
|
||||
final DocValueFormat[] sortValueFormats;
|
||||
|
||||
ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly,
|
||||
QuerySearchResult oneResult, Suggest suggest, InternalAggregations aggregations,
|
||||
SearchProfileShardResults shardResults, int numReducePhases) {
|
||||
ReducedQueryPhase(long totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly, Suggest suggest,
|
||||
InternalAggregations aggregations, SearchProfileShardResults shardResults, ScoreDoc[] scoreDocs,
|
||||
SortField[] sortFields, DocValueFormat[] sortValueFormats, int numReducePhases, boolean isSortedByField, int size,
|
||||
int from, boolean isEmptyResult) {
|
||||
if (numReducePhases <= 0) {
|
||||
throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases);
|
||||
}
|
||||
|
@ -540,27 +574,26 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
}
|
||||
this.timedOut = timedOut;
|
||||
this.terminatedEarly = terminatedEarly;
|
||||
this.oneResult = oneResult;
|
||||
this.suggest = suggest;
|
||||
this.aggregations = aggregations;
|
||||
this.shardResults = shardResults;
|
||||
this.numReducePhases = numReducePhases;
|
||||
this.scoreDocs = scoreDocs;
|
||||
this.sortField = sortFields;
|
||||
this.isSortedByField = isSortedByField;
|
||||
this.size = size;
|
||||
this.from = from;
|
||||
this.isEmptyResult = isEmptyResult;
|
||||
this.sortValueFormats = sortValueFormats;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new search response from the given merged hits.
|
||||
* @see #merge(boolean, ScoreDoc[], ReducedQueryPhase, Collection, IntFunction)
|
||||
* @see #merge(boolean, ReducedQueryPhase, Collection, IntFunction)
|
||||
*/
|
||||
public InternalSearchResponse buildResponse(SearchHits hits) {
|
||||
return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the query phase had no results. Otherwise <code>false</code>
|
||||
*/
|
||||
public boolean isEmpty() {
|
||||
return oneResult == null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -569,12 +602,16 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
* This implementation can be configured to batch up a certain amount of results and only reduce them
|
||||
* iff the buffer is exhausted.
|
||||
*/
|
||||
static final class QueryPhaseResultConsumer
|
||||
extends InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> {
|
||||
private final InternalAggregations[] buffer;
|
||||
static final class QueryPhaseResultConsumer extends InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> {
|
||||
private final InternalAggregations[] aggsBuffer;
|
||||
private final TopDocs[] topDocsBuffer;
|
||||
private final boolean hasAggs;
|
||||
private final boolean hasTopDocs;
|
||||
private final int bufferSize;
|
||||
private int index;
|
||||
private final SearchPhaseController controller;
|
||||
private int numReducePhases = 0;
|
||||
private final TopDocsStats topDocsStats = new TopDocsStats();
|
||||
|
||||
/**
|
||||
* Creates a new {@link QueryPhaseResultConsumer}
|
||||
|
@ -583,7 +620,8 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
* @param bufferSize the size of the reduce buffer. if the buffer size is smaller than the number of expected results
|
||||
* the buffer is used to incrementally reduce aggregation results before all shards responded.
|
||||
*/
|
||||
private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedResultSize, int bufferSize) {
|
||||
private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedResultSize, int bufferSize,
|
||||
boolean hasTopDocs, boolean hasAggs) {
|
||||
super(expectedResultSize);
|
||||
if (expectedResultSize != 1 && bufferSize < 2) {
|
||||
throw new IllegalArgumentException("buffer size must be >= 2 if there is more than one expected result");
|
||||
|
@ -591,39 +629,68 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
if (expectedResultSize <= bufferSize) {
|
||||
throw new IllegalArgumentException("buffer size must be less than the expected result size");
|
||||
}
|
||||
if (hasAggs == false && hasTopDocs == false) {
|
||||
throw new IllegalArgumentException("either aggs or top docs must be present");
|
||||
}
|
||||
this.controller = controller;
|
||||
// no need to buffer anything if we have less expected results. in this case we don't consume any results ahead of time.
|
||||
this.buffer = new InternalAggregations[bufferSize];
|
||||
this.aggsBuffer = new InternalAggregations[hasAggs ? bufferSize : 0];
|
||||
this.topDocsBuffer = new TopDocs[hasTopDocs ? bufferSize : 0];
|
||||
this.hasTopDocs = hasTopDocs;
|
||||
this.hasAggs = hasAggs;
|
||||
this.bufferSize = bufferSize;
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void consumeResult(SearchPhaseResult result) {
|
||||
super.consumeResult(result);
|
||||
QuerySearchResult queryResult = result.queryResult();
|
||||
assert queryResult.hasAggs() : "this collector should only be used if aggs are requested";
|
||||
consumeInternal(queryResult);
|
||||
}
|
||||
|
||||
private synchronized void consumeInternal(QuerySearchResult querySearchResult) {
|
||||
InternalAggregations aggregations = (InternalAggregations) querySearchResult.consumeAggs();
|
||||
if (index == buffer.length) {
|
||||
InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(buffer));
|
||||
Arrays.fill(buffer, null);
|
||||
if (index == bufferSize) {
|
||||
if (hasAggs) {
|
||||
InternalAggregations reducedAggs = controller.reduceAggsIncrementally(Arrays.asList(aggsBuffer));
|
||||
Arrays.fill(aggsBuffer, null);
|
||||
aggsBuffer[0] = reducedAggs;
|
||||
}
|
||||
if (hasTopDocs) {
|
||||
TopDocs reducedTopDocs = controller.mergeTopDocs(Arrays.asList(topDocsBuffer),
|
||||
querySearchResult.from() + querySearchResult.size() // we have to merge here in the same way we collect on a shard
|
||||
, 0);
|
||||
Arrays.fill(topDocsBuffer, null);
|
||||
topDocsBuffer[0] = reducedTopDocs;
|
||||
}
|
||||
numReducePhases++;
|
||||
buffer[0] = reducedAggs;
|
||||
index = 1;
|
||||
}
|
||||
final int i = index++;
|
||||
buffer[i] = aggregations;
|
||||
if (hasAggs) {
|
||||
aggsBuffer[i] = (InternalAggregations) querySearchResult.consumeAggs();
|
||||
}
|
||||
if (hasTopDocs) {
|
||||
final TopDocs topDocs = querySearchResult.consumeTopDocs(); // can't be null
|
||||
topDocsStats.add(topDocs);
|
||||
SearchPhaseController.setShardIndex(topDocs, querySearchResult.getShardIndex());
|
||||
topDocsBuffer[i] = topDocs;
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized List<InternalAggregations> getRemaining() {
|
||||
return Arrays.asList(buffer).subList(0, index);
|
||||
private synchronized List<InternalAggregations> getRemainingAggs() {
|
||||
return hasAggs ? Arrays.asList(aggsBuffer).subList(0, index) : null;
|
||||
}
|
||||
|
||||
private synchronized List<TopDocs> getRemainingTopDocs() {
|
||||
return hasTopDocs ? Arrays.asList(topDocsBuffer).subList(0, index) : null;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public ReducedQueryPhase reduce() {
|
||||
return controller.reducedQueryPhase(results.asList(), getRemaining(), numReducePhases);
|
||||
return controller.reducedQueryPhase(results.asList(), getRemainingAggs(), getRemainingTopDocs(), topDocsStats,
|
||||
numReducePhases, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -641,17 +708,49 @@ public final class SearchPhaseController extends AbstractComponent {
|
|||
*/
|
||||
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> newSearchPhaseResults(SearchRequest request, int numShards) {
|
||||
SearchSourceBuilder source = request.source();
|
||||
if (source != null && source.aggregations() != null) {
|
||||
boolean isScrollRequest = request.scroll() != null;
|
||||
final boolean hasAggs = source != null && source.aggregations() != null;
|
||||
final boolean hasTopDocs = source == null || source.size() != 0;
|
||||
|
||||
if (isScrollRequest == false && (hasAggs || hasTopDocs)) {
|
||||
// no incremental reduce if scroll is used - we only hit a single shard or sometimes more...
|
||||
if (request.getBatchedReduceSize() < numShards) {
|
||||
// only use this if there are aggs and if there are more shards than we should reduce at once
|
||||
return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize());
|
||||
return new QueryPhaseResultConsumer(this, numShards, request.getBatchedReduceSize(), hasTopDocs, hasAggs);
|
||||
}
|
||||
}
|
||||
return new InitialSearchPhase.SearchPhaseResults(numShards) {
|
||||
@Override
|
||||
public ReducedQueryPhase reduce() {
|
||||
return reducedQueryPhase(results.asList());
|
||||
return reducedQueryPhase(results.asList(), isScrollRequest);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
static final class TopDocsStats {
|
||||
long totalHits;
|
||||
long fetchHits;
|
||||
float maxScore = Float.NEGATIVE_INFINITY;
|
||||
|
||||
void add(TopDocs topDocs) {
|
||||
totalHits += topDocs.totalHits;
|
||||
fetchHits += topDocs.scoreDocs.length;
|
||||
if (!Float.isNaN(topDocs.getMaxScore())) {
|
||||
maxScore = Math.max(maxScore, topDocs.getMaxScore());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static final class SortedTopDocs {
|
||||
static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null);
|
||||
final ScoreDoc[] scoreDocs;
|
||||
final boolean isSortedByField;
|
||||
final SortField[] sortFields;
|
||||
|
||||
SortedTopDocs(ScoreDoc[] scoreDocs, boolean isSortedByField, SortField[] sortFields) {
|
||||
this.scoreDocs = scoreDocs;
|
||||
this.isSortedByField = isSortedByField;
|
||||
this.sortFields = sortFields;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -173,9 +173,8 @@ final class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {
|
|||
|
||||
private void innerFinishHim() throws Exception {
|
||||
List<QueryFetchSearchResult> queryFetchSearchResults = queryFetchResults.asList();
|
||||
ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults.asList());
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs,
|
||||
searchPhaseController.reducedQueryPhase(queryFetchSearchResults), queryFetchSearchResults, queryFetchResults::get);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true,
|
||||
searchPhaseController.reducedQueryPhase(queryFetchSearchResults, true), queryFetchSearchResults, queryFetchResults::get);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
|
|
|
@ -55,7 +55,6 @@ final class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
private volatile AtomicArray<ShardSearchFailure> shardFailures;
|
||||
final AtomicArray<QuerySearchResult> queryResults;
|
||||
final AtomicArray<FetchSearchResult> fetchResults;
|
||||
private volatile ScoreDoc[] sortedShardDocs;
|
||||
private final AtomicInteger successfulOps;
|
||||
|
||||
SearchScrollQueryThenFetchAsyncAction(Logger logger, ClusterService clusterService, SearchTransportService searchTransportService,
|
||||
|
@ -171,16 +170,15 @@ final class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
}
|
||||
|
||||
private void executeFetchPhase() throws Exception {
|
||||
sortedShardDocs = searchPhaseController.sortDocs(true, queryResults.asList());
|
||||
if (sortedShardDocs.length == 0) {
|
||||
finishHim(searchPhaseController.reducedQueryPhase(queryResults.asList()));
|
||||
final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList(),
|
||||
true);
|
||||
if (reducedQueryPhase.scoreDocs.length == 0) {
|
||||
finishHim(reducedQueryPhase);
|
||||
return;
|
||||
}
|
||||
|
||||
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), sortedShardDocs);
|
||||
SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList());
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs,
|
||||
queryResults.length());
|
||||
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(queryResults.length(), reducedQueryPhase.scoreDocs);
|
||||
final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, queryResults.length());
|
||||
final CountDown counter = new CountDown(docIdsToLoad.length);
|
||||
for (int i = 0; i < docIdsToLoad.length; i++) {
|
||||
final int index = i;
|
||||
|
@ -222,8 +220,8 @@ final class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {
|
|||
|
||||
private void finishHim(SearchPhaseController.ReducedQueryPhase queryPhase) {
|
||||
try {
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryPhase,
|
||||
fetchResults.asList(), fetchResults::get);
|
||||
final InternalSearchResponse internalResponse = searchPhaseController.merge(true, queryPhase, fetchResults.asList(),
|
||||
fetchResults::get);
|
||||
String scrollId = null;
|
||||
if (request.scroll() != null) {
|
||||
scrollId = request.scrollId();
|
||||
|
|
|
@ -21,8 +21,8 @@ package org.elasticsearch.bootstrap;
|
|||
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
import org.elasticsearch.plugins.Platforms;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
|
|
@ -456,10 +456,22 @@ public interface Client extends ElasticsearchClient, Releasable {
|
|||
*/
|
||||
void clearScroll(ClearScrollRequest request, ActionListener<ClearScrollResponse> listener);
|
||||
|
||||
/**
|
||||
* @deprecated Use _field_caps instead or run a min/max aggregations on the desired fields
|
||||
*/
|
||||
@Deprecated
|
||||
FieldStatsRequestBuilder prepareFieldStats();
|
||||
|
||||
/**
|
||||
* @deprecated Use _field_caps instead or run a min/max aggregations on the desired fields
|
||||
*/
|
||||
@Deprecated
|
||||
ActionFuture<FieldStatsResponse> fieldStats(FieldStatsRequest request);
|
||||
|
||||
/**
|
||||
* @deprecated Use _field_caps instead or run a min/max aggregations on the desired fields
|
||||
*/
|
||||
@Deprecated
|
||||
void fieldStats(FieldStatsRequest request, ActionListener<FieldStatsResponse> listener);
|
||||
|
||||
/**
|
||||
|
|
|
@ -87,6 +87,10 @@ public abstract class SecureSetting<T> extends Setting<T> {
|
|||
checkDeprecation(settings);
|
||||
final SecureSettings secureSettings = settings.getSecureSettings();
|
||||
if (secureSettings == null || secureSettings.getSettingNames().contains(getKey()) == false) {
|
||||
if (super.exists(settings)) {
|
||||
throw new IllegalArgumentException("Setting [" + getKey() + "] is a secure setting" +
|
||||
" and must be stored inside the Elasticsearch keystore, but was found inside elasticsearch.yml");
|
||||
}
|
||||
return getFallback(settings);
|
||||
}
|
||||
try {
|
||||
|
@ -117,14 +121,7 @@ public abstract class SecureSetting<T> extends Setting<T> {
|
|||
* This may be any sensitive string, e.g. a username, a password, an auth token, etc.
|
||||
*/
|
||||
public static Setting<SecureString> secureString(String name, Setting<SecureString> fallback,
|
||||
boolean allowLegacy, Property... properties) {
|
||||
final Setting<String> legacy;
|
||||
if (allowLegacy) {
|
||||
Property[] legacyProperties = ArrayUtils.concat(properties, LEGACY_PROPERTIES, Property.class);
|
||||
legacy = Setting.simpleString(name, legacyProperties);
|
||||
} else {
|
||||
legacy = null;
|
||||
}
|
||||
Property... properties) {
|
||||
return new SecureSetting<SecureString>(name, properties) {
|
||||
@Override
|
||||
protected SecureString getSecret(SecureSettings secureSettings) throws GeneralSecurityException {
|
||||
|
@ -132,26 +129,11 @@ public abstract class SecureSetting<T> extends Setting<T> {
|
|||
}
|
||||
@Override
|
||||
SecureString getFallback(Settings settings) {
|
||||
if (legacy != null && legacy.exists(settings)) {
|
||||
return new SecureString(legacy.get(settings).toCharArray());
|
||||
}
|
||||
if (fallback != null) {
|
||||
return fallback.get(settings);
|
||||
}
|
||||
return new SecureString(new char[0]); // this means "setting does not exist"
|
||||
}
|
||||
@Override
|
||||
protected void checkDeprecation(Settings settings) {
|
||||
super.checkDeprecation(settings);
|
||||
if (legacy != null) {
|
||||
legacy.checkDeprecation(settings);
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public boolean exists(Settings settings) {
|
||||
// handle legacy, which is internal to this setting
|
||||
return super.exists(settings) || legacy != null && legacy.exists(settings);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@@ -31,14 +31,6 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
* to get the concrete values as a list using {@link #asList()}.
*/
public class AtomicArray<E> {

private static final AtomicArray EMPTY = new AtomicArray(0);

@SuppressWarnings("unchecked")
public static <E> E empty() {
return (E) EMPTY;
}

private final AtomicReferenceArray<E> array;
private volatile List<E> nonNullList;

@@ -53,7 +45,6 @@ public class AtomicArray<E> {
return array.length();
}


/**
* Sets the element at position {@code i} to the given value.
*

@@ -69,9 +69,8 @@ public class NamedXContentRegistry {
}
/**
* Creates a new entry which can be stored by the registry.
* @deprecated prefer {@link Entry#Entry(Class, ParseField, CheckedFunction)}. Contexts will be removed when possible
* Prefer {@link Entry#Entry(Class, ParseField, CheckedFunction)} unless you need a context to carry around while parsing.
*/
@Deprecated
public <T> Entry(Class<T> categoryClass, ParseField name, ContextParser<Object, ? extends T> parser) {
this.categoryClass = Objects.requireNonNull(categoryClass);
this.name = Objects.requireNonNull(name);

@@ -441,8 +441,8 @@ public class InternalEngine extends Engine {
if (versionValue == null) {
return OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND;
} else {
return op.version() > versionValue.getVersion() ?
OpVsLuceneDocStatus.OP_NEWER : OpVsLuceneDocStatus.OP_STALE_OR_EQUAL;
return op.versionType().isVersionConflictForWrites(versionValue.getVersion(), op.version(), versionValue.isDelete()) ?
OpVsLuceneDocStatus.OP_STALE_OR_EQUAL : OpVsLuceneDocStatus.OP_NEWER;
}
}

@@ -305,10 +305,6 @@ public class PluginsService extends AbstractComponent {

try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
for (Path plugin : stream) {
if (FileSystemUtils.isHidden(plugin)) {
logger.trace("--- skip hidden plugin file[{}]", plugin.toAbsolutePath());
continue;
}
logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
final PluginInfo info;
try {

@@ -45,10 +45,19 @@ import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHead
public class RestFieldStatsAction extends BaseRestHandler {
public RestFieldStatsAction(Settings settings, RestController controller) {
super(settings);
controller.registerHandler(GET, "/_field_stats", this);
controller.registerHandler(POST, "/_field_stats", this);
controller.registerHandler(GET, "/{index}/_field_stats", this);
controller.registerHandler(POST, "/{index}/_field_stats", this);
controller.registerAsDeprecatedHandler(GET, "/_field_stats", this,
deprecationMessage(), deprecationLogger);
controller.registerAsDeprecatedHandler(POST, "/_field_stats", this,
deprecationMessage(), deprecationLogger);
controller.registerAsDeprecatedHandler(GET, "/{index}/_field_stats", this,
deprecationMessage(), deprecationLogger);
controller.registerAsDeprecatedHandler(POST, "/{index}/_field_stats", this,
deprecationMessage(), deprecationLogger);
}

static String deprecationMessage() {
return "[_field_stats] endpoint is deprecated! Use [_field_caps] instead or " +
"run a min/max aggregations on the desired fields.";
}

@Override

@@ -259,7 +259,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv

loadOrExecuteQueryPhase(request, context);

if (context.queryResult().hasHits() == false && context.scrollContext() == null) {
if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) {
freeContext(context.id());
} else {
contextProcessedSuccessfully(context);

@@ -341,7 +341,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
operationListener.onPreQueryPhase(context);
long time = System.nanoTime();
queryPhase.execute(context);
if (context.queryResult().hasHits() == false && context.scrollContext() == null) {
if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) {
// no hits, we can release the context since there will be no fetch phase
freeContext(context.id());
} else {

@@ -18,6 +18,8 @@
*/
package org.elasticsearch.search.aggregations;

import org.elasticsearch.common.ParseField;

import java.util.Map;

/**

@@ -25,6 +27,12 @@ import java.util.Map;
*/
public interface Aggregation {

/**
* Delimiter used when prefixing aggregation names with their type
* using the typed_keys parameter
*/
String TYPED_KEYS_DELIMITER = "#";

/**
* @return The name of this aggregation.
*/

@@ -34,4 +42,22 @@ public interface Aggregation {
* Get the optional byte array metadata that was set on the aggregation
*/
Map<String, Object> getMetaData();

/**
* Common xcontent fields that are shared among addAggregation
*/
final class CommonFields extends ParseField.CommonFields {
public static final ParseField META = new ParseField("meta");
public static final ParseField BUCKETS = new ParseField("buckets");
public static final ParseField VALUE = new ParseField("value");
public static final ParseField VALUES = new ParseField("values");
public static final ParseField VALUE_AS_STRING = new ParseField("value_as_string");
public static final ParseField DOC_COUNT = new ParseField("doc_count");
public static final ParseField KEY = new ParseField("key");
public static final ParseField KEY_AS_STRING = new ParseField("key_as_string");
public static final ParseField FROM = new ParseField("from");
public static final ParseField FROM_AS_STRING = new ParseField("from_as_string");
public static final ParseField TO = new ParseField("to");
public static final ParseField TO_AS_STRING = new ParseField("to_as_string");
}
}

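Aside (not part of this commit): with the typed_keys request parameter, the delimiter declared above produces response keys such as "max#max_price". A sketch of splitting such a key back into type and name, using only the constant introduced here:

import org.elasticsearch.search.aggregations.Aggregation;

// Illustrative helper: splits "max#max_price" into { "max", "max_price" } using
// the TYPED_KEYS_DELIMITER constant added in the hunk above.
static String[] splitTypedKey(String typedKey) {
    int pos = typedKey.indexOf(Aggregation.TYPED_KEYS_DELIMITER);
    if (pos <= 0) {
        throw new IllegalArgumentException("Cannot parse typed key [" + typedKey + "]");
    }
    return new String[] { typedKey.substring(0, pos), typedKey.substring(pos + 1) };
}
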
@@ -45,14 +45,4 @@ public interface Aggregations extends Iterable<Aggregation> {
* Returns the aggregation that is associated with the specified name.
*/
<A extends Aggregation> A get(String name);

/**
* Get the value of specified path in the aggregation.
*
* @param path
* the path to the property in the aggregation tree
* @return the value of the property
*/
Object getProperty(String path);

}

@@ -18,7 +18,6 @@
*/
package org.elasticsearch.search.aggregations;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -40,9 +39,6 @@ import java.util.Objects;
*/
public abstract class InternalAggregation implements Aggregation, ToXContent, NamedWriteable {

/** Delimiter used when prefixing aggregation names with their type using the typed_keys parameter **/
public static final String TYPED_KEYS_DELIMITER = "#";

public static class ReduceContext {

private final BigArrays bigArrays;

@@ -242,21 +238,4 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, Na
return this == obj;
}

/**
* Common xcontent fields that are shared among addAggregation
*/
public static final class CommonFields extends ParseField.CommonFields {
public static final ParseField META = new ParseField("meta");
public static final ParseField BUCKETS = new ParseField("buckets");
public static final ParseField VALUE = new ParseField("value");
public static final ParseField VALUES = new ParseField("values");
public static final ParseField VALUE_AS_STRING = new ParseField("value_as_string");
public static final ParseField DOC_COUNT = new ParseField("doc_count");
public static final ParseField KEY = new ParseField("key");
public static final ParseField KEY_AS_STRING = new ParseField("key_as_string");
public static final ParseField FROM = new ParseField("from");
public static final ParseField FROM_AS_STRING = new ParseField("from_as_string");
public static final ParseField TO = new ParseField("to");
public static final ParseField TO_AS_STRING = new ParseField("to_as_string");
}
}

@@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.support.AggregationPath;

import java.io.IOException;
import java.util.ArrayList;

@@ -107,24 +106,6 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl
return (A) asMap().get(name);
}

@Override
public Object getProperty(String path) {
AggregationPath aggPath = AggregationPath.parse(path);
return getProperty(aggPath.getPathElementsAsStringList());
}

public Object getProperty(List<String> path) {
if (path.isEmpty()) {
return this;
}
String aggName = path.get(0);
InternalAggregation aggregation = get(aggName);
if (aggregation == null) {
throw new IllegalArgumentException("Cannot find an aggregation named [" + aggName + "]");
}
return aggregation.getProperty(path.subList(1, path.size()));
}

/**
* Reduces the given lists of addAggregation.
*

@@ -62,6 +62,9 @@ public abstract class InternalMultiBucketAggregation<A extends InternalMultiBuck
*/
public abstract B createBucket(InternalAggregations aggregations, B prototype);

@Override
public abstract List<? extends InternalBucket> getBuckets();

@Override
public Object getProperty(List<String> path) {
if (path.isEmpty()) {

@@ -69,7 +72,7 @@ public abstract class InternalMultiBucketAggregation<A extends InternalMultiBuck
} else if (path.get(0).equals("_bucket_count")) {
return getBuckets().size();
} else {
List<? extends Bucket> buckets = getBuckets();
List<? extends InternalBucket> buckets = getBuckets();
Object[] propertyArray = new Object[buckets.size()];
for (int i = 0; i < buckets.size(); i++) {
propertyArray[i] = buckets.get(i).getProperty(getName(), path);

@@ -79,7 +82,7 @@ public abstract class InternalMultiBucketAggregation<A extends InternalMultiBuck
}

public abstract static class InternalBucket implements Bucket {
@Override

public Object getProperty(String containingAggName, List<String> path) {
if (path.isEmpty()) {
return this;

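Aside (not part of this commit): the special "_bucket_count" path handled above can be reached through the bucket-property API; a sketch that relies only on the getProperty(List) overload shown in this hunk:

import java.util.Collections;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;

// Illustrative helper: resolves the "_bucket_count" path handled in the code above
// to get the number of buckets of any multi-bucket aggregation.
static int bucketCountOf(InternalMultiBucketAggregation<?, ?> agg) {
    return (Integer) agg.getProperty(Collections.singletonList("_bucket_count"));
}
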
@@ -19,8 +19,6 @@

package org.elasticsearch.search.aggregations.bucket;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.util.Comparators;
import org.elasticsearch.common.xcontent.ToXContent;

@@ -29,7 +27,6 @@ import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.HasAggregations;
import org.elasticsearch.search.aggregations.support.AggregationPath;

import java.io.IOException;
import java.util.List;

/**

@@ -40,7 +37,7 @@ public interface MultiBucketsAggregation extends Aggregation {
* A bucket represents a criteria to which all documents that fall in it adhere to. It is also uniquely identified
* by a key, and can potentially hold sub-aggregations computed over all documents in it.
*/
public interface Bucket extends HasAggregations, ToXContent, Writeable {
interface Bucket extends HasAggregations, ToXContent, Writeable {
/**
* @return The key associated with the bucket
*/

@@ -62,8 +59,6 @@ public interface MultiBucketsAggregation extends Aggregation {
@Override
Aggregations getAggregations();

Object getProperty(String containingAggName, List<String> path);

class SubAggregationComparator<B extends Bucket> implements java.util.Comparator<B> {

private final AggregationPath path;

@@ -38,6 +38,5 @@ public interface GeoHashGrid extends MultiBucketsAggregation {
* @return The buckets of this aggregation (each bucket representing a geohash grid cell)
*/
@Override
List<Bucket> getBuckets();

List<? extends Bucket> getBuckets();
}

@@ -185,7 +185,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation<Internal
}

@Override
public List<GeoHashGrid.Bucket> getBuckets() {
public List<InternalGeoHashGrid.Bucket> getBuckets() {
return unmodifiableList(buckets);
}

@@ -48,8 +48,7 @@ public interface Histogram extends MultiBucketsAggregation {
* @return The buckets of this histogram (each bucket representing an interval in the histogram)
*/
@Override
List<Bucket> getBuckets();

List<? extends Bucket> getBuckets();

/**
* A strategy defining the order in which the buckets in this histogram are ordered.

@@ -265,7 +265,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
}

@Override
public List<Histogram.Bucket> getBuckets() {
public List<InternalDateHistogram.Bucket> getBuckets() {
return Collections.unmodifiableList(buckets);
}

@@ -255,7 +255,7 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
}

@Override
public List<Histogram.Bucket> getBuckets() {
public List<InternalHistogram.Bucket> getBuckets() {
return Collections.unmodifiableList(buckets);
}

@@ -225,7 +225,7 @@ public final class InternalBinaryRange
}

@Override
public List<Range.Bucket> getBuckets() {
public List<InternalBinaryRange.Bucket> getBuckets() {
return unmodifiableList(buckets);
}

@@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -59,7 +60,7 @@ public class UnmappedSampler extends InternalSampler {

@Override
public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
builder.field(InternalAggregation.CommonFields.DOC_COUNT.getPreferredName(), 0);
builder.field(Aggregation.CommonFields.DOC_COUNT.getPreferredName(), 0);
return builder;
}

@@ -147,13 +147,13 @@ public class BucketHelpers {
* <code>aggPath</code>
*/
public static Double resolveBucketValue(MultiBucketsAggregation agg,
InternalMultiBucketAggregation.Bucket bucket, String aggPath, GapPolicy gapPolicy) {
InternalMultiBucketAggregation.InternalBucket bucket, String aggPath, GapPolicy gapPolicy) {
List<String> aggPathsList = AggregationPath.parse(aggPath).getPathElementsAsStringList();
return resolveBucketValue(agg, bucket, aggPathsList, gapPolicy);
}

public static Double resolveBucketValue(MultiBucketsAggregation agg,
InternalMultiBucketAggregation.Bucket bucket, List<String> aggPathAsList, GapPolicy gapPolicy) {
InternalMultiBucketAggregation.InternalBucket bucket, List<String> aggPathAsList, GapPolicy gapPolicy) {
try {
Object propertyValue = bucket.getProperty(agg.getName(), aggPathAsList);
if (propertyValue == null) {

@@ -27,7 +27,6 @@ import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -82,9 +81,8 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg
if (aggregation.getName().equals(bucketsPath.get(0))) {
bucketsPath = bucketsPath.subList(1, bucketsPath.size());
InternalMultiBucketAggregation<?, ?> multiBucketsAgg = (InternalMultiBucketAggregation<?, ?>) aggregation;
List<? extends Bucket> buckets = multiBucketsAgg.getBuckets();
for (int i = 0; i < buckets.size(); i++) {
Bucket bucket = buckets.get(i);
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = multiBucketsAgg.getBuckets();
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy);
if (bucketValue != null && !Double.isNaN(bucketValue)) {
collectBucketValue(bucket.getKeyAsString(), bucketValue);

@@ -31,14 +31,12 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@@ -89,12 +87,13 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator {

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends Bucket> buckets = originalAgg.getBuckets();
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg =
(InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = originalAgg.getBuckets();

CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS);
List newBuckets = new ArrayList<>();
for (Bucket bucket : buckets) {
List<InternalMultiBucketAggregation.InternalBucket> newBuckets = new ArrayList<>();
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Map<String, Object> vars = new HashMap<>();
if (script.getParams() != null) {
vars.putAll(script.getParams());

@@ -122,13 +121,12 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator {
throw new AggregationExecutionException("series_arithmetic script for reducer [" + name()
+ "] must return a Number");
}
final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
return (InternalAggregation) p;
}).collect(Collectors.toList());
final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map(
(p) -> (InternalAggregation) p).collect(Collectors.toList());
aggs.add(new InternalSimpleValue(name(), ((Number) returned).doubleValue(), formatter,
new ArrayList<>(), metaData()));
InternalMultiBucketAggregation.InternalBucket newBucket = originalAgg.createBucket(new InternalAggregations(aggs),
(InternalMultiBucketAggregation.InternalBucket) bucket);
bucket);
newBuckets.add(newBucket);
}
}

@@ -29,13 +29,11 @@ import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@@ -84,11 +82,11 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator {
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg =
(InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends Bucket> buckets = originalAgg.getBuckets();
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = originalAgg.getBuckets();

CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS);
List newBuckets = new ArrayList<>();
for (Bucket bucket : buckets) {
List<InternalMultiBucketAggregation.InternalBucket> newBuckets = new ArrayList<>();
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Map<String, Object> vars = new HashMap<>();
if (script.getParams() != null) {
vars.putAll(script.getParams());

@@ -25,7 +25,7 @@ import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;

@@ -70,13 +70,14 @@ public class CumulativeSumPipelineAggregator extends PipelineAggregator {

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
List<? extends Bucket> buckets = histo.getBuckets();
InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends InternalMultiBucketAggregation.InternalBucket>
histo = (InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends
InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = histo.getBuckets();
HistogramFactory factory = (HistogramFactory) histo;

List<Bucket> newBuckets = new ArrayList<>();
double sum = 0;
for (Bucket bucket : buckets) {
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], GapPolicy.INSERT_ZEROS);
sum += thisBucketValue;
List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {

@@ -25,7 +25,7 @@ import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;

@@ -77,14 +77,16 @@ public class DerivativePipelineAggregator extends PipelineAggregator {

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
List<? extends Bucket> buckets = histo.getBuckets();
InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends InternalMultiBucketAggregation.InternalBucket>
histo = (InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends
InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = histo.getBuckets();
HistogramFactory factory = (HistogramFactory) histo;

List<Bucket> newBuckets = new ArrayList<>();
Number lastBucketKey = null;
Double lastBucketValue = null;
for (Bucket bucket : buckets) {
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Number thisBucketKey = factory.getKey(bucket);
Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
if (lastBucketValue != null && thisBucketValue != null) {

@@ -107,5 +109,4 @@ public class DerivativePipelineAggregator extends PipelineAggregator {
}
return factory.createAggregation(newBuckets);
}

}

@@ -26,6 +26,7 @@ import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;

@@ -93,8 +94,10 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
List<? extends Bucket> buckets = histo.getBuckets();
InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends InternalMultiBucketAggregation.InternalBucket>
histo = (InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends
InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = histo.getBuckets();
HistogramFactory factory = (HistogramFactory) histo;

List<Bucket> newBuckets = new ArrayList<>();

@@ -110,7 +113,7 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
model = minimize(buckets, histo, model);
}

for (Bucket bucket : buckets) {
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);

// Default is to reuse existing bucket. Simplifies the rest of the logic,

@@ -180,13 +183,14 @@ public class MovAvgPipelineAggregator extends PipelineAggregator {
return factory.createAggregation(newBuckets);
}

private MovAvgModel minimize(List<? extends Bucket> buckets, MultiBucketsAggregation histo, MovAvgModel model) {
private MovAvgModel minimize(List<? extends InternalMultiBucketAggregation.InternalBucket> buckets,
MultiBucketsAggregation histo, MovAvgModel model) {

int counter = 0;
EvictingQueue<Double> values = new EvictingQueue<>(this.window);

double[] test = new double[window];
ListIterator<? extends Bucket> iter = buckets.listIterator(buckets.size());
ListIterator<? extends InternalMultiBucketAggregation.InternalBucket> iter = buckets.listIterator(buckets.size());

// We have to walk the iterator backwards because we don't know if/how many buckets are empty.
while (iter.hasPrevious() && counter < window) {

@@ -26,10 +26,10 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramFactory;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -80,15 +80,17 @@ public class SerialDiffPipelineAggregator extends PipelineAggregator {

@Override
public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
MultiBucketsAggregation histo = (MultiBucketsAggregation) aggregation;
List<? extends Bucket> buckets = histo.getBuckets();
InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends InternalMultiBucketAggregation.InternalBucket>
histo = (InternalMultiBucketAggregation<? extends InternalMultiBucketAggregation, ? extends
InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends InternalMultiBucketAggregation.InternalBucket> buckets = histo.getBuckets();
HistogramFactory factory = (HistogramFactory) histo;

List<Bucket> newBuckets = new ArrayList<>();
EvictingQueue<Double> lagWindow = new EvictingQueue<>(lag);
int counter = 0;

for (Bucket bucket : buckets) {
for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy);
Bucket newBucket = bucket;

@@ -111,17 +113,14 @@ public class SerialDiffPipelineAggregator extends PipelineAggregator {
if (!Double.isNaN(thisBucketValue) && !Double.isNaN(lagValue)) {
double diff = thisBucketValue - lagValue;

List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> {
return (InternalAggregation) p;
}).collect(Collectors.toList());
aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList<PipelineAggregator>(), metaData()));
List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map(
(p) -> (InternalAggregation) p).collect(Collectors.toList());
aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList<>(), metaData()));
newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs));
}

newBuckets.add(newBucket);
lagWindow.add(thisBucketValue);

}
return factory.createAggregation(newBuckets);
}

@@ -166,7 +166,7 @@ public class FetchPhase implements SearchPhase {
fetchSubPhase.hitsExecute(context, hits);
}

context.fetchResult().hits(new SearchHits(hits, context.queryResult().topDocs().totalHits, context.queryResult().topDocs().getMaxScore()));
context.fetchResult().hits(new SearchHits(hits, context.queryResult().getTotalHits(), context.queryResult().getMaxScore()));
}

private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException {

@@ -142,7 +142,6 @@ public class QueryPhase implements SearchPhase {
queryResult.searchTimedOut(false);

final boolean doProfile = searchContext.getProfilers() != null;
final SearchType searchType = searchContext.searchType();
boolean rescore = false;
try {
queryResult.from(searchContext.from());

@@ -165,12 +164,7 @@ public class QueryPhase implements SearchPhase {
if (searchContext.getProfilers() != null) {
collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList());
}
topDocsCallable = new Callable<TopDocs>() {
@Override
public TopDocs call() throws Exception {
return new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
}
};
topDocsCallable = () -> new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
} else {
// Perhaps have a dedicated scroll phase?
final ScrollContext scrollContext = searchContext.scrollContext();

@@ -238,38 +232,35 @@ public class QueryPhase implements SearchPhase {
if (doProfile) {
collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList());
}
topDocsCallable = new Callable<TopDocs>() {
@Override
public TopDocs call() throws Exception {
final TopDocs topDocs;
if (topDocsCollector instanceof TopDocsCollector) {
topDocs = ((TopDocsCollector<?>) topDocsCollector).topDocs();
} else if (topDocsCollector instanceof CollapsingTopDocsCollector) {
topDocs = ((CollapsingTopDocsCollector) topDocsCollector).getTopDocs();
} else {
throw new IllegalStateException("Unknown top docs collector " + topDocsCollector.getClass().getName());
}
if (scrollContext != null) {
if (scrollContext.totalHits == -1) {
// first round
scrollContext.totalHits = topDocs.totalHits;
scrollContext.maxScore = topDocs.getMaxScore();
} else {
// subsequent round: the total number of hits and
// the maximum score were computed on the first round
topDocs.totalHits = scrollContext.totalHits;
topDocs.setMaxScore(scrollContext.maxScore);
}
if (searchContext.request().numberOfShards() == 1) {
// if we fetch the document in the same roundtrip, we already know the last emitted doc
if (topDocs.scoreDocs.length > 0) {
// set the last emitted doc
scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1];
}
}
}
return topDocs;
topDocsCallable = () -> {
final TopDocs topDocs;
if (topDocsCollector instanceof TopDocsCollector) {
topDocs = ((TopDocsCollector<?>) topDocsCollector).topDocs();
} else if (topDocsCollector instanceof CollapsingTopDocsCollector) {
topDocs = ((CollapsingTopDocsCollector) topDocsCollector).getTopDocs();
} else {
throw new IllegalStateException("Unknown top docs collector " + topDocsCollector.getClass().getName());
}
if (scrollContext != null) {
if (scrollContext.totalHits == -1) {
// first round
scrollContext.totalHits = topDocs.totalHits;
scrollContext.maxScore = topDocs.getMaxScore();
} else {
// subsequent round: the total number of hits and
// the maximum score were computed on the first round
topDocs.totalHits = scrollContext.totalHits;
topDocs.setMaxScore(scrollContext.maxScore);
}
if (searchContext.request().numberOfShards() == 1) {
// if we fetch the document in the same roundtrip, we already know the last emitted doc
if (topDocs.scoreDocs.length > 0) {
// set the last emitted doc
scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1];
}
}
}
return topDocs;
};
}

@@ -55,6 +55,9 @@ public final class QuerySearchResult extends SearchPhaseResult {
private Boolean terminatedEarly = null;
private ProfileShardResult profileShardResults;
private boolean hasProfileResults;
private boolean hasScoreDocs;
private int totalHits;
private float maxScore;

public QuerySearchResult() {
}

@@ -87,11 +90,34 @@ public final class QuerySearchResult extends SearchPhaseResult {
}

public TopDocs topDocs() {
if (topDocs == null) {
throw new IllegalStateException("topDocs already consumed");
}
return topDocs;
}

/**
* Returns <code>true</code> iff the top docs have already been consumed.
*/
public boolean hasConsumedTopDocs() {
return topDocs == null;
}

/**
* Returns and nulls out the top docs for this search results. This allows to free up memory once the top docs are consumed.
* @throws IllegalStateException if the top docs have already been consumed.
*/
public TopDocs consumeTopDocs() {
TopDocs topDocs = this.topDocs;
if (topDocs == null) {
throw new IllegalStateException("topDocs already consumed");
}
this.topDocs = null;
return topDocs;
}

public void topDocs(TopDocs topDocs, DocValueFormat[] sortValueFormats) {
this.topDocs = topDocs;
setTopDocs(topDocs);
if (topDocs.scoreDocs.length > 0 && topDocs.scoreDocs[0] instanceof FieldDoc) {
int numFields = ((FieldDoc) topDocs.scoreDocs[0]).fields.length;
if (numFields != sortValueFormats.length) {

@@ -102,12 +128,19 @@ public final class QuerySearchResult extends SearchPhaseResult {
this.sortValueFormats = sortValueFormats;
}

private void setTopDocs(TopDocs topDocs) {
this.topDocs = topDocs;
hasScoreDocs = topDocs.scoreDocs.length > 0;
this.totalHits = topDocs.totalHits;
this.maxScore = topDocs.getMaxScore();
}

public DocValueFormat[] sortValueFormats() {
return sortValueFormats;
}

/**
* Retruns <code>true</code> if this query result has unconsumed aggregations
* Returns <code>true</code> if this query result has unconsumed aggregations
*/
public boolean hasAggs() {
return hasAggs;

@@ -195,10 +228,15 @@ public final class QuerySearchResult extends SearchPhaseResult {
return this;
}

/** Returns true iff the result has hits */
public boolean hasHits() {
return (topDocs != null && topDocs.scoreDocs.length > 0) ||
(suggest != null && suggest.hasScoreDocs());
/**
* Returns <code>true</code> if this result has any suggest score docs
*/
public boolean hasSuggestHits() {
return (suggest != null && suggest.hasScoreDocs());
}

public boolean hasSearchContext() {
return hasScoreDocs || hasSuggestHits();
}

public static QuerySearchResult readQuerySearchResult(StreamInput in) throws IOException {

@@ -227,7 +265,7 @@ public final class QuerySearchResult extends SearchPhaseResult {
sortValueFormats[i] = in.readNamedWriteable(DocValueFormat.class);
}
}
topDocs = readTopDocs(in);
setTopDocs(readTopDocs(in));
if (hasAggs = in.readBoolean()) {
aggregations = InternalAggregations.readAggregations(in);
}

@@ -278,4 +316,12 @@ public final class QuerySearchResult extends SearchPhaseResult {
out.writeOptionalBoolean(terminatedEarly);
out.writeOptionalWriteable(profileShardResults);
}

public int getTotalHits() {
return totalHits;
}

public float getMaxScore() {
return maxScore;
}
}

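Aside (not part of this commit): a minimal sketch of the consume-once contract introduced above, using only the methods added in this hunk; the helper itself is hypothetical.

import org.apache.lucene.search.TopDocs;
import org.elasticsearch.search.query.QuerySearchResult;

// Hypothetical caller: take the top docs exactly once; totals remain readable afterwards.
static TopDocs takeTopDocsOnce(QuerySearchResult result) {
    if (result.hasConsumedTopDocs()) {
        throw new IllegalStateException("already consumed; totalHits=" + result.getTotalHits());
    }
    return result.consumeTopDocs(); // nulls the internal reference so memory can be freed
}
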
@@ -34,7 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
import org.elasticsearch.search.suggest.completion.CompletionSuggestion;

@@ -373,7 +373,7 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (params.paramAsBoolean(RestSearchAction.TYPED_KEYS_PARAM, false)) {
// Concatenates the type and the name of the suggestion (ex: completion#foo)
builder.startArray(String.join(InternalAggregation.TYPED_KEYS_DELIMITER, getType(), getName()));
builder.startArray(String.join(Aggregation.TYPED_KEYS_DELIMITER, getType(), getName()));
} else {
builder.startArray(getName());
}

@@ -389,7 +389,7 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
String typeAndName = parser.currentName();
// we need to extract the type prefix from the name and throw error if it is not present
int delimiterPos = typeAndName.indexOf(InternalAggregation.TYPED_KEYS_DELIMITER);
int delimiterPos = typeAndName.indexOf(Aggregation.TYPED_KEYS_DELIMITER);
String type;
String name;
if (delimiterPos > 0) {

@@ -19,6 +19,7 @@

package org.elasticsearch.action.search;

import com.carrotsearch.randomizedtesting.RandomizedContext;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.common.lucene.Lucene;

@@ -42,6 +43,7 @@ import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.TestCluster;
import org.junit.Before;

import java.io.IOException;

@@ -51,12 +53,16 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;

public class SearchPhaseControllerTests extends ESTestCase {

@@ -75,8 +81,16 @@ public class SearchPhaseControllerTests extends ESTestCase {
int nShards = randomIntBetween(1, 20);
int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2);
AtomicArray<SearchPhaseResult> results = generateQueryResults(nShards, suggestions, queryResultSize, false);
ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(true, results.asList());
Optional<SearchPhaseResult> first = results.asList().stream().findFirst();
int from = 0, size = 0;
if (first.isPresent()) {
from = first.get().queryResult().from();
size = first.get().queryResult().size();
}
int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results));
ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(true, results.asList(), null, new SearchPhaseController.TopDocsStats(),
from, size)
.scoreDocs;
for (Suggest.Suggestion<?> suggestion : reducedSuggest(results)) {
int suggestionSize = suggestion.getEntries().get(0).getOptions().size();
accumulatedLength += suggestionSize;

@@ -84,48 +98,71 @@ public class SearchPhaseControllerTests extends ESTestCase {
assertThat(sortedDocs.length, equalTo(accumulatedLength));
}

public void testSortIsIdempotent() throws IOException {
public void testSortIsIdempotent() throws Exception {
int nShards = randomIntBetween(1, 20);
int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2);
AtomicArray<SearchPhaseResult> results = generateQueryResults(nShards, Collections.emptyList(), queryResultSize,
randomBoolean() || true);
long randomSeed = randomLong();
boolean useConstantScore = randomBoolean();
AtomicArray<SearchPhaseResult> results = generateSeededQueryResults(randomSeed, nShards, Collections.emptyList(), queryResultSize,
useConstantScore);
boolean ignoreFrom = randomBoolean();
ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(ignoreFrom, results.asList());
Optional<SearchPhaseResult> first = results.asList().stream().findFirst();
int from = 0, size = 0;
if (first.isPresent()) {
from = first.get().queryResult().from();
size = first.get().queryResult().size();
}
SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats();
ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(ignoreFrom, results.asList(), null, topDocsStats, from, size).scoreDocs;

ScoreDoc[] sortedDocs2 = searchPhaseController.sortDocs(ignoreFrom, results.asList());
assertArrayEquals(sortedDocs, sortedDocs2);
results = generateSeededQueryResults(randomSeed, nShards, Collections.emptyList(), queryResultSize,
useConstantScore);
SearchPhaseController.TopDocsStats topDocsStats2 = new SearchPhaseController.TopDocsStats();
ScoreDoc[] sortedDocs2 = searchPhaseController.sortDocs(ignoreFrom, results.asList(), null, topDocsStats2, from, size).scoreDocs;
assertEquals(sortedDocs.length, sortedDocs2.length);
for (int i = 0; i < sortedDocs.length; i++) {
assertEquals(sortedDocs[i].doc, sortedDocs2[i].doc);
assertEquals(sortedDocs[i].shardIndex, sortedDocs2[i].shardIndex);
assertEquals(sortedDocs[i].score, sortedDocs2[i].score, 0.0f);
}
assertEquals(topDocsStats.maxScore, topDocsStats2.maxScore, 0.0f);
assertEquals(topDocsStats.totalHits, topDocsStats2.totalHits);
assertEquals(topDocsStats.fetchHits, topDocsStats2.fetchHits);
}

private AtomicArray<SearchPhaseResult> generateSeededQueryResults(long seed, int nShards,
List<CompletionSuggestion> suggestions,
int searchHitsSize, boolean useConstantScore) throws Exception {
return RandomizedContext.current().runWithPrivateRandomness(seed,
() -> generateQueryResults(nShards, suggestions, searchHitsSize, useConstantScore));
}

public void testMerge() throws IOException {
List<CompletionSuggestion> suggestions = new ArrayList<>();
int maxSuggestSize = 0;
for (int i = 0; i < randomIntBetween(1, 5); i++) {
suggestions.add(new CompletionSuggestion(randomAlphaOfLength(randomIntBetween(1, 5)), randomIntBetween(1, 20)));
int size = randomIntBetween(1, 20);
maxSuggestSize += size;
suggestions.add(new CompletionSuggestion(randomAlphaOfLength(randomIntBetween(1, 5)), size));
}
int nShards = randomIntBetween(1, 20);
int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2);
AtomicArray<SearchPhaseResult> queryResults = generateQueryResults(nShards, suggestions, queryResultSize, false);

// calculate offsets and score doc array
List<ScoreDoc> mergedScoreDocs = new ArrayList<>();
ScoreDoc[] mergedSearchDocs = getTopShardDocs(queryResults);
mergedScoreDocs.addAll(Arrays.asList(mergedSearchDocs));
Suggest mergedSuggest = reducedSuggest(queryResults);
for (Suggest.Suggestion<?> suggestion : mergedSuggest) {
if (suggestion instanceof CompletionSuggestion) {
CompletionSuggestion completionSuggestion = ((CompletionSuggestion) suggestion);
mergedScoreDocs.addAll(completionSuggestion.getOptions().stream()
.map(CompletionSuggestion.Entry.Option::getDoc)
.collect(Collectors.toList()));
}
}
ScoreDoc[] sortedDocs = mergedScoreDocs.toArray(new ScoreDoc[mergedScoreDocs.size()]);
AtomicArray<SearchPhaseResult> searchPhaseResultAtomicArray = generateFetchResults(nShards, mergedSearchDocs, mergedSuggest);
InternalSearchResponse mergedResponse = searchPhaseController.merge(true, sortedDocs,
searchPhaseController.reducedQueryPhase(queryResults.asList()),
SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList(), false);
AtomicArray<SearchPhaseResult> searchPhaseResultAtomicArray = generateFetchResults(nShards, reducedQueryPhase.scoreDocs,
reducedQueryPhase.suggest);
InternalSearchResponse mergedResponse = searchPhaseController.merge(false,
reducedQueryPhase,
searchPhaseResultAtomicArray.asList(), searchPhaseResultAtomicArray::get);
assertThat(mergedResponse.hits().getHits().length, equalTo(mergedSearchDocs.length));
int suggestSize = 0;
for (Suggest.Suggestion s : reducedQueryPhase.suggest) {
Stream<CompletionSuggestion.Entry> stream = s.getEntries().stream();
suggestSize += stream.collect(Collectors.summingInt(e -> e.getOptions().size()));
}
assertThat(suggestSize, lessThanOrEqualTo(maxSuggestSize));
assertThat(mergedResponse.hits().getHits().length, equalTo(reducedQueryPhase.scoreDocs.length-suggestSize));
Suggest suggestResult = mergedResponse.suggest();
for (Suggest.Suggestion<?> suggestion : mergedSuggest) {
for (Suggest.Suggestion<?> suggestion : reducedQueryPhase.suggest) {
assertThat(suggestion, instanceOf(CompletionSuggestion.class));
if (suggestion.getEntries().get(0).getOptions().size() > 0) {
CompletionSuggestion suggestionResult = suggestResult.getSuggestion(suggestion.getName());

@@ -209,16 +246,6 @@ public class SearchPhaseControllerTests extends ESTestCase {
.collect(Collectors.toList()));
}

private ScoreDoc[] getTopShardDocs(AtomicArray<SearchPhaseResult> results) throws IOException {
List<SearchPhaseResult> resultList = results.asList();
TopDocs[] shardTopDocs = new TopDocs[resultList.size()];
for (int i = 0; i < resultList.size(); i++) {
shardTopDocs[i] = resultList.get(i).queryResult().topDocs();
}
int topN = Math.min(results.get(0).queryResult().size(), getTotalQueryHits(results));
return TopDocs.merge(topN, shardTopDocs).scoreDocs;
}

private AtomicArray<SearchPhaseResult> generateFetchResults(int nShards, ScoreDoc[] mergedSearchDocs, Suggest mergedSuggest) {
AtomicArray<SearchPhaseResult> fetchResults = new AtomicArray<>(nShards);
for (int shardIndex = 0; shardIndex < nShards; shardIndex++) {

@@ -309,30 +336,96 @@ public class SearchPhaseControllerTests extends ESTestCase {
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> consumer =
searchPhaseController.newSearchPhaseResults(request, expectedNumResults);
AtomicInteger max = new AtomicInteger();
CountDownLatch latch = new CountDownLatch(expectedNumResults);
Thread[] threads = new Thread[expectedNumResults];
for (int i = 0; i < expectedNumResults; i++) {
int id = i;
Thread t = new Thread(() -> {
threads[i] = new Thread(() -> {
int number = randomIntBetween(1, 1000);
max.updateAndGet(prev -> Math.max(prev, number));
QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id));
result.topDocs(new TopDocs(id, new ScoreDoc[0], 0.0F), new DocValueFormat[0]);
result.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(0, number)}, number), new DocValueFormat[0]);
InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number,
DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap())));
result.aggregations(aggs);
result.setShardIndex(id);
result.size(1);
consumer.consumeResult(result);
latch.countDown();

});
t.start();
threads[i].start();
}
for (int i = 0; i < expectedNumResults; i++) {
threads[i].join();
}
latch.await();
SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
InternalMax internalMax = (InternalMax) reduce.aggregations.asList().get(0);
assertEquals(max.get(), internalMax.getValue(), 0.0D);
assertEquals(1, reduce.scoreDocs.length);
assertEquals(max.get(), reduce.maxScore, 0.0f);
assertEquals(expectedNumResults, reduce.totalHits);
assertEquals(max.get(), reduce.scoreDocs[0].score, 0.0f);
}

public void testConsumerOnlyAggs() throws InterruptedException {
int expectedNumResults = randomIntBetween(1, 100);
int bufferSize = randomIntBetween(2, 200);
SearchRequest request = new SearchRequest();
request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0));
request.setBatchedReduceSize(bufferSize);
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> consumer =
searchPhaseController.newSearchPhaseResults(request, expectedNumResults);
AtomicInteger max = new AtomicInteger();
for (int i = 0; i < expectedNumResults; i++) {
int id = i;
int number = randomIntBetween(1, 1000);
max.updateAndGet(prev -> Math.max(prev, number));
QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id));
result.topDocs(new TopDocs(1, new ScoreDoc[0], number), new DocValueFormat[0]);
InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number,
DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap())));
result.aggregations(aggs);
result.setShardIndex(id);
result.size(1);
consumer.consumeResult(result);
}
SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
InternalMax internalMax = (InternalMax) reduce.aggregations.asList().get(0);
assertEquals(max.get(), internalMax.getValue(), 0.0D);
assertEquals(0, reduce.scoreDocs.length);
assertEquals(max.get(), reduce.maxScore, 0.0f);
assertEquals(expectedNumResults, reduce.totalHits);
}

public void testConsumerOnlyHits() throws InterruptedException {
int expectedNumResults = randomIntBetween(1, 100);
int bufferSize = randomIntBetween(2, 200);
SearchRequest request = new SearchRequest();
if (randomBoolean()) {
request.source(new SearchSourceBuilder().size(randomIntBetween(1, 10)));
}
request.setBatchedReduceSize(bufferSize);
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> consumer =
searchPhaseController.newSearchPhaseResults(request, expectedNumResults);
AtomicInteger max = new AtomicInteger();
for (int i = 0; i < expectedNumResults; i++) {
int id = i;
int number = randomIntBetween(1, 1000);
max.updateAndGet(prev -> Math.max(prev, number));
QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id));
result.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(0, number)}, number), new DocValueFormat[0]);
result.setShardIndex(id);
result.size(1);
consumer.consumeResult(result);
}
SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
assertEquals(1, reduce.scoreDocs.length);
assertEquals(max.get(), reduce.maxScore, 0.0f);
assertEquals(expectedNumResults, reduce.totalHits);
assertEquals(max.get(), reduce.scoreDocs[0].score, 0.0f);
}

public void testNewSearchPhaseResults() {
for (int i = 0; i < 10; i++) {
int expectedNumResults = randomIntBetween(1, 10);

@@ -342,10 +435,22 @@ public class SearchPhaseControllerTests extends ESTestCase {
if ((hasAggs = randomBoolean())) {
request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")));
}
final boolean hasTopDocs;
if ((hasTopDocs = randomBoolean())) {
if (request.source() != null) {
request.source().size(randomIntBetween(1, 100));
} // no source means size = 10
} else {
if (request.source() == null) {
request.source(new SearchSourceBuilder().size(0));
} else {
request.source().size(0);
}
}
request.setBatchedReduceSize(bufferSize);
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> consumer
= searchPhaseController.newSearchPhaseResults(request, expectedNumResults);
if (hasAggs && expectedNumResults > bufferSize) {
if ((hasAggs || hasTopDocs) && expectedNumResults > bufferSize) {
|
||||
assertThat("expectedNumResults: " + expectedNumResults + " bufferSize: " + bufferSize,
|
||||
consumer, instanceOf(SearchPhaseController.QueryPhaseResultConsumer.class));
|
||||
} else {
|
||||
|
@ -354,4 +459,36 @@ public class SearchPhaseControllerTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testReduceTopNWithFromOffset() {
|
||||
SearchRequest request = new SearchRequest();
|
||||
request.source(new SearchSourceBuilder().size(5).from(5));
|
||||
request.setBatchedReduceSize(randomIntBetween(2, 4));
|
||||
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> consumer =
|
||||
searchPhaseController.newSearchPhaseResults(request, 4);
|
||||
int score = 100;
|
||||
for (int i = 0; i < 4; i++) {
|
||||
QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new Index("a", "b"), i));
|
||||
ScoreDoc[] docs = new ScoreDoc[3];
|
||||
for (int j = 0; j < docs.length; j++) {
|
||||
docs[j] = new ScoreDoc(0, score--);
|
||||
}
|
||||
result.topDocs(new TopDocs(3, docs, docs[0].score), new DocValueFormat[0]);
|
||||
result.setShardIndex(i);
|
||||
result.size(5);
|
||||
result.from(5);
|
||||
consumer.consumeResult(result);
|
||||
}
|
||||
// 4*3 results = 12 we get result 5 to 10 here with from=5 and size=5
|
||||
|
||||
SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
|
||||
assertEquals(5, reduce.scoreDocs.length);
|
||||
assertEquals(100.f, reduce.maxScore, 0.0f);
|
||||
assertEquals(12, reduce.totalHits);
|
||||
assertEquals(95.0f, reduce.scoreDocs[0].score, 0.0f);
|
||||
assertEquals(94.0f, reduce.scoreDocs[1].score, 0.0f);
|
||||
assertEquals(93.0f, reduce.scoreDocs[2].score, 0.0f);
|
||||
assertEquals(92.0f, reduce.scoreDocs[3].score, 0.0f);
|
||||
assertEquals(91.0f, reduce.scoreDocs[4].score, 0.0f);
|
||||
}
|
||||
}
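Note for readers of the tests above: whether `newSearchPhaseResults` hands back the buffering `QueryPhaseResultConsumer` depends on the batched reduce size being smaller than the number of shard results and on the request carrying aggregations or top docs. The sketch below is illustrative only and reuses names from the tests; `numShards` is a placeholder for the expected number of shard results.

[source,java]
--------------------------------------------------
// Illustrative sketch, not part of the change: mirrors the set-up used in the tests above.
SearchRequest request = new SearchRequest();
request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0));
request.setBatchedReduceSize(2); // buffer at most 2 shard results before a partial reduce
InitialSearchPhase.SearchPhaseResults<SearchPhaseResult> consumer =
    searchPhaseController.newSearchPhaseResults(request, numShards); // numShards is a placeholder
// each shard result is fed via consumer.consumeResult(querySearchResult);
// consumer.reduce() then produces the SearchPhaseController.ReducedQueryPhase
--------------------------------------------------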
@ -248,8 +248,8 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
logger.info("--> start back the 2 nodes ");
String[] newNodes = internalCluster().startNodes(2, settings).stream().toArray(String[]::new);

ensureGreen();
internalCluster().validateClusterFormed();
ensureGreen();

state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.nodes().getSize(), equalTo(4));

@ -454,7 +454,7 @@ public class ScopedSettingsTests extends ESTestCase {
assertThat(e.getMessage(), startsWith("unknown secure setting [some.secure.setting]"));

ClusterSettings clusterSettings2 = new ClusterSettings(settings,
Collections.singleton(SecureSetting.secureString("some.secure.setting", null, false)));
Collections.singleton(SecureSetting.secureString("some.secure.setting", null)));
clusterSettings2.validate(settings);
}

@ -463,7 +463,7 @@ public class ScopedSettingsTests extends ESTestCase {
secureSettings.setString("some.secure.setting", "secret");
Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY,
Collections.singleton(SecureSetting.secureString("some.secure.setting", null, false)));
Collections.singleton(SecureSetting.secureString("some.secure.setting", null)));

Settings diffed = clusterSettings.diff(Settings.EMPTY, settings);
assertTrue(diffed.isEmpty());

@ -555,4 +555,11 @@ public class SettingsTests extends ESTestCase {
MockSecureSettings secureSettings = new MockSecureSettings();
assertTrue(Settings.builder().setSecureSettings(secureSettings).build().isEmpty());
}

public void testSecureSettingConflict() {
Setting<SecureString> setting = SecureSetting.secureString("something.secure", null);
Settings settings = Settings.builder().put("something.secure", "notreallysecure").build();
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings));
assertTrue(e.getMessage().contains("must be stored inside the Elasticsearch keystore"));
}
}
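Aside on the `SecureSetting.secureString` change exercised in the three hunks above: the trailing boolean argument is gone, and reading a secure setting supplied as a plain settings entry now fails. A minimal sketch, assuming the same test helper (`MockSecureSettings`) used above:

[source,java]
--------------------------------------------------
// Illustrative sketch only, based on the updated two-argument form shown above.
Setting<SecureString> secureSetting = SecureSetting.secureString("some.secure.setting", null);

// Value supplied through the (mock) keystore: resolves normally.
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("some.secure.setting", "secret");
Settings fromKeystore = Settings.builder().setSecureSettings(secureSettings).build();
String value = secureSetting.get(fromKeystore).toString(); // "secret"

// Value supplied as a regular setting: rejected with IllegalArgumentException
// ("... must be stored inside the Elasticsearch keystore").
Settings plain = Settings.builder().put("some.secure.setting", "notreallysecure").build();
--------------------------------------------------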
@ -1309,8 +1309,9 @@ public class InternalEngineTests extends ESTestCase {
|
|||
assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class));
|
||||
}
|
||||
|
||||
protected List<Engine.Operation> generateSingleDocHistory(boolean forReplica, boolean externalVersioning, boolean partialOldPrimary,
|
||||
long primaryTerm, int minOpCount, int maxOpCount) {
|
||||
protected List<Engine.Operation> generateSingleDocHistory(boolean forReplica, VersionType versionType,
|
||||
boolean partialOldPrimary, long primaryTerm,
|
||||
int minOpCount, int maxOpCount) {
|
||||
final int numOfOps = randomIntBetween(minOpCount, maxOpCount);
|
||||
final List<Engine.Operation> ops = new ArrayList<>();
|
||||
final Term id = newUid(Uid.createUid("test", "1"));
|
||||
|
@ -1322,14 +1323,30 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
final String valuePrefix = forReplica ? "r_" : "p_";
|
||||
final boolean incrementTermWhenIntroducingSeqNo = randomBoolean();
|
||||
final VersionType versionType = externalVersioning ? VersionType.EXTERNAL : VersionType.INTERNAL;
|
||||
for (int i = 0; i < numOfOps; i++) {
|
||||
final Engine.Operation op;
|
||||
final long version;
|
||||
switch (versionType) {
|
||||
case INTERNAL:
|
||||
version = forReplica ? i : Versions.MATCH_ANY;
|
||||
break;
|
||||
case EXTERNAL:
|
||||
version = i;
|
||||
break;
|
||||
case EXTERNAL_GTE:
|
||||
version = randomBoolean() ? Math.max(i - 1, 0) : i;
|
||||
break;
|
||||
case FORCE:
|
||||
version = randomNonNegativeLong();
|
||||
break;
|
||||
default:
|
||||
throw new UnsupportedOperationException("unknown version type: " + versionType);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
op = new Engine.Index(id, testParsedDocument("1", "test", null, testDocumentWithTextField(valuePrefix + i), B_1, null),
|
||||
forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbersService.UNASSIGNED_SEQ_NO,
|
||||
forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm,
|
||||
forReplica || externalVersioning ? i : Versions.MATCH_ANY,
|
||||
version,
|
||||
forReplica ? versionType.versionTypeForReplicationAndRecovery() : versionType,
|
||||
forReplica ? REPLICA : PRIMARY,
|
||||
System.currentTimeMillis(), -1, false
|
||||
|
@ -1338,7 +1355,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
op = new Engine.Delete("test", "1", id,
|
||||
forReplica && i >= startWithSeqNo ? i * 2 : SequenceNumbersService.UNASSIGNED_SEQ_NO,
|
||||
forReplica && i >= startWithSeqNo && incrementTermWhenIntroducingSeqNo ? primaryTerm + 1 : primaryTerm,
|
||||
forReplica || externalVersioning ? i : Versions.MATCH_ANY,
|
||||
version,
|
||||
forReplica ? versionType.versionTypeForReplicationAndRecovery() : versionType,
|
||||
forReplica ? REPLICA : PRIMARY,
|
||||
System.currentTimeMillis());
|
||||
|
@ -1349,10 +1366,20 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testOutOfOrderDocsOnReplica() throws IOException {
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(true, true, false, 2, 2, 20);
|
||||
assertOpsOnReplica(ops, replicaEngine);
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(true,
|
||||
randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 2, 20);
|
||||
assertOpsOnReplica(ops, replicaEngine, true);
|
||||
}
|
||||
|
||||
public void testNonStandardVersioningOnReplica() throws IOException {
|
||||
// TODO: this can be folded into testOutOfOrderDocsOnReplica once out of order
|
||||
// is detected using seq#
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(true,
|
||||
randomFrom(VersionType.EXTERNAL_GTE, VersionType.FORCE), false, 2, 2, 20);
|
||||
assertOpsOnReplica(ops, replicaEngine, false);
|
||||
}
|
||||
|
||||
|
||||
public void testOutOfOrderDocsOnReplicaOldPrimary() throws IOException {
|
||||
IndexSettings oldSettings = IndexSettingsModule.newIndexSettings("testOld", Settings.builder()
|
||||
.put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us
|
||||
|
@ -1365,12 +1392,12 @@ public class InternalEngineTests extends ESTestCase {
|
|||
try (Store oldReplicaStore = createStore();
|
||||
InternalEngine replicaEngine =
|
||||
createEngine(oldSettings, oldReplicaStore, createTempDir("translog-old-replica"), newMergePolicy())) {
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(true, true, true, 2, 2, 20);
|
||||
assertOpsOnReplica(ops, replicaEngine);
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), true, 2, 2, 20);
|
||||
assertOpsOnReplica(ops, replicaEngine, true);
|
||||
}
|
||||
}
|
||||
|
||||
private void assertOpsOnReplica(List<Engine.Operation> ops, InternalEngine replicaEngine) throws IOException {
|
||||
private void assertOpsOnReplica(List<Engine.Operation> ops, InternalEngine replicaEngine, boolean shuffleOps) throws IOException {
|
||||
final Engine.Operation lastOp = ops.get(ops.size() - 1);
|
||||
final String lastFieldValue;
|
||||
if (lastOp instanceof Engine.Index) {
|
||||
|
@ -1380,13 +1407,15 @@ public class InternalEngineTests extends ESTestCase {
|
|||
// delete
|
||||
lastFieldValue = null;
|
||||
}
|
||||
int firstOpWithSeqNo = 0;
|
||||
while (firstOpWithSeqNo < ops.size() && ops.get(firstOpWithSeqNo).seqNo() < 0) {
|
||||
firstOpWithSeqNo++;
|
||||
if (shuffleOps) {
|
||||
int firstOpWithSeqNo = 0;
|
||||
while (firstOpWithSeqNo < ops.size() && ops.get(firstOpWithSeqNo).seqNo() < 0) {
|
||||
firstOpWithSeqNo++;
|
||||
}
|
||||
// shuffle ops but make sure legacy ops are first
|
||||
shuffle(ops.subList(0, firstOpWithSeqNo), random());
|
||||
shuffle(ops.subList(firstOpWithSeqNo, ops.size()), random());
|
||||
}
|
||||
// shuffle ops but make sure legacy ops are first
|
||||
shuffle(ops.subList(0, firstOpWithSeqNo), random());
|
||||
shuffle(ops.subList(firstOpWithSeqNo, ops.size()), random());
|
||||
boolean firstOp = true;
|
||||
for (Engine.Operation op : ops) {
|
||||
logger.info("performing [{}], v [{}], seq# [{}], term [{}]",
|
||||
|
@ -1432,7 +1461,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testConcurrentOutOfDocsOnReplica() throws IOException, InterruptedException {
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(true, true, false, 2, 100, 300);
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(true, randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 100, 300);
|
||||
final Engine.Operation lastOp = ops.get(ops.size() - 1);
|
||||
final String lastFieldValue;
|
||||
if (lastOp instanceof Engine.Index) {
|
||||
|
@ -1492,7 +1521,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testInternalVersioningOnPrimary() throws IOException {
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(false, false, false, 2, 2, 20);
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.INTERNAL, false, 2, 2, 20);
|
||||
assertOpsOnPrimary(ops, Versions.NOT_FOUND, true, engine);
|
||||
}
|
||||
|
||||
|
@ -1560,7 +1589,9 @@ public class InternalEngineTests extends ESTestCase {
|
|||
if (randomBoolean()) {
|
||||
// refresh and take the chance to check everything is ok so far
|
||||
assertVisibleCount(engine, docDeleted ? 0 : 1);
|
||||
if (docDeleted == false) {
|
||||
// even if doc is not not deleted, lastFieldValue can still be null if this is the
|
||||
// first op and it failed.
|
||||
if (docDeleted == false && lastFieldValue != null) {
|
||||
try (Searcher searcher = engine.acquireSearcher("test")) {
|
||||
final TotalHitCountCollector collector = new TotalHitCountCollector();
|
||||
searcher.searcher().search(new TermQuery(new Term("value", lastFieldValue)), collector);
|
||||
|
@ -1593,8 +1624,11 @@ public class InternalEngineTests extends ESTestCase {
|
|||
return opsPerformed;
|
||||
}
|
||||
|
||||
public void testExternalVersioningOnPrimary() throws IOException {
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(false, true, false, 2, 2, 20);
|
||||
public void testNonInternalVersioningOnPrimary() throws IOException {
|
||||
final Set<VersionType> nonInternalVersioning = new HashSet<>(Arrays.asList(VersionType.values()));
|
||||
nonInternalVersioning.remove(VersionType.INTERNAL);
|
||||
final VersionType versionType = randomFrom(nonInternalVersioning);
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(false, versionType, false, 2, 2, 20);
|
||||
final Engine.Operation lastOp = ops.get(ops.size() - 1);
|
||||
final String lastFieldValue;
|
||||
if (lastOp instanceof Engine.Index) {
|
||||
|
@ -1604,7 +1638,10 @@ public class InternalEngineTests extends ESTestCase {
|
|||
// delete
|
||||
lastFieldValue = null;
|
||||
}
|
||||
shuffle(ops, random());
|
||||
// other version types don't support out of order processing.
|
||||
if (versionType == VersionType.EXTERNAL) {
|
||||
shuffle(ops, random());
|
||||
}
|
||||
long highestOpVersion = Versions.NOT_FOUND;
|
||||
long seqNo = -1;
|
||||
boolean docDeleted = true;
|
||||
|
@ -1614,7 +1651,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
if (op instanceof Engine.Index) {
|
||||
final Engine.Index index = (Engine.Index) op;
|
||||
Engine.IndexResult result = engine.index(index);
|
||||
if (op.version() > highestOpVersion) {
|
||||
if (op.versionType().isVersionConflictForWrites(highestOpVersion, op.version(), docDeleted) == false) {
|
||||
seqNo++;
|
||||
assertThat(result.getSeqNo(), equalTo(seqNo));
|
||||
assertThat(result.isCreated(), equalTo(docDeleted));
|
||||
|
@ -1632,7 +1669,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
} else {
|
||||
final Engine.Delete delete = (Engine.Delete) op;
|
||||
Engine.DeleteResult result = engine.delete(delete);
|
||||
if (op.version() > highestOpVersion) {
|
||||
if (op.versionType().isVersionConflictForWrites(highestOpVersion, op.version(), docDeleted) == false) {
|
||||
seqNo++;
|
||||
assertThat(result.getSeqNo(), equalTo(seqNo));
|
||||
assertThat(result.isFound(), equalTo(docDeleted == false));
|
||||
|
@ -1658,6 +1695,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
|
||||
assertVisibleCount(engine, docDeleted ? 0 : 1);
|
||||
if (docDeleted == false) {
|
||||
logger.info("searching for [{}]", lastFieldValue);
|
||||
try (Searcher searcher = engine.acquireSearcher("test")) {
|
||||
final TotalHitCountCollector collector = new TotalHitCountCollector();
|
||||
searcher.searcher().search(new TermQuery(new Term("value", lastFieldValue)), collector);
|
||||
|
@ -1667,13 +1705,13 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testVersioningPromotedReplica() throws IOException {
|
||||
final List<Engine.Operation> replicaOps = generateSingleDocHistory(true, true, false, 1, 2, 20);
|
||||
List<Engine.Operation> primaryOps = generateSingleDocHistory(false, false, false, 2, 2, 20);
|
||||
final List<Engine.Operation> replicaOps = generateSingleDocHistory(true, VersionType.INTERNAL, false, 1, 2, 20);
|
||||
List<Engine.Operation> primaryOps = generateSingleDocHistory(false, VersionType.INTERNAL, false, 2, 2, 20);
|
||||
Engine.Operation lastReplicaOp = replicaOps.get(replicaOps.size() - 1);
|
||||
final boolean deletedOnReplica = lastReplicaOp instanceof Engine.Delete;
|
||||
final long finalReplicaVersion = lastReplicaOp.version();
|
||||
final long finalReplicaSeqNo = lastReplicaOp.seqNo();
|
||||
assertOpsOnReplica(replicaOps, replicaEngine);
|
||||
assertOpsOnReplica(replicaOps, replicaEngine, true);
|
||||
final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine);
|
||||
final long currentSeqNo = getSequenceID(replicaEngine, new Engine.Get(false, lastReplicaOp.uid())).v1();
|
||||
try (Searcher searcher = engine.acquireSearcher("test")) {
|
||||
|
@ -1687,7 +1725,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testConcurrentExternalVersioningOnPrimary() throws IOException, InterruptedException {
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(false, true, false, 2, 100, 300);
|
||||
final List<Engine.Operation> ops = generateSingleDocHistory(false, VersionType.EXTERNAL, false, 2, 100, 300);
|
||||
final Engine.Operation lastOp = ops.get(ops.size() - 1);
|
||||
final String lastFieldValue;
|
||||
if (lastOp instanceof Engine.Index) {
|
||||
|
|
|
@ -178,7 +178,7 @@ public class InternalSettingsPreparerTests extends ESTestCase {
secureSettings.setString("foo", "secret");
Settings input = Settings.builder().put(baseEnvSettings).setSecureSettings(secureSettings).build();
Environment env = InternalSettingsPreparer.prepareEnvironment(input, null);
Setting<SecureString> fakeSetting = SecureSetting.secureString("foo", null, false);
Setting<SecureString> fakeSetting = SecureSetting.secureString("foo", null);
assertEquals("secret", fakeSetting.get(env.settings()).toString());
}

@ -19,18 +19,22 @@

package org.elasticsearch.plugins;

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.inject.AbstractModule;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasToString;

@LuceneTestCase.SuppressFileSystems(value = "ExtrasFS")
public class PluginsServiceTests extends ESTestCase {
public static class AdditionalSettingsPlugin1 extends Plugin {
@Override

@ -85,7 +89,7 @@ public class PluginsServiceTests extends ESTestCase {
PluginsService.getPluginBundles(pluginsDir);
fail();
} catch (IllegalStateException e) {
assertTrue(e.getMessage(), e.getMessage().contains("Could not load plugin descriptor for existing plugin"));
assertTrue(e.getMessage(), e.getMessage().contains("Could not load plugin descriptor for existing plugin [plugin-missing-descriptor]"));
}
}

@ -99,4 +103,22 @@ public class PluginsServiceTests extends ESTestCase {
assertEquals(1, scriptPlugins.size());
assertEquals(FilterablePlugin.class, scriptPlugins.get(0).getClass());
}

public void testHiddenFiles() throws IOException {
final Path home = createTempDir();
final Settings settings =
Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), home)
.build();
final Path hidden = home.resolve("plugins").resolve(".hidden");
Files.createDirectories(hidden);
@SuppressWarnings("unchecked")
final IllegalStateException e = expectThrows(
IllegalStateException.class,
() -> newPluginsService(settings));

final String expected = "Could not load plugin descriptor for existing plugin [.hidden]";
assertThat(e, hasToString(containsString(expected)));
}

}
@ -223,8 +223,13 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f),
null);
// the search context should inherit the default timeout
assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5)));
try {
// the search context should inherit the default timeout
assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5)));
} finally {
contextWithDefaultTimeout.decRef();
service.freeContext(contextWithDefaultTimeout.id());
}

final long seconds = randomIntBetween(6, 10);
final SearchContext context = service.createContext(

@ -238,8 +243,14 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f),
null);
// the search context should inherit the query timeout
assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds)));
try {
// the search context should inherit the query timeout
assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds)));
} finally {
context.decRef();
service.freeContext(context.id());
}

}

public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin {
@ -88,7 +88,7 @@ public class DateHistogramOffsetIT extends ESIntegTestCase {
assertThat(response.getHits().getTotalHits(), equalTo(5L));

Histogram histo = response.getAggregations().get("date_histo");
List<Histogram.Bucket> buckets = histo.getBuckets();
List<? extends Histogram.Bucket> buckets = histo.getBuckets();
assertThat(buckets.size(), equalTo(2));

checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 2, 0, DateTimeZone.UTC), 2L);

@ -157,7 +157,7 @@ public class GeoHashGridIT extends ESIntegTestCase {
assertSearchResponse(response);

GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
List<Bucket> buckets = geoGrid.getBuckets();
List<? extends Bucket> buckets = geoGrid.getBuckets();
Object[] propertiesKeys = (Object[]) ((InternalAggregation)geoGrid).getProperty("_key");
Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)geoGrid).getProperty("_count");
for (int i = 0; i < buckets.size(); i++) {

@ -990,7 +990,7 @@ public class HistogramIT extends ESIntegTestCase {
assertSearchResponse(r);

Histogram histogram = r.getAggregations().get("histo");
List<Bucket> buckets = histogram.getBuckets();
List<? extends Bucket> buckets = histogram.getBuckets();
assertEquals(2, buckets.size());
assertEquals(-0.65, (double) buckets.get(0).getKey(), 0.01d);
assertEquals(1, buckets.get(0).getDocCount());
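The hunks above (and several that follow) make the same mechanical change: `getBuckets()` now returns a wildcard-bounded list, so callers declare `List<? extends ...Bucket>`. A minimal consuming sketch, assuming a `SearchResponse` named `response` with a histogram aggregation called "histo" as in the tests:

[source,java]
--------------------------------------------------
// Illustrative sketch only: read access is unaffected by the wildcard bound.
Histogram histo = response.getAggregations().get("histo");
List<? extends Histogram.Bucket> buckets = histo.getBuckets();
for (Histogram.Bucket bucket : buckets) {
    long docCount = bucket.getDocCount(); // reading bucket properties works as before
}
// Adding to `buckets` would no longer compile, which is the point of the bound.
--------------------------------------------------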
@ -105,7 +105,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
testBothCases(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset,
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(3, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
@ -128,7 +128,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"),
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.MONTH).field(DATE_FIELD),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(3, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
@ -159,7 +159,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
),
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.DAY).field(DATE_FIELD).minDocCount(1L),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(4, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
@ -197,7 +197,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
),
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.HOUR).field(DATE_FIELD).minDocCount(1L),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(6, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
@ -238,7 +238,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
),
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.MINUTE).field(DATE_FIELD).minDocCount(1L),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(3, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
@ -268,7 +268,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
),
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.SECOND).field(DATE_FIELD).minDocCount(1L),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(3, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
@ -300,7 +300,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
testSearchAndReduceCase(query, timestamps,
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(0L),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(4, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
@ -325,7 +325,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase {
|
|||
testSearchAndReduceCase(query, timestamps,
|
||||
aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(3L),
|
||||
histogram -> {
|
||||
List<Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
List<? extends Histogram.Bucket> buckets = histogram.getBuckets();
|
||||
assertEquals(1, buckets.size());
|
||||
|
||||
Histogram.Bucket bucket = buckets.get(0);
|
||||
|
|
|
@ -157,9 +157,8 @@ public class GeoCentroidIT extends AbstractGeoTestCase {
GeoHashGrid grid = response.getAggregations().get("geoGrid");
assertThat(grid, notNullValue());
assertThat(grid.getName(), equalTo("geoGrid"));
List<GeoHashGrid.Bucket> buckets = grid.getBuckets();
for (int i=0; i < buckets.size(); ++i) {
GeoHashGrid.Bucket cell = buckets.get(i);
List<? extends GeoHashGrid.Bucket> buckets = grid.getBuckets();
for (GeoHashGrid.Bucket cell : buckets) {
String geohash = cell.getKeyAsString();
GeoPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash);
GeoCentroid centroidAgg = cell.getAggregations().get(aggName);
|
@ -23,6 +23,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
|
|||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.index.mapper.DateFieldMapper;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
|
||||
|
@ -381,7 +382,8 @@ public class DateDerivativeIT extends ESIntegTestCase {
|
|||
deriv = bucket.getAggregations().get("deriv");
|
||||
assertThat(deriv, notNullValue());
|
||||
assertThat(deriv.value(), equalTo(4.0));
|
||||
assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0));
|
||||
assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty(
|
||||
"histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0));
|
||||
assertThat((DateTime) propertiesKeys[1], equalTo(key));
|
||||
assertThat((long) propertiesDocCounts[1], equalTo(2L));
|
||||
assertThat((double) propertiesCounts[1], equalTo(5.0));
|
||||
|
@ -398,7 +400,8 @@ public class DateDerivativeIT extends ESIntegTestCase {
|
|||
deriv = bucket.getAggregations().get("deriv");
|
||||
assertThat(deriv, notNullValue());
|
||||
assertThat(deriv.value(), equalTo(10.0));
|
||||
assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0));
|
||||
assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty(
|
||||
"histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0));
|
||||
assertThat((DateTime) propertiesKeys[2], equalTo(key));
|
||||
assertThat((long) propertiesDocCounts[2], equalTo(3L));
|
||||
assertThat((double) propertiesCounts[2], equalTo(15.0));
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.action.search.SearchResponse;
|
|||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
|
||||
import org.elasticsearch.search.aggregations.metrics.stats.Stats;
|
||||
|
@ -279,7 +280,8 @@ public class DerivativeIT extends ESIntegTestCase {
|
|||
assertThat(sumDeriv, notNullValue());
|
||||
long sumDerivValue = expectedSum - expectedSumPreviousBucket;
|
||||
assertThat(sumDeriv.value(), equalTo((double) sumDerivValue));
|
||||
assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
|
||||
assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty("histo",
|
||||
AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
|
||||
equalTo((double) sumDerivValue));
|
||||
} else {
|
||||
assertThat(sumDeriv, nullValue());
|
||||
|
@ -324,7 +326,8 @@ public class DerivativeIT extends ESIntegTestCase {
|
|||
assertThat(sumDeriv, notNullValue());
|
||||
long sumDerivValue = expectedSum - expectedSumPreviousBucket;
|
||||
assertThat(sumDeriv.value(), equalTo((double) sumDerivValue));
|
||||
assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
|
||||
assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty("histo",
|
||||
AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
|
||||
equalTo((double) sumDerivValue));
|
||||
} else {
|
||||
assertThat(sumDeriv, nullValue());
|
||||
|
@ -451,7 +454,7 @@ public class DerivativeIT extends ESIntegTestCase {
|
|||
Histogram deriv = searchResponse.getAggregations().get("histo");
|
||||
assertThat(deriv, Matchers.notNullValue());
|
||||
assertThat(deriv.getName(), equalTo("histo"));
|
||||
List<Bucket> buckets = deriv.getBuckets();
|
||||
List<? extends Bucket> buckets = deriv.getBuckets();
|
||||
assertThat(buckets.size(), equalTo(valueCounts_empty.length));
|
||||
|
||||
for (int i = 0; i < valueCounts_empty.length; i++) {
|
||||
|
@ -480,7 +483,7 @@ public class DerivativeIT extends ESIntegTestCase {
|
|||
Histogram deriv = searchResponse.getAggregations().get("histo");
|
||||
assertThat(deriv, Matchers.notNullValue());
|
||||
assertThat(deriv.getName(), equalTo("histo"));
|
||||
List<Bucket> buckets = deriv.getBuckets();
|
||||
List<? extends Bucket> buckets = deriv.getBuckets();
|
||||
assertThat(buckets.size(), equalTo(valueCounts_empty.length));
|
||||
|
||||
double lastSumValue = Double.NaN;
|
||||
|
@ -522,7 +525,7 @@ public class DerivativeIT extends ESIntegTestCase {
|
|||
Histogram deriv = searchResponse.getAggregations().get("histo");
|
||||
assertThat(deriv, Matchers.notNullValue());
|
||||
assertThat(deriv.getName(), equalTo("histo"));
|
||||
List<Bucket> buckets = deriv.getBuckets();
|
||||
List<? extends Bucket> buckets = deriv.getBuckets();
|
||||
assertThat(buckets.size(), equalTo(valueCounts_empty.length));
|
||||
|
||||
double lastSumValue = Double.NaN;
|
||||
|
@ -561,7 +564,7 @@ public class DerivativeIT extends ESIntegTestCase {
|
|||
Histogram deriv = searchResponse.getAggregations().get("histo");
|
||||
assertThat(deriv, Matchers.notNullValue());
|
||||
assertThat(deriv.getName(), equalTo("histo"));
|
||||
List<Bucket> buckets = deriv.getBuckets();
|
||||
List<? extends Bucket> buckets = deriv.getBuckets();
|
||||
assertThat(buckets.size(), equalTo(numBuckets_empty_rnd));
|
||||
|
||||
double lastSumValue = Double.NaN;
|
||||
|
|
|
@ -49,12 +49,12 @@ Collection distributions = project.subprojects.findAll {

// integ test zip only uses core, so a different notice file is needed there
task buildCoreNotice(type: NoticeTask) {
dependencies project(':core')
licensesDir new File(project(':core').projectDir, 'licenses')
}

// other distributions include notices from modules as well, which are added below later
task buildFullNotice(type: NoticeTask) {
dependencies project(':core')
licensesDir new File(project(':core').projectDir, 'licenses')
}

/*****************************************************************************

@ -73,7 +73,10 @@ ext.restTestExpansions = [
// loop over modules to also setup cross task dependencies and increment our modules counter
project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each { Project module ->
buildFullNotice {
dependencies module
def defaultLicensesDir = new File(module.projectDir, 'licenses')
if (defaultLicensesDir.exists()) {
licensesDir defaultLicensesDir
}
}
buildModules {
dependsOn({ project(module.path).bundlePlugin })
@ -87,7 +87,6 @@ buildRestTests.expectedUnconvertedCandidates = [
'reference/mapping/types/nested.asciidoc',
'reference/mapping/types/object.asciidoc',
'reference/mapping/types/percolator.asciidoc',
'reference/modules/scripting/security.asciidoc',
'reference/modules/cross-cluster-search.asciidoc', // this is hard to test since we need 2 clusters -- maybe we can trick it into referencing itself...
'reference/search/field-stats.asciidoc',
'reference/search/profile.asciidoc',
@ -44,12 +44,12 @@ You need to also include Log4j 2 dependencies:
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-api</artifactId>
<version>2.7</version>
<version>2.8.2</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>2.7</version>
<version>2.8.2</version>
</dependency>
--------------------------------------------------

@ -77,12 +77,12 @@ If you want to use another logger than Log4j 2, you can use http://www.slf4j.org
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-to-slf4j</artifactId>
<version>2.7</version>
<version>2.8.2</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.21</version>
<version>1.7.24</version>
</dependency>
--------------------------------------------------

@ -76,6 +76,7 @@ Read more in {ref}/integration-tests.html#changing-node-configuration[Changing N


[float]
[[plugin-authors-jsm]]
=== Java Security permissions

Some plugins may need additional security permissions. A plugin can include

@ -111,4 +112,3 @@ AccessController.doPrivileged(

See http://www.oracle.com/technetwork/java/seccodeguide-139067.html[Secure Coding Guidelines for Java SE]
for more information.

@ -21,6 +21,8 @@ region inside the repository settings. Instead, specify the full endpoint if a c
s3 location is needed, or rely on the default behavior which automatically locates
the region of the configured bucket.

* Specifying s3 signer type has been removed, including `cloud.aws.signer` and `cloud.aws.s3.signer`.

==== Azure Repository plugin

* The container an azure repository is configured with will no longer be created automatically.

@ -33,3 +35,15 @@ name space have been removed. This includes `repositories.azure.account`, `repos
You must set those settings per repository instead. Respectively `account`, `container`, `base_path`,
`location_mode`, `chunk_size` and `compress`.
See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings].

==== EC2 Discovery plugin

* Specifying ec2 signer type has been removed, including `cloud.aws.signer` and `cloud.aws.ec2.signer`.

* The region setting has been removed. This includes the settings `cloud.aws.region`
and `cloud.aws.ec2.region`. Instead, specify the full endpoint.

==== Ignoring hidden folders

Previous versions of Elasticsearch would skip hidden files and directories when
scanning the plugins folder. This leniency has been removed.
@ -1,73 +1,115 @@
|
|||
[[modules-scripting-security]]
|
||||
=== Scripting and security
|
||||
|
||||
You should never run Elasticsearch as the `root` user, as this would allow a
|
||||
script to access or do *anything* on your server, without limitations.
|
||||
While Elasticsearch contributors make every effort to prevent scripts from
|
||||
running amok, security is something best done in
|
||||
https://en.wikipedia.org/wiki/Defense_in_depth_(computing)[layers] because
|
||||
all software has bugs and it is important to minimize the risk of failure in
|
||||
any security layer. Find below rules of thumb for how to keep Elasticsearch
|
||||
from being a vulnerability.
|
||||
|
||||
You should not expose Elasticsearch directly to users, but instead have a
|
||||
proxy application inbetween. If you *do* intend to expose Elasticsearch
|
||||
directly to your users, then you have to decide whether you trust them enough
|
||||
to run scripts on your box or not, and apply the appropriate safety measures.
|
||||
|
||||
[[enable-dynamic-scripting]]
|
||||
[float]
|
||||
=== Enabling dynamic scripting
|
||||
=== Do not run as root
|
||||
First and foremost, never run Elasticsearch as the `root` user as this would
|
||||
allow any successful effort to circumvent the other security layers to do
|
||||
*anything* on your server. Elasticsearch will refuse to start if it detects
|
||||
that it is running as `root` but this is so important that it is worth double
|
||||
and triple checking.
|
||||
|
||||
The `script.*` settings allow for <<security-script-fine,fine-grained>>
|
||||
control of which script languages (e.g `painless`) are allowed to
|
||||
run in which context ( e.g. `search`, `aggs`, `update`), and where the script
|
||||
source is allowed to come from (i.e. `inline`, `stored`, `file`).
|
||||
[float]
|
||||
=== Do not expose Elasticsearch directly to users
|
||||
Do not expose Elasticsearch directly to users, instead have an application
|
||||
make requests on behalf of users. If this is not possible, have an application
|
||||
to sanitize requests from users. If *that* is not possible then have some
|
||||
mechanism to track which users did what. Understand that it is quite possible
|
||||
to write a <<search, `_search`>> that overwhelms Elasticsearch and brings down
|
||||
the cluster. All such searches should be considered bugs and the Elasticsearch
|
||||
contributors make an effort to prevent this but they are still possible.
|
||||
|
||||
For instance, the following setting enables `stored` `update` scripts for
|
||||
`painless`:
|
||||
[float]
|
||||
=== Do not expose Elasticsearch directly to the Internet
|
||||
Do not expose Elasticsearch to the Internet, instead have an application
|
||||
make requests on behalf of the Internet. Do not entertain the thought of having
|
||||
an application "sanitize" requests to Elasticsearch. Understand that it is
|
||||
possible for a sufficiently determined malicious user to write searches that
|
||||
overwhelm the Elasticsearch cluster and bring it down. For example:
|
||||
|
||||
[source,yaml]
|
||||
----------------
|
||||
script.engine.painless.inline.update: true
|
||||
----------------
|
||||
Good:
|
||||
* Users type text into a search box and the text is sent directly to a
|
||||
<<query-dsl-match-query>>, <<query-dsl-match-query-phrase>>,
|
||||
<<query-dsl-simple-query-string-query>>, or any of the <<search-suggesters>>.
|
||||
* Running a script with any of the above queries that was written as part of
|
||||
the application development process.
|
||||
* Running a script with `params` provided by users.
|
||||
* User actions makes documents with a fixed structure.
|
||||
|
||||
Less fine-grained settings exist which allow you to enable or disable scripts
|
||||
for all sources, all languages, or all contexts. The following settings
|
||||
enable `inline` and `stored` scripts for all languages in all contexts:
|
||||
Bad:
|
||||
* Users can write arbitrary scripts, queries, `_search` requests.
|
||||
* User actions make documents with structure defined by users.
|
||||
|
||||
[source,yaml]
|
||||
-----------------------------------
|
||||
script.inline: true
|
||||
script.stored: true
|
||||
-----------------------------------
|
||||
[float]
|
||||
[[modules-scripting-security-do-no-weaken]]
|
||||
=== Do not weaken script security settings
|
||||
By default Elasticsearch will run inline, stored, and filesystem scripts for
|
||||
sandboxed languages, namely the scripting language Painless, the template
|
||||
language Mustache, and the expression language Expressions. These *ought* to be
|
||||
safe to expose to trusted users and to your application servers because they
|
||||
have strong security sandboxes. By default Elasticsearch will only run
|
||||
filesystem scripts for non-sandboxed languages and enabling them is a poor
|
||||
choice because:
|
||||
1. This drops a layer of security, leaving only Elasticsearch's builtin
|
||||
<<modules-scripting-other-layers, security layers>>.
|
||||
2. Non-sandboxed scripts have unchecked access to Elasticsearch's internals and
|
||||
can cause all kinds of trouble if misused.
|
||||
|
||||
WARNING: The above settings mean that anybody who can send requests to your
|
||||
Elasticsearch instance can run whatever scripts they choose! This is a
|
||||
security risk and may well lead to your Elasticsearch cluster being
|
||||
compromised.
|
||||
|
||||
[float]
|
||||
[[modules-scripting-other-layers]]
|
||||
=== Other security layers
|
||||
In addition to user privileges and script sandboxing Elasticsearch uses the
|
||||
http://www.oracle.com/technetwork/java/seccodeguide-139067.html[Java Security Manager]
|
||||
and native security tools as additional layers of security.
|
||||
|
||||
As part of its startup sequence Elasticsearch enables the Java Security Manager
|
||||
which limits the actions that can be taken by portions of the code. Painless
|
||||
uses this to limit the actions that generated Painless scripts can take,
|
||||
preventing them from being able to do things like write files and listen to
|
||||
sockets.
|
||||
|
||||
Elasticsearch uses
|
||||
https://en.wikipedia.org/wiki/Seccomp[seccomp] in Linux,
|
||||
https://www.chromium.org/developers/design-documents/sandbox/osx-sandboxing-design[Seatbelt]
|
||||
in macOS, and
|
||||
https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147[ActiveProcessLimit]
|
||||
on Windows to prevent Elasticsearch from forking or executing other processes.
|
||||
|
||||
Below this we describe the security settings for scripts and how you can
|
||||
change from the defaults described above. You should be very, very careful
|
||||
when allowing more than the defaults. Any extra permissions weakens the total
|
||||
security of the Elasticsearch deployment.
|
||||
|
||||
[[security-script-source]]
|
||||
[float]
|
||||
=== Script source settings
|
||||
|
||||
Scripts may be enabled or disabled depending on their source: `inline`,
|
||||
`stored` in the cluster state, or from a `file` on each node in the cluster.
|
||||
Each of these settings takes one of these values:
|
||||
|
||||
|
||||
[horizontal]
|
||||
`false`:: Scripting is disabled.
|
||||
`true`:: Scripting is enabled.
|
||||
|
||||
The default values are the following:
|
||||
Which scripts Elasticsearch will execute where is controlled by settings
|
||||
starting with `scripts.`. The simplest settings allow scripts to be enabled
|
||||
or disabled based on where they are stored. For example:
|
||||
|
||||
[source,yaml]
|
||||
-----------------------------------
|
||||
script.inline: false
|
||||
script.stored: false
|
||||
script.file: true
|
||||
script.inline: false <1>
|
||||
script.stored: false <2>
|
||||
script.file: true <3>
|
||||
-----------------------------------
|
||||
<1> Refuse to run scripts provided inline in the API.
|
||||
<2> Refuse to run scripts stored using the API.
|
||||
<3> Run scripts found on the filesystem in `/etc/elasticsearch/scripts`
|
||||
(rpm or deb) or `config/scripts` (zip or tar).
|
||||
|
||||
NOTE: Global scripting settings affect the `mustache` scripting language.
|
||||
<<search-template,Search templates>> internally use the `mustache` language,
|
||||
and will still be enabled by default as the `mustache` engine is sandboxed,
|
||||
but they will be enabled/disabled according to fine-grained settings
|
||||
specified in `elasticsearch.yml`.
|
||||
NOTE: These settings override the defaults mentioned
|
||||
<<modules-scripting-security-do-no-weaken, above>>. Recreating the defaults
|
||||
requires more fine grained settings described <<security-script-fine, below>>.
|
||||
|
||||
[[security-script-context]]
|
||||
[float]
|
||||
|
@ -102,15 +144,13 @@ script.plugin: false
|
|||
=== Fine-grained script settings
|
||||
|
||||
First, the high-level script settings described above are applied in order
|
||||
(context settings have precedence over source settings). Then, fine-grained
|
||||
(context settings have precedence over source settings). Then fine-grained
|
||||
settings which include the script language take precedence over any high-level
|
||||
settings.
|
||||
|
||||
Fine-grained settings have the form:
|
||||
settings. They have two forms:
|
||||
|
||||
[source,yaml]
|
||||
------------------------
|
||||
script.engine.{lang}.{source}.{context}: true|false
|
||||
script.engine.{lang}.{inline|file|stored}.{context}: true|false
|
||||
------------------------
|
||||
|
||||
And
|
||||
|
@ -132,124 +172,9 @@ script.engine.painless.inline: true <2>
|
|||
script.engine.painless.stored.search: true <3>
|
||||
script.engine.painless.stored.aggs: true <3>
|
||||
|
||||
script.engine.mustache.stored.search: true <4>
|
||||
script.engine.mustache.stored.search: true <4>
|
||||
-----------------------------------
|
||||
<1> Disable all scripting from any source.
|
||||
<2> Allow inline Groovy scripts for all operations
|
||||
<3> Allow stored Groovy scripts to be used for search and aggregations.
|
||||
<2> Allow inline Painless scripts for all operations.
|
||||
<3> Allow stored Painless scripts to be used for search and aggregations.
|
||||
<4> Allow stored Mustache templates to be used for search.
|
||||
|
||||
[[java-security-manager]]
|
||||
[float]
|
||||
=== Java Security Manager
|
||||
|
||||
Elasticsearch runs with the https://docs.oracle.com/javase/tutorial/essential/environment/security.html[Java Security Manager]
|
||||
enabled by default. The security policy in Elasticsearch locks down the
|
||||
permissions granted to each class to the bare minimum required to operate.
|
||||
The benefit of doing this is that it severely limits the attack vectors
|
||||
available to a hacker.
|
||||
|
||||
Restricting permissions is particularly important with scripting languages
|
||||
like Groovy which is designed to do anything that can be done
|
||||
in Java itself, including writing to the file system, opening sockets to
|
||||
remote servers, etc.
|
||||
|
||||
[float]
|
||||
=== Script Classloader Whitelist
|
||||
|
||||
Scripting languages are only allowed to load classes which appear in a
|
||||
hardcoded whitelist that can be found in
https://github.com/elastic/elasticsearch/blob/{branch}/core/src/main/java/org/elasticsearch/script/ClassPermission.java[`org.elasticsearch.script.ClassPermission`].

In a script, attempting to load a class that does not appear in the whitelist
_may_ result in a `ClassNotFoundException`, for instance, this script:

[source,js]
------------------------------
GET _search
{
    "script_fields": {
        "the_hour": {
            "script": "use(java.math.BigInteger); new BigInteger(1)"
        }
    }
}
------------------------------

will return the following exception:

[source,js]
------------------------------
{
    "reason": {
        "type": "script_exception",
        "reason": "failed to run inline script [use(java.math.BigInteger); new BigInteger(1)] using lang [painless]",
        "caused_by": {
            "type": "no_class_def_found_error",
            "reason": "java/math/BigInteger",
            "caused_by": {
                "type": "class_not_found_exception",
                "reason": "java.math.BigInteger"
            }
        }
    }
}
------------------------------

[float]
== Dealing with Java Security Manager issues

If you encounter issues with the Java Security Manager, you have two options
for resolving them:

[float]
=== Fix the security problem

The safest and most secure long-term solution is to change the code causing
the security issue. We recognise that this may take time to do correctly and
so we provide the following two alternatives.

[float]
=== Customising the classloader whitelist

The classloader whitelist can be customised by tweaking the local Java
Security Policy either:

* system wide: `$JAVA_HOME/lib/security/java.policy`,
* for just the `elasticsearch` user: `/home/elasticsearch/.java.policy`,
* by adding a system property to the <<jvm-options,jvm.options>> configuration: `-Djava.security.policy=someURL`, or
* via the `ES_JAVA_OPTS` environment variable with `-Djava.security.policy=someURL`:
+
[source,js]
---------------------------------
export ES_JAVA_OPTS="${ES_JAVA_OPTS} -Djava.security.policy=file:///path/to/my.policy"
./bin/elasticsearch
---------------------------------

Permissions may be granted at the class, package, or global level. For instance:

[source,js]
----------------------------------
grant {
    permission org.elasticsearch.script.ClassPermission "java.util.Base64"; // allow class
    permission org.elasticsearch.script.ClassPermission "java.util.*"; // allow package
    permission org.elasticsearch.script.ClassPermission "*"; // allow all (disables filtering basically)
};
----------------------------------

[TIP]
======================================

Before adding classes to the whitelist, consider the security impact this
will have on Elasticsearch. Do you really need an extra class, or can your code
be rewritten in a more secure way?

It is quite possible that we have not whitelisted a generically useful and
safe class. If you have a class that you think should be whitelisted by
default, please open an issue on GitHub and we will consider the impact of
doing so.

======================================

See http://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html for more information.
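As a rough illustration of what such a grant unlocks, the following sketch shows the kind of security-manager check that a `ClassPermission`-based whitelist performs. It is not the actual Elasticsearch implementation, and the class name is only an example:

[source,java]
----------------------------------
// Illustrative only: a manual check against the same permission type named in the policy above.
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
    // Throws java.security.AccessControlException unless the policy grants the permission;
    // scripting engines surface such failures to the user as a ClassNotFoundException.
    sm.checkPermission(new org.elasticsearch.script.ClassPermission("java.math.BigInteger"));
}
----------------------------------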
@@ -3,6 +3,8 @@

experimental[]

deprecated[5.4.0, `_field_stats` is deprecated, use `_field_caps` instead or run a min/max aggregation on the desired fields]

The field stats api allows one to find statistical properties of a field
without executing a search, but looking up measurements that are natively
available in the Lucene index. This can be useful to explore a dataset which

@@ -19,6 +21,7 @@ All indices:
GET _field_stats?fields=rating
--------------------------------------------------
// CONSOLE
// TEST[warning:[_field_stats] endpoint is deprecated! Use [_field_caps] instead or run a min/max aggregations on the desired fields.]

Specific indices:

@@ -27,6 +30,7 @@ Specific indices:
GET twitter/_field_stats?fields=rating
--------------------------------------------------
// CONSOLE
// TEST[warning:[_field_stats] endpoint is deprecated! Use [_field_caps] instead or run a min/max aggregations on the desired fields.]
// TEST[setup:twitter]

Supported request options:

@@ -47,6 +51,7 @@ POST _field_stats?level=indices
}
--------------------------------------------------
// CONSOLE
// TEST[warning:[_field_stats] endpoint is deprecated! Use [_field_caps] instead or run a min/max aggregations on the desired fields.]

This is equivalent to the previous request.

@@ -122,6 +127,7 @@ Request:
GET _field_stats?fields=rating,answer_count,creation_date,display_name
--------------------------------------------------
// CONSOLE
// TEST[warning:[_field_stats] endpoint is deprecated! Use [_field_caps] instead or run a min/max aggregations on the desired fields.]

Response:

@@ -235,6 +241,7 @@ Request:
GET _field_stats?fields=rating,answer_count,creation_date,display_name&level=indices
--------------------------------------------------
// CONSOLE
// TEST[warning:[_field_stats] endpoint is deprecated! Use [_field_caps] instead or run a min/max aggregations on the desired fields.]

Response:

@@ -330,6 +337,7 @@ POST _field_stats?level=indices
}
--------------------------------------------------
// CONSOLE
// TEST[warning:[_field_stats] endpoint is deprecated! Use [_field_caps] instead or run a min/max aggregations on the desired fields.]

<1> The fields to compute and return field stats for.
<2> The set of index constraints. Note that index constraints can be defined for fields that aren't listed in the `fields` option.

@@ -368,5 +376,6 @@ POST _field_stats?level=indices
}
--------------------------------------------------
// CONSOLE
// TEST[warning:[_field_stats] endpoint is deprecated! Use [_field_caps] instead or run a min/max aggregations on the desired fields.]

<1> Custom date format
@@ -28,8 +28,8 @@ documentation of the mustache project].

NOTE: The mustache language is implemented in elasticsearch as a sandboxed
scripting language, hence it obeys settings that may be used to enable or
disable scripts per language, source and operation as described in
<<enable-dynamic-scripting, scripting docs>>
disable scripts per language, source and operation as described in the
<<security-script-source, scripting docs>>

[float]
==== More template examples
@@ -616,7 +616,7 @@ public class MoreExpressionTests extends ESIntegTestCase {
        Histogram histogram = response.getAggregations().get("histogram");
        assertThat(histogram, notNullValue());
        assertThat(histogram.getName(), equalTo("histogram"));
        List<Histogram.Bucket> buckets = histogram.getBuckets();
        List<? extends Histogram.Bucket> buckets = histogram.getBuckets();

        for (int bucketCount = 0; bucketCount < buckets.size(); ++bucketCount) {
            Histogram.Bucket bucket = buckets.get(bucketCount);
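The only functional change in this hunk is the element type of the bucket list. As a standalone illustration of why the wildcard is needed when an API hands back a list of a concrete subtype (the `Bucket` types below are hypothetical, not the Elasticsearch classes):

[source,java]
----------------------------------
import java.util.ArrayList;
import java.util.List;

public class WildcardDemo {
    interface Bucket { long getDocCount(); }
    static class InternalBucket implements Bucket { public long getDocCount() { return 1; } }

    // Mirrors the test situation: the producer returns a list of "some subtype of Bucket".
    static List<InternalBucket> getBuckets() {
        List<InternalBucket> buckets = new ArrayList<>();
        buckets.add(new InternalBucket());
        return buckets;
    }

    public static void main(String[] args) {
        // List<Bucket> buckets = getBuckets();        // would not compile: incompatible types
        List<? extends Bucket> buckets = getBuckets(); // compiles; read-only access is enough here
        for (Bucket bucket : buckets) {
            System.out.println(bucket.getDocCount());
        }
    }
}
----------------------------------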
@@ -59,7 +59,7 @@ final class RemoteRequestBuilders {
    static Map<String, String> initialSearchParams(SearchRequest searchRequest, Version remoteVersion) {
        Map<String, String> params = new HashMap<>();
        if (searchRequest.scroll() != null) {
            params.put("scroll", searchRequest.scroll().keepAlive().toString());
            params.put("scroll", searchRequest.scroll().keepAlive().getStringRep());
        }
        params.put("size", Integer.toString(searchRequest.source().size()));
        if (searchRequest.source().version() == null || searchRequest.source().version() == true) {

@@ -168,7 +168,7 @@ final class RemoteRequestBuilders {
    }

    static Map<String, String> scrollParams(TimeValue keepAlive) {
        return singletonMap("scroll", keepAlive.toString());
        return singletonMap("scroll", keepAlive.getStringRep());
    }

    static HttpEntity scrollEntity(String scroll, Version remoteVersion) {
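The switch from `toString()` to `getStringRep()` matters because `TimeValue.toString()` may render the duration in a larger unit with a fraction, which cannot be parsed back, while `getStringRep()` keeps the original unit. A small standalone sketch of the assumed behaviour (the printed values are illustrative):

[source,java]
----------------------------------
import org.elasticsearch.common.unit.TimeValue;

import java.util.concurrent.TimeUnit;

public class ScrollKeepAliveRoundTrip {
    public static void main(String[] args) {
        TimeValue keepAlive = new TimeValue(90, TimeUnit.SECONDS);
        // toString() promotes to the largest unit and may emit a fraction, e.g. "1.5m",
        // which TimeValue.parseTimeValue cannot read back.
        System.out.println(keepAlive.toString());
        // getStringRep() keeps the original duration and unit, e.g. "90s", so it round-trips.
        System.out.println(keepAlive.getStringRep());
        TimeValue parsed = TimeValue.parseTimeValue(keepAlive.getStringRep(), "scroll");
        System.out.println(parsed.equals(keepAlive));
    }
}
----------------------------------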
@@ -35,11 +35,11 @@ import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.clearScrollEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchParams;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchPath;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.clearScrollEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollParams;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
@@ -150,7 +150,11 @@ public class RemoteRequestBuildersTests extends ESTestCase {

        Map<String, String> params = initialSearchParams(searchRequest, remoteVersion);

        assertThat(params, scroll == null ? not(hasKey("scroll")) : hasEntry("scroll", scroll.toString()));
        if (scroll == null) {
            assertThat(params, not(hasKey("scroll")));
        } else {
            assertEquals(scroll, TimeValue.parseTimeValue(params.get("scroll"), "scroll"));
        }
        assertThat(params, hasEntry("size", Integer.toString(size)));
        assertThat(params, fetchVersion == null || fetchVersion == true ? hasEntry("version", null) : not(hasEntry("version", null)));
    }

@@ -181,7 +185,7 @@ public class RemoteRequestBuildersTests extends ESTestCase {

    public void testScrollParams() {
        TimeValue scroll = TimeValue.parseTimeValue(randomPositiveTimeValue(), "test");
        assertThat(scrollParams(scroll), hasEntry("scroll", scroll.toString()));
        assertEquals(scroll, TimeValue.parseTimeValue(scrollParams(scroll).get("scroll"), "scroll"));
    }

    public void testScrollEntity() throws IOException {
@@ -459,3 +459,87 @@
        id: 1
  - match: { _source.text: "test" }
  - is_false: _source.filtered

---
"Reindex from remote with rethrottle":
  # Throttling happens between each scroll batch so we need to control the size of the batch by using a single shard
  # and a small batch size on the request
  - do:
      indices.create:
        index: source
        body:
          settings:
            number_of_shards: "1"
            number_of_replicas: "0"
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 2
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 3
        body: { "text": "test" }
  - do:
      indices.refresh: {}

  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
  - do:
      reindex:
        requests_per_second: .00000001 # About 9.5 years to complete the request
        wait_for_completion: false
        refresh: true
        body:
          source:
            remote:
              host: http://${host}
            index: source
            size: 1
          dest:
            index: dest
  - match: {task: '/.+:\d+/'}
  - set: {task: task}

  - do:
      reindex_rethrottle:
        requests_per_second: -1
        task_id: $task

  - do:
      tasks.get:
        wait_for_completion: true
        task_id: $task

  - do:
      search:
        index: dest
        body:
          query:
            match:
              text: test
  - match: {hits.total: 3}

  # Make sure reindex closed all the scroll contexts
  - do:
      indices.stats:
        index: source
        metric: search
  - match: {indices.source.total.search.open_contexts: 0}
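For readers following along outside the YAML test harness, the same rethrottle step can be driven with the low-level REST client. This is only a sketch; the host, port, endpoint shape, and task id below are assumptions rather than values taken from the test:

[source,java]
----------------------------------
import java.util.Collections;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RethrottleSketch {
    public static void main(String[] args) throws Exception {
        // The task id would come from the "task" field of the reindex response captured above.
        String taskId = "some-node-id:12345"; // placeholder, not a real task
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Equivalent of the reindex_rethrottle step: lift the throttle entirely.
            Response response = client.performRequest("POST", "/_reindex/" + taskId + "/_rethrottle",
                    Collections.singletonMap("requests_per_second", "-1"));
            System.out.println(response.getStatusLine());
        }
    }
}
----------------------------------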
@ -22,6 +22,8 @@ package org.elasticsearch.discovery.ec2;
|
|||
import com.amazonaws.ClientConfiguration;
|
||||
import com.amazonaws.Protocol;
|
||||
import com.amazonaws.services.ec2.AmazonEC2;
|
||||
import org.elasticsearch.common.settings.SecureSetting;
|
||||
import org.elasticsearch.common.settings.SecureString;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -42,50 +44,43 @@ interface AwsEc2Service {
|
|||
/**
|
||||
* cloud.aws.access_key: AWS Access key. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<String> KEY_SETTING =
|
||||
Setting.simpleString("cloud.aws.access_key", Property.NodeScope, Property.Filtered, Property.Shared);
|
||||
Setting<SecureString> KEY_SETTING = new Setting<>("cloud.aws.access_key", "", SecureString::new,
|
||||
Property.NodeScope, Property.Filtered, Property.Shared, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.secret_key: AWS Secret key. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<String> SECRET_SETTING =
|
||||
Setting.simpleString("cloud.aws.secret_key", Property.NodeScope, Property.Filtered, Property.Shared);
|
||||
Setting<SecureString> SECRET_SETTING = new Setting<>("cloud.aws.secret_key", "", SecureString::new,
|
||||
Property.NodeScope, Property.Filtered, Property.Shared, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<Protocol> PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)),
|
||||
Property.NodeScope, Property.Shared);
|
||||
Property.NodeScope, Property.Shared, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<String> PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", Property.NodeScope, Property.Shared);
|
||||
Setting<String> PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host",
|
||||
Property.NodeScope, Property.Shared, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, Property.NodeScope,
|
||||
Property.Shared);
|
||||
Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16,
|
||||
Property.NodeScope, Property.Shared, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.proxy.username: In case of proxy with auth, define the username. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<String> PROXY_USERNAME_SETTING = Setting.simpleString("cloud.aws.proxy.username", Property.NodeScope, Property.Shared);
|
||||
Setting<SecureString> PROXY_USERNAME_SETTING = new Setting<>("cloud.aws.proxy.username", "", SecureString::new,
|
||||
Property.NodeScope, Property.Filtered, Property.Shared, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.proxy.password: In case of proxy with auth, define the password. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<String> PROXY_PASSWORD_SETTING =
|
||||
Setting.simpleString("cloud.aws.proxy.password", Property.NodeScope, Property.Filtered, Property.Shared);
|
||||
/**
|
||||
* cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<String> SIGNER_SETTING = Setting.simpleString("cloud.aws.signer", Property.NodeScope, Property.Shared);
|
||||
/**
|
||||
* cloud.aws.region: Region. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<String> REGION_SETTING =
|
||||
new Setting<>("cloud.aws.region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope, Property.Shared);
|
||||
Setting<SecureString> PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.proxy.password", "", SecureString::new,
|
||||
Property.NodeScope, Property.Filtered, Property.Shared, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.read_timeout: Socket read timeout. Shared with repository-s3 plugin
|
||||
*/
|
||||
Setting<TimeValue> READ_TIMEOUT = Setting.timeSetting("cloud.aws.read_timeout",
|
||||
TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope, Property.Shared);
|
||||
TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope, Property.Shared, Property.Deprecated);
|
||||
|
||||
/**
|
||||
* Defines specific ec2 settings starting with cloud.aws.ec2.
|
||||
|
@ -95,69 +90,57 @@ interface AwsEc2Service {
|
|||
* cloud.aws.ec2.access_key: AWS Access key specific for EC2 API calls. Defaults to cloud.aws.access_key.
|
||||
* @see AwsEc2Service#KEY_SETTING
|
||||
*/
|
||||
Setting<String> KEY_SETTING = new Setting<>("cloud.aws.ec2.access_key", AwsEc2Service.KEY_SETTING, Function.identity(),
|
||||
Property.NodeScope, Property.Filtered);
|
||||
Setting<SecureString> KEY_SETTING = new Setting<>("cloud.aws.ec2.access_key", AwsEc2Service.KEY_SETTING,
|
||||
SecureString::new, Property.NodeScope, Property.Filtered, Property.Deprecated);
|
||||
|
||||
/**
|
||||
* cloud.aws.ec2.secret_key: AWS Secret key specific for EC2 API calls. Defaults to cloud.aws.secret_key.
|
||||
* @see AwsEc2Service#SECRET_SETTING
|
||||
*/
|
||||
Setting<String> SECRET_SETTING = new Setting<>("cloud.aws.ec2.secret_key", AwsEc2Service.SECRET_SETTING, Function.identity(),
|
||||
Property.NodeScope, Property.Filtered);
|
||||
Setting<SecureString> SECRET_SETTING = new Setting<>("cloud.aws.ec2.secret_key", AwsEc2Service.SECRET_SETTING,
|
||||
SecureString::new, Property.NodeScope, Property.Filtered, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.ec2.protocol: Protocol for AWS API specific for EC2 API calls: http or https. Defaults to cloud.aws.protocol.
|
||||
* @see AwsEc2Service#PROTOCOL_SETTING
|
||||
*/
|
||||
Setting<Protocol> PROTOCOL_SETTING = new Setting<>("cloud.aws.ec2.protocol", AwsEc2Service.PROTOCOL_SETTING,
|
||||
s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope);
|
||||
s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.ec2.proxy.host: In case of proxy, define its hostname/IP specific for EC2 API calls. Defaults to cloud.aws.proxy.host.
|
||||
* @see AwsEc2Service#PROXY_HOST_SETTING
|
||||
*/
|
||||
Setting<String> PROXY_HOST_SETTING = new Setting<>("cloud.aws.ec2.proxy.host", AwsEc2Service.PROXY_HOST_SETTING,
|
||||
Function.identity(), Property.NodeScope);
|
||||
Function.identity(), Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.ec2.proxy.port: In case of proxy, define its port specific for EC2 API calls. Defaults to cloud.aws.proxy.port.
|
||||
* @see AwsEc2Service#PROXY_PORT_SETTING
|
||||
*/
|
||||
Setting<Integer> PROXY_PORT_SETTING = new Setting<>("cloud.aws.ec2.proxy.port", AwsEc2Service.PROXY_PORT_SETTING,
|
||||
s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.ec2.proxy.port"), Property.NodeScope);
|
||||
s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.ec2.proxy.port"), Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.ec2.proxy.username: In case of proxy with auth, define the username specific for EC2 API calls.
|
||||
* Defaults to cloud.aws.proxy.username.
|
||||
* @see AwsEc2Service#PROXY_USERNAME_SETTING
|
||||
*/
|
||||
Setting<String> PROXY_USERNAME_SETTING = new Setting<>("cloud.aws.ec2.proxy.username", AwsEc2Service.PROXY_USERNAME_SETTING,
|
||||
Function.identity(), Property.NodeScope);
|
||||
Setting<SecureString> PROXY_USERNAME_SETTING = new Setting<>("cloud.aws.ec2.proxy.username", AwsEc2Service.PROXY_USERNAME_SETTING,
|
||||
SecureString::new, Property.NodeScope, Property.Filtered, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.ec2.proxy.password: In case of proxy with auth, define the password specific for EC2 API calls.
|
||||
* Defaults to cloud.aws.proxy.password.
|
||||
* @see AwsEc2Service#PROXY_PASSWORD_SETTING
|
||||
*/
|
||||
Setting<String> PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.ec2.proxy.password", AwsEc2Service.PROXY_PASSWORD_SETTING,
|
||||
Function.identity(), Property.NodeScope, Property.Filtered);
|
||||
/**
|
||||
* cloud.aws.ec2.signer: If you are using an old AWS API version, you can define a Signer. Specific for EC2 API calls.
|
||||
* Defaults to cloud.aws.signer.
|
||||
* @see AwsEc2Service#SIGNER_SETTING
|
||||
*/
|
||||
Setting<String> SIGNER_SETTING = new Setting<>("cloud.aws.ec2.signer", AwsEc2Service.SIGNER_SETTING, Function.identity(),
|
||||
Property.NodeScope);
|
||||
/**
|
||||
* cloud.aws.ec2.region: Region specific for EC2 API calls. Defaults to cloud.aws.region.
|
||||
* @see AwsEc2Service#REGION_SETTING
|
||||
*/
|
||||
Setting<String> REGION_SETTING = new Setting<>("cloud.aws.ec2.region", AwsEc2Service.REGION_SETTING,
|
||||
s -> s.toLowerCase(Locale.ROOT), Property.NodeScope);
|
||||
Setting<SecureString> PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.ec2.proxy.password", AwsEc2Service.PROXY_PASSWORD_SETTING,
|
||||
SecureString::new, Property.NodeScope, Property.Filtered, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.ec2.endpoint: Endpoint. If not set, endpoint will be guessed based on region setting.
|
||||
*/
|
||||
Setting<String> ENDPOINT_SETTING = Setting.simpleString("cloud.aws.ec2.endpoint", Property.NodeScope);
|
||||
Setting<String> ENDPOINT_SETTING = Setting.simpleString("cloud.aws.ec2.endpoint", Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.ec2.read_timeout: Socket read timeout. Defaults to cloud.aws.read_timeout
|
||||
* @see AwsEc2Service#READ_TIMEOUT
|
||||
*/
|
||||
Setting<TimeValue> READ_TIMEOUT =
|
||||
Setting.timeSetting("cloud.aws.ec2.read_timeout", AwsEc2Service.READ_TIMEOUT, Property.NodeScope);
|
||||
Setting.timeSetting("cloud.aws.ec2.read_timeout", AwsEc2Service.READ_TIMEOUT, Property.NodeScope, Property.Deprecated);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -172,6 +155,40 @@ interface AwsEc2Service {
        public static final String TAG_PREFIX = "tag:";
    }

    /** The access key (ie login id) for connecting to ec2. */
    Setting<SecureString> ACCESS_KEY_SETTING = SecureSetting.secureString("discovery.ec2.access_key", CLOUD_EC2.KEY_SETTING);

    /** The secret key (ie password) for connecting to ec2. */
    Setting<SecureString> SECRET_KEY_SETTING = SecureSetting.secureString("discovery.ec2.secret_key", CLOUD_EC2.SECRET_SETTING);

    /** An override for the ec2 endpoint to connect to. */
    Setting<String> ENDPOINT_SETTING = new Setting<>("discovery.ec2.endpoint", CLOUD_EC2.ENDPOINT_SETTING,
        s -> s.toLowerCase(Locale.ROOT), Setting.Property.NodeScope);

    /** The protocol to use to connect to ec2. */
    Setting<Protocol> PROTOCOL_SETTING = new Setting<>("discovery.ec2.protocol", CLOUD_EC2.PROTOCOL_SETTING,
        s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Setting.Property.NodeScope);

    /** The host name of a proxy to connect to ec2 through. */
    Setting<String> PROXY_HOST_SETTING = new Setting<>("discovery.ec2.proxy.host", CLOUD_EC2.PROXY_HOST_SETTING,
        Function.identity(), Setting.Property.NodeScope);

    /** The port of a proxy to connect to ec2 through. */
    Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("discovery.ec2.proxy.port", CLOUD_EC2.PROXY_PORT_SETTING,
        0, Setting.Property.NodeScope);

    /** The username of a proxy to connect to ec2 through. */
    Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("discovery.ec2.proxy.username",
        CLOUD_EC2.PROXY_USERNAME_SETTING);

    /** The password of a proxy to connect to ec2 through. */
    Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("discovery.ec2.proxy.password",
        CLOUD_EC2.PROXY_PASSWORD_SETTING);

    /** The socket timeout for connecting to ec2. */
    Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout",
        CLOUD_EC2.READ_TIMEOUT, Setting.Property.NodeScope);

    /**
     * discovery.ec2.host_type: The type of host to use to communicate with other instances.
     * Can be one of private_ip, public_ip, private_dns, public_dns or tag:XXXX where
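The pattern introduced here pairs each new `discovery.ec2.*` secure setting with its deprecated `cloud.aws.*` fallback, and reads the value through try-with-resources as the service implementation below does. The following sketch restates that usage outside the interface; the `settings` argument is assumed to be whatever node settings are in scope:

[source,java]
----------------------------------
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

class SecureSettingSketch {
    // Mirrors the declarations above: a secure setting that falls back to the deprecated cloud.aws.* key.
    static final Setting<SecureString> ACCESS_KEY =
        SecureSetting.secureString("discovery.ec2.access_key", AwsEc2Service.CLOUD_EC2.KEY_SETTING);

    static boolean hasExplicitKey(Settings settings) {
        // SecureString is Closeable; reading it in try-with-resources releases the value afterwards,
        // which is why the service implementation copies it (key.toString()) only at the point of use.
        try (SecureString key = ACCESS_KEY.get(settings)) {
            return key.length() > 0;
        }
    }
}
----------------------------------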
@ -38,6 +38,7 @@ import org.apache.logging.log4j.Logger;
|
|||
import org.elasticsearch.common.Randomness;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.settings.SecureString;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Closeable {
|
||||
|
@ -68,14 +69,15 @@ class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Clos
|
|||
protected static AWSCredentialsProvider buildCredentials(Logger logger, Settings settings) {
|
||||
AWSCredentialsProvider credentials;
|
||||
|
||||
String key = CLOUD_EC2.KEY_SETTING.get(settings);
|
||||
String secret = CLOUD_EC2.SECRET_SETTING.get(settings);
|
||||
if (key.isEmpty() && secret.isEmpty()) {
|
||||
logger.debug("Using either environment variables, system properties or instance profile credentials");
|
||||
credentials = new DefaultAWSCredentialsProviderChain();
|
||||
} else {
|
||||
logger.debug("Using basic key/secret credentials");
|
||||
credentials = new StaticCredentialsProvider(new BasicAWSCredentials(key, secret));
|
||||
try (SecureString key = DISCOVERY_EC2.ACCESS_KEY_SETTING.get(settings);
|
||||
SecureString secret = DISCOVERY_EC2.SECRET_KEY_SETTING.get(settings)) {
|
||||
if (key.length() == 0 && secret.length() == 0) {
|
||||
logger.debug("Using either environment variables, system properties or instance profile credentials");
|
||||
credentials = new DefaultAWSCredentialsProviderChain();
|
||||
} else {
|
||||
logger.debug("Using basic key/secret credentials");
|
||||
credentials = new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString()));
|
||||
}
|
||||
}
|
||||
|
||||
return credentials;
|
||||
|
@ -86,26 +88,20 @@ class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Clos
|
|||
// the response metadata cache is only there for diagnostics purposes,
|
||||
// but can force objects from every response to the old generation.
|
||||
clientConfiguration.setResponseMetadataCacheSize(0);
|
||||
clientConfiguration.setProtocol(CLOUD_EC2.PROTOCOL_SETTING.get(settings));
|
||||
clientConfiguration.setProtocol(DISCOVERY_EC2.PROTOCOL_SETTING.get(settings));
|
||||
|
||||
if (PROXY_HOST_SETTING.exists(settings) || CLOUD_EC2.PROXY_HOST_SETTING.exists(settings)) {
|
||||
String proxyHost = CLOUD_EC2.PROXY_HOST_SETTING.get(settings);
|
||||
Integer proxyPort = CLOUD_EC2.PROXY_PORT_SETTING.get(settings);
|
||||
String proxyUsername = CLOUD_EC2.PROXY_USERNAME_SETTING.get(settings);
|
||||
String proxyPassword = CLOUD_EC2.PROXY_PASSWORD_SETTING.get(settings);
|
||||
if (PROXY_HOST_SETTING.exists(settings) || DISCOVERY_EC2.PROXY_HOST_SETTING.exists(settings)) {
|
||||
String proxyHost = DISCOVERY_EC2.PROXY_HOST_SETTING.get(settings);
|
||||
Integer proxyPort = DISCOVERY_EC2.PROXY_PORT_SETTING.get(settings);
|
||||
try (SecureString proxyUsername = DISCOVERY_EC2.PROXY_USERNAME_SETTING.get(settings);
|
||||
SecureString proxyPassword = DISCOVERY_EC2.PROXY_PASSWORD_SETTING.get(settings)) {
|
||||
|
||||
clientConfiguration
|
||||
.withProxyHost(proxyHost)
|
||||
.withProxyPort(proxyPort)
|
||||
.withProxyUsername(proxyUsername)
|
||||
.withProxyPassword(proxyPassword);
|
||||
}
|
||||
|
||||
// #155: we might have 3rd party users using older EC2 API version
|
||||
String awsSigner = CLOUD_EC2.SIGNER_SETTING.get(settings);
|
||||
if (Strings.hasText(awsSigner)) {
|
||||
logger.debug("using AWS API signer [{}]", awsSigner);
|
||||
AwsSigner.configureSigner(awsSigner, clientConfiguration);
|
||||
clientConfiguration
|
||||
.withProxyHost(proxyHost)
|
||||
.withProxyPort(proxyPort)
|
||||
.withProxyUsername(proxyUsername.toString())
|
||||
.withProxyPassword(proxyPassword.toString());
|
||||
}
|
||||
}
|
||||
|
||||
// Increase the number of retries in case of 5xx API responses
|
||||
|
@ -125,82 +121,16 @@ class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service, Clos
|
|||
10,
|
||||
false);
|
||||
clientConfiguration.setRetryPolicy(retryPolicy);
|
||||
clientConfiguration.setSocketTimeout((int) CLOUD_EC2.READ_TIMEOUT.get(settings).millis());
|
||||
clientConfiguration.setSocketTimeout((int) DISCOVERY_EC2.READ_TIMEOUT_SETTING.get(settings).millis());
|
||||
|
||||
return clientConfiguration;
|
||||
}
|
||||
|
||||
protected static String findEndpoint(Logger logger, Settings settings) {
|
||||
String endpoint = null;
|
||||
if (CLOUD_EC2.ENDPOINT_SETTING.exists(settings)) {
|
||||
endpoint = CLOUD_EC2.ENDPOINT_SETTING.get(settings);
|
||||
if (DISCOVERY_EC2.ENDPOINT_SETTING.exists(settings) || CLOUD_EC2.ENDPOINT_SETTING.exists(settings)) {
|
||||
endpoint = DISCOVERY_EC2.ENDPOINT_SETTING.get(settings);
|
||||
logger.debug("using explicit ec2 endpoint [{}]", endpoint);
|
||||
} else if (REGION_SETTING.exists(settings) || CLOUD_EC2.REGION_SETTING.exists(settings)) {
|
||||
final String region = CLOUD_EC2.REGION_SETTING.get(settings);
|
||||
switch (region) {
|
||||
case "us-east-1":
|
||||
case "us-east":
|
||||
endpoint = "ec2.us-east-1.amazonaws.com";
|
||||
break;
|
||||
case "us-east-2":
|
||||
endpoint = "ec2.us-east-2.amazonaws.com";
|
||||
break;
|
||||
case "us-west":
|
||||
case "us-west-1":
|
||||
endpoint = "ec2.us-west-1.amazonaws.com";
|
||||
break;
|
||||
case "us-west-2":
|
||||
endpoint = "ec2.us-west-2.amazonaws.com";
|
||||
break;
|
||||
case "ap-southeast":
|
||||
case "ap-southeast-1":
|
||||
endpoint = "ec2.ap-southeast-1.amazonaws.com";
|
||||
break;
|
||||
case "ap-south":
|
||||
case "ap-south-1":
|
||||
endpoint = "ec2.ap-south-1.amazonaws.com";
|
||||
break;
|
||||
case "us-gov-west":
|
||||
case "us-gov-west-1":
|
||||
endpoint = "ec2.us-gov-west-1.amazonaws.com";
|
||||
break;
|
||||
case "ap-southeast-2":
|
||||
endpoint = "ec2.ap-southeast-2.amazonaws.com";
|
||||
break;
|
||||
case "ap-northeast":
|
||||
case "ap-northeast-1":
|
||||
endpoint = "ec2.ap-northeast-1.amazonaws.com";
|
||||
break;
|
||||
case "ap-northeast-2":
|
||||
endpoint = "ec2.ap-northeast-2.amazonaws.com";
|
||||
break;
|
||||
case "eu-west":
|
||||
case "eu-west-1":
|
||||
endpoint = "ec2.eu-west-1.amazonaws.com";
|
||||
break;
|
||||
case "eu-west-2":
|
||||
endpoint = "ec2.eu-west-2.amazonaws.com";
|
||||
break;
|
||||
case "eu-central":
|
||||
case "eu-central-1":
|
||||
endpoint = "ec2.eu-central-1.amazonaws.com";
|
||||
break;
|
||||
case "sa-east":
|
||||
case "sa-east-1":
|
||||
endpoint = "ec2.sa-east-1.amazonaws.com";
|
||||
break;
|
||||
case "cn-north":
|
||||
case "cn-north-1":
|
||||
endpoint = "ec2.cn-north-1.amazonaws.com.cn";
|
||||
break;
|
||||
case "ca-central":
|
||||
case "ca-central-1":
|
||||
endpoint = "ec2.ca-central-1.amazonaws.com";
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("No automatic endpoint could be derived from region [" + region + "]");
|
||||
}
|
||||
logger.debug("using ec2 region [{}], with endpoint [{}]", region, endpoint);
|
||||
}
|
||||
return endpoint;
|
||||
}
|
||||
|
|
|
@ -127,8 +127,6 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close
|
|||
AwsEc2Service.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.SIGNER_SETTING,
|
||||
AwsEc2Service.REGION_SETTING,
|
||||
AwsEc2Service.READ_TIMEOUT,
|
||||
// Register EC2 specific settings: cloud.aws.ec2
|
||||
AwsEc2Service.CLOUD_EC2.KEY_SETTING,
|
||||
|
@ -138,11 +136,18 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close
|
|||
AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.SIGNER_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.REGION_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.READ_TIMEOUT,
|
||||
// Register EC2 discovery settings: discovery.ec2
|
||||
AwsEc2Service.DISCOVERY_EC2.ACCESS_KEY_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.SECRET_KEY_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.ENDPOINT_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.PROTOCOL_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.PROXY_HOST_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.READ_TIMEOUT_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.HOST_TYPE_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.ANY_GROUP_SETTING,
|
||||
AwsEc2Service.DISCOVERY_EC2.GROUPS_SETTING,
|
||||
|
|
|
@ -24,6 +24,8 @@ import com.amazonaws.Protocol;
|
|||
import com.amazonaws.auth.AWSCredentials;
|
||||
import com.amazonaws.auth.AWSCredentialsProvider;
|
||||
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
|
||||
import org.elasticsearch.common.settings.MockSecureSettings;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.ec2.AwsEc2Service;
|
||||
import org.elasticsearch.discovery.ec2.AwsEc2ServiceImpl;
|
||||
|
@ -42,19 +44,35 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testAWSCredentialsWithElasticsearchAwsSettings() {
|
||||
MockSecureSettings secureSettings = new MockSecureSettings();
|
||||
secureSettings.setString("discovery.ec2.access_key", "aws_key");
|
||||
secureSettings.setString("discovery.ec2.secret_key", "aws_secret");
|
||||
Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
|
||||
launchAWSCredentialsWithElasticsearchSettingsTest(settings, "aws_key", "aws_secret");
|
||||
}
|
||||
|
||||
public void testAWSCredentialsWithElasticsearchAwsSettingsBackcompat() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.KEY_SETTING.getKey(), "aws_key")
|
||||
.put(AwsEc2Service.SECRET_SETTING.getKey(), "aws_secret")
|
||||
.build();
|
||||
launchAWSCredentialsWithElasticsearchSettingsTest(settings, "aws_key", "aws_secret");
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.KEY_SETTING,
|
||||
AwsEc2Service.SECRET_SETTING
|
||||
});
|
||||
}
|
||||
|
||||
public void testAWSCredentialsWithElasticsearchEc2Settings() {
|
||||
public void testAWSCredentialsWithElasticsearchEc2SettingsBackcompat() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.CLOUD_EC2.KEY_SETTING.getKey(), "ec2_key")
|
||||
.put(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey(), "ec2_secret")
|
||||
.build();
|
||||
launchAWSCredentialsWithElasticsearchSettingsTest(settings, "ec2_key", "ec2_secret");
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.CLOUD_EC2.KEY_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.SECRET_SETTING
|
||||
});
|
||||
}
|
||||
|
||||
public void testAWSCredentialsWithElasticsearchAwsAndEc2Settings() {
|
||||
|
@ -65,6 +83,12 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
|
|||
.put(AwsEc2Service.CLOUD_EC2.SECRET_SETTING.getKey(), "ec2_secret")
|
||||
.build();
|
||||
launchAWSCredentialsWithElasticsearchSettingsTest(settings, "ec2_key", "ec2_secret");
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.KEY_SETTING,
|
||||
AwsEc2Service.SECRET_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.KEY_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.SECRET_SETTING
|
||||
});
|
||||
}
|
||||
|
||||
protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings settings, String expectedKey, String expectedSecret) {
|
||||
|
@ -74,22 +98,43 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testAWSDefaultConfiguration() {
|
||||
launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, null,
|
||||
launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null,
|
||||
ClientConfiguration.DEFAULT_SOCKET_TIMEOUT);
|
||||
}
|
||||
|
||||
public void testAWSConfigurationWithAwsSettings() {
|
||||
MockSecureSettings secureSettings = new MockSecureSettings();
|
||||
secureSettings.setString("discovery.ec2.proxy.username", "aws_proxy_username");
|
||||
secureSettings.setString("discovery.ec2.proxy.password", "aws_proxy_password");
|
||||
Settings settings = Settings.builder()
|
||||
.put("discovery.ec2.protocol", "http")
|
||||
.put("discovery.ec2.proxy.host", "aws_proxy_host")
|
||||
.put("discovery.ec2.proxy.port", 8080)
|
||||
.put("discovery.ec2.read_timeout", "10s")
|
||||
.setSecureSettings(secureSettings)
|
||||
.build();
|
||||
launchAWSConfigurationTest(settings, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", "aws_proxy_password", 10000);
|
||||
}
|
||||
|
||||
public void testAWSConfigurationWithAwsSettingsBackcompat() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.PROTOCOL_SETTING.getKey(), "http")
|
||||
.put(AwsEc2Service.PROXY_HOST_SETTING.getKey(), "aws_proxy_host")
|
||||
.put(AwsEc2Service.PROXY_PORT_SETTING.getKey(), 8080)
|
||||
.put(AwsEc2Service.PROXY_USERNAME_SETTING.getKey(), "aws_proxy_username")
|
||||
.put(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey(), "aws_proxy_password")
|
||||
.put(AwsEc2Service.SIGNER_SETTING.getKey(), "AWS3SignerType")
|
||||
.put(AwsEc2Service.READ_TIMEOUT.getKey(), "10s")
|
||||
.build();
|
||||
launchAWSConfigurationTest(settings, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", "aws_proxy_password",
|
||||
"AWS3SignerType", 10000);
|
||||
10000);
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.PROTOCOL_SETTING,
|
||||
AwsEc2Service.PROXY_HOST_SETTING,
|
||||
AwsEc2Service.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.READ_TIMEOUT
|
||||
});
|
||||
}
|
||||
|
||||
public void testAWSConfigurationWithAwsAndEc2Settings() {
|
||||
|
@ -99,17 +144,29 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
|
|||
.put(AwsEc2Service.PROXY_PORT_SETTING.getKey(), 8080)
|
||||
.put(AwsEc2Service.PROXY_USERNAME_SETTING.getKey(), "aws_proxy_username")
|
||||
.put(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey(), "aws_proxy_password")
|
||||
.put(AwsEc2Service.SIGNER_SETTING.getKey(), "AWS3SignerType")
|
||||
.put(AwsEc2Service.READ_TIMEOUT.getKey(), "20s")
|
||||
.put(AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING.getKey(), "https")
|
||||
.put(AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING.getKey(), "ec2_proxy_host")
|
||||
.put(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.getKey(), 8081)
|
||||
.put(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.getKey(), "ec2_proxy_username")
|
||||
.put(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.getKey(), "ec2_proxy_password")
|
||||
.put(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.getKey(), "NoOpSignerType")
|
||||
.put(AwsEc2Service.CLOUD_EC2.READ_TIMEOUT.getKey(), "10s")
|
||||
.build();
|
||||
launchAWSConfigurationTest(settings, Protocol.HTTPS, "ec2_proxy_host", 8081, "ec2_proxy_username", "ec2_proxy_password",
|
||||
"NoOpSignerType", 10000);
|
||||
launchAWSConfigurationTest(settings, Protocol.HTTPS, "ec2_proxy_host", 8081, "ec2_proxy_username", "ec2_proxy_password", 10000);
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.PROTOCOL_SETTING,
|
||||
AwsEc2Service.PROXY_HOST_SETTING,
|
||||
AwsEc2Service.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.READ_TIMEOUT,
|
||||
AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.READ_TIMEOUT
|
||||
});
|
||||
}
|
||||
|
||||
protected void launchAWSConfigurationTest(Settings settings,
|
||||
|
@ -118,7 +175,6 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
|
|||
int expectedProxyPort,
|
||||
String expectedProxyUsername,
|
||||
String expectedProxyPassword,
|
||||
String expectedSigner,
|
||||
int expectedReadTimeout) {
|
||||
ClientConfiguration configuration = AwsEc2ServiceImpl.buildConfiguration(logger, settings);
|
||||
|
||||
|
@ -128,7 +184,6 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
|
|||
assertThat(configuration.getProxyPort(), is(expectedProxyPort));
|
||||
assertThat(configuration.getProxyUsername(), is(expectedProxyUsername));
|
||||
assertThat(configuration.getProxyPassword(), is(expectedProxyPassword));
|
||||
assertThat(configuration.getSignerOverride(), is(expectedSigner));
|
||||
assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout));
|
||||
}
|
||||
|
||||
|
@ -139,36 +194,20 @@ public class AwsEc2ServiceImplTests extends ESTestCase {
|
|||
|
||||
public void testSpecificEndpoint() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.getKey(), "ec2.endpoint")
|
||||
.put(AwsEc2Service.DISCOVERY_EC2.ENDPOINT_SETTING.getKey(), "ec2.endpoint")
|
||||
.build();
|
||||
String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings);
|
||||
assertThat(endpoint, is("ec2.endpoint"));
|
||||
}
|
||||
|
||||
public void testRegionWithAwsSettings() {
|
||||
public void testSpecificEndpointBackcompat() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.REGION_SETTING.getKey(), randomFrom("eu-west", "eu-west-1"))
|
||||
.put(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.getKey(), "ec2.endpoint")
|
||||
.build();
|
||||
String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings);
|
||||
assertThat(endpoint, is("ec2.eu-west-1.amazonaws.com"));
|
||||
}
|
||||
|
||||
public void testRegionWithAwsAndEc2Settings() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.REGION_SETTING.getKey(), randomFrom("eu-west", "eu-west-1"))
|
||||
.put(AwsEc2Service.CLOUD_EC2.REGION_SETTING.getKey(), randomFrom("us-west", "us-west-1"))
|
||||
.build();
|
||||
String endpoint = AwsEc2ServiceImpl.findEndpoint(logger, settings);
|
||||
assertThat(endpoint, is("ec2.us-west-1.amazonaws.com"));
|
||||
}
|
||||
|
||||
public void testInvalidRegion() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(AwsEc2Service.REGION_SETTING.getKey(), "does-not-exist")
|
||||
.build();
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
|
||||
AwsEc2ServiceImpl.findEndpoint(logger, settings);
|
||||
assertThat(endpoint, is("ec2.endpoint"));
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING
|
||||
});
|
||||
assertThat(e.getMessage(), containsString("No automatic endpoint could be derived from region"));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.discovery.ec2;
|
||||
|
||||
import com.amazonaws.Protocol;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
|
@ -36,8 +37,6 @@ public class Ec2DiscoverySettingsTests extends ESTestCase {
|
|||
.put(AwsEc2Service.PROXY_PORT_SETTING.getKey(), 10000)
|
||||
.put(AwsEc2Service.PROXY_USERNAME_SETTING.getKey(), "global-proxy-username")
|
||||
.put(AwsEc2Service.PROXY_PASSWORD_SETTING.getKey(), "global-proxy-password")
|
||||
.put(AwsEc2Service.SIGNER_SETTING.getKey(), "global-signer")
|
||||
.put(AwsEc2Service.REGION_SETTING.getKey(), "global-region")
|
||||
.build();
|
||||
|
||||
private static final Settings EC2 = Settings.builder()
|
||||
|
@ -48,8 +47,6 @@ public class Ec2DiscoverySettingsTests extends ESTestCase {
|
|||
.put(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.getKey(), 20000)
|
||||
.put(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.getKey(), "ec2-proxy-username")
|
||||
.put(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.getKey(), "ec2-proxy-password")
|
||||
.put(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.getKey(), "ec2-signer")
|
||||
.put(AwsEc2Service.CLOUD_EC2.REGION_SETTING.getKey(), "ec2-region")
|
||||
.put(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.getKey(), "ec2-endpoint")
|
||||
.build();
|
||||
|
||||
|
@ -65,9 +62,16 @@ public class Ec2DiscoverySettingsTests extends ESTestCase {
|
|||
assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.get(nodeSettings), is(10000));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.get(nodeSettings), is("global-proxy-username"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.get(nodeSettings), is("global-proxy-password"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.get(nodeSettings), is("global-signer"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.REGION_SETTING.get(nodeSettings), is("global-region"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.get(nodeSettings), isEmptyString());
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.KEY_SETTING,
|
||||
AwsEc2Service.SECRET_SETTING,
|
||||
AwsEc2Service.PROTOCOL_SETTING,
|
||||
AwsEc2Service.PROXY_HOST_SETTING,
|
||||
AwsEc2Service.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.PROXY_PASSWORD_SETTING,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -82,9 +86,24 @@ public class Ec2DiscoverySettingsTests extends ESTestCase {
|
|||
assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING.get(nodeSettings), is(20000));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING.get(nodeSettings), is("ec2-proxy-username"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING.get(nodeSettings), is("ec2-proxy-password"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.SIGNER_SETTING.get(nodeSettings), is("ec2-signer"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.REGION_SETTING.get(nodeSettings), is("ec2-region"));
|
||||
assertThat(AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING.get(nodeSettings), is("ec2-endpoint"));
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[] {
|
||||
AwsEc2Service.KEY_SETTING,
|
||||
AwsEc2Service.SECRET_SETTING,
|
||||
AwsEc2Service.PROTOCOL_SETTING,
|
||||
AwsEc2Service.PROXY_HOST_SETTING,
|
||||
AwsEc2Service.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.KEY_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.SECRET_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROTOCOL_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_HOST_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_PORT_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_USERNAME_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.PROXY_PASSWORD_SETTING,
|
||||
AwsEc2Service.CLOUD_EC2.ENDPOINT_SETTING
|
||||
});
|
||||
}
|
||||
|
||||
private Settings buildSettings(Settings... global) {
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
|
||||
esplugin {
|
||||
description 'The GCS repository plugin adds Google Cloud Storage support for repositories.'
|
||||
classname 'org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin'
|
||||
classname 'org.elasticsearch.repositories.gcs.GoogleCloudStoragePlugin'
|
||||
}
|
||||
|
||||
versions << [
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.blobstore.gcs;
|
||||
package org.elasticsearch.repositories.gcs;
|
||||
|
||||
import org.elasticsearch.common.blobstore.BlobMetaData;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
|
@ -29,7 +29,7 @@ import java.io.InputStream;
|
|||
import java.nio.file.FileAlreadyExistsException;
|
||||
import java.util.Map;
|
||||
|
||||
public class GoogleCloudStorageBlobContainer extends AbstractBlobContainer {
|
||||
class GoogleCloudStorageBlobContainer extends AbstractBlobContainer {
|
||||
|
||||
private final GoogleCloudStorageBlobStore blobStore;
|
||||
private final String path;
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.blobstore.gcs;
|
||||
package org.elasticsearch.repositories.gcs;
|
||||
|
||||
import com.google.api.client.googleapis.batch.BatchRequest;
|
||||
import com.google.api.client.googleapis.batch.json.JsonBatchCallback;
|
||||
|
@ -29,14 +29,12 @@ import com.google.api.services.storage.Storage;
|
|||
import com.google.api.services.storage.model.Bucket;
|
||||
import com.google.api.services.storage.model.Objects;
|
||||
import com.google.api.services.storage.model.StorageObject;
|
||||
import org.elasticsearch.SpecialPermission;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.blobstore.BlobContainer;
|
||||
import org.elasticsearch.common.blobstore.BlobMetaData;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.blobstore.BlobStoreException;
|
||||
import org.elasticsearch.common.blobstore.gcs.util.SocketAccess;
|
||||
import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -45,9 +43,6 @@ import org.elasticsearch.common.util.concurrent.CountDown;
|
|||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedActionException;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
|
@ -62,7 +57,7 @@ import java.util.stream.StreamSupport;
|
|||
|
||||
import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
|
||||
|
||||
public class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore {
|
||||
class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore {
|
||||
|
||||
/**
|
||||
* Google Cloud Storage batch requests are limited to 1000 operations
|
||||
|
@ -72,7 +67,7 @@ public class GoogleCloudStorageBlobStore extends AbstractComponent implements Bl
|
|||
private final Storage client;
|
||||
private final String bucket;
|
||||
|
||||
public GoogleCloudStorageBlobStore(Settings settings, String bucket, Storage storageClient) {
|
||||
GoogleCloudStorageBlobStore(Settings settings, String bucket, Storage storageClient) {
|
||||
super(settings);
|
||||
this.bucket = bucket;
|
||||
this.client = storageClient;
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.plugin.repository.gcs;
|
||||
package org.elasticsearch.repositories.gcs;
|
||||
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
|
@ -24,14 +24,12 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
|||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.blobstore.gcs.GoogleCloudStorageBlobStore;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin;
|
||||
import org.elasticsearch.repositories.RepositoryException;
|
||||
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
|
||||
|
||||
|
@ -44,7 +42,7 @@ import static org.elasticsearch.common.settings.Setting.simpleString;
|
|||
import static org.elasticsearch.common.settings.Setting.timeSetting;
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
|
||||
|
||||
public class GoogleCloudStorageRepository extends BlobStoreRepository {
|
||||
class GoogleCloudStorageRepository extends BlobStoreRepository {
|
||||
|
||||
// package private for testing
|
||||
static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
|
||||
|
@ -76,7 +74,7 @@ public class GoogleCloudStorageRepository extends BlobStoreRepository {
|
|||
private final BlobPath basePath;
|
||||
private final GoogleCloudStorageBlobStore blobStore;
|
||||
|
||||
public GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment,
|
||||
GoogleCloudStorageRepository(RepositoryMetaData metadata, Environment environment,
|
||||
NamedXContentRegistry namedXContentRegistry,
|
||||
GoogleCloudStorageService storageService) throws Exception {
|
||||
super(metadata, environment.settings(), namedXContentRegistry);
|
||||
|
|
|
@ -45,7 +45,7 @@ import java.nio.file.Files;
|
|||
import java.nio.file.Path;
|
||||
import java.util.Collections;
|
||||
|
||||
public interface GoogleCloudStorageService {
|
||||
interface GoogleCloudStorageService {
|
||||
|
||||
/**
|
||||
* Creates a client that can be used to manage Google Cloud Storage objects.
|
||||
|
@ -67,7 +67,7 @@ public interface GoogleCloudStorageService {
|
|||
|
||||
private final Environment environment;
|
||||
|
||||
public InternalGoogleCloudStorageService(Environment environment) {
|
||||
InternalGoogleCloudStorageService(Environment environment) {
|
||||
super(environment.settings());
|
||||
this.environment = environment;
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.blobstore.gcs.util;
|
||||
package org.elasticsearch.repositories.gcs;
|
||||
|
||||
import org.elasticsearch.SpecialPermission;
|
||||
import org.elasticsearch.common.CheckedRunnable;
|
||||
|
@ -34,7 +34,7 @@ import java.security.PrivilegedExceptionAction;
|
|||
* needs {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access
|
||||
* in {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
|
||||
*/
|
||||
public final class SocketAccess {
|
||||
final class SocketAccess {
|
||||
|
||||
private SocketAccess() {}
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.blobstore.gcs;
|
||||
package org.elasticsearch.repositories.gcs;
|
||||
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.settings.Settings;
|
|
@ -21,13 +21,11 @@ package org.elasticsearch.repositories.gcs;
|
|||
|
||||
import com.google.api.services.storage.Storage;
|
||||
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
||||
import org.elasticsearch.common.blobstore.gcs.MockHttpTransport;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.plugin.repository.gcs.GoogleCloudStoragePlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase;
|
||||
import org.junit.BeforeClass;
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.blobstore.gcs;
|
||||
package org.elasticsearch.repositories.gcs;
|
||||
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
import org.elasticsearch.common.settings.Settings;
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.blobstore.gcs;
|
||||
package org.elasticsearch.repositories.gcs;
|
||||
|
||||
import com.google.api.client.http.HttpTransport;
|
||||
import com.google.api.client.http.LowLevelHttpRequest;
|
|
@ -73,11 +73,6 @@ interface AwsS3Service extends LifecycleComponent {
|
|||
*/
|
||||
Setting<SecureString> PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.proxy.password", "", SecureString::new,
|
||||
Property.NodeScope, Property.Filtered, Property.Deprecated, Property.Shared);
|
||||
/**
|
||||
* cloud.aws.signer: If you are using an old AWS API version, you can define a Signer. Shared with discovery-ec2 plugin
|
||||
*/
|
||||
Setting<String> SIGNER_SETTING = Setting.simpleString("cloud.aws.signer",
|
||||
Property.NodeScope, Property.Deprecated, Property.Shared);
|
||||
/**
|
||||
* cloud.aws.read_timeout: Socket read timeout. Shared with discovery-ec2 plugin
|
||||
*/
|
||||
|
@ -140,14 +135,6 @@ interface AwsS3Service extends LifecycleComponent {
|
|||
Setting<SecureString> PROXY_PASSWORD_SETTING =
|
||||
new Setting<>("cloud.aws.s3.proxy.password", AwsS3Service.PROXY_PASSWORD_SETTING, SecureString::new,
|
||||
Property.NodeScope, Property.Filtered, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.s3.signer: If you are using an old AWS API version, you can define a Signer. Specific for S3 API calls.
|
||||
* Defaults to cloud.aws.signer.
|
||||
* @see AwsS3Service#SIGNER_SETTING
|
||||
*/
|
||||
Setting<String> SIGNER_SETTING =
|
||||
new Setting<>("cloud.aws.s3.signer", AwsS3Service.SIGNER_SETTING, Function.identity(),
|
||||
Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* cloud.aws.s3.endpoint: Endpoint.
|
||||
*/
|
||||
|
|
|
@ -147,13 +147,6 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
|
|||
}
|
||||
clientConfiguration.setUseThrottleRetries(useThrottleRetries);
|
||||
|
||||
// #155: we might have 3rd party users using older S3 API version
|
||||
String awsSigner = CLOUD_S3.SIGNER_SETTING.get(settings);
|
||||
if (Strings.hasText(awsSigner)) {
|
||||
logger.debug("using AWS API signer [{}]", awsSigner);
|
||||
AwsSigner.configureSigner(awsSigner, clientConfiguration, endpoint);
|
||||
}
|
||||
|
||||
TimeValue readTimeout = getConfigValue(null, settings, clientName,
|
||||
S3Repository.READ_TIMEOUT_SETTING, null, CLOUD_S3.READ_TIMEOUT);
|
||||
clientConfiguration.setSocketTimeout((int)readTimeout.millis());
|
||||
|
|
|
@@ -65,11 +65,11 @@ class S3Repository extends BlobStoreRepository {

    /** The access key (ie login id) for connecting to s3. */
    public static final AffixSetting<SecureString> ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key",
        key -> SecureSetting.secureString(key, Repositories.KEY_SETTING, false));
        key -> SecureSetting.secureString(key, Repositories.KEY_SETTING));

    /** The secret key (ie password) for connecting to s3. */
    public static final AffixSetting<SecureString> SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key",
        key -> SecureSetting.secureString(key, Repositories.SECRET_SETTING, false));
        key -> SecureSetting.secureString(key, Repositories.SECRET_SETTING));

    /** An override for the s3 endpoint to connect to. */
    public static final AffixSetting<String> ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint",

@@ -89,11 +89,11 @@ class S3Repository extends BlobStoreRepository {

    /** The username of a proxy to connect to s3 through. */
    public static final AffixSetting<SecureString> PROXY_USERNAME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.username",
        key -> SecureSetting.secureString(key, AwsS3Service.PROXY_USERNAME_SETTING, false));
        key -> SecureSetting.secureString(key, AwsS3Service.PROXY_USERNAME_SETTING));

    /** The password of a proxy to connect to s3 through. */
    public static final AffixSetting<SecureString> PROXY_PASSWORD_SETTING = Setting.affixKeySetting(PREFIX, "proxy.password",
        key -> SecureSetting.secureString(key, AwsS3Service.PROXY_PASSWORD_SETTING, false));
        key -> SecureSetting.secureString(key, AwsS3Service.PROXY_PASSWORD_SETTING));

    /** The socket timeout for connecting to s3. */
    public static final AffixSetting<TimeValue> READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout",

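As an aside, the same affix pattern extends to any per-client key. The fragment below is a hypothetical illustration only: the proxy.scheme suffix and the sketch class are invented, and the "s3.client." prefix is an assumption standing in for the PREFIX constant used above.

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.AffixSetting;
import org.elasticsearch.common.settings.Setting.Property;

/** Hypothetical fragment, not part of this change: one more per-client key using the affix pattern above. */
class ExtraS3ClientSettingSketch {
    // Assumed to mirror the PREFIX constant used by the settings in this hunk.
    private static final String PREFIX = "s3.client.";

    // Would match concrete keys such as "s3.client.default.proxy.scheme".
    static final AffixSetting<String> PROXY_SCHEME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.scheme",
        key -> Setting.simpleString(key, Property.NodeScope));
}
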
@@ -98,7 +98,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
            AwsS3Service.PROXY_PORT_SETTING,
            AwsS3Service.PROXY_USERNAME_SETTING,
            AwsS3Service.PROXY_PASSWORD_SETTING,
            AwsS3Service.SIGNER_SETTING,
            AwsS3Service.READ_TIMEOUT,

            // Register S3 specific settings: cloud.aws.s3

@@ -109,7 +108,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
            AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING,
            AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING,
            AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING,
            AwsS3Service.CLOUD_S3.SIGNER_SETTING,
            AwsS3Service.CLOUD_S3.ENDPOINT_SETTING,
            AwsS3Service.CLOUD_S3.READ_TIMEOUT,

@@ -184,7 +184,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
    }

    public void testAWSDefaultConfiguration() {
        launchAWSConfigurationTest(Settings.EMPTY, Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, null, 3, false,
        launchAWSConfigurationTest(Settings.EMPTY, Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, false,
            ClientConfiguration.DEFAULT_SOCKET_TIMEOUT);
    }

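For context, a small standalone sketch (assuming the AWS SDK 1.x defaults; it is not part of the test suite) of what a bare ClientConfiguration reports for the values this test now checks without a signer argument.

import com.amazonaws.ClientConfiguration;

public class DefaultClientConfigurationSketch {
    public static void main(String[] args) {
        ClientConfiguration config = new ClientConfiguration();
        // DEFAULT_SOCKET_TIMEOUT is 50 * 1000 ms in the 1.x SDK, matching the test's expected default.
        System.out.println("default socket timeout = " + config.getSocketTimeout() + " ms");
        // No signer override is set by default; with the signer settings gone, the test helper
        // no longer takes or asserts an expectedSigner value.
        System.out.println("default signer override = " + config.getSignerOverride());
    }
}
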
@@ -200,7 +200,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            .put("s3.client.default.read_timeout", "10s")
            .build();
        launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username",
            "aws_proxy_password", null, 3, false, 10000);
            "aws_proxy_password", 3, false, 10000);
    }

    public void testAWSConfigurationWithAwsSettingsBackcompat() {

@@ -210,18 +210,16 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            .put(AwsS3Service.PROXY_PORT_SETTING.getKey(), 8080)
            .put(AwsS3Service.PROXY_USERNAME_SETTING.getKey(), "aws_proxy_username")
            .put(AwsS3Service.PROXY_PASSWORD_SETTING.getKey(), "aws_proxy_password")
            .put(AwsS3Service.SIGNER_SETTING.getKey(), "AWS3SignerType")
            .put(AwsS3Service.READ_TIMEOUT.getKey(), "10s")
            .build();
        launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username",
            "aws_proxy_password", "AWS3SignerType", 3, false, 10000);
            "aws_proxy_password", 3, false, 10000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            AwsS3Service.PROXY_USERNAME_SETTING,
            AwsS3Service.PROXY_PASSWORD_SETTING,
            AwsS3Service.PROTOCOL_SETTING,
            AwsS3Service.PROXY_HOST_SETTING,
            AwsS3Service.PROXY_PORT_SETTING,
            AwsS3Service.SIGNER_SETTING,
            AwsS3Service.READ_TIMEOUT});
    }

@@ -232,32 +230,28 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            .put(AwsS3Service.PROXY_PORT_SETTING.getKey(), 8080)
            .put(AwsS3Service.PROXY_USERNAME_SETTING.getKey(), "aws_proxy_username")
            .put(AwsS3Service.PROXY_PASSWORD_SETTING.getKey(), "aws_proxy_password")
            .put(AwsS3Service.SIGNER_SETTING.getKey(), "AWS3SignerType")
            .put(AwsS3Service.READ_TIMEOUT.getKey(), "5s")
            .put(AwsS3Service.CLOUD_S3.PROTOCOL_SETTING.getKey(), "https")
            .put(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.getKey(), "s3_proxy_host")
            .put(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.getKey(), 8081)
            .put(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.getKey(), "s3_proxy_username")
            .put(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.getKey(), "s3_proxy_password")
            .put(AwsS3Service.CLOUD_S3.SIGNER_SETTING.getKey(), "NoOpSignerType")
            .put(AwsS3Service.CLOUD_S3.READ_TIMEOUT.getKey(), "10s")
            .build();
        launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, "s3_proxy_host", 8081, "s3_proxy_username",
            "s3_proxy_password", "NoOpSignerType", 3, false, 10000);
            "s3_proxy_password", 3, false, 10000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[] {
            AwsS3Service.PROXY_USERNAME_SETTING,
            AwsS3Service.PROXY_PASSWORD_SETTING,
            AwsS3Service.PROTOCOL_SETTING,
            AwsS3Service.PROXY_HOST_SETTING,
            AwsS3Service.PROXY_PORT_SETTING,
            AwsS3Service.SIGNER_SETTING,
            AwsS3Service.READ_TIMEOUT,
            AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING,
            AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING,
            AwsS3Service.CLOUD_S3.PROTOCOL_SETTING,
            AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING,
            AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING,
            AwsS3Service.CLOUD_S3.SIGNER_SETTING,
            AwsS3Service.CLOUD_S3.READ_TIMEOUT});
    }

@@ -266,7 +260,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            .put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10)
            .build();
        launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null,
            null, null, 10, false, 50000);
            null, 10, false, 50000);
    }

    public void testRepositoryMaxRetries() {

@@ -275,7 +269,7 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            .put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10)
            .build();
        launchAWSConfigurationTest(settings, repositorySettings, Protocol.HTTPS, null, -1, null,
            null, null, 20, false, 50000);
            null, 20, false, 50000);
    }

    protected void launchAWSConfigurationTest(Settings settings,

@@ -285,7 +279,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            int expectedProxyPort,
            String expectedProxyUsername,
            String expectedProxyPassword,
            String expectedSigner,
            Integer expectedMaxRetries,
            boolean expectedUseThrottleRetries,
            int expectedReadTimeout) {

@@ -303,7 +296,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
        assertThat(configuration.getProxyPort(), is(expectedProxyPort));
        assertThat(configuration.getProxyUsername(), is(expectedProxyUsername));
        assertThat(configuration.getProxyPassword(), is(expectedProxyPassword));
        assertThat(configuration.getSignerOverride(), is(expectedSigner));
        assertThat(configuration.getMaxErrorRetry(), is(expectedMaxRetries));
        assertThat(configuration.useThrottledRetries(), is(expectedUseThrottleRetries));
        assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout));

@@ -76,8 +76,8 @@ setup:
---
"Get simple field caps":
  - skip:
      version: " - 5.99.99"
      reason: this uses a new API that has been added in 6.0
      version: " - 5.3.99"
      reason: this uses a new API that has been added in 5.4.0

  - do:
      field_caps:

@@ -117,8 +117,8 @@ setup:
---
"Get nested field caps":
  - skip:
      version: " - 5.99.99"
      reason: this uses a new API that has been added in 6.0
      version: " - 5.3.99"
      reason: this uses a new API that has been added in 5.4.0

  - do:
      field_caps:

@@ -148,8 +148,8 @@ setup:
---
"Get prefix field caps":
  - skip:
      version: " - 5.99.99"
      reason: this uses a new API that has been added in 6.0
      version: " - 5.3.99"
      reason: this uses a new API that has been added in 5.4.0

  - do:
      field_caps:

Some files were not shown because too many files have changed in this diff.