merge from master

Boaz Leskes 2016-03-25 15:30:41 +01:00
commit 91021e3019
374 changed files with 5864 additions and 6241 deletions

View File

@@ -147,7 +147,7 @@ curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
}'
</pre>
The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index.
The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get their own special index.
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
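The concrete example the README promises is cut off here. A minimal sketch of creating the per-user index with 1 shard and 1 replica using only the JDK's HTTP client — assuming a node listening on localhost:9200 and the kimchy index name from the excerpt above:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class CreatePerUserIndex {
    public static void main(String[] args) throws Exception {
        // Create the per-user index with 1 shard and 1 replica instead of the
        // default 5 shards with 1 replica described above.
        String body = "{\"settings\":{\"index\":{\"number_of_shards\":1,\"number_of_replicas\":1}}}";
        HttpURLConnection conn = (HttpURLConnection) new URL("http://localhost:9200/kimchy").openConnection();
        conn.setRequestMethod("PUT");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type", "application/json");
        try (OutputStream os = conn.getOutputStream()) {
            os.write(body.getBytes("UTF-8"));
        }
        System.out.println("HTTP " + conn.getResponseCode());
    }
}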

View File

@@ -39,9 +39,6 @@ class PluginPropertiesExtension {
@Input
String classname
@Input
boolean isolated = true
PluginPropertiesExtension(Project project) {
name = project.name
version = project.version

View File

@@ -54,12 +54,6 @@ class PluginPropertiesTask extends Copy {
if (extension.classname == null) {
throw new InvalidUserDataException('classname is a required setting for esplugin')
}
doFirst {
if (extension.isolated == false) {
String warning = "WARNING: Disabling plugin isolation in ${project.path} is deprecated and will be removed in the future"
logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}")
}
}
// configure property substitution
from(templateFile)
into(generatedResourcesDir)
@@ -80,7 +74,6 @@ class PluginPropertiesTask extends Copy {
'version': stringSnap(extension.version),
'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
'javaVersion': project.targetCompatibility as String,
'isolated': extension.isolated as String,
'classname': extension.classname
]
}

View File

@@ -358,7 +358,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]multibindings[/\\]MapBinder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]spi[/\\]InjectionPoint.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]Channels.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]io[/\\]stream[/\\]NamedWriteableRegistry.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]joda[/\\]Joda.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]Lucene.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]lucene[/\\]all[/\\]AllTermQuery.java" checks="LineLength" />
@@ -380,7 +379,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]IndexScopedSettings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]Setting.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]Settings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]PropertiesSettingsLoader.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]XContentSettingsLoader.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]TimeValue.java" checks="LineLength" />
@@ -660,7 +658,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorQuery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]InstallPluginCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesModule.java" checks="LineLength" />
@@ -1079,8 +1076,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]rounding[/\\]TimeZoneRoundingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]ScopedSettingsTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]SettingTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]JsonSettingsLoaderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]settings[/\\]loader[/\\]YamlSettingsLoaderTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]transport[/\\]BoundTransportAddressTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]DistanceUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]FuzzinessTests.java" checks="LineLength" />

View File

@@ -38,12 +38,3 @@ java.version=${javaVersion}
#
# 'elasticsearch.version' version of elasticsearch compiled against
elasticsearch.version=${elasticsearchVersion}
#
### deprecated elements for jvm plugins :
#
# 'isolated': true if the plugin should have its own classloader.
# passing false is deprecated, and only intended to support plugins
# that have hard dependencies against each other. If this is
# not specified, then the plugin is isolated by default.
isolated=${isolated}
#

View File

@@ -1,4 +1,4 @@
elasticsearch = 5.0.0
elasticsearch = 5.0.0-alpha1
lucene = 6.0.0-snapshot-f0aa4fc
# optional dependencies

View File

@@ -64,9 +64,9 @@ public class Version {
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_5_0_0_ID = 5000099;
public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final Version CURRENT = V_5_0_0;
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final Version CURRENT = V_5_0_0_alpha1;
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -79,8 +79,8 @@ public class Version {
public static Version fromId(int id) {
switch (id) {
case V_5_0_0_ID:
return V_5_0_0;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_1_ID:
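The id constants above encode a version as a decimal integer of the form MMmmrrbb, where a build slot of 99 marks a GA release and small values mark pre-releases such as alpha1 — an inference from the constants in this hunk (2030099 for 2.3.0, 5000001 for 5.0.0-alpha1), not something the diff states. A quick sketch that decodes both ids:

public class VersionIdDecode {
    public static void main(String[] args) {
        // Decode MMmmrrbb: major, minor, revision, build.
        for (int id : new int[] {2030099, 5000001}) {
            int major = id / 1000000;
            int minor = (id / 10000) % 100;
            int revision = (id / 100) % 100;
            int build = id % 100;
            System.out.printf("%d -> %d.%d.%d (build %d)%n", id, major, minor, revision, build);
        }
    }
}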

View File

@@ -173,8 +173,6 @@ import org.elasticsearch.action.search.TransportClearScrollAction;
import org.elasticsearch.action.search.TransportMultiSearchAction;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.search.TransportSearchScrollAction;
import org.elasticsearch.action.suggest.SuggestAction;
import org.elasticsearch.action.suggest.TransportSuggestAction;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
@@ -320,7 +318,6 @@ public class ActionModule extends AbstractModule {
registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
TransportShardMultiTermsVectorAction.class);
registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class);
registerAction(SuggestAction.INSTANCE, TransportSuggestAction.class);
registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class);
registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
TransportShardMultiGetAction.class);

View File

@@ -235,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
scriptStats = in.readOptionalStreamable(ScriptStats::new);
discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
ingestStats = in.readOptionalWritable(IngestStats::new);
ingestStats = in.readOptionalWriteable(IngestStats::new);
}
@Override
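The readOptionalWritable to readOptionalWriteable rename is a spelling fix; the pattern it names is a presence flag followed by the payload, so readers can reconstruct null safely. A self-contained sketch of that pattern with plain JDK streams (the names are illustrative, not Elasticsearch's API):

import java.io.*;

public class OptionalWriteableSketch {
    // Write a boolean presence flag, then the value only if it is present.
    static void writeOptionalString(DataOutputStream out, String v) throws IOException {
        out.writeBoolean(v != null);
        if (v != null) out.writeUTF(v);
    }

    // Read the flag first; absent values come back as null.
    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws Exception {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        writeOptionalString(new DataOutputStream(buf), null);
        System.out.println(readOptionalString(new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))));
    }
}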

View File

@@ -84,7 +84,7 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
}
protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
if (request.getTaskId().isSet() == false) {
if (request.getTaskId().isSet()) {
// we are only checking one task, we can optimize it
CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId());
if (task != null) {

View File

@@ -178,7 +178,7 @@ public class TaskInfo implements Writeable<TaskInfo>, ToXContent {
}
builder.dateValueField("start_time_in_millis", "start_time", startTime);
builder.timeValueField("running_time_in_nanos", "running_time", runningTimeNanos, TimeUnit.NANOSECONDS);
if (parentTaskId.isSet() == false) {
if (parentTaskId.isSet()) {
builder.field("parent_task_id", parentTaskId.toString());
}
return builder;
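Several hunks in this merge flip `isSet() == false` to `isSet()` while keeping the surrounding logic unchanged, which suggests the accessor's meaning was inverted in this merge so that it now returns true when an id is actually present. A hedged sketch of the fixed semantics (toy class, not the real TaskId):

public class TaskIdSketch {
    static final long UNSET = -1L; // assumed sentinel, for illustration
    final long id;

    TaskIdSketch(long id) { this.id = id; }

    // After the fix, isSet() answers "is an id actually present?".
    boolean isSet() { return id != UNSET; }

    public static void main(String[] args) {
        System.out.println(new TaskIdSketch(UNSET).isSet()); // false
        System.out.println(new TaskIdSketch(42).isSet());    // true
    }
}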

View File

@@ -84,7 +84,13 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
long timeoutTime = System.nanoTime() + timeout.nanos();
super.processTasks(request, operation.andThen((Task t) -> {
while (System.nanoTime() - timeoutTime < 0) {
if (taskManager.getTask(t.getId()) == null) {
Task task = taskManager.getTask(t.getId());
if (task == null) {
return;
}
if (task.getAction().startsWith(ListTasksAction.NAME)) {
// It doesn't make sense to wait for List Tasks and it can cause an infinite loop of the task waiting
// for itself or one of its child tasks
return;
}
try {
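The loop condition `System.nanoTime() - timeoutTime < 0` above is the overflow-safe deadline idiom: nanoTime values may wrap, so only differences between readings are compared, never the absolute values. A minimal runnable illustration:

public class DeadlineLoop {
    public static void main(String[] args) throws InterruptedException {
        long timeoutNanos = 100_000_000L; // 100ms, illustrative
        long deadline = System.nanoTime() + timeoutNanos;
        // Correct even if nanoTime wraps: the signed difference stays meaningful.
        while (System.nanoTime() - deadline < 0) {
            Thread.sleep(10); // poll until the deadline passes
        }
        System.out.println("deadline reached");
    }
}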

View File

@@ -302,7 +302,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
int availableProcessors;
int allocatedProcessors;
long availableMemory;
final ObjectIntHashMap<String> names;
public OsStats() {
@@ -326,15 +325,10 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
return allocatedProcessors;
}
public ByteSizeValue getAvailableMemory() {
return new ByteSizeValue(availableMemory);
}
@Override
public void readFrom(StreamInput in) throws IOException {
availableProcessors = in.readVInt();
allocatedProcessors = in.readVInt();
availableMemory = in.readLong();
int size = in.readVInt();
names.clear();
for (int i = 0; i < size; i++) {
@@ -346,7 +340,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(availableProcessors);
out.writeVInt(allocatedProcessors);
out.writeLong(availableMemory);
out.writeVInt(names.size());
for (ObjectIntCursor<String> name : names) {
out.writeString(name.key);
@@ -365,9 +358,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
static final XContentBuilderString ALLOCATED_PROCESSORS = new XContentBuilderString("allocated_processors");
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString NAMES = new XContentBuilderString("names");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
}
@@ -375,10 +365,6 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors);
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, availableMemory);
builder.endObject();
builder.startArray(Fields.NAMES);
for (ObjectIntCursor<String> name : names) {
builder.startObject();

View File

@@ -283,7 +283,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
source = in.readString();
updateAllTypes = in.readBoolean();
readTimeout(in);
concreteIndex = in.readOptionalWritable(Index::new);
concreteIndex = in.readOptionalWriteable(Index::new);
}
@Override

View File

@@ -42,7 +42,6 @@ import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.suggest.stats.SuggestStats;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesQueryCache;
@@ -109,7 +108,7 @@ public class CommonStats implements Streamable, ToXContent {
translog = new TranslogStats();
break;
case Suggest:
suggest = new SuggestStats();
// skip
break;
case RequestCache:
requestCache = new RequestCacheStats();
@@ -177,7 +176,7 @@ public class CommonStats implements Streamable, ToXContent {
translog = indexShard.translogStats();
break;
case Suggest:
suggest = indexShard.suggestStats();
// skip
break;
case RequestCache:
requestCache = indexShard.requestCache().stats();
@@ -236,9 +235,6 @@
@Nullable
public TranslogStats translog;
@Nullable
public SuggestStats suggest;
@Nullable
public RequestCacheStats requestCache;
@@ -367,14 +363,6 @@
} else {
translog.add(stats.getTranslog());
}
if (suggest == null) {
if (stats.getSuggest() != null) {
suggest = new SuggestStats();
suggest.add(stats.getSuggest());
}
} else {
suggest.add(stats.getSuggest());
}
if (requestCache == null) {
if (stats.getRequestCache() != null) {
requestCache = new RequestCacheStats();
@@ -468,11 +456,6 @@
return translog;
}
@Nullable
public SuggestStats getSuggest() {
return suggest;
}
@Nullable
public RequestCacheStats getRequestCache() {
return requestCache;
@@ -555,7 +538,6 @@
segments = SegmentsStats.readSegmentsStats(in);
}
translog = in.readOptionalStreamable(TranslogStats::new);
suggest = in.readOptionalStreamable(SuggestStats::new);
requestCache = in.readOptionalStreamable(RequestCacheStats::new);
recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
}
@@ -647,7 +629,6 @@
segments.writeTo(out);
}
out.writeOptionalStreamable(translog);
out.writeOptionalStreamable(suggest);
out.writeOptionalStreamable(requestCache);
out.writeOptionalStreamable(recoveryStats);
}
@@ -700,9 +681,6 @@
if (translog != null) {
translog.toXContent(builder, params);
}
if (suggest != null) {
suggest.toXContent(builder, params);
}
if (requestCache != null) {
requestCache.toXContent(builder, params);
}

View File

@@ -190,7 +190,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
out.writeStringArrayNullable(groups);
out.writeStringArrayNullable(fieldDataFields);
out.writeStringArrayNullable(completionDataFields);
if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
out.writeBoolean(includeSegmentFileSizes);
}
}
@@ -208,7 +208,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
groups = in.readStringArray();
fieldDataFields = in.readStringArray();
completionDataFields = in.readStringArray();
if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
includeSegmentFileSizes = in.readBoolean();
} else {
includeSegmentFileSizes = false;
@@ -244,7 +244,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
Completion("completion"),
Segments("segments"),
Translog("translog"),
Suggest("suggest"),
Suggest("suggest"), // unused
RequestCache("request_cache"),
Recovery("recovery");

View File

@@ -152,11 +152,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
return this;
}
public IndicesStatsRequestBuilder setSuggest(boolean suggest) {
request.suggest(suggest);
return this;
}
public IndicesStatsRequestBuilder setRequestCache(boolean requestCache) {
request.requestCache(requestCache);
return this;

View File

@@ -107,7 +107,7 @@ public class ShardStats implements Streamable, ToXContent {
statePath = in.readString();
dataPath = in.readString();
isCustomDataPath = in.readBoolean();
seqNoStats = in.readOptionalWritable(SeqNoStats::new);
seqNoStats = in.readOptionalWriteable(SeqNoStats::new);
}
@Override

View File

@@ -23,13 +23,14 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.ingest.core.Pipeline;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.ingest.core.CompoundProcessor;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.ingest.processor.TrackingResultProcessor.decorate;
class SimulateExecutionService {
private static final String THREAD_POOL_NAME = ThreadPool.Names.MANAGEMENT;
@@ -40,40 +41,16 @@ class SimulateExecutionService {
this.threadPool = threadPool;
}
void executeVerboseDocument(Processor processor, IngestDocument ingestDocument, List<SimulateProcessorResult> processorResultList) throws Exception {
if (processor instanceof CompoundProcessor) {
CompoundProcessor cp = (CompoundProcessor) processor;
try {
for (Processor p : cp.getProcessors()) {
executeVerboseDocument(p, ingestDocument, processorResultList);
}
} catch (Exception e) {
for (Processor p : cp.getOnFailureProcessors()) {
executeVerboseDocument(p, ingestDocument, processorResultList);
}
}
} else {
try {
processor.execute(ingestDocument);
processorResultList.add(new SimulateProcessorResult(processor.getTag(), new IngestDocument(ingestDocument)));
} catch (Exception e) {
processorResultList.add(new SimulateProcessorResult(processor.getTag(), e));
throw e;
}
}
}
SimulateDocumentResult executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean verbose) {
if (verbose) {
List<SimulateProcessorResult> processorResultList = new ArrayList<>();
IngestDocument currentIngestDocument = new IngestDocument(ingestDocument);
CompoundProcessor pipelineProcessor = new CompoundProcessor(pipeline.getProcessors(), pipeline.getOnFailureProcessors());
CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList);
try {
executeVerboseDocument(pipelineProcessor, currentIngestDocument, processorResultList);
verbosePipelineProcessor.execute(ingestDocument);
return new SimulateDocumentVerboseResult(processorResultList);
} catch (Exception e) {
return new SimulateDocumentBaseResult(e);
return new SimulateDocumentVerboseResult(processorResultList);
}
return new SimulateDocumentVerboseResult(processorResultList);
} else {
try {
pipeline.execute(ingestDocument);
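The refactor above replaces the simulator's hand-rolled recursion over CompoundProcessor with decorate(...), which wraps each processor so that simply executing the pipeline also records a per-processor result. A minimal sketch of that decoration idea (toy types, not the ingest API):

import java.util.ArrayList;
import java.util.List;

public class TrackingDecoratorSketch {
    interface Processor { void execute(StringBuilder doc) throws Exception; }

    // Wrap a processor so every execution appends an outcome to a shared list,
    // instead of re-implementing the pipeline's control flow in the simulator.
    static Processor track(Processor inner, List<String> results) {
        return doc -> {
            try {
                inner.execute(doc);
                results.add("ok: " + doc);
            } catch (Exception e) {
                results.add("failed: " + e.getMessage());
                throw e;
            }
        };
    }

    public static void main(String[] args) throws Exception {
        List<String> results = new ArrayList<>();
        Processor p = track(doc -> doc.append("!"), results);
        p.execute(new StringBuilder("doc"));
        System.out.println(results); // [ok: doc!]
    }
}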

View File

@@ -21,7 +21,6 @@ package org.elasticsearch.action.percolate;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
@@ -127,7 +126,7 @@ public class PercolateRequestBuilder extends ActionRequestBuilder<PercolateReque
/**
* Delegates to {@link PercolateSourceBuilder#addSort(SortBuilder)}
*/
public PercolateRequestBuilder addSort(SortBuilder sort) {
public PercolateRequestBuilder addSort(SortBuilder<?> sort) {
sourceBuilder().addSort(sort);
return this;
}

View File

@@ -48,13 +48,13 @@ import java.util.Map;
public class PercolateSourceBuilder extends ToXContentToBytes {
private DocBuilder docBuilder;
private QueryBuilder queryBuilder;
private QueryBuilder<?> queryBuilder;
private Integer size;
private List<SortBuilder> sorts;
private List<SortBuilder<?>> sorts;
private Boolean trackScores;
private HighlightBuilder highlightBuilder;
private List<AggregatorBuilder<?>> aggregationBuilders;
private List<PipelineAggregatorBuilder> pipelineAggregationBuilders;
private List<PipelineAggregatorBuilder<?>> pipelineAggregationBuilders;
/**
* Sets the document to run the percolate queries against.
@@ -68,7 +68,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
* Sets a query to reduce the number of percolate queries to be evaluated and score the queries that match based
* on this query.
*/
public PercolateSourceBuilder setQueryBuilder(QueryBuilder queryBuilder) {
public PercolateSourceBuilder setQueryBuilder(QueryBuilder<?> queryBuilder) {
this.queryBuilder = queryBuilder;
return this;
}
@@ -98,7 +98,7 @@
*
* By default the matching percolator queries are returned in an undefined order.
*/
public PercolateSourceBuilder addSort(SortBuilder sort) {
public PercolateSourceBuilder addSort(SortBuilder<?> sort) {
if (sorts == null) {
sorts = new ArrayList<>();
}
@@ -137,7 +137,7 @@
/**
* Add an aggregation definition.
*/
public PercolateSourceBuilder addAggregation(PipelineAggregatorBuilder aggregationBuilder) {
public PercolateSourceBuilder addAggregation(PipelineAggregatorBuilder<?> aggregationBuilder) {
if (pipelineAggregationBuilders == null) {
pipelineAggregationBuilders = new ArrayList<>();
}
@@ -160,10 +160,8 @@
}
if (sorts != null) {
builder.startArray("sort");
for (SortBuilder sort : sorts) {
builder.startObject();
for (SortBuilder<?> sort : sorts) {
sort.toXContent(builder, params);
builder.endObject();
}
builder.endArray();
}
@@ -182,7 +180,7 @@
}
}
if (pipelineAggregationBuilders != null) {
for (PipelineAggregatorBuilder aggregation : pipelineAggregationBuilders) {
for (PipelineAggregatorBuilder<?> aggregation : pipelineAggregationBuilders) {
aggregation.toXContent(builder, params);
}
}
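The changes in this file replace raw types such as SortBuilder with wildcard types such as SortBuilder<?>, which keeps generic checking on while still accepting any concrete builder. A small illustration (toy interface, not the real SortBuilder):

import java.util.ArrayList;
import java.util.List;

public class WildcardOverRaw {
    interface SortBuilder<T> { T missing(Object value); }

    public static void main(String[] args) {
        // A raw List<SortBuilder> erases element type checks entirely; the
        // wildcard keeps them while accepting any SortBuilder<T>.
        List<SortBuilder<?>> sorts = new ArrayList<>();
        SortBuilder<String> byScore = value -> "missing=" + value; // toy builder
        sorts.add(byScore); // OK: SortBuilder<String> is a SortBuilder<?>
        System.out.println(sorts.size());
    }
}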

View File

@@ -295,6 +295,13 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
return this.requestCache;
}
/**
* @return true if the request only has suggest
*/
public boolean isSuggestOnly() {
return source != null && source.isSuggestOnly();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

View File

@@ -38,6 +38,7 @@ import java.util.Map;
import java.util.Set;
import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;
/**
*
@@ -72,6 +73,17 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
searchRequest.searchType(QUERY_AND_FETCH);
}
if (searchRequest.isSuggestOnly()) {
// disable request cache if we have only suggest
searchRequest.requestCache(false);
switch (searchRequest.searchType()) {
case DFS_QUERY_AND_FETCH:
case DFS_QUERY_THEN_FETCH:
// convert to Q_T_F if we have only suggest
searchRequest.searchType(QUERY_THEN_FETCH);
break;
}
}
} catch (IndexNotFoundException | IndexClosedException e) {
// ignore these failures, we will notify the search response if its really the case from the actual action
} catch (Exception e) {

View File

@@ -1,60 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.suggest;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.suggest.SuggestBuilder;
import java.io.IOException;
/**
* Internal suggest request executed directly against a specific index shard.
*/
public final class ShardSuggestRequest extends BroadcastShardRequest {
private SuggestBuilder suggest;
public ShardSuggestRequest() {
}
ShardSuggestRequest(ShardId shardId, SuggestRequest request) {
super(shardId, request);
this.suggest = request.suggest();
}
public SuggestBuilder suggest() {
return suggest;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
suggest = SuggestBuilder.PROTOTYPE.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
suggest.writeTo(out);
}
}

View File

@@ -1,61 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.suggest;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
/**
* Internal suggest response of a shard suggest request executed directly against a specific shard.
*/
class ShardSuggestResponse extends BroadcastShardResponse {
private final Suggest suggest;
ShardSuggestResponse() {
this.suggest = new Suggest();
}
ShardSuggestResponse(ShardId shardId, Suggest suggest) {
super(shardId);
this.suggest = suggest;
}
public Suggest getSuggest() {
return this.suggest;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
suggest.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
suggest.writeTo(out);
}
}

View File

@@ -1,46 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.suggest;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.search.suggest.Suggest;
/**
*/
public class SuggestAction extends Action<SuggestRequest, SuggestResponse, SuggestRequestBuilder> {
public static final SuggestAction INSTANCE = new SuggestAction();
public static final String NAME = "indices:data/read/suggest";
private SuggestAction() {
super(NAME);
}
@Override
public SuggestResponse newResponse() {
return new SuggestResponse(new Suggest());
}
@Override
public SuggestRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new SuggestRequestBuilder(client, this);
}
}

View File

@@ -1,154 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.suggest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.search.suggest.SuggestBuilder;
import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
/**
* A request to get suggestions for corrections of phrases. Best created with
* {@link org.elasticsearch.client.Requests#suggestRequest(String...)}.
* <p>
* The request requires the suggest query source to be set using
* {@link #suggest(org.elasticsearch.search.suggest.SuggestBuilder)}
*
* @see SuggestResponse
* @see org.elasticsearch.client.Client#suggest(SuggestRequest)
* @see org.elasticsearch.client.Requests#suggestRequest(String...)
* @see org.elasticsearch.search.suggest.SuggestBuilders
*/
public final class SuggestRequest extends BroadcastRequest<SuggestRequest> {
@Nullable
private String routing;
@Nullable
private String preference;
private SuggestBuilder suggest;
public SuggestRequest() {
}
/**
* Constructs a new suggest request against the provided indices. No indices provided means it will
* run against all indices.
*/
public SuggestRequest(String... indices) {
super(indices);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
return validationException;
}
/**
* The suggestion query to get correction suggestions for
*/
public SuggestBuilder suggest() {
return suggest;
}
/**
* set a new source for the suggest query
*/
public SuggestRequest suggest(SuggestBuilder suggest) {
Objects.requireNonNull(suggest, "suggest must not be null");
this.suggest = suggest;
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public String routing() {
return this.routing;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public SuggestRequest routing(String routing) {
this.routing = routing;
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public SuggestRequest routing(String... routings) {
this.routing = Strings.arrayToCommaDelimitedString(routings);
return this;
}
public SuggestRequest preference(String preference) {
this.preference = preference;
return this;
}
public String preference() {
return this.preference;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
routing = in.readOptionalString();
preference = in.readOptionalString();
suggest = SuggestBuilder.PROTOTYPE.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
Objects.requireNonNull(suggest, "suggest must not be null");
super.writeTo(out);
out.writeOptionalString(routing);
out.writeOptionalString(preference);
suggest.writeTo(out);
}
@Override
public String toString() {
Objects.requireNonNull(suggest, "suggest must not be null");
String sSource = "_na_";
try {
XContentBuilder builder = JsonXContent.contentBuilder();
builder = suggest.toXContent(builder, ToXContent.EMPTY_PARAMS);
sSource = builder.string();
} catch (Exception e) {
// ignore
}
return "[" + Arrays.toString(indices) + "]" + ", suggest[" + sSource + "]";
}
}

View File

@@ -1,85 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.suggest;
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestionBuilder;
/**
* A suggest action request builder.
*/
public class SuggestRequestBuilder extends BroadcastOperationRequestBuilder<SuggestRequest, SuggestResponse, SuggestRequestBuilder> {
final SuggestBuilder suggest = new SuggestBuilder();
public SuggestRequestBuilder(ElasticsearchClient client, SuggestAction action) {
super(client, action, new SuggestRequest());
}
/**
* Add a definition for suggestions to the request
* @param name the name for the suggestion that will also be used in the response
* @param suggestion the suggestion configuration
*/
public SuggestRequestBuilder addSuggestion(String name, SuggestionBuilder<?> suggestion) {
suggest.addSuggestion(name, suggestion);
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public SuggestRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
public SuggestRequestBuilder setSuggestText(String globalText) {
this.suggest.setGlobalText(globalText);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards,
* _shards:x,y to operate on shards x &amp; y, or a custom value, which guarantees that the same order
* will be used across different requests.
*/
public SuggestRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public SuggestRequestBuilder setRouting(String... routing) {
request.routing(routing);
return this;
}
@Override
protected SuggestRequest beforeExecute(SuggestRequest request) {
request.suggest(suggest);
return request;
}
}

View File

@@ -1,82 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.suggest;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
import java.util.List;
import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
/**
* The response of the suggest action.
*/
public final class SuggestResponse extends BroadcastResponse {
private final Suggest suggest;
SuggestResponse(Suggest suggest) {
this.suggest = suggest;
}
SuggestResponse(Suggest suggest, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.suggest = suggest;
}
/**
* The Suggestions of the phrase.
*/
public Suggest getSuggest() {
return suggest;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
this.suggest.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
this.suggest.writeTo(out);
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
suggest.toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
}

View File

@@ -1,152 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.suggest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.suggest.stats.ShardSuggestMetric;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestPhase;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;
/**
* Defines the transport of a suggestion request across the cluster
*/
public class TransportSuggestAction
extends TransportBroadcastAction<SuggestRequest, SuggestResponse, ShardSuggestRequest, ShardSuggestResponse> {
private final IndicesService indicesService;
private final SuggestPhase suggestPhase;
@Inject
public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService, SuggestPhase suggestPhase,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
SuggestRequest::new, ShardSuggestRequest::new, ThreadPool.Names.SUGGEST);
this.indicesService = indicesService;
this.suggestPhase = suggestPhase;
}
@Override
protected ShardSuggestRequest newShardRequest(int numShards, ShardRouting shard, SuggestRequest request) {
return new ShardSuggestRequest(shard.shardId(), request);
}
@Override
protected ShardSuggestResponse newShardResponse() {
return new ShardSuggestResponse();
}
@Override
protected GroupShardsIterator shards(ClusterState clusterState, SuggestRequest request, String[] concreteIndices) {
Map<String, Set<String>> routingMap =
indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, SuggestRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, SuggestRequest countRequest, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected SuggestResponse newResponse(SuggestRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<>();
List<ShardOperationFailedException> shardFailures = null;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
Suggest suggest = ((ShardSuggestResponse) shardResponse).getSuggest();
Suggest.group(groupedSuggestions, suggest);
successfulShards++;
}
}
return new SuggestResponse(new Suggest(Suggest.reduce(groupedSuggestions)), shardsResponses.length(),
successfulShards, failedShards, shardFailures);
}
@Override
protected ShardSuggestResponse shardOperation(ShardSuggestRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().id());
ShardSuggestMetric suggestMetric = indexShard.getSuggestMetric();
suggestMetric.preSuggest();
long startTime = System.nanoTime();
try (Engine.Searcher searcher = indexShard.acquireSearcher("suggest")) {
SuggestBuilder suggest = request.suggest();
if (suggest != null) {
final SuggestionSearchContext context = suggest.build(indexService.newQueryShardContext());
final Suggest result = suggestPhase.execute(context, searcher.searcher());
return new ShardSuggestResponse(request.shardId(), result);
}
return new ShardSuggestResponse(request.shardId(), new Suggest());
} catch (Throwable ex) {
throw new ElasticsearchException("failed to execute suggest", ex);
} finally {
suggestMetric.postSuggest(System.nanoTime() - startTime);
}
}
}

View File

@@ -1,23 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Suggest action.
*/
package org.elasticsearch.action.suggest;

View File

@@ -59,7 +59,7 @@ public class ReplicationTask extends Task {
}
public static class Status implements Task.Status {
public static final Status PROTOTYPE = new Status("prototype");
public static final String NAME = "replication";
private final String phase;
@@ -73,7 +73,7 @@ public class ReplicationTask extends Task {
@Override
public String getWriteableName() {
return "replication";
return NAME;
}
@Override
@@ -88,10 +88,5 @@ public class ReplicationTask extends Task {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(phase);
}
@Override
public Status readFrom(StreamInput in) throws IOException {
return new Status(in);
}
}
}

View File

@@ -52,7 +52,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
@@ -359,32 +358,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
});
} else {
try {
failReplicaIfNeeded(t);
} catch (Throwable unexpected) {
logger.error("{} unexpected error while failing replica", unexpected, request.shardId().id());
} finally {
responseWithFailure(t);
}
}
}
private void failReplicaIfNeeded(Throwable t) {
Index index = request.shardId().getIndex();
int shardId = request.shardId().id();
logger.trace("failure on replica [{}][{}], action [{}], request [{}]", t, index, shardId, actionName, request);
if (ignoreReplicaException(t) == false) {
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
return;
}
IndexShard indexShard = indexService.getShardOrNull(shardId);
if (indexShard == null) {
logger.debug("ignoring failed replica {}[{}] because index was already removed.", index, shardId);
return;
}
indexShard.failShard(actionName + " failed on replica", t);
}
}
@@ -1101,8 +1075,13 @@
totalShards,
success.get(),
failuresArray
)
)
);
if (logger.isTraceEnabled()) {
logger.trace("finished replicating action [{}], request [{}], shardInfo [{}]", actionName, replicaRequest,
finalResponse.getShardInfo());
}
try {
channel.sendResponse(finalResponse);
} catch (IOException responseException) {
@@ -1127,7 +1106,7 @@
boolean isRelocated();
void failShard(String reason, @Nullable Throwable e);
ShardRouting routingEntry();
/** returns the primary term of the current opration */
/** returns the primary term of the current operation */
long opPrimaryTerm();
}
@@ -1175,7 +1154,7 @@
@Override
public long opPrimaryTerm() {
return routingEntry().primaryTerm();
return indexShard.getPrimaryTerm();
}
}

View File

@@ -60,7 +60,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (taskId.isSet() == false && nodesIds.length > 0) {
if (taskId.isSet() && nodesIds.length > 0) {
validationException = addValidationError("task id cannot be used together with node ids",
validationException);
}
@@ -165,12 +165,12 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends
if (getActions() != null && getActions().length > 0 && Regex.simpleMatch(getActions(), task.getAction()) == false) {
return false;
}
if (getTaskId().isSet() == false) {
if (getTaskId().isSet()) {
if(getTaskId().getId() != task.getId()) {
return false;
}
}
if (parentTaskId.isSet() == false) {
if (parentTaskId.isSet()) {
if (parentTaskId.equals(task.getParentTaskId()) == false) {
return false;
}

View File

@@ -125,14 +125,14 @@ public abstract class TransportTasksAction<
protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) {
if (request.getTaskId().isSet()) {
return clusterState.nodes().resolveNodesIds(request.getNodesIds());
} else {
return new String[]{request.getTaskId().getNodeId()};
} else {
return clusterState.nodes().resolveNodesIds(request.getNodesIds());
}
}
protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
if (request.getTaskId().isSet() == false) {
if (request.getTaskId().isSet()) {
// we are only checking one task, we can optimize it
Task task = taskManager.getTask(request.getTaskId().getId());
if (task != null) {

View File

@@ -25,6 +25,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.transport.TransportSettings;
@@ -39,7 +40,6 @@ import java.util.Set;
/**
* We enforce limits once any network host is configured. In this case we assume the node is running in production
* and all production limit checks must pass. This should be extended as we go to settings like:
* - discovery.zen.minimum_master_nodes
* - discovery.zen.ping.unicast.hosts is set if we use zen disco
* - ensure we can write in all data directories
* - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
@@ -114,16 +114,19 @@ final class BootstrapCheck {
}
// the list of checks to execute
private static List<Check> checks(final Settings settings) {
static List<Check> checks(final Settings settings) {
final List<Check> checks = new ArrayList<>();
final FileDescriptorCheck fileDescriptorCheck
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings)));
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
checks.add(new MaxSizeVirtualMemoryCheck());
if (Constants.LINUX || Constants.MAC_OS_X) {
checks.add(new MaxSizeVirtualMemoryCheck());
}
checks.add(new MinMasterNodesCheck(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(settings)));
return Collections.unmodifiableList(checks);
}
@@ -184,10 +187,10 @@ final class BootstrapCheck {
@Override
public final String errorMessage() {
return String.format(
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit
Locale.ROOT,
"max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
getMaxFileDescriptorCount(),
limit
);
}
@@ -224,6 +227,26 @@ final class BootstrapCheck {
}
static class MinMasterNodesCheck implements Check {
final boolean minMasterNodesIsSet;
MinMasterNodesCheck(boolean minMasterNodesIsSet) {
this.minMasterNodesIsSet = minMasterNodesIsSet;
}
@Override
public boolean check() {
return minMasterNodesIsSet == false;
}
@Override
public String errorMessage() {
return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
"] to a majority of the number of master eligible nodes in your cluster.";
}
}
static class MaxNumberOfThreadsCheck implements Check {
private final long maxNumberOfThreadsThreshold = 1 << 11;
@@ -254,7 +277,7 @@ final class BootstrapCheck {
@Override
public boolean check() {
return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != JNACLibrary.RLIM_INFINITY;
return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity();
}
@Override
@@ -266,6 +289,11 @@ final class BootstrapCheck {
BootstrapInfo.getSystemProperties().get("user.name"));
}
// visible for testing
long getRlimInfinity() {
return JNACLibrary.RLIM_INFINITY;
}
// visible for testing
long getMaxSizeVirtualMemory() {
return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
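Reading the new MinMasterNodesCheck above suggests the convention (an inference from this diff, not stated in it): check() returns true when the node is misconfigured, and errorMessage() tells the operator what to change. A standalone sketch of that convention with the new minimum_master_nodes check:

import java.util.ArrayList;
import java.util.List;

public class BootstrapChecksSketch {
    interface Check { boolean check(); String errorMessage(); }

    public static void main(String[] args) {
        boolean minMasterNodesIsSet = false; // e.g. discovery.zen.minimum_master_nodes missing
        List<Check> checks = new ArrayList<>();
        checks.add(new Check() {
            // true means the check FAILED, mirroring MinMasterNodesCheck above.
            public boolean check() { return minMasterNodesIsSet == false; }
            public String errorMessage() {
                return "please set [discovery.zen.minimum_master_nodes] to a majority of master eligible nodes.";
            }
        });
        for (Check c : checks) {
            if (c.check()) System.err.println(c.errorMessage());
        }
    }
}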

View File

@@ -76,6 +76,9 @@ class Elasticsearch extends Command {
@Override
protected void execute(Terminal terminal, OptionSet options) throws Exception {
if (options.nonOptionArguments().isEmpty() == false) {
throw new UserError(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
}
if (options.has(versionOption)) {
if (options.has(daemonizeOption) || options.has(pidfileOption)) {
throw new UserError(ExitCodes.USAGE, "Elasticsearch version option is mutually exclusive with any other option");

View File

@@ -68,9 +68,6 @@ import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchScrollRequestBuilder;
import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.action.suggest.SuggestRequestBuilder;
import org.elasticsearch.action.suggest.SuggestResponse;
import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
import org.elasticsearch.action.termvectors.MultiTermVectorsRequestBuilder;
import org.elasticsearch.action.termvectors.MultiTermVectorsResponse;
@@ -367,29 +364,6 @@ public interface Client extends ElasticsearchClient, Releasable {
*/
MultiGetRequestBuilder prepareMultiGet();
/**
* Suggestion matching a specific phrase.
*
* @param request The suggest request
* @return The result future
* @see Requests#suggestRequest(String...)
*/
ActionFuture<SuggestResponse> suggest(SuggestRequest request);
/**
* Suggestions matching a specific phrase.
*
* @param request The suggest request
* @param listener A listener to be notified of the result
* @see Requests#suggestRequest(String...)
*/
void suggest(SuggestRequest request, ActionListener<SuggestResponse> listener);
/**
* Suggestions matching a specific phrase.
*/
SuggestRequestBuilder prepareSuggest(String... indices);
/**
* Search across one or more indices and one or more types with a query.
*

View File

@ -60,9 +60,7 @@ import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.search.suggest.SuggestBuilder;
/**
* A handy one stop shop for creating requests (make sure to import static this class).
@ -126,16 +124,6 @@ public class Requests {
return new GetRequest(index);
}
/**
* Creates a suggest request for getting suggestions from provided <code>indices</code>.
* The suggest query has to be set using {@link org.elasticsearch.action.suggest.SuggestRequest#suggest(SuggestBuilder)}.
* @param indices The indices to suggest from. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
* @see org.elasticsearch.client.Client#suggest(org.elasticsearch.action.suggest.SuggestRequest)
*/
public static SuggestRequest suggestRequest(String... indices) {
return new SuggestRequest(indices);
}
/**
* Creates a search request against one or more indices. Note, the search source must be set either using the
* actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}.

View File

@ -314,10 +314,6 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollAction;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.SearchScrollRequestBuilder;
import org.elasticsearch.action.suggest.SuggestAction;
import org.elasticsearch.action.suggest.SuggestRequest;
import org.elasticsearch.action.suggest.SuggestRequestBuilder;
import org.elasticsearch.action.suggest.SuggestResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.action.termvectors.MultiTermVectorsAction;
@ -660,21 +656,6 @@ public abstract class AbstractClient extends AbstractComponent implements Client
return new MultiSearchRequestBuilder(this, MultiSearchAction.INSTANCE);
}
@Override
public ActionFuture<SuggestResponse> suggest(final SuggestRequest request) {
return execute(SuggestAction.INSTANCE, request);
}
@Override
public void suggest(final SuggestRequest request, final ActionListener<SuggestResponse> listener) {
execute(SuggestAction.INSTANCE, request, listener);
}
@Override
public SuggestRequestBuilder prepareSuggest(String... indices) {
return new SuggestRequestBuilder(this, SuggestAction.INSTANCE).setIndices(indices);
}
@Override
public ActionFuture<TermVectorsResponse> termVectors(final TermVectorsRequest request) {
return execute(TermVectorsAction.INSTANCE, request);
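With the standalone suggest action gone from the client, suggestions presumably travel through the search API instead. A minimal sketch, assuming the 5.0-era SuggestBuilder and SearchRequestBuilder signatures (index, suggestion name, and field are illustrative):

SearchResponse response = client.prepareSearch("twitter")
        .suggest(new SuggestBuilder()
                .addSuggestion("user-suggest", SuggestBuilders.termSuggestion("user").text("kimchy")))
        .get();
Suggest suggest = response.getSuggest();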

View File

@ -63,7 +63,7 @@ import java.util.Set;
/**
* Represents the current state of the cluster.
*
* <p>
* The cluster state object is immutable with an
* exception of the {@link RoutingNodes} structure, which is built on demand from the {@link RoutingTable},
* and cluster state {@link #status}, which is updated during cluster state publishing and applying
@ -74,7 +74,7 @@ import java.util.Set;
* the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish}
* method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
* publishing mechanism can be overridden by other discovery.
*
* <p>
* The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state
* differences instead of the entire state on each change. The publishing mechanism should only send differences
* to a node if this node was present in the previous version of the cluster state. If a node is not present was
@ -281,6 +281,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
sb.append("state uuid: ").append(stateUUID).append("\n");
sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
sb.append("meta data version: ").append(metaData.version()).append("\n");
for (IndexMetaData indexMetaData : metaData) {
final String TAB = " ";
sb.append(TAB).append(indexMetaData.getIndex());
sb.append(": v[").append(indexMetaData.getVersion()).append("]\n");
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
sb.append(TAB).append(TAB).append(shard).append(": ");
sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], ");
sb.append("a_ids ").append(indexMetaData.activeAllocationIds(shard)).append("\n");
}
}
sb.append(blocks().prettyPrint());
sb.append(nodes().prettyPrint());
sb.append(routingTable().prettyPrint());
@ -477,7 +487,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endArray();
builder.startObject("primary_terms");
builder.startObject(IndexMetaData.KEY_PRIMARY_TERMS);
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
builder.field(Integer.toString(shard), indexMetaData.primaryTerm(shard));
}
@ -493,7 +503,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endObject();
// index metdata data
// index metadata
builder.endObject();
}
builder.endObject();

View File

@ -30,7 +30,8 @@ import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
@ -220,14 +221,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
.numberOfShards(1).numberOfReplicas(0).build();
public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
public static final String INDEX_STATE_FILE_PREFIX = "state-";
static final String KEY_VERSION = "version";
static final String KEY_SETTINGS = "settings";
static final String KEY_STATE = "state";
static final String KEY_MAPPINGS = "mappings";
static final String KEY_ALIASES = "aliases";
static final String KEY_PRIMARY_TERMS = "primary_terms";
public static final String KEY_PRIMARY_TERMS = "primary_terms";
public static final String INDEX_STATE_FILE_PREFIX = "state-";
private final int numberOfShards;
private final int numberOfReplicas;
@ -312,8 +313,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
/**
* The term of the current selected primary. This is a non-negative number incremented when
* a primary shard is assigned after a full cluster restart (see {@link ShardRouting#initialize(java.lang.String, java.lang.String, long)}
* or a replica shard is promoted to a primary (see {@link ShardRouting#moveToPrimary()}).
* a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary
* See {@link AllocationService#updateMetaDataWithRoutingTable(MetaData, RoutingTable, RoutingTable)}.
**/
public long primaryTerm(int shardId) {
return this.primaryTerms[shardId];
@ -541,17 +542,17 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}
@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
DiffableUtils.StringSetValueSerializer.getInstance());
}
@ -903,8 +904,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
}
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {

View File

@ -94,22 +94,6 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
return index;
}
/**
* creates a new {@link IndexRoutingTable} with all shard versions &amp; primary terms set to the highest found.
* This allows synchronizing {@link ShardRouting#primaryTerm()} while we work on
* the individual shards without worrying about synchronization between {@link ShardRouting} instances. This method
* takes care of it.
*
* @return new {@link IndexRoutingTable}
*/
public IndexRoutingTable normalizePrimaryTerms() {
IndexRoutingTable.Builder builder = new Builder(this.index);
for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
builder.addIndexShard(cursor.value.normalizePrimaryTerms());
}
return builder.build();
}
public void validate(RoutingTableValidation validation, MetaData metaData) {
if (!metaData.hasIndex(index.getName())) {
validation.addIndexFailure(index.getName(), "Exists in routing but does not exist in metadata");
@ -426,12 +410,11 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId));
for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
final long primaryTerm = indexMetaData.primaryTerm(shardId);
if (asNew && ignoreShards.contains(shardId)) {
// This shard wasn't completely snapshotted - restore it as a new shard
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, primaryTerm, i == 0, unassignedInfo));
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
} else {
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, i == 0 ? restoreSource : null, primaryTerm, i == 0, unassignedInfo));
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, i == 0 ? restoreSource : null, i == 0, unassignedInfo));
}
}
shards.put(shardId, indexShardRoutingBuilder.build());
@ -448,10 +431,9 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
}
for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
final long primaryTerm = indexMetaData.primaryTerm(shardId);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId));
for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, primaryTerm, i == 0, unassignedInfo));
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
}
shards.put(shardId, indexShardRoutingBuilder.build());
}
@ -462,11 +444,9 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
for (IntCursor cursor : shards.keys()) {
int shardId = cursor.value;
// version 0, will get updated when reroute happens
final IndexShardRoutingTable shardRoutingTable = shards.get(shardId);
ShardRouting shard = ShardRouting.newUnassigned(index, shardId, null, shardRoutingTable.primary.primaryTerm(), false,
new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
ShardRouting shard = ShardRouting.newUnassigned(index, shardId, null, false, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
shards.put(shardId,
new IndexShardRoutingTable.Builder(shardRoutingTable).addShard(shard).build()
new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()
);
}
return this;

View File

@ -119,40 +119,6 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
this.allInitializingShards = Collections.unmodifiableList(allInitializingShards);
}
/**
* Normalizes all shard routings to the same (highest found) version &amp; primary terms.
*/
public IndexShardRoutingTable normalizePrimaryTerms() {
if (shards.isEmpty()) {
return this;
}
if (shards.size() == 1) {
return this;
}
long highestPrimaryTerm = shards.get(0).primaryTerm();
boolean requiresNormalization = false;
for (int i = 1; i < shards.size(); i++) {
final long primaryTerm = shards.get(i).primaryTerm();
if (highestPrimaryTerm != primaryTerm) {
requiresNormalization = true;
}
highestPrimaryTerm = Math.max(highestPrimaryTerm, primaryTerm);
}
if (!requiresNormalization) {
return this;
}
List<ShardRouting> shardRoutings = new ArrayList<>(shards.size());
for (int i = 0; i < shards.size(); i++) {
if (shards.get(i).primaryTerm() == highestPrimaryTerm) {
shardRoutings.add(shards.get(i));
} else {
shardRoutings.add(new ShardRouting(shards.get(i), highestPrimaryTerm));
}
}
return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shardRoutings));
}
/**
* Returns the shards id
*
@ -424,21 +390,13 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IndexShardRoutingTable that = (IndexShardRoutingTable) o;
if (!shardId.equals(that.shardId)) {
return false;
}
if (!shards.equals(that.shards)) {
return false;
}
if (!shardId.equals(that.shardId)) return false;
if (!shards.equals(that.shards)) return false;
return true;
}

View File

@ -702,7 +702,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Initializes the current unassigned shard and moves it from the unassigned list.
* If a primary is initialized, its term is incremented.
*
* @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/

View File

@ -586,10 +586,6 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
if (indicesRouting == null) {
throw new IllegalStateException("once build is called the builder cannot be reused");
}
// normalize the versions right before we build it...
for (ObjectCursor<IndexRoutingTable> indexRoutingTable : indicesRouting.values()) {
indicesRouting.put(indexRoutingTable.value.getIndex().getName(), indexRoutingTable.value.normalizePrimaryTerms());
}
RoutingTable table = new RoutingTable(version, indicesRouting.build());
indicesRouting = null;
return table;

View File

@ -50,7 +50,6 @@ public final class ShardRouting implements Streamable, ToXContent {
private String currentNodeId;
private String relocatingNodeId;
private boolean primary;
private long primaryTerm;
private ShardRoutingState state;
private RestoreSource restoreSource;
private UnassignedInfo unassignedInfo;
@ -65,11 +64,7 @@ public final class ShardRouting implements Streamable, ToXContent {
}
public ShardRouting(ShardRouting copy) {
this(copy, copy.primaryTerm());
}
public ShardRouting(ShardRouting copy, long primaryTerm) {
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), primaryTerm, copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
}
/**
@ -77,13 +72,12 @@ public final class ShardRouting implements Streamable, ToXContent {
* by either this class or tests. Visible for testing.
*/
ShardRouting(Index index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, long primaryTerm, boolean primary, ShardRoutingState state,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal, long expectedShardSize) {
this.index = index;
this.shardId = shardId;
this.currentNodeId = currentNodeId;
this.relocatingNodeId = relocatingNodeId;
this.primaryTerm = primaryTerm;
this.primary = primary;
this.state = state;
this.asList = Collections.singletonList(this);
@ -106,8 +100,8 @@ public final class ShardRouting implements Streamable, ToXContent {
/**
* Creates a new unassigned shard.
*/
public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, long primaryTerm, boolean primary, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, null, null, restoreSource, primaryTerm, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
public static ShardRouting newUnassigned(Index index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
public Index index() {
@ -207,8 +201,8 @@ public final class ShardRouting implements Streamable, ToXContent {
*/
public ShardRouting buildTargetRelocatingShard() {
assert relocating();
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primaryTerm, primary,
ShardRoutingState.INITIALIZING, unassignedInfo, AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, unassignedInfo,
AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
}
/**
@ -242,16 +236,6 @@ public final class ShardRouting implements Streamable, ToXContent {
return this.primary;
}
/**
* Returns the term of the current primary shard for this shard.
* The term is incremented with every primary promotion/initial assignment.
*
* See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)} for more info.
*/
public long primaryTerm() {
return this.primaryTerm;
}
/**
* The shard state.
*/
@ -282,7 +266,7 @@ public final class ShardRouting implements Streamable, ToXContent {
return false;
}
if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0)) {
if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
// when no shards with this id have ever been active for this index
return false;
}
@ -325,7 +309,6 @@ public final class ShardRouting implements Streamable, ToXContent {
}
primary = in.readBoolean();
primaryTerm = in.readVLong();
state = ShardRoutingState.fromValue(in.readByte());
restoreSource = RestoreSource.readOptionalRestoreSource(in);
@ -370,7 +353,6 @@ public final class ShardRouting implements Streamable, ToXContent {
}
out.writeBoolean(primary);
out.writeVLong(primaryTerm);
out.writeByte(state.value());
if (restoreSource != null) {
@ -427,7 +409,7 @@ public final class ShardRouting implements Streamable, ToXContent {
}
/**
* Initializes an unassigned shard on a node. If the shard is primary, its term is incremented.
* Initializes an unassigned shard on a node.
*
* @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
@ -437,9 +419,6 @@ public final class ShardRouting implements Streamable, ToXContent {
assert relocatingNodeId == null : this;
state = ShardRoutingState.INITIALIZING;
currentNodeId = nodeId;
if (primary) {
primaryTerm++;
}
if (existingAllocationId == null) {
allocationId = AllocationId.newInitializing();
} else {
@ -517,7 +496,6 @@ public final class ShardRouting implements Streamable, ToXContent {
throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
}
primary = true;
primaryTerm++;
}
/**
@ -579,9 +557,6 @@ public final class ShardRouting implements Streamable, ToXContent {
assert b == false || this.primary == other.primary :
"ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]";
assert b == false || this.primaryTerm == other.primaryTerm :
"ShardRouting is a relocation target but primary term is different. This [" + this + "], target [" + other + "]";
return b;
}
@ -609,13 +584,10 @@ public final class ShardRouting implements Streamable, ToXContent {
assert b == false || this.primary == other.primary :
"ShardRouting is a relocation source but primary flag is different. This [" + this + "], target [" + other + "]";
assert b == false || this.primaryTerm == other.primaryTerm :
"ShardRouting is a relocation source but primary term is different. This [" + this + "], target [" + other + "]";
return b;
}
/** returns true if the current routing is identical to the other routing in all but meta fields, i.e., version, primary term and unassigned info */
/** returns true if the current routing is identical to the other routing in all but meta fields, i.e., version and unassigned info */
public boolean equalsIgnoringMetaData(ShardRouting other) {
if (primary != other.primary) {
return false;
@ -656,9 +628,6 @@ public final class ShardRouting implements Streamable, ToXContent {
if (unassignedInfo != null ? !unassignedInfo.equals(that.unassignedInfo) : that.unassignedInfo != null) {
return false;
}
if (primaryTerm != that.primaryTerm) {
return false;
}
return equalsIgnoringMetaData(that);
}
@ -675,7 +644,6 @@ public final class ShardRouting implements Streamable, ToXContent {
result = 31 * result + (currentNodeId != null ? currentNodeId.hashCode() : 0);
result = 31 * result + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
result = 31 * result + (primary ? 1 : 0);
result = 31 * result + Long.hashCode(primaryTerm);
result = 31 * result + (state != null ? state.hashCode() : 0);
result = 31 * result + (restoreSource != null ? restoreSource.hashCode() : 0);
result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0);
@ -706,7 +674,6 @@ public final class ShardRouting implements Streamable, ToXContent {
} else {
sb.append("[R]");
}
sb.append(", t[").append(primaryTerm).append("]");
if (this.restoreSource != null) {
sb.append(", restoring[" + restoreSource + "]");
}
@ -728,7 +695,6 @@ public final class ShardRouting implements Streamable, ToXContent {
builder.startObject()
.field("state", state())
.field("primary", primary())
.field("primary_term", primaryTerm())
.field("node", currentNodeId())
.field("relocating_node", relocatingNodeId())
.field("shard", shardId().id())

View File

@ -42,6 +42,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.index.shard.ShardId;
import java.util.ArrayList;
import java.util.Collections;
@ -98,7 +99,7 @@ public class AllocationService extends AbstractComponent {
if (withReroute) {
reroute(allocation);
}
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
logClusterHealthStateChange(
@ -110,37 +111,41 @@ public class AllocationService extends AbstractComponent {
}
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) {
return buildChangedResult(metaData, routingNodes, new RoutingExplanations());
protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes) {
return buildChangedResult(oldMetaData, oldRoutingTable, newRoutingNodes, new RoutingExplanations());
}
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes, RoutingExplanations explanations) {
final RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build();
MetaData newMetaData = updateMetaDataWithRoutingTable(metaData, routingTable);
return new RoutingAllocation.Result(true, routingTable.validateRaiseException(newMetaData), newMetaData, explanations);
protected RoutingAllocation.Result buildChangedResult(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingNodes newRoutingNodes,
RoutingExplanations explanations) {
final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(newRoutingNodes).build();
MetaData newMetaData = updateMetaDataWithRoutingTable(oldMetaData, oldRoutingTable, newRoutingTable);
return new RoutingAllocation.Result(true, newRoutingTable.validateRaiseException(newMetaData), newMetaData, explanations);
}
/**
* Updates the current {@link MetaData} based on the newly created {@link RoutingTable}.
* Updates the current {@link MetaData} based on the newly created {@link RoutingTable}. Specifically
* we update {@link IndexMetaData#getActiveAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on
* the changes made during this allocation.
*
* @param currentMetaData {@link MetaData} object from before the routing table was changed.
* @param oldMetaData {@link MetaData} object from before the routing table was changed.
* @param oldRoutingTable {@link RoutingTable} from before the change.
* @param newRoutingTable new {@link RoutingTable} created by the allocation change
* @return adapted {@link MetaData}, potentially the original one if no change was needed.
*/
static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) {
// make sure index meta data and routing tables are in sync w.r.t active allocation ids
static MetaData updateMetaDataWithRoutingTable(MetaData oldMetaData, RoutingTable oldRoutingTable, RoutingTable newRoutingTable) {
MetaData.Builder metaDataBuilder = null;
for (IndexRoutingTable indexRoutingTable : newRoutingTable) {
final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex());
if (indexMetaData == null) {
throw new IllegalStateException("no metadata found for index " + indexRoutingTable.getIndex().getName());
for (IndexRoutingTable newIndexTable : newRoutingTable) {
final IndexMetaData oldIndexMetaData = oldMetaData.index(newIndexTable.getIndex());
if (oldIndexMetaData == null) {
throw new IllegalStateException("no metadata found for index " + newIndexTable.getIndex().getName());
}
IndexMetaData.Builder indexMetaDataBuilder = null;
for (IndexShardRoutingTable shardRoutings : indexRoutingTable) {
for (IndexShardRoutingTable newShardTable : newIndexTable) {
final ShardId shardId = newShardTable.shardId();
// update activeAllocationIds
Set<String> activeAllocationIds = shardRoutings.activeShards().stream()
Set<String> activeAllocationIds = newShardTable.activeShards().stream()
.map(ShardRouting::allocationId)
.filter(Objects::nonNull)
.map(AllocationId::getId)
@ -148,34 +153,44 @@ public class AllocationService extends AbstractComponent {
// only update active allocation ids if there is an active shard
if (activeAllocationIds.isEmpty() == false) {
// get currently stored allocation ids
Set<String> storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id());
Set<String> storedAllocationIds = oldIndexMetaData.activeAllocationIds(shardId.id());
if (activeAllocationIds.equals(storedAllocationIds) == false) {
if (indexMetaDataBuilder == null) {
indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
}
indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds);
indexMetaDataBuilder.putActiveAllocationIds(shardId.id(), activeAllocationIds);
}
}
// update primary terms
final ShardRouting primary = shardRoutings.primaryShard();
if (primary == null) {
throw new IllegalStateException("missing primary shard for " + shardRoutings.shardId());
final ShardRouting newPrimary = newShardTable.primaryShard();
if (newPrimary == null) {
throw new IllegalStateException("missing primary shard for " + newShardTable.shardId());
}
final int shardId = primary.shardId().id();
if (primary.primaryTerm() != indexMetaData.primaryTerm(shardId)) {
assert primary.primaryTerm() > indexMetaData.primaryTerm(shardId) :
"primary term should only increase. Index primary term ["
+ indexMetaData.primaryTerm(shardId) + "] but primary routing is " + primary;
final ShardRouting oldPrimary = oldRoutingTable.shardRoutingTable(shardId).primaryShard();
if (oldPrimary == null) {
throw new IllegalStateException("missing primary shard for " + newShardTable.shardId());
}
// we update the primary term on initial assignment or when a replica is promoted. Most notably we do *not*
// update it when a primary relocates
if (newPrimary.unassigned() ||
newPrimary.isSameAllocation(oldPrimary) ||
// we do not use newPrimary.isTargetRelocationOf(oldPrimary) because that one enforces newPrimary to
// be initializing. However, when the target shard is activated, we still want the primary term to stay
// the same
(oldPrimary.relocating() && newPrimary.isSameAllocation(oldPrimary.buildTargetRelocatingShard()))) {
// do nothing
} else {
// incrementing the primary term
if (indexMetaDataBuilder == null) {
indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
}
indexMetaDataBuilder.primaryTerm(shardId, primary.primaryTerm());
indexMetaDataBuilder.primaryTerm(shardId.id(), oldIndexMetaData.primaryTerm(shardId.id()) + 1);
}
}
if (indexMetaDataBuilder != null) {
if (metaDataBuilder == null) {
metaDataBuilder = MetaData.builder(currentMetaData);
metaDataBuilder = MetaData.builder(oldMetaData);
}
metaDataBuilder.put(indexMetaDataBuilder);
}
@ -183,7 +198,7 @@ public class AllocationService extends AbstractComponent {
if (metaDataBuilder != null) {
return metaDataBuilder.build();
} else {
return currentMetaData;
return oldMetaData;
}
}
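Condensed, the promotion test above keeps the old primary term in exactly three cases; a hypothetical helper expressing the same condition (not part of this commit):

private static boolean keepPrimaryTerm(ShardRouting oldPrimary, ShardRouting newPrimary) {
    return newPrimary.unassigned()                  // nothing was assigned or promoted
        || newPrimary.isSameAllocation(oldPrimary)  // same allocation as before
        || (oldPrimary.relocating()                 // completed relocation target of the old primary
            && newPrimary.isSameAllocation(oldPrimary.buildTargetRelocatingShard()));
}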
@ -214,7 +229,7 @@ public class AllocationService extends AbstractComponent {
}
gatewayAllocator.applyFailedShards(allocation);
reroute(allocation);
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
@ -261,7 +276,7 @@ public class AllocationService extends AbstractComponent {
// the assumption is that commands will move / act on shards (or fail through exceptions)
// so, there will always be shard "movements", so no need to check on reroute
reroute(allocation);
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations);
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes, explanations);
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
@ -294,7 +309,7 @@ public class AllocationService extends AbstractComponent {
if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), clusterState.routingTable(), routingNodes);
logClusterHealthStateChange(
new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),

View File

@ -39,7 +39,7 @@ import static java.util.Collections.unmodifiableSet;
/**
* The {@link RoutingAllocation} keep the state of the current allocation
* of shards and holds the {@link AllocationDeciders} which are responsible
* for the current routing state.
* for the current routing state.
*/
public class RoutingAllocation {
@ -58,10 +58,9 @@ public class RoutingAllocation {
/**
* Creates a new {@link RoutingAllocation.Result}
*
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
* @param metaData the {@link MetaData} this Result references
*/
public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
this.changed = changed;
@ -71,10 +70,9 @@ public class RoutingAllocation {
/**
* Creates a new {@link RoutingAllocation.Result}
*
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
* @param metaData the {@link MetaData} this Result references
* @param explanations Explanation for the reroute actions
*/
public Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
@ -84,9 +82,7 @@ public class RoutingAllocation {
this.explanations = explanations;
}
/**
* determine whether the actual {@link RoutingTable} has been changed
*
/** determine whether the actual {@link RoutingTable} has been changed
* @return <code>true</code> if the {@link RoutingTable} has been changed by allocation. Otherwise <code>false</code>
*/
public boolean changed() {
@ -95,7 +91,6 @@ public class RoutingAllocation {
/**
* Get the {@link MetaData} referenced by this result
*
* @return referenced {@link MetaData}
*/
public MetaData metaData() {
@ -104,7 +99,6 @@ public class RoutingAllocation {
/**
* Get the {@link RoutingTable} referenced by this result
*
* @return referenced {@link RoutingTable}
*/
public RoutingTable routingTable() {
@ -113,7 +107,6 @@ public class RoutingAllocation {
/**
* Get the explanation of this result
*
* @return explanation
*/
public RoutingExplanations explanations() {
@ -144,10 +137,9 @@ public class RoutingAllocation {
/**
* Creates a new {@link RoutingAllocation}
*
* @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations
* @param routingNodes Routing nodes in the current cluster
* @param nodes TODO: Documentation
* @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations
* @param routingNodes Routing nodes in the current cluster
* @param nodes TODO: Documentation
* @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
*/
public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, ClusterInfo clusterInfo, long currentNanoTime) {
@ -165,7 +157,6 @@ public class RoutingAllocation {
/**
* Get {@link AllocationDeciders} used for allocation
*
* @return {@link AllocationDeciders} used for allocation
*/
public AllocationDeciders deciders() {
@ -174,7 +165,6 @@ public class RoutingAllocation {
/**
* Get routing table of current nodes
*
* @return current routing table
*/
public RoutingTable routingTable() {
@ -183,7 +173,6 @@ public class RoutingAllocation {
/**
* Get current routing nodes
*
* @return routing nodes
*/
public RoutingNodes routingNodes() {
@ -192,7 +181,6 @@ public class RoutingAllocation {
/**
* Get metadata of routing nodes
*
* @return Metadata of routing nodes
*/
public MetaData metaData() {
@ -201,7 +189,6 @@ public class RoutingAllocation {
/**
* Get discovery nodes in current routing
*
* @return discovery nodes
*/
public DiscoveryNodes nodes() {
@ -214,7 +201,6 @@ public class RoutingAllocation {
/**
* Get explanations of current routing
*
* @return explanation of routing
*/
public AllocationExplanation explanation() {
@ -271,11 +257,10 @@ public class RoutingAllocation {
/**
* Create a routing decision, including the reason if the debug flag is
* turned on
*
* @param decision decision whether to allow/deny allocation
* @param decision decision whether to allow/deny allocation
* @param deciderLabel a human readable label for the AllocationDecider
* @param reason a format string explanation of the decision
* @param params format string parameters
* @param reason a format string explanation of the decision
* @param params format string parameters
*/
public Decision decision(Decision decision, String deciderLabel, String reason, Object... params) {
if (debugDecision()) {

View File

@ -687,9 +687,8 @@ public class ClusterService extends AbstractLifecycleComponent<ClusterService> {
warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}{}{}", t, executionTime,
newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.nodes().prettyPrint(),
newClusterState.routingTable().prettyPrint(), newClusterState.getRoutingNodes().prettyPrint());
logger.warn("failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", t, executionTime,
newClusterState.version(), newClusterState.stateUUID(), source, newClusterState.prettyPrint());
// TODO: do we want to call updateTask.onFailure here?
}

View File

@ -445,5 +445,10 @@ public class PagedBytesReference implements BytesReference {
// do nothing
}
@Override
public int available() throws IOException {
return length - pos;
}
}
}
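available() now reports the unread remainder of the reference. A hypothetical caller could use it to validate a length prefix before allocating (readBlob and the wire format here are invented for illustration):

void readBlob(StreamInput in) throws IOException {
    int length = in.readVInt();
    if (in.available() < length) {
        throw new IOException("expected [" + length + "] bytes but only [" + in.available() + "] are available");
    }
    byte[] blob = new byte[length];
    in.readBytes(blob, 0, length);
}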

View File

@ -59,6 +59,11 @@ public abstract class FilterStreamInput extends StreamInput {
delegate.close();
}
@Override
public int available() throws IOException {
return delegate.available();
}
@Override
public Version getVersion() {
return delegate.getVersion();

View File

@ -74,6 +74,11 @@ public class InputStreamStreamInput extends StreamInput {
is.close();
}
@Override
public int available() throws IOException {
return is.available();
}
@Override
public int read() throws IOException {
return is.read();

View File

@ -36,7 +36,12 @@ public class NamedWriteableAwareStreamInput extends FilterStreamInput {
@Override
<C> C readNamedWriteable(Class<C> categoryClass) throws IOException {
String name = readString();
NamedWriteable<? extends C> namedWriteable = namedWriteableRegistry.getPrototype(categoryClass, name);
return namedWriteable.readFrom(this);
Writeable.Reader<? extends C> reader = namedWriteableRegistry.getReader(categoryClass, name);
C c = reader.read(this);
if (c == null) {
throw new IOException(
"Writeable.Reader [" + reader + "] returned null which is not allowed and probably means it screwed up the stream.");
}
return c;
}
}

View File

@ -31,54 +31,70 @@ public class NamedWriteableRegistry {
private final Map<Class<?>, InnerRegistry<?>> registry = new HashMap<>();
/**
* Registers a {@link NamedWriteable} prototype given its category
* Register a {@link NamedWriteable} given its category, its name, and a function to read it from the stream.
*
* This method suppresses the rawtypes warning because it intentionally uses NamedWriteable instead of {@code NamedWriteable<T>} so it
* is easier to use and because we might be able to drop the type parameter from NamedWriteable entirely some day.
*/
public synchronized <T> void registerPrototype(Class<T> categoryClass, NamedWriteable<? extends T> namedWriteable) {
@SuppressWarnings("rawtypes")
public synchronized <T extends NamedWriteable> void register(Class<T> categoryClass, String name,
Writeable.Reader<? extends T> reader) {
@SuppressWarnings("unchecked")
InnerRegistry<T> innerRegistry = (InnerRegistry<T>)registry.get(categoryClass);
InnerRegistry<T> innerRegistry = (InnerRegistry<T>) registry.get(categoryClass);
if (innerRegistry == null) {
innerRegistry = new InnerRegistry<>(categoryClass);
registry.put(categoryClass, innerRegistry);
}
innerRegistry.registerPrototype(namedWriteable);
innerRegistry.register(name, reader);
}
/**
* Registers a {@link NamedWriteable} prototype given its category.
* @deprecated Prefer {@link #register(Class, String, org.elasticsearch.common.io.stream.Writeable.Reader)}
*/
@Deprecated
@SuppressWarnings("rawtypes") // TODO remove this method entirely before 5.0.0 GA
public synchronized <T extends NamedWriteable> void registerPrototype(Class<T> categoryClass,
NamedWriteable<? extends T> namedWriteable) {
register(categoryClass, namedWriteable.getWriteableName(), namedWriteable::readFrom);
}
/**
* Returns a prototype of the {@link NamedWriteable} object identified by the name provided as argument and its category
*/
public synchronized <T> NamedWriteable<? extends T> getPrototype(Class<T> categoryClass, String name) {
public synchronized <T> Writeable.Reader<? extends T> getReader(Class<T> categoryClass, String name) {
@SuppressWarnings("unchecked")
InnerRegistry<T> innerRegistry = (InnerRegistry<T>)registry.get(categoryClass);
if (innerRegistry == null) {
throw new IllegalArgumentException("unknown named writeable category [" + categoryClass.getName() + "]");
}
return innerRegistry.getPrototype(name);
return innerRegistry.getReader(name);
}
private static class InnerRegistry<T> {
private final Map<String, NamedWriteable<? extends T>> registry = new HashMap<>();
private final Map<String, Writeable.Reader<? extends T>> registry = new HashMap<>();
private final Class<T> categoryClass;
private InnerRegistry(Class<T> categoryClass) {
this.categoryClass = categoryClass;
}
private void registerPrototype(NamedWriteable<? extends T> namedWriteable) {
NamedWriteable<? extends T> existingNamedWriteable = registry.get(namedWriteable.getWriteableName());
if (existingNamedWriteable != null) {
throw new IllegalArgumentException("named writeable of type [" + namedWriteable.getClass().getName() + "] with name [" + namedWriteable.getWriteableName() + "] " +
"is already registered by type [" + existingNamedWriteable.getClass().getName() + "] within category [" + categoryClass.getName() + "]");
private void register(String name, Writeable.Reader<? extends T> reader) {
Writeable.Reader<? extends T> existingReader = registry.get(name);
if (existingReader != null) {
throw new IllegalArgumentException(
"named writeable [" + categoryClass.getName() + "][" + name + "] is already registered by [" + reader + "]");
}
registry.put(namedWriteable.getWriteableName(), namedWriteable);
registry.put(name, reader);
}
private NamedWriteable<? extends T> getPrototype(String name) {
NamedWriteable<? extends T> namedWriteable = registry.get(name);
if (namedWriteable == null) {
throw new IllegalArgumentException("unknown named writeable with name [" + name + "] within category [" + categoryClass.getName() + "]");
private Writeable.Reader<? extends T> getReader(String name) {
Writeable.Reader<? extends T> reader = registry.get(name);
if (reader == null) {
throw new IllegalArgumentException("unknown named writeable [" + categoryClass.getName() + "][" + name + "]");
}
return namedWriteable;
return reader;
}
}
}
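Under the new API a category entry is a name plus a Writeable.Reader, typically a constructor reference. A sketch mirroring the NetworkModule change further down (the registry and in variables are illustrative):

NamedWriteableRegistry registry = new NamedWriteableRegistry();
registry.register(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new);

// reading side: look the reader up by category and name, then read from the stream
Writeable.Reader<? extends Task.Status> reader = registry.getReader(Task.Status.class, ReplicationTask.Status.NAME);
Task.Status status = reader.read(in);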

View File

@ -37,14 +37,13 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.ingest.IngestStats;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.completion.context.QueryContext;
import org.elasticsearch.search.suggest.phrase.SmoothingModel;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.phrase.SmoothingModel;
import org.elasticsearch.tasks.Task;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@ -68,7 +67,6 @@ import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;
import static org.elasticsearch.ElasticsearchException.readException;
@ -157,7 +155,7 @@ public abstract class StreamInput extends InputStream {
*/
public int readInt() throws IOException {
return ((readByte() & 0xFF) << 24) | ((readByte() & 0xFF) << 16)
| ((readByte() & 0xFF) << 8) | (readByte() & 0xFF);
| ((readByte() & 0xFF) << 8) | (readByte() & 0xFF);
}
/**
@ -375,6 +373,9 @@ public abstract class StreamInput extends InputStream {
@Override
public abstract void close() throws IOException;
@Override
public abstract int available() throws IOException;
public String[] readStringArray() throws IOException {
int size = readVInt();
if (size == 0) {
@ -565,9 +566,14 @@ public abstract class StreamInput extends InputStream {
}
}
public <T extends Writeable> T readOptionalWritable(Writeable.IOFunction<StreamInput, T> provider) throws IOException {
public <T extends Writeable> T readOptionalWriteable(Writeable.Reader<T> reader) throws IOException {
if (readBoolean()) {
return provider.apply(this);
T t = reader.read(this);
if (t == null) {
throw new IOException("Writeable.Reader [" + reader
+ "] returned null which is not allowed and probably means it screwed up the stream.");
}
return t;
} else {
return null;
}
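writeOptionalWriteable and the reader above pair through a boolean marker. A hypothetical round trip, with an illustrative type MyStatus that exposes a StreamInput constructor (out, in, and status are assumed):

out.writeOptionalWriteable(status);                          // writes a false marker when status is null
MyStatus restored = in.readOptionalWriteable(MyStatus::new); // returns null when the marker was false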
@ -687,21 +693,21 @@ public abstract class StreamInput extends InputStream {
/**
* Reads a {@link AggregatorBuilder} from the current stream
*/
public AggregatorBuilder readAggregatorFactory() throws IOException {
public AggregatorBuilder<?> readAggregatorFactory() throws IOException {
return readNamedWriteable(AggregatorBuilder.class);
}
/**
* Reads a {@link PipelineAggregatorBuilder} from the current stream
*/
public PipelineAggregatorBuilder readPipelineAggregatorFactory() throws IOException {
public PipelineAggregatorBuilder<?> readPipelineAggregatorFactory() throws IOException {
return readNamedWriteable(PipelineAggregatorBuilder.class);
}
/**
* Reads a {@link QueryBuilder} from the current stream
*/
public QueryBuilder readQuery() throws IOException {
public QueryBuilder<?> readQuery() throws IOException {
return readNamedWriteable(QueryBuilder.class);
}
@ -726,6 +732,13 @@ public abstract class StreamInput extends InputStream {
return readNamedWriteable(SuggestionBuilder.class);
}
/**
* Reads a {@link SortBuilder} from the current stream
*/
public SortBuilder<?> readSortBuilder() throws IOException {
return readNamedWriteable(SortBuilder.class);
}
/**
* Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream
*/

View File

@ -36,13 +36,13 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.completion.context.QueryContext;
import org.elasticsearch.search.suggest.phrase.SmoothingModel;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.phrase.SmoothingModel;
import org.elasticsearch.tasks.Task;
import org.joda.time.ReadableInstant;
import java.io.EOFException;
@ -531,7 +531,8 @@ public abstract class StreamOutput extends OutputStream {
writeBoolean(false);
}
}
public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException {
public void writeOptionalWriteable(@Nullable Writeable<?> writeable) throws IOException {
if (writeable != null) {
writeBoolean(true);
writeable.writeTo(this);
@ -662,7 +663,7 @@ public abstract class StreamOutput extends OutputStream {
/**
* Writes a {@link NamedWriteable} to the current stream, by first writing its name and then the object itself
*/
void writeNamedWriteable(NamedWriteable namedWriteable) throws IOException {
void writeNamedWriteable(NamedWriteable<?> namedWriteable) throws IOException {
writeString(namedWriteable.getWriteableName());
namedWriteable.writeTo(this);
}
@ -684,7 +685,7 @@ public abstract class StreamOutput extends OutputStream {
/**
* Writes a {@link QueryBuilder} to the current stream
*/
public void writeQuery(QueryBuilder queryBuilder) throws IOException {
public void writeQuery(QueryBuilder<?> queryBuilder) throws IOException {
writeNamedWriteable(queryBuilder);
}
@ -732,9 +733,9 @@ public abstract class StreamOutput extends OutputStream {
for (T obj: list) {
obj.writeTo(this);
}
}
}
/**
/**
* Writes a {@link RescoreBuilder} to the current stream
*/
public void writeRescorer(RescoreBuilder<?> rescorer) throws IOException {
@ -744,8 +745,15 @@ public abstract class StreamOutput extends OutputStream {
/**
* Writes a {@link SuggestionBuilder} to the current stream
*/
public void writeSuggestion(SuggestionBuilder suggestion) throws IOException {
public void writeSuggestion(SuggestionBuilder<?> suggestion) throws IOException {
writeNamedWriteable(suggestion);
}
/**
* Writes a {@link SortBuilder} to the current stream
*/
public void writeSortBuilder(SortBuilder<?> sort) throws IOException {
writeNamedWriteable(sort);
}
}
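The new sort methods ride on the named writeable machinery, so the concrete sort must be registered by name. A sketch of the round trip, assuming a FieldSortBuilder of this era is registered (field name is illustrative):

out.writeSortBuilder(new FieldSortBuilder("timestamp").order(SortOrder.DESC));
SortBuilder<?> sort = in.readSortBuilder();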

View File

@ -23,10 +23,7 @@ import java.io.IOException;
/**
* Implementers can be read from {@linkplain StreamInput} by calling their {@link #readFrom(StreamInput)} method.
*
* It is common for implementers of this interface to declare a <code>public static final</code> instance of themselves named PROTOTYPE so
* users can call {@linkplain #readFrom(StreamInput)} on it. It is also fairly typical for readFrom to be implemented as a method that just
* calls a constructor that takes {@linkplain StreamInput} as a parameter. This allows the fields in the implementer to be
* <code>final</code>.
* Implementers of this interface that also implement {@link Writeable} should see advice there on how to do so.
*/
public interface StreamableReader<T> {
/**

View File

@ -31,21 +31,31 @@ import java.io.IOException;
*
* Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
* so this isn't always possible.
*
* The fact that this interface extends {@link StreamableReader} should be considered vestigial. Instead of using its
* {@link #readFrom(StreamInput)} method you should prefer using the Reader interface as a reference to a constructor that takes
* {@link StreamInput}. The reasoning behind this is that most "good" readFrom implementations just delegated to such a constructor anyway
* and they required an unsightly PROTOTYPE object.
*/
public interface Writeable<T> extends StreamableReader<T> {
public interface Writeable<T> extends StreamableReader<T> { // TODO remove extends StreamableReader<T> from this interface, and remove <T>
/**
* Write this into the {@linkplain StreamOutput}.
*/
void writeTo(StreamOutput out) throws IOException;
@FunctionalInterface
interface IOFunction<T, R> {
/**
* Applies this function to the given argument.
*
* @param t the function argument
* @return the function result
*/
R apply(T t) throws IOException;
}
@Override
default T readFrom(StreamInput in) throws IOException {
// See class javadoc for reasoning
throw new UnsupportedOperationException("Prefer calling a constructor that takes a StreamInput to calling readFrom.");
}
/**
* Reference to a method that can read some object from a stream. By convention this is a constructor that takes
* {@linkplain StreamInput} as an argument for most classes and a static method for things like enums. Returning null from one of these
* is always wrong - for that we use methods like {@link StreamInput#readOptionalWriteable(Reader)}.
*/
@FunctionalInterface
interface Reader<R> {
R read(StreamInput t) throws IOException;
}
}
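Put together, the recommended shape is a writeTo plus a StreamInput constructor that doubles as the Reader. A hypothetical implementation (Point is invented for illustration):

public class Point implements Writeable<Point> {

    private final int x;
    private final int y;

    public Point(int x, int y) {
        this.x = x;
        this.y = y;
    }

    /** Used as the Reader, e.g. in.readOptionalWriteable(Point::new). */
    public Point(StreamInput in) throws IOException {
        this.x = in.readInt();
        this.y = in.readInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeInt(x);
        out.writeInt(y);
    }
}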

View File

@ -28,6 +28,7 @@ import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -328,7 +329,7 @@ public class NetworkModule extends AbstractModule {
registerTransportService(NETTY_TRANSPORT, TransportService.class);
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
registerTransport(NETTY_TRANSPORT, NettyTransport.class);
registerTaskStatus(ReplicationTask.Status.PROTOTYPE);
registerTaskStatus(ReplicationTask.Status.NAME, ReplicationTask.Status::new);
if (transportClient == false) {
registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class);
@ -374,8 +375,8 @@ public class NetworkModule extends AbstractModule {
}
}
public void registerTaskStatus(Task.Status prototype) {
namedWriteableRegistry.registerPrototype(Task.Status.class, prototype);
public void registerTaskStatus(String name, Writeable.Reader<? extends Task.Status> reader) {
namedWriteableRegistry.register(Task.Status.class, name, reader);
}
@Override

View File

@ -76,8 +76,9 @@ public class PropertyPlaceholder {
* @param placeholderResolver the <code>PlaceholderResolver</code> to use for replacement.
* @return the supplied value with placeholders replaced inline.
*/
public String replacePlaceholders(String value, PlaceholderResolver placeholderResolver) {
Objects.requireNonNull(value, "Argument 'value' must not be null.");
public String replacePlaceholders(String key, String value, PlaceholderResolver placeholderResolver) {
Objects.requireNonNull(key);
Objects.requireNonNull(value, "value can not be null for [" + key + "]");
return parseStringValue(value, placeholderResolver, new HashSet<String>());
}
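Callers now pass the key purely so a null value can be reported with the setting name that carried it. A hypothetical call site (setting name and value are illustrative):

String resolved = propertyPlaceholder.replacePlaceholders("path.data", "${ES_PATH_DATA}", placeholderResolver);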

View File

@ -344,9 +344,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,

View File

@ -135,7 +135,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
EngineConfig.INDEX_CODEC_SETTING,
IndexWarmer.INDEX_NORMS_LOADING_SETTING,
// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", (s) -> {
Map<String, Settings> groups = s.getAsGroups();

View File

@ -523,6 +523,28 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, ByteSizeValue minValue, ByteSizeValue maxValue,
Property... properties) {
return byteSizeSetting(key, (s) -> value.toString(), minValue, maxValue, properties);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, Function<Settings, String> defaultValue,
ByteSizeValue minValue, ByteSizeValue maxValue,
Property... properties) {
return new Setting<>(key, defaultValue, (s) -> parseByteSize(s, minValue, maxValue, key), properties);
}
public static ByteSizeValue parseByteSize(String s, ByteSizeValue minValue, ByteSizeValue maxValue, String key) {
ByteSizeValue value = ByteSizeValue.parseBytesSizeValue(s, key);
if (value.bytes() < minValue.bytes()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
if (value.bytes() > maxValue.bytes()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be =< " + maxValue);
}
return value;
}
public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
}
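A hedged sketch of defining a bounded byte-size setting with the new factory (the key and bounds are invented for illustration):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    // parseByteSize rejects values outside [16kb, 1gb] with the messages above
    public static final Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
            Setting.byteSizeSetting(
                    "example.buffer_size",                  // hypothetical key
                    new ByteSizeValue(64, ByteSizeUnit.MB), // default
                    new ByteSizeValue(16, ByteSizeUnit.KB), // minValue
                    new ByteSizeValue(1, ByteSizeUnit.GB),  // maxValue
                    Property.NodeScope);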

View File

@ -1221,7 +1221,7 @@ public final class Settings implements ToXContent {
}
};
for (Map.Entry<String, String> entry : new HashMap<>(map).entrySet()) {
String value = propertyPlaceholder.replacePlaceholders(entry.getValue(), placeholderResolver);
String value = propertyPlaceholder.replacePlaceholders(entry.getKey(), entry.getValue(), placeholderResolver);
// if the value exists and has length, we should maintain it in the map
// otherwise, the replace process resolved into removing it
if (Strings.hasLength(value)) {

View File

@ -27,6 +27,10 @@ import org.elasticsearch.common.xcontent.XContentType;
*/
public class JsonSettingsLoader extends XContentSettingsLoader {
public JsonSettingsLoader(boolean allowNullValues) {
super(allowNullValues);
}
@Override
public XContentType contentType() {
return XContentType.JSON;

View File

@ -24,10 +24,12 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.function.Supplier;
/**
* Settings loader that loads (parses) the settings in a properties format.
@ -36,42 +38,49 @@ public class PropertiesSettingsLoader implements SettingsLoader {
@Override
public Map<String, String> load(String source) throws IOException {
Properties props = new NoDuplicatesProperties();
FastStringReader reader = new FastStringReader(source);
try {
props.load(reader);
Map<String, String> result = new HashMap<>();
for (Map.Entry entry : props.entrySet()) {
result.put((String) entry.getKey(), (String) entry.getValue());
}
return result;
} finally {
IOUtils.closeWhileHandlingException(reader);
}
return load(() -> new FastStringReader(source), (reader, props) -> props.load(reader));
}
@Override
public Map<String, String> load(byte[] source) throws IOException {
Properties props = new NoDuplicatesProperties();
StreamInput stream = StreamInput.wrap(source);
return load(() -> StreamInput.wrap(source), (inStream, props) -> props.load(inStream));
}
private final <T extends Closeable> Map<String, String> load(
Supplier<T> supplier,
IOExceptionThrowingBiConsumer<T, Properties> properties
) throws IOException {
T t = null;
try {
props.load(stream);
Map<String, String> result = new HashMap<>();
t = supplier.get();
final Properties props = new NoDuplicatesProperties();
properties.accept(t, props);
final Map<String, String> result = new HashMap<>();
for (Map.Entry entry : props.entrySet()) {
result.put((String) entry.getKey(), (String) entry.getValue());
}
return result;
} finally {
IOUtils.closeWhileHandlingException(stream);
IOUtils.closeWhileHandlingException(t);
}
}
@FunctionalInterface
private interface IOExceptionThrowingBiConsumer<T, U> {
void accept(T t, U u) throws IOException;
}
class NoDuplicatesProperties extends Properties {
@Override
public synchronized Object put(Object key, Object value) {
Object previousValue = super.put(key, value);
final Object previousValue = super.put(key, value);
if (previousValue != null) {
throw new ElasticsearchParseException("duplicate settings key [{}] found, previous value [{}], current value [{}]", key, previousValue, value);
throw new ElasticsearchParseException(
"duplicate settings key [{}] found, previous value [{}], current value [{}]",
key,
previousValue,
value
);
}
return previousValue;
}
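Behavior is unchanged by the refactoring: both overloads funnel into the shared helper, and duplicate keys still fail fast. A sketch of what callers see:

    Map<String, String> settings =
            new PropertiesSettingsLoader().load("cluster.name=prod\nnode.name=node-1");
    // settings -> {cluster.name=prod, node.name=node-1}

    // a repeated key is rejected by NoDuplicatesProperties
    new PropertiesSettingsLoader().load("a=1\na=2");
    // -> ElasticsearchParseException: duplicate settings key [a] found, ...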

View File

@ -20,43 +20,63 @@
package org.elasticsearch.common.settings.loader;
/**
* A settings loader factory automatically trying to identify what type of
* {@link SettingsLoader} to use.
*
*
* A class holding factory methods for settings loaders that attempts
* to infer the type of the underlying settings content.
*/
public final class SettingsLoaderFactory {
private SettingsLoaderFactory() {
}
/**
* Returns a {@link SettingsLoader} based on the resource name.
* Returns a {@link SettingsLoader} based on the source resource
* name. This factory method assumes that if the resource name ends
* with ".json" then the content should be parsed as JSON, else if
* the resource name ends with ".yml" or ".yaml" then the content
* should be parsed as YAML, else if the resource name ends with
* ".properties" then the content should be parsed as properties,
* otherwise default to attempting to parse as JSON. Note that the
* parsers returned by this method will not accept null-valued
* keys.
*
* @param resourceName The resource name containing the settings
* content.
* @return A settings loader.
*/
public static SettingsLoader loaderFromResource(String resourceName) {
if (resourceName.endsWith(".json")) {
return new JsonSettingsLoader();
return new JsonSettingsLoader(false);
} else if (resourceName.endsWith(".yml") || resourceName.endsWith(".yaml")) {
return new YamlSettingsLoader();
return new YamlSettingsLoader(false);
} else if (resourceName.endsWith(".properties")) {
return new PropertiesSettingsLoader();
} else {
// lets default to the json one
return new JsonSettingsLoader();
return new JsonSettingsLoader(false);
}
}
/**
* Returns a {@link SettingsLoader} based on the actual settings source.
* Returns a {@link SettingsLoader} based on the source content.
* This factory method assumes that if the underlying content
* contains an opening and closing brace ('{' and '}') then the
* content should be parsed as JSON, else if the underlying content
* fails this condition but contains a ':' then the content should
* be parsed as YAML, and otherwise should be parsed as properties.
* Note that the JSON and YAML parsers returned by this method will
* accept null-valued keys.
*
* @param source The underlying settings content.
* @return A settings loader.
*/
public static SettingsLoader loaderFromSource(String source) {
if (source.indexOf('{') != -1 && source.indexOf('}') != -1) {
return new JsonSettingsLoader();
return new JsonSettingsLoader(true);
}
if (source.indexOf(':') != -1) {
return new YamlSettingsLoader();
return new YamlSettingsLoader(true);
}
return new PropertiesSettingsLoader();
}
}
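The two heuristics side by side, in sketch form (sources are illustrative):

    // by resource name: strict loaders, null values rejected
    SettingsLoaderFactory.loaderFromResource("elasticsearch.yml");    // YAML
    SettingsLoaderFactory.loaderFromResource("settings.json");        // JSON
    SettingsLoaderFactory.loaderFromResource("settings.properties");  // properties
    SettingsLoaderFactory.loaderFromResource("no-extension");         // defaults to JSON

    // by content: lenient loaders, null values accepted for JSON and YAML
    SettingsLoaderFactory.loaderFromSource("{\"a\": \"b\"}");  // '{' and '}' -> JSON
    SettingsLoaderFactory.loaderFromSource("a: b");            // ':' -> YAML
    SettingsLoaderFactory.loaderFromSource("a=b");             // otherwise properties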

View File

@ -38,6 +38,12 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
public abstract XContentType contentType();
private final boolean allowNullValues;
XContentSettingsLoader(boolean allowNullValues) {
this.allowNullValues = allowNullValues;
}
@Override
public Map<String, String> load(String source) throws IOException {
try (XContentParser parser = XContentFactory.xContent(contentType()).createParser(source)) {
@ -153,6 +159,16 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
currentValue
);
}
if (currentValue == null && !allowNullValues) {
throw new ElasticsearchParseException(
"null-valued setting found for key [{}] found at line number [{}], column number [{}]",
key,
parser.getTokenLocation().lineNumber,
parser.getTokenLocation().columnNumber
);
}
settings.put(key, currentValue);
}
}
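With allowNullValues wired through, the strict loaders now fail with the key and token location instead of silently storing a null. A sketch:

    // strict, as returned by loaderFromResource(...)
    new JsonSettingsLoader(false).load("{\"node.name\": null}");
    // -> ElasticsearchParseException: null-valued setting found for key [node.name] at line number [1], ...

    // lenient, as returned by loaderFromSource(...): the null entry is kept
    new JsonSettingsLoader(true).load("{\"node.name\": null}");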

View File

@ -30,6 +30,10 @@ import java.util.Map;
*/
public class YamlSettingsLoader extends XContentSettingsLoader {
public YamlSettingsLoader(boolean allowNullValues) {
super(allowNullValues);
}
@Override
public XContentType contentType() {
return XContentType.YAML;

View File

@ -71,6 +71,7 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -78,6 +79,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
@ -100,14 +102,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, Property.NodeScope);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING =
Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING =
Setting.boolSetting("discovery.zen.master_election.filter_client", true, Property.NodeScope);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING =
Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0),
Property.NodeScope);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING =
Setting.boolSetting("discovery.zen.master_election.filter_data", false, Property.NodeScope);
public final static Setting<Boolean> MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING =
Setting.boolSetting("discovery.zen.master_election.ignore_non_master_pings", false, Property.NodeScope);
public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
@ -138,8 +138,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private final ElectMasterService electMaster;
private final boolean masterElectionFilterClientNodes;
private final boolean masterElectionFilterDataNodes;
private final boolean masterElectionIgnoreNonMasters;
private final TimeValue masterElectionWaitForJoinsTimeout;
private final JoinThreadControl joinThreadControl;
@ -169,11 +168,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.maxPingsFromAnotherMaster = MAX_PINGS_FROM_ANOTHER_MASTER_SETTING.get(settings);
this.sendLeaveRequest = SEND_LEAVE_REQUEST_SETTING.get(settings);
this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
this.masterElectionIgnoreNonMasters = MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING.get(settings);
this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.ignore_non_master [{}]",
this.pingTimeout, joinTimeout, masterElectionIgnoreNonMasters);
clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> {
final ClusterState clusterState = clusterService.state();
@ -846,30 +845,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
// filter responses
List<ZenPing.PingResponse> pingResponses = new ArrayList<>();
for (ZenPing.PingResponse pingResponse : fullPingResponses) {
DiscoveryNode node = pingResponse.node();
if (masterElectionFilterClientNodes && (node.clientNode() || (!node.masterNode() && !node.dataNode()))) {
// filter out client nodes, and nodes that are neither data nor master (effectively clients)
} else if (masterElectionFilterDataNodes && (!node.masterNode() && node.dataNode())) {
// filter out data node that is not also master
} else {
pingResponses.add(pingResponse);
}
}
if (logger.isDebugEnabled()) {
StringBuilder sb = new StringBuilder();
if (pingResponses.isEmpty()) {
sb.append(" {none}");
} else {
for (ZenPing.PingResponse pingResponse : pingResponses) {
sb.append("\n\t--> ").append(pingResponse);
}
}
logger.debug("filtered ping responses: (filter_client[{}], filter_data[{}]){}", masterElectionFilterClientNodes,
masterElectionFilterDataNodes, sb);
}
final List<ZenPing.PingResponse> pingResponses;
pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
final DiscoveryNode localNode = clusterService.localNode();
List<DiscoveryNode> pingMasters = new ArrayList<>();
@ -925,6 +902,28 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
}
static List<ZenPing.PingResponse> filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, ESLogger logger) {
List<ZenPing.PingResponse> pingResponses;
if (masterElectionIgnoreNonMasters) {
pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
} else {
pingResponses = Arrays.asList(fullPingResponses);
}
if (logger.isDebugEnabled()) {
StringBuilder sb = new StringBuilder();
if (pingResponses.isEmpty()) {
sb.append(" {none}");
} else {
for (ZenPing.PingResponse pingResponse : pingResponses) {
sb.append("\n\t--> ").append(pingResponse);
}
}
logger.debug("filtered ping responses: (ignore_non_masters [{}]){}", masterElectionIgnoreNonMasters, sb);
}
return pingResponses;
}
protected ClusterState rejoin(ClusterState clusterState, String reason) {
// *** called from within a cluster state update task *** //
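Operationally, the two removed filter settings are subsumed by the single new one. A hedged sketch of enabling it programmatically:

    Settings settings = Settings.builder()
            // replaces discovery.zen.master_election.filter_client and .filter_data
            .put(ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING.getKey(), true)
            .build();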

View File

@ -234,8 +234,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
// We successfully checked all indices for backward compatibility and found no non-upgradable indices, which
// means the upgrade can continue. Now it's safe to overwrite index metadata with the new version.
for (IndexMetaData indexMetaData : updateIndexMetaData) {
// since we still haven't upgraded the index folders, we write index state in the old folder
metaStateService.writeIndex("upgrade", indexMetaData, nodeEnv.resolveIndexFolder(indexMetaData.getIndex().getUUID()));
// since we upgraded the index folders already, write index state in the upgraded index folder
metaStateService.writeIndex("upgrade", indexMetaData);
}
}

View File

@ -121,18 +121,11 @@ public class MetaStateService extends AbstractComponent {
* Writes the index state.
*/
void writeIndex(String reason, IndexMetaData indexMetaData) throws IOException {
writeIndex(reason, indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex()));
}
/**
* Writes the index state in <code>locations</code>, use {@link #writeGlobalState(String, MetaData)}
* to write index state in index paths
*/
void writeIndex(String reason, IndexMetaData indexMetaData, Path[] locations) throws IOException {
final Index index = indexMetaData.getIndex();
logger.trace("[{}] writing state, reason [{}]", index, reason);
try {
IndexMetaData.FORMAT.write(indexMetaData, indexMetaData.getVersion(), locations);
IndexMetaData.FORMAT.write(indexMetaData, indexMetaData.getVersion(),
nodeEnv.indexPaths(indexMetaData.getIndex()));
} catch (Throwable ex) {
logger.warn("[{}]: failed to write index state", ex, index);
throw new IOException("failed to write state for [" + index + "]", ex);

View File

@ -118,7 +118,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
final boolean enoughAllocationsFound;
if (lastActiveAllocationIds.isEmpty()) {
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0) : "trying to allocate a primary with an empty allocation id set, but index is new";
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocate a primary with an empty allocation id set, but index is new";
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
@ -128,7 +128,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
} else {
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0, nodeShardsResult.allocationsFound, shard);
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard);
} else {
assert lastActiveAllocationIds.isEmpty() == false;
// use allocation ids to select nodes

View File

@ -19,18 +19,6 @@
package org.elasticsearch.index;
import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
@ -58,7 +46,6 @@ import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
@ -83,6 +70,18 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
@ -329,10 +328,10 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
(primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
final Engine.Warmer engineWarmer = (searcher, toLevel) -> {
final Engine.Warmer engineWarmer = (searcher) -> {
IndexShard shard = getShardOrNull(shardId.getId());
if (shard != null) {
warmer.warm(searcher, shard, IndexService.this.indexSettings, toLevel);
warmer.warm(searcher, shard, IndexService.this.indexSettings);
}
};
store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock,
@ -525,21 +524,21 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
@Override
public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) {
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
shard.fieldData().onCache(shardId, fieldName, fieldDataType, ramUsage);
shard.fieldData().onCache(shardId, fieldName, ramUsage);
}
}
}
@Override
public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
shard.fieldData().onRemoval(shardId, fieldName, fieldDataType, wasEvicted, sizeInBytes);
shard.fieldData().onRemoval(shardId, fieldName, wasEvicted, sizeInBytes);
}
}
}
@ -622,6 +621,11 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
rescheduleFsyncTask(durability);
}
}
// update primary terms
for (final IndexShard shard : this.shards.values()) {
shard.updatePrimaryTerm(metadata.primaryTerm(shard.shardId().id()));
}
}
private void rescheduleFsyncTask(Translog.Durability durability) {
@ -781,7 +785,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
@Override
public void close() {
public synchronized void close() {
if (closed.compareAndSet(false, true)) {
FutureUtils.cancel(scheduledFuture);
scheduledFuture = null;

View File

@ -19,14 +19,11 @@
package org.elasticsearch.index;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.DocumentMapper;
@ -50,9 +47,6 @@ import java.util.concurrent.TimeUnit;
*/
public final class IndexWarmer extends AbstractComponent {
public static final Setting<MappedFieldType.Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading",
MappedFieldType.Loading.LAZY.toString(), (s) -> MappedFieldType.Loading.parse(s, MappedFieldType.Loading.LAZY),
Property.IndexScope);
private final List<Listener> listeners;
IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) {
@ -66,7 +60,7 @@ public final class IndexWarmer extends AbstractComponent {
this.listeners = Collections.unmodifiableList(list);
}
void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings, boolean isTopReader) {
void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings) {
if (shard.state() == IndexShardState.CLOSED) {
return;
}
@ -74,22 +68,14 @@ public final class IndexWarmer extends AbstractComponent {
return;
}
if (logger.isTraceEnabled()) {
if (isTopReader) {
logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader());
} else {
logger.trace("{} warming [{}]", shard.shardId(), searcher.reader());
}
logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader());
}
shard.warmerService().onPreWarm();
long time = System.nanoTime();
final List<TerminationHandle> terminationHandles = new ArrayList<>();
// get a handle on pending tasks
for (final Listener listener : listeners) {
if (isTopReader) {
terminationHandles.add(listener.warmTopReader(shard, searcher));
} else {
terminationHandles.add(listener.warmNewReaders(shard, searcher));
}
terminationHandles.add(listener.warmReader(shard, searcher));
}
// wait for termination
for (TerminationHandle terminationHandle : terminationHandles) {
@ -97,22 +83,14 @@ public final class IndexWarmer extends AbstractComponent {
terminationHandle.awaitTermination();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
if (isTopReader) {
logger.warn("top warming has been interrupted", e);
} else {
logger.warn("warming has been interrupted", e);
}
logger.warn("top warming has been interrupted", e);
break;
}
}
long took = System.nanoTime() - time;
shard.warmerService().onPostWarm(took);
if (shard.warmerService().logger().isTraceEnabled()) {
if (isTopReader) {
shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
} else {
shard.warmerService().logger().trace("warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
}
shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
}
}
@ -127,9 +105,7 @@ public final class IndexWarmer extends AbstractComponent {
public interface Listener {
/** Queue tasks to warm up the given segments and return handles that allow waiting for the
* execution of those tasks to terminate. */
TerminationHandle warmNewReaders(IndexShard indexShard, Engine.Searcher searcher);
TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher);
TerminationHandle warmReader(IndexShard indexShard, Engine.Searcher searcher);
}
private static class FieldDataWarmer implements IndexWarmer.Listener {
@ -140,67 +116,17 @@ public final class IndexWarmer extends AbstractComponent {
}
@Override
public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
final MapperService mapperService = indexShard.mapperService();
final Map<String, MappedFieldType> warmUp = new HashMap<>();
for (DocumentMapper docMapper : mapperService.docMappers(false)) {
for (FieldMapper fieldMapper : docMapper.mappers()) {
final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
final String indexName = fieldMapper.fieldType().name();
if (fieldDataType == null) {
continue;
}
if (fieldDataType.getLoading() == MappedFieldType.Loading.LAZY) {
continue;
}
if (warmUp.containsKey(indexName)) {
continue;
}
warmUp.put(indexName, fieldMapper.fieldType());
}
}
final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());
for (final LeafReaderContext ctx : searcher.reader().leaves()) {
for (final MappedFieldType fieldType : warmUp.values()) {
executor.execute(() -> {
try {
final long start = System.nanoTime();
indexFieldDataService.getForField(fieldType).load(ctx);
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldType.name(),
TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldType.name());
} finally {
latch.countDown();
}
});
}
}
return () -> latch.await();
}
@Override
public TerminationHandle warmTopReader(final IndexShard indexShard, final Engine.Searcher searcher) {
public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) {
final MapperService mapperService = indexShard.mapperService();
final Map<String, MappedFieldType> warmUpGlobalOrdinals = new HashMap<>();
for (DocumentMapper docMapper : mapperService.docMappers(false)) {
for (FieldMapper fieldMapper : docMapper.mappers()) {
final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
final String indexName = fieldMapper.fieldType().name();
if (fieldDataType == null) {
final MappedFieldType fieldType = fieldMapper.fieldType();
final String indexName = fieldType.name();
if (fieldType.eagerGlobalOrdinals() == false) {
continue;
}
if (fieldDataType.getLoading() != MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS) {
continue;
}
if (warmUpGlobalOrdinals.containsKey(indexName)) {
continue;
}
warmUpGlobalOrdinals.put(indexName, fieldMapper.fieldType());
warmUpGlobalOrdinals.put(indexName, fieldType);
}
}
final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
@ -210,7 +136,12 @@ public final class IndexWarmer extends AbstractComponent {
try {
final long start = System.nanoTime();
IndexFieldData.Global ifd = indexFieldDataService.getForField(fieldType);
ifd.loadGlobal(searcher.getDirectoryReader());
DirectoryReader reader = searcher.getDirectoryReader();
IndexFieldData<?> global = ifd.loadGlobal(reader);
if (reader.leaves().isEmpty() == false) {
global.load(reader.leaves().get(0));
}
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(),
TimeValue.timeValueNanos(System.nanoTime() - start));
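Listener implementations shrink accordingly: one warmReader method, always invoked with the top-level searcher. A minimal sketch of a custom listener (illustrative, not part of this commit):

    IndexWarmer.Listener noopListener = new IndexWarmer.Listener() {
        @Override
        public IndexWarmer.TerminationHandle warmReader(IndexShard indexShard, Engine.Searcher searcher) {
            // queue nothing, so there is nothing to wait for
            return IndexWarmer.TerminationHandle.NO_WAIT;
        }
    };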

View File

@ -127,7 +127,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
}
if (analyzers.containsKey("default_index")) {
final Version createdVersion = indexSettings.getIndexVersionCreated();
if (createdVersion.onOrAfter(Version.V_5_0_0)) {
if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index().getName() + "]");
} else {
deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index().getName());

View File

@ -216,7 +216,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
}
@Override
public IndexWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) {
if (indexSettings.getIndex().equals(indexShard.indexSettings().getIndex()) == false) {
// this is from a different index
return TerminationHandle.NO_WAIT;
@ -268,11 +268,6 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
return () -> latch.await();
}
@Override
public TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher) {
return TerminationHandle.NO_WAIT;
}
}
Cache<Object, Cache<Query, Value>> getLoadedFilters() {

View File

@ -1241,13 +1241,9 @@ public abstract class Engine implements Closeable {
*/
public interface Warmer {
/**
* Called once a new Searcher is opened.
*
* @param searcher the searcher to warm
* @param isTopLevelReader <code>true</code> iff the searcher is built from a top-level reader.
* Otherwise the searcher might be built from a leaf reader to warm in isolation
* Called once a new Searcher is opened on the top-level searcher.
*/
void warm(Engine.Searcher searcher, boolean isTopLevelReader);
void warm(Engine.Searcher searcher);
}
/**

View File

@ -68,7 +68,6 @@ public final class EngineConfig {
private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy;
/**
* Index setting to change the low level lucene codec used for writing new segments.
* This setting is <b>not</b> realtime updateable.
@ -109,7 +108,7 @@ public final class EngineConfig {
final Settings settings = indexSettings.getSettings();
this.indexSettings = indexSettings;
this.threadPool = threadPool;
this.warmer = warmer == null ? (a,b) -> {} : warmer;
this.warmer = warmer == null ? (a) -> {} : warmer;
this.store = store;
this.deletionPolicy = deletionPolicy;
this.mergePolicy = mergePolicy;
@ -169,7 +168,7 @@ public final class EngineConfig {
/**
* Returns the {@link Codec} used in the engines {@link org.apache.lucene.index.IndexWriter}
* <p>
* Note: this setting is only read on startup.
* Note: this setting is only read on startup.
* </p>
*/
public Codec getCodec() {
@ -237,9 +236,7 @@ public final class EngineConfig {
/**
* Returns the engines shard ID
*/
public ShardId getShardId() {
return shardId;
}
public ShardId getShardId() { return shardId; }
/**
* Returns the analyzer as the default analyzer in the engines {@link org.apache.lucene.index.IndexWriter}
@ -306,7 +303,6 @@ public final class EngineConfig {
* should be automatically flushed. This is used to free up transient disk usage of potentially large segments that
* are written after the engine became inactive from an indexing perspective.
*/
public TimeValue getFlushMergesAfter() {
return flushMergesAfter;
}
public TimeValue getFlushMergesAfter() { return flushMergesAfter; }
}

View File

@ -24,13 +24,10 @@ import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.IndexReaderWarmer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.LiveIndexWriterConfig;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.Term;
@ -51,7 +48,6 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.LoggerInfoStream;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.lucene.index.ElasticsearchLeafReader;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.math.MathUtils;
import org.elasticsearch.common.unit.ByteSizeValue;
@ -72,7 +68,6 @@ import org.elasticsearch.index.translog.TranslogCorruptedException;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
@ -93,7 +88,6 @@ public class InternalEngine extends Engine {
*/
private volatile long lastDeleteVersionPruneTimeMSec;
private final Engine.Warmer warmer;
private final Translog translog;
private final ElasticsearchConcurrentMergeScheduler mergeScheduler;
@ -135,7 +129,6 @@ public class InternalEngine extends Engine {
boolean success = false;
try {
this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
this.warmer = engineConfig.getWarmer();
seqNoService = new SequenceNumbersService(shardId, engineConfig.getIndexSettings());
mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
this.dirtyLocks = new Object[Runtime.getRuntime().availableProcessors() * 10]; // we multiply it to have enough...
@ -956,30 +949,6 @@ public class InternalEngine extends Engine {
iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac());
iwc.setCodec(engineConfig.getCodec());
iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
// Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
// of the merge operation and won't slow down _refresh
iwc.setMergedSegmentWarmer(new IndexReaderWarmer() {
@Override
public void warm(LeafReader reader) throws IOException {
try {
LeafReader esLeafReader = new ElasticsearchLeafReader(reader, shardId);
assert isMergedSegment(esLeafReader);
if (warmer != null) {
final Engine.Searcher searcher = new Searcher("warmer", searcherFactory.newSearcher(esLeafReader, null));
warmer.warm(searcher, false);
}
} catch (Throwable t) {
// Don't fail a merge if the warm-up failed
if (isClosed.get() == false) {
logger.warn("Warm-up failed", t);
}
if (t instanceof Error) {
// assertion/out-of-memory error, don't ignore those
throw (Error) t;
}
}
}
});
return new IndexWriter(store.directory(), iwc);
} catch (LockObtainFailedException ex) {
logger.warn("could not lock IndexWriter", ex);
@ -990,14 +959,12 @@ public class InternalEngine extends Engine {
/** Extended SearcherFactory that warms the segments if needed when acquiring a new searcher */
final static class SearchFactory extends EngineSearcherFactory {
private final Engine.Warmer warmer;
private final ShardId shardId;
private final ESLogger logger;
private final AtomicBoolean isEngineClosed;
SearchFactory(ESLogger logger, AtomicBoolean isEngineClosed, EngineConfig engineConfig) {
super(engineConfig);
warmer = engineConfig.getWarmer();
shardId = engineConfig.getShardId();
this.logger = logger;
this.isEngineClosed = isEngineClosed;
}
@ -1012,55 +979,13 @@ public class InternalEngine extends Engine {
return searcher;
}
if (warmer != null) {
// we need to pass a custom searcher that does not release anything on Engine.Searcher release;
// we will release explicitly
IndexSearcher newSearcher = null;
boolean closeNewSearcher = false;
try {
if (previousReader == null) {
// we are starting up - no writer active so we can't acquire a searcher.
newSearcher = searcher;
} else {
// figure out the newSearcher, with only the new readers that are relevant for us
List<IndexReader> readers = new ArrayList<>();
for (LeafReaderContext newReaderContext : reader.leaves()) {
if (isMergedSegment(newReaderContext.reader())) {
// merged segments are already handled by IndexWriterConfig.setMergedSegmentWarmer
continue;
}
boolean found = false;
for (LeafReaderContext currentReaderContext : previousReader.leaves()) {
if (currentReaderContext.reader().getCoreCacheKey().equals(newReaderContext.reader().getCoreCacheKey())) {
found = true;
break;
}
}
if (!found) {
readers.add(newReaderContext.reader());
}
}
if (!readers.isEmpty()) {
// we don't want to close the inner readers, just increase ref on them
IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false);
newSearcher = super.newSearcher(newReader, null);
closeNewSearcher = true;
}
}
if (newSearcher != null) {
warmer.warm(new Searcher("new_reader_warming", newSearcher), false);
}
assert searcher.getIndexReader() instanceof ElasticsearchDirectoryReader : "this class needs an ElasticsearchDirectoryReader but got: " + searcher.getIndexReader().getClass();
warmer.warm(new Searcher("top_reader_warming", searcher), true);
warmer.warm(new Searcher("top_reader_warming", searcher));
} catch (Throwable e) {
if (isEngineClosed.get() == false) {
logger.warn("failed to prepare/warm", e);
}
} finally {
// no need to release the fullSearcher, nothing really is done...
if (newSearcher != null && closeNewSearcher) {
IOUtils.closeWhileHandlingException(newSearcher.getIndexReader()); // ignore
}
}
}
return searcher;

View File

@ -348,7 +348,7 @@ public class SegmentsStats implements Streamable, ToXContent {
indexWriterMaxMemoryInBytes = in.readLong();
bitsetMemoryInBytes = in.readLong();
if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
int size = in.readVInt();
ImmutableOpenMap.Builder<String, Long> map = ImmutableOpenMap.builder(size);
for (int i = 0; i < size; i++) {
@ -376,7 +376,7 @@ public class SegmentsStats implements Streamable, ToXContent {
out.writeLong(indexWriterMaxMemoryInBytes);
out.writeLong(bitsetMemoryInBytes);
if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
out.writeVInt(fileSizes.size());
for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
ObjectObjectCursor<String, Long> entry = it.next();
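The gate must be identical on both sides of the wire; renaming the constant in readFrom and writeTo together is what keeps mixed-version clusters compatible. The pattern in sketch form:

    // readFrom: only consume the extra fields when the sender is new enough
    if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
        // read the per-file size map
    }

    // writeTo: mirror the exact same version check
    if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
        // write the per-file size map
    }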

View File

@ -1,92 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MappedFieldType.Loading;
/**
*/
public class FieldDataType {
public static final String FORMAT_KEY = "format";
public static final String DOC_VALUES_FORMAT_VALUE = "doc_values";
private final String type;
private final String typeFormat;
private final Loading loading;
private final Settings settings;
public FieldDataType(String type) {
this(type, Settings.Builder.EMPTY_SETTINGS);
}
public FieldDataType(String type, Settings.Builder builder) {
this(type, builder.build());
}
public FieldDataType(String type, Settings settings) {
this.type = type;
this.typeFormat = "index.fielddata.type." + type + "." + FORMAT_KEY;
this.settings = settings;
final String loading = settings.get(Loading.KEY);
this.loading = Loading.parse(loading, Loading.LAZY);
}
public String getType() {
return this.type;
}
public Settings getSettings() {
return this.settings;
}
public Loading getLoading() {
return loading;
}
public String getFormat(Settings indexSettings) {
String format = settings.get(FORMAT_KEY);
if (format == null && indexSettings != null) {
format = indexSettings.get(typeFormat);
}
return format;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FieldDataType that = (FieldDataType) o;
if (!settings.equals(that.settings)) return false;
if (!type.equals(that.type)) return false;
return true;
}
@Override
public int hashCode() {
int result = type.hashCode();
result = 31 * result + settings.hashCode();
return result;
}
}

View File

@ -67,18 +67,6 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
return null;
}
}
/**
* Gets a memory storage hint that should be honored if possible but is not mandatory
*/
public static MemoryStorageFormat getMemoryStorageHint(FieldDataType fieldDataType) {
// backwards compatibility
String s = fieldDataType.getSettings().get("ordinals");
if (s != null) {
return "always".equals(s) ? MemoryStorageFormat.ORDINALS : null;
}
return MemoryStorageFormat.fromString(fieldDataType.getSettings().get(SETTING_MEMORY_STORAGE_HINT));
}
}
/**
@ -86,11 +74,6 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
*/
String getFieldName();
/**
* The field data type.
*/
FieldDataType getFieldDataType();
/**
* Loads the atomic field data for the reader, possibly cached.
*/

View File

@ -48,12 +48,12 @@ public interface IndexFieldDataCache {
/**
* Called after the fielddata is loaded during the cache phase
*/
default void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage){}
default void onCache(ShardId shardId, String fieldName, Accountable ramUsage){}
/**
* Called after the fielddata is unloaded
*/
default void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes){}
default void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes){}
}
class None implements IndexFieldDataCache {

View File

@ -70,116 +70,19 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
}
}, Property.IndexScope);
private static final IndexFieldData.Builder MISSING_DOC_VALUES_BUILDER = (indexProperties, fieldType, cache, breakerService, mapperService1) -> {
throw new IllegalStateException("Can't load fielddata on [" + fieldType.name()
+ "] of index [" + indexProperties.getIndex().getName() + "] because fielddata is unsupported on fields of type ["
+ fieldType.fieldDataType().getType() + "]. Use doc values instead.");
};
private static final String ARRAY_FORMAT = "array";
private static final String DISABLED_FORMAT = "disabled";
private static final String DOC_VALUES_FORMAT = "doc_values";
private static final String PAGED_BYTES_FORMAT = "paged_bytes";
private static final IndexFieldData.Builder DISABLED_BUILDER = new IndexFieldData.Builder() {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
throw new IllegalStateException("Field data loading is forbidden on [" + fieldType.name() + "]");
}
};
private final static Map<String, IndexFieldData.Builder> buildersByType;
private final static Map<String, IndexFieldData.Builder> docValuesBuildersByType;
private final static Map<Tuple<String, String>, IndexFieldData.Builder> buildersByTypeAndFormat;
private final CircuitBreakerService circuitBreakerService;
static {
Map<String, IndexFieldData.Builder> buildersByTypeBuilder = new HashMap<>();
buildersByTypeBuilder.put("string", new PagedBytesIndexFieldData.Builder());
buildersByTypeBuilder.put(TextFieldMapper.CONTENT_TYPE, new PagedBytesIndexFieldData.Builder());
buildersByTypeBuilder.put(KeywordFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("float", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("double", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("byte", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("short", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("int", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("long", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("geo_point", new GeoPointArrayIndexFieldData.Builder());
buildersByTypeBuilder.put(ParentFieldMapper.NAME, new ParentChildIndexFieldData.Builder());
buildersByTypeBuilder.put(IndexFieldMapper.NAME, new IndexIndexFieldData.Builder());
buildersByTypeBuilder.put("binary", DISABLED_BUILDER);
buildersByTypeBuilder.put(BooleanFieldMapper.CONTENT_TYPE, MISSING_DOC_VALUES_BUILDER);
buildersByType = unmodifiableMap(buildersByTypeBuilder);
docValuesBuildersByType = MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
.put("string", new DocValuesIndexFieldData.Builder())
.put(KeywordFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder())
.put("float", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
.put("double", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE))
.put("byte", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE))
.put("short", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT))
.put("int", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT))
.put("long", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG))
.put("geo_point", new AbstractGeoPointDVIndexFieldData.Builder())
.put("binary", new BytesBinaryDVIndexFieldData.Builder())
.put(BooleanFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BOOLEAN))
.immutableMap();
buildersByTypeAndFormat = MapBuilder.<Tuple<String, String>, IndexFieldData.Builder>newMapBuilder()
.put(Tuple.tuple("string", PAGED_BYTES_FORMAT), new PagedBytesIndexFieldData.Builder())
.put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
.put(Tuple.tuple("string", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple(TextFieldMapper.CONTENT_TYPE, PAGED_BYTES_FORMAT), new PagedBytesIndexFieldData.Builder())
.put(Tuple.tuple(TextFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
.put(Tuple.tuple(KeywordFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("float", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.FLOAT))
.put(Tuple.tuple("float", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("double", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.DOUBLE))
.put(Tuple.tuple("double", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("byte", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BYTE))
.put(Tuple.tuple("byte", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("short", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT))
.put(Tuple.tuple("short", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("int", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT))
.put(Tuple.tuple("int", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("long", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG))
.put(Tuple.tuple("long", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("geo_point", ARRAY_FORMAT), new GeoPointArrayIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DOC_VALUES_FORMAT), new AbstractGeoPointDVIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple("binary", DOC_VALUES_FORMAT), new BytesBinaryDVIndexFieldData.Builder())
.put(Tuple.tuple("binary", DISABLED_FORMAT), DISABLED_BUILDER)
.put(Tuple.tuple(BooleanFieldMapper.CONTENT_TYPE, DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BOOLEAN))
.put(Tuple.tuple(BooleanFieldMapper.CONTENT_TYPE, DISABLED_FORMAT), DISABLED_BUILDER)
.immutableMap();
}
private final IndicesFieldDataCache indicesFieldDataCache;
// the below map needs to be modified under a lock
private final Map<String, IndexFieldDataCache> fieldDataCaches = new HashMap<>();
private final MapperService mapperService;
private static final IndexFieldDataCache.Listener DEFAULT_NOOP_LISTENER = new IndexFieldDataCache.Listener() {
@Override
public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) {
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
}
@Override
public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
}
};
private volatile IndexFieldDataCache.Listener listener = DEFAULT_NOOP_LISTENER;
@ -223,42 +126,15 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
@SuppressWarnings("unchecked")
public <IFD extends IndexFieldData<?>> IFD getForField(MappedFieldType fieldType) {
final String fieldName = fieldType.name();
final FieldDataType type = fieldType.fieldDataType();
if (type == null) {
throw new IllegalArgumentException("found no fielddata type for field [" + fieldName + "]");
}
final boolean docValues = fieldType.hasDocValues();
IndexFieldData.Builder builder = null;
String format = type.getFormat(indexSettings.getSettings());
if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
logger.warn("field [{}] has no doc values, will use default field data format", fieldName);
format = null;
}
if (format != null) {
builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
if (builder == null) {
logger.warn("failed to find format [{}] for field [{}], will use default", format, fieldName);
}
}
if (builder == null && docValues) {
builder = docValuesBuildersByType.get(type.getType());
}
if (builder == null) {
builder = buildersByType.get(type.getType());
}
if (builder == null) {
throw new IllegalArgumentException("failed to find field data builder for field " + fieldName + ", and type " + type.getType());
}
IndexFieldData.Builder builder = fieldType.fielddataBuilder();
IndexFieldDataCache cache;
synchronized (this) {
cache = fieldDataCaches.get(fieldName);
if (cache == null) {
// we default to node level cache, which in turn defaults to be unbounded
// this means changing the node level settings is simple, just set the bounds there
String cacheType = type.getSettings().get("cache", indexSettings.getValue(INDEX_FIELDDATA_CACHE_KEY));
String cacheType = indexSettings.getValue(INDEX_FIELDDATA_CACHE_KEY);
if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) {
cache = indicesFieldDataCache.buildIndexFieldDataCache(listener, index(), fieldName, type);
cache = indicesFieldDataCache.buildIndexFieldDataCache(listener, index(), fieldName);
} else if ("none".equals(cacheType)){
cache = new IndexFieldDataCache.None();
} else {
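The net effect of the removals above: field data resolution no longer consults static type/format tables keyed by FieldDataType; the MappedFieldType supplies its own builder. In sketch form, the lookup collapses to:

    // before: buildersByTypeAndFormat / docValuesBuildersByType / buildersByType lookups
    // after: one call on the field type itself
    IndexFieldData.Builder builder = fieldType.fielddataBuilder();
    IndexFieldData<?> fieldData =
            builder.build(indexSettings, fieldType, cache, circuitBreakerService, mapperService);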

View File

@ -51,7 +51,7 @@ public class ShardFieldData implements IndexFieldDataCache.Listener {
}
@Override
public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) {
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
totalMetric.inc(ramUsage.ramBytesUsed());
CounterMetric total = perFieldTotals.get(fieldName);
if (total != null) {
@ -67,7 +67,7 @@ public class ShardFieldData implements IndexFieldDataCache.Listener {
}
@Override
public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
if (wasEvicted) {
evictionsMetric.inc();
}

View File

@ -70,7 +70,7 @@ public enum GlobalOrdinalsBuilder {
);
}
return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),
indexFieldData.getFieldDataType(), atomicFD, ordinalMap, memorySizeInBytes
atomicFD, ordinalMap, memorySizeInBytes
);
}
@ -104,7 +104,7 @@ public enum GlobalOrdinalsBuilder {
}
final OrdinalMap ordinalMap = OrdinalMap.build(null, subs, PackedInts.DEFAULT);
return new InternalGlobalOrdinalsIndexFieldData(indexSettings, indexFieldData.getFieldName(),
indexFieldData.getFieldDataType(), atomicFD, ordinalMap, 0
atomicFD, ordinalMap, 0
);
}

View File

@ -25,11 +25,9 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.MultiValueMode;
import java.util.Collection;
@ -41,13 +39,11 @@ import java.util.Collections;
public abstract class GlobalOrdinalsIndexFieldData extends AbstractIndexComponent implements IndexOrdinalsFieldData, Accountable {
private final String fieldName;
private final FieldDataType fieldDataType;
private final long memorySizeInBytes;
protected GlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, long memorySizeInBytes) {
protected GlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, long memorySizeInBytes) {
super(indexSettings);
this.fieldName = fieldName;
this.fieldDataType = fieldDataType;
this.memorySizeInBytes = memorySizeInBytes;
}
@ -71,11 +67,6 @@ public abstract class GlobalOrdinalsIndexFieldData extends AbstractIndexComponen
return fieldName;
}
@Override
public FieldDataType getFieldDataType() {
return fieldDataType;
}
@Override
public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) {
throw new UnsupportedOperationException("no global ordinals sorting yet");

View File

@@ -24,9 +24,7 @@ import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.plain.AbstractAtomicOrdinalsFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import java.util.Collection;
@@ -37,8 +35,8 @@ final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFiel
private final Atomic[] atomicReaders;
InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, AtomicOrdinalsFieldData[] segmentAfd, OrdinalMap ordinalMap, long memorySizeInBytes) {
super(indexSettings, fieldName, fieldDataType, memorySizeInBytes);
InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd, OrdinalMap ordinalMap, long memorySizeInBytes) {
super(indexSettings, fieldName, memorySizeInBytes);
this.atomicReaders = new Atomic[segmentAfd.length];
for (int i = 0; i < segmentAfd.length; i++) {
atomicReaders[i] = new Atomic(segmentAfd[i], ordinalMap, i);

View File

@@ -35,7 +35,6 @@ import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.packed.GrowableWriter;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PagedGrowableWriter;
import org.elasticsearch.common.settings.Settings;
import java.io.Closeable;
import java.io.IOException;
@@ -287,20 +286,13 @@ public final class OrdinalsBuilder implements Closeable {
private OrdinalsStore ordinals;
private final LongsRef spare;
public OrdinalsBuilder(long numTerms, int maxDoc, float acceptableOverheadRatio) throws IOException {
public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) throws IOException {
this.maxDoc = maxDoc;
int startBitsPerValue = 8;
if (numTerms >= 0) {
startBitsPerValue = PackedInts.bitsRequired(numTerms);
}
ordinals = new OrdinalsStore(maxDoc, startBitsPerValue, acceptableOverheadRatio);
spare = new LongsRef();
}
public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) throws IOException {
this(-1, maxDoc, acceptableOverheadRatio);
}
public OrdinalsBuilder(int maxDoc) throws IOException {
this(maxDoc, DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
}
@@ -413,10 +405,9 @@ public final class OrdinalsBuilder implements Closeable {
/**
* Builds an {@link Ordinals} instance from the builders current state.
*/
public Ordinals build(Settings settings) {
final float acceptableOverheadRatio = settings.getAsFloat("acceptable_overhead_ratio", PackedInts.FASTEST);
final boolean forceMultiOrdinals = settings.getAsBoolean(FORCE_MULTI_ORDINALS, false);
if (forceMultiOrdinals || numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getValueCount(), acceptableOverheadRatio)) {
public Ordinals build() {
final float acceptableOverheadRatio = PackedInts.DEFAULT;
if (numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getValueCount(), acceptableOverheadRatio)) {
// MultiOrdinals can be smaller than SinglePackedOrdinals for sparse fields
return new MultiOrdinals(this, acceptableOverheadRatio);
} else {

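Note on the OrdinalsBuilder change above: build() no longer takes Settings, so the "acceptable_overhead_ratio" and force-multi-ordinals knobs are gone. A before/after sketch for callers, using only names from this diff:

    // Before: behavior depended on per-field data type settings.
    Ordinals ordinals = builder.build(fieldDataType.getSettings());

    // After: no arguments; PackedInts.DEFAULT is always used, and MultiOrdinals
    // is chosen only when documents are actually multi-valued or when it is
    // measurably smaller for sparse fields.
    Ordinals ordinals = builder.build();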
View File

@@ -26,7 +26,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
@@ -40,8 +39,8 @@ import java.io.IOException;
public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFieldData implements IndexGeoPointFieldData {
AbstractGeoPointDVIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType) {
super(index, fieldName, fieldDataType);
AbstractGeoPointDVIndexFieldData(Index index, String fieldName) {
super(index, fieldName);
}
@Override
@@ -55,8 +54,8 @@ public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFie
public static class GeoPointDVIndexFieldData extends AbstractGeoPointDVIndexFieldData {
final boolean indexCreatedBefore2x;
public GeoPointDVIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType, final boolean indexCreatedBefore2x) {
super(index, fieldName, fieldDataType);
public GeoPointDVIndexFieldData(Index index, String fieldName, final boolean indexCreatedBefore2x) {
super(index, fieldName);
this.indexCreatedBefore2x = indexCreatedBefore2x;
}
@@ -82,8 +81,12 @@ public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFie
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
if (indexSettings.getIndexVersionCreated().before(Version.V_2_2_0)
&& fieldType.hasDocValues() == false) {
return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), cache, breakerService);
}
// Ignore breaker
return new GeoPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name(), fieldType.fieldDataType(),
return new GeoPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name(),
indexSettings.getIndexVersionCreated().before(Version.V_2_2_0));
}
}

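Note on the builder hunk above: the implementation choice is now version-gated up front. Indices created before 2.2.0 that did not index doc values fall back to the in-memory GeoPointArrayIndexFieldData; every other index gets the doc-values-backed GeoPointDVIndexFieldData, which still receives the before(Version.V_2_2_0) flag so it can decode the legacy point encoding.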
View File

@@ -27,7 +27,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.RamAccountingTermsEnum;
@@ -39,13 +38,11 @@ import java.io.IOException;
public abstract class AbstractIndexFieldData<FD extends AtomicFieldData> extends AbstractIndexComponent implements IndexFieldData<FD> {
private final String fieldName;
protected final FieldDataType fieldDataType;
protected final IndexFieldDataCache cache;
public AbstractIndexFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, IndexFieldDataCache cache) {
public AbstractIndexFieldData(IndexSettings indexSettings, String fieldName, IndexFieldDataCache cache) {
super(indexSettings);
this.fieldName = fieldName;
this.fieldDataType = fieldDataType;
this.cache = cache;
}
@@ -54,11 +51,6 @@ public abstract class AbstractIndexFieldData<FD extends AtomicFieldData> extends
return this.fieldName;
}
@Override
public FieldDataType getFieldDataType() {
return fieldDataType;
}
@Override
public void clear() {
cache.clear(fieldName);

View File

@@ -30,7 +30,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
@@ -102,8 +101,8 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
}
}
public AbstractIndexGeoPointFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType, IndexFieldDataCache cache) {
super(indexSettings, fieldName, fieldDataType, cache);
public AbstractIndexGeoPointFieldData(IndexSettings indexSettings, String fieldName, IndexFieldDataCache cache) {
super(indexSettings, fieldName, cache);
}
@Override

View File

@@ -25,13 +25,10 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
@@ -41,23 +38,21 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.MultiValueMode;
import java.io.IOException;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldData<AtomicOrdinalsFieldData> implements IndexOrdinalsFieldData {
protected Settings frequency;
protected Settings regex;
private final double minFrequency, maxFrequency;
private final int minSegmentSize;
protected final CircuitBreakerService breakerService;
protected AbstractIndexOrdinalsFieldData(IndexSettings indexSettings, String fieldName, FieldDataType fieldDataType,
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
super(indexSettings, fieldName, fieldDataType, cache);
final Map<String, Settings> groups = fieldDataType.getSettings().getGroups("filter");
frequency = groups.get("frequency");
regex = groups.get("regex");
protected AbstractIndexOrdinalsFieldData(IndexSettings indexSettings, String fieldName,
IndexFieldDataCache cache, CircuitBreakerService breakerService,
double minFrequency, double maxFrequency, int minSegmentSize) {
super(indexSettings, fieldName, cache);
this.breakerService = breakerService;
this.minFrequency = minFrequency;
this.maxFrequency = maxFrequency;
this.minSegmentSize = minSegmentSize;
}
@Override
@@ -110,17 +105,24 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD
return AbstractAtomicOrdinalsFieldData.empty();
}
protected TermsEnum filter(Terms terms, LeafReader reader) throws IOException {
TermsEnum iterator = terms.iterator();
protected TermsEnum filter(Terms terms, TermsEnum iterator, LeafReader reader) throws IOException {
if (iterator == null) {
return null;
}
if (iterator != null && frequency != null) {
iterator = FrequencyFilter.filter(iterator, terms, reader, frequency);
int docCount = terms.getDocCount();
if (docCount == -1) {
docCount = reader.maxDoc();
}
if (iterator != null && regex != null) {
iterator = RegexFilter.filter(iterator, terms, reader, regex);
if (docCount >= minSegmentSize) {
final int minFreq = minFrequency > 1.0
? (int) minFrequency
: (int)(docCount * minFrequency);
final int maxFreq = maxFrequency > 1.0
? (int) maxFrequency
: (int)(docCount * maxFrequency);
if (minFreq > 1 || maxFreq < docCount) {
iterator = new FrequencyFilter(iterator, minFreq, maxFreq);
}
}
return iterator;
}
@@ -135,25 +137,6 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD
this.maxFreq = maxFreq;
}
public static TermsEnum filter(TermsEnum toFilter, Terms terms, LeafReader reader, Settings settings) throws IOException {
int docCount = terms.getDocCount();
if (docCount == -1) {
docCount = reader.maxDoc();
}
final double minFrequency = settings.getAsDouble("min", 0d);
final double maxFrequency = settings.getAsDouble("max", docCount+1d);
final double minSegmentSize = settings.getAsInt("min_segment_size", 0);
if (minSegmentSize < docCount) {
final int minFreq = minFrequency > 1.0? (int) minFrequency : (int)(docCount * minFrequency);
final int maxFreq = maxFrequency > 1.0? (int) maxFrequency : (int)(docCount * maxFrequency);
assert minFreq < maxFreq;
return new FrequencyFilter(toFilter, minFreq, maxFreq);
}
return toFilter;
}
@Override
protected AcceptStatus accept(BytesRef arg0) throws IOException {
int docFreq = docFreq();
@@ -164,33 +147,4 @@ public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldD
}
}
private static final class RegexFilter extends FilteredTermsEnum {
private final Matcher matcher;
private final CharsRefBuilder spare = new CharsRefBuilder();
public RegexFilter(TermsEnum delegate, Matcher matcher) {
super(delegate, false);
this.matcher = matcher;
}
public static TermsEnum filter(TermsEnum iterator, Terms terms, LeafReader reader, Settings regex) {
String pattern = regex.get("pattern");
if (pattern == null) {
return iterator;
}
Pattern p = Pattern.compile(pattern);
return new RegexFilter(iterator, p.matcher(""));
}
@Override
protected AcceptStatus accept(BytesRef arg0) throws IOException {
spare.copyUTF8Bytes(arg0);
matcher.reset(spare.get());
if (matcher.matches()) {
return AcceptStatus.YES;
}
return AcceptStatus.NO;
}
}
}

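Note on the rewritten filter(...) above: frequency bounds of at most 1.0 are interpreted as a fraction of the segment's document count, values above 1.0 as absolute document frequencies, and segments smaller than minSegmentSize skip filtering entirely. A worked sketch of the threshold arithmetic (the helper name is illustrative, not from the source):

    // Hypothetical helper mirroring the conversion inside filter():
    static int resolveFrequency(double frequency, int docCount) {
        return frequency > 1.0 ? (int) frequency : (int) (docCount * frequency);
    }

    // With docCount = 1_000_000:
    //   resolveFrequency(0.001, 1_000_000) == 1_000  (ratio of the doc count)
    //   resolveFrequency(50.0,  1_000_000) == 50     (absolute frequency)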
View File

@@ -21,7 +21,6 @@ package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
@@ -29,8 +28,8 @@ import org.elasticsearch.search.MultiValueMode;
public class BinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData<BinaryDVAtomicFieldData> {
public BinaryDVIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType) {
super(index, fieldName, fieldDataType);
public BinaryDVIndexFieldData(Index index, String fieldName) {
super(index, fieldName);
}
@Override

View File

@@ -24,7 +24,6 @@ import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
@@ -37,8 +36,8 @@ import java.io.IOException;
public class BytesBinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexFieldData<BytesBinaryDVAtomicFieldData> {
public BytesBinaryDVIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType) {
super(index, fieldName, fieldDataType);
public BytesBinaryDVIndexFieldData(Index index, String fieldName) {
super(index, fieldName);
}
@Override
@@ -67,7 +66,7 @@ public class BytesBinaryDVIndexFieldData extends DocValuesIndexFieldData impleme
CircuitBreakerService breakerService, MapperService mapperService) {
// Ignore breaker
final String fieldName = fieldType.name();
return new BytesBinaryDVIndexFieldData(indexSettings.getIndex(), fieldName, fieldType.fieldDataType());
return new BytesBinaryDVIndexFieldData(indexSettings.getIndex(), fieldName);
}
}

View File

@@ -25,7 +25,6 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
@@ -46,14 +45,12 @@ public abstract class DocValuesIndexFieldData {
protected final Index index;
protected final String fieldName;
protected final FieldDataType fieldDataType;
protected final ESLogger logger;
public DocValuesIndexFieldData(Index index, String fieldName, FieldDataType fieldDataType) {
public DocValuesIndexFieldData(Index index, String fieldName) {
super();
this.index = index;
this.fieldName = fieldName;
this.fieldDataType = fieldDataType;
this.logger = Loggers.getLogger(getClass());
}
@@ -61,10 +58,6 @@ public abstract class DocValuesIndexFieldData {
return fieldName;
}
public final FieldDataType getFieldDataType() {
return fieldDataType;
}
public final void clear() {
// can't do
}
@@ -92,19 +85,13 @@ public abstract class DocValuesIndexFieldData {
CircuitBreakerService breakerService, MapperService mapperService) {
// Ignore Circuit Breaker
final String fieldName = fieldType.name();
final Settings fdSettings = fieldType.fieldDataType().getSettings();
final Map<String, Settings> filter = fdSettings.getGroups("filter");
if (filter != null && !filter.isEmpty()) {
throw new IllegalArgumentException("Doc values field data doesn't support filters [" + fieldName + "]");
}
if (BINARY_INDEX_FIELD_NAMES.contains(fieldName)) {
assert numericType == null;
return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName, fieldType.fieldDataType());
return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName);
} else if (numericType != null) {
return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType, fieldType.fieldDataType());
return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType);
} else {
return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService, fieldType.fieldDataType());
return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService);
}
}

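Note on the build(...) hunk above: with per-field filter settings gone, the up-front rejection of doc-values fields carrying filters is no longer needed, leaving a plain three-way dispatch. Consolidated here for readability, using only types from this diff:

    if (BINARY_INDEX_FIELD_NAMES.contains(fieldName)) {        // built-in binary fields
        return new BinaryDVIndexFieldData(indexSettings.getIndex(), fieldName);
    } else if (numericType != null) {                          // numeric and date fields
        return new SortedNumericDVIndexFieldData(indexSettings.getIndex(), fieldName, numericType);
    } else {                                                   // ordinal-based (string-like) fields
        return new SortedSetDVOrdinalsIndexFieldData(indexSettings, cache, fieldName, breakerService);
    }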
View File

@@ -35,7 +35,6 @@ import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
@@ -50,18 +49,9 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData {
private final CircuitBreakerService breakerService;
public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
return new GeoPointArrayIndexFieldData(indexSettings, fieldType.name(), fieldType.fieldDataType(), cache,
breakerService);
}
}
public GeoPointArrayIndexFieldData(IndexSettings indexSettings, String fieldName,
FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
super(indexSettings, fieldName, fieldDataType, cache);
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
super(indexSettings, fieldName, cache);
this.breakerService = breakerService;
}
@@ -88,8 +78,7 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
private AtomicGeoPointFieldData loadFieldData22(LeafReader reader, NonEstimatingEstimator estimator, Terms terms,
AtomicGeoPointFieldData data) throws Exception {
LongArray indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(128);
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio",
OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
final float acceptableTransientOverheadRatio = OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO;
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
final TermsEnum termsEnum;
@@ -112,10 +101,9 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
}
indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.resize(indexedPoints, numTerms);
Ordinals build = builder.build(fieldDataType.getSettings());
Ordinals build = builder.build();
RandomAccessOrds ordinals = build.ordinals();
if (!(FieldData.isMultiValued(ordinals) || CommonSettings.getMemoryStorageHint(fieldDataType) == CommonSettings
.MemoryStorageFormat.ORDINALS)) {
if (FieldData.isMultiValued(ordinals) == false) {
int maxDoc = reader.maxDoc();
LongArray sIndexedPoint = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(reader.maxDoc());
for (int i=0; i<maxDoc; ++i) {
@@ -146,9 +134,9 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
AtomicGeoPointFieldData data) throws Exception {
DoubleArray lat = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(128);
DoubleArray lon = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(128);
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
final float acceptableTransientOverheadRatio = OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO;
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(), acceptableTransientOverheadRatio)) {
try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
final GeoPointTermsEnumLegacy iter = new GeoPointTermsEnumLegacy(builder.buildFromTerms(terms.iterator()));
GeoPoint point;
long numTerms = 0;
@@ -162,9 +150,9 @@ public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData
lat = BigArrays.NON_RECYCLING_INSTANCE.resize(lat, numTerms);
lon = BigArrays.NON_RECYCLING_INSTANCE.resize(lon, numTerms);
Ordinals build = builder.build(fieldDataType.getSettings());
Ordinals build = builder.build();
RandomAccessOrds ordinals = build.ordinals();
if (!(FieldData.isMultiValued(ordinals) || CommonSettings.getMemoryStorageHint(fieldDataType) == CommonSettings.MemoryStorageFormat.ORDINALS)) {
if (FieldData.isMultiValued(ordinals) == false) {
int maxDoc = reader.maxDoc();
DoubleArray sLat = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(reader.maxDoc());
DoubleArray sLon = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(reader.maxDoc());

View File

@@ -28,12 +28,12 @@ import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import java.util.Collection;
@@ -101,7 +101,10 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData {
private final AtomicOrdinalsFieldData atomicFieldData;
private IndexIndexFieldData(IndexSettings indexSettings, String name) {
super(indexSettings, name, new FieldDataType("string"), null, null);
super(indexSettings, name, null, null,
TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY,
TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY,
TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE);
atomicFieldData = new IndexAtomicFieldData(index().getName());
}

Some files were not shown because too many files have changed in this diff