Merge branch 'master' into feature/client_aggs_parsing
commit ce7326eb88
@@ -167,6 +167,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
              * warning every time. */
             current.println("  - skip:")
             current.println("        features: ")
+            current.println("        - stash_in_key")
             current.println("        - warnings")
         }
         if (test.skipTest) {

@@ -295,7 +296,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
         Path dest = outputRoot().toPath().resolve(test.path)
         // Replace the extension
         String fileName = dest.getName(dest.nameCount - 1)
-        dest = dest.parent.resolve(fileName.replace('.asciidoc', '.yaml'))
+        dest = dest.parent.resolve(fileName.replace('.asciidoc', '.yml'))

         // Now setup the writer
         Files.createDirectories(dest.parent)

@@ -133,7 +133,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         super(in.readOptionalString(), in.readException());
         readStackTrace(this, in);
         headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
-        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
             metadata.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
         } else {
             for (Iterator<Map.Entry<String, List<String>>> iterator = headers.entrySet().iterator(); iterator.hasNext(); ) {

@@ -284,7 +284,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         out.writeOptionalString(this.getMessage());
         out.writeException(this.getCause());
         writeStackTraces(this, out);
-        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
             out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
             out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString);
         } else {

@@ -985,11 +985,11 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145,
             UNKNOWN_VERSION_ADDED),
         TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class,
-            org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1_UNRELEASED),
+            org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1),
         SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
             org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2),
         UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class,
-            org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0_UNRELEASED);
+            org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0);

         final Class<? extends ElasticsearchException> exceptionClass;
         final CheckedFunction<StreamInput, ? extends ElasticsearchException, IOException> constructor;
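Every `*_UNRELEASED` rename in this commit serves the same wire-compatibility idiom: a field is only read or written when the node on the other end of the stream is on a version that knows about it. A minimal sketch of that idiom, assuming a hypothetical `MyRequest` with a hypothetical `extraField` (the `StreamInput`/`StreamOutput` version check is the real API; everything else is illustrative):

```java
import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Hypothetical request type; only the version-gating idiom matters here.
class MyRequest {
    String extraField;

    void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
            extraField = in.readOptionalString(); // peers >= 5.3.0 send the field
        } else {
            extraField = null;                    // older peers never wrote it
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
            out.writeOptionalString(extraField);  // only send what the peer can parse
        }
    }
}
```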
@@ -55,33 +55,25 @@ public class Version implements Comparable<Version> {
     public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
     public static final int V_5_0_2_ID = 5000299;
     public static final Version V_5_0_2 = new Version(V_5_0_2_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
-    public static final int V_5_0_3_ID_UNRELEASED = 5000399;
-    public static final Version V_5_0_3_UNRELEASED = new Version(V_5_0_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
     // no version constant for 5.1.0 due to inadvertent release
-    public static final int V_5_1_1_ID_UNRELEASED = 5010199;
-    public static final Version V_5_1_1_UNRELEASED = new Version(V_5_1_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
-    public static final int V_5_1_2_ID_UNRELEASED = 5010299;
-    public static final Version V_5_1_2_UNRELEASED = new Version(V_5_1_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
-    public static final int V_5_1_3_ID_UNRELEASED = 5010399;
-    public static final Version V_5_1_3_UNRELEASED = new Version(V_5_1_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
-    public static final int V_5_2_0_ID_UNRELEASED = 5020099;
-    public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_0);
-    public static final int V_5_2_1_ID_UNRELEASED = 5020199;
-    public static final Version V_5_2_1_UNRELEASED = new Version(V_5_2_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
-    public static final int V_5_2_2_ID_UNRELEASED = 5020299;
-    public static final Version V_5_2_2_UNRELEASED = new Version(V_5_2_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
-    public static final int V_5_2_3_ID_UNRELEASED = 5020399;
-    public static final Version V_5_2_3_UNRELEASED = new Version(V_5_2_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
-    public static final int V_5_3_0_ID_UNRELEASED = 5030099;
-    public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
-    public static final int V_5_3_1_ID_UNRELEASED = 5030199;
-    public static final Version V_5_3_1_UNRELEASED = new Version(V_5_3_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2);
-    public static final int V_5_3_2_ID_UNRELEASED = 5030299;
-    public static final Version V_5_3_2_UNRELEASED = new Version(V_5_3_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2);
-    public static final int V_5_4_0_ID_UNRELEASED = 5040099;
-    public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
-    public static final int V_5_4_1_ID_UNRELEASED = 5040199;
-    public static final Version V_5_4_1_UNRELEASED = new Version(V_5_4_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_1);
+    public static final int V_5_1_1_ID = 5010199;
+    public static final Version V_5_1_1 = new Version(V_5_1_1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0);
+    public static final int V_5_1_2_ID = 5010299;
+    public static final Version V_5_1_2 = new Version(V_5_1_2_ID, org.apache.lucene.util.Version.LUCENE_6_3_0);
+    public static final int V_5_2_0_ID = 5020099;
+    public static final Version V_5_2_0 = new Version(V_5_2_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_0);
+    public static final int V_5_2_1_ID = 5020199;
+    public static final Version V_5_2_1 = new Version(V_5_2_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_1);
+    public static final int V_5_2_2_ID = 5020299;
+    public static final Version V_5_2_2 = new Version(V_5_2_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_1);
+    public static final int V_5_3_0_ID = 5030099;
+    public static final Version V_5_3_0 = new Version(V_5_3_0_ID, org.apache.lucene.util.Version.LUCENE_6_4_1);
+    public static final int V_5_3_1_ID = 5030199;
+    public static final Version V_5_3_1 = new Version(V_5_3_1_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
+    public static final int V_5_3_2_ID = 5030299;
+    public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
+    public static final int V_5_4_0_ID = 5040099;
+    public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
     public static final int V_5_5_0_ID_UNRELEASED = 5050099;
     public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
     public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;

@@ -111,32 +103,24 @@ public class Version implements Comparable<Version> {
                 return V_6_0_0_alpha1_UNRELEASED;
             case V_5_5_0_ID_UNRELEASED:
                 return V_5_5_0_UNRELEASED;
-            case V_5_4_1_ID_UNRELEASED:
-                return V_5_4_1_UNRELEASED;
-            case V_5_4_0_ID_UNRELEASED:
-                return V_5_4_0_UNRELEASED;
-            case V_5_3_2_ID_UNRELEASED:
-                return V_5_3_2_UNRELEASED;
-            case V_5_3_1_ID_UNRELEASED:
-                return V_5_3_1_UNRELEASED;
-            case V_5_3_0_ID_UNRELEASED:
-                return V_5_3_0_UNRELEASED;
-            case V_5_2_3_ID_UNRELEASED:
-                return V_5_2_3_UNRELEASED;
-            case V_5_2_2_ID_UNRELEASED:
-                return V_5_2_2_UNRELEASED;
-            case V_5_2_1_ID_UNRELEASED:
-                return V_5_2_1_UNRELEASED;
-            case V_5_2_0_ID_UNRELEASED:
-                return V_5_2_0_UNRELEASED;
-            case V_5_1_3_ID_UNRELEASED:
-                return V_5_1_3_UNRELEASED;
-            case V_5_1_2_ID_UNRELEASED:
-                return V_5_1_2_UNRELEASED;
-            case V_5_1_1_ID_UNRELEASED:
-                return V_5_1_1_UNRELEASED;
-            case V_5_0_3_ID_UNRELEASED:
-                return V_5_0_3_UNRELEASED;
+            case V_5_4_0_ID:
+                return V_5_4_0;
+            case V_5_3_2_ID:
+                return V_5_3_2;
+            case V_5_3_1_ID:
+                return V_5_3_1;
+            case V_5_3_0_ID:
+                return V_5_3_0;
+            case V_5_2_2_ID:
+                return V_5_2_2;
+            case V_5_2_1_ID:
+                return V_5_2_1;
+            case V_5_2_0_ID:
+                return V_5_2_0;
+            case V_5_1_2_ID:
+                return V_5_1_2;
+            case V_5_1_1_ID:
+                return V_5_1_1;
             case V_5_0_2_ID:
                 return V_5_0_2;
             case V_5_0_1_ID:

@@ -296,7 +280,7 @@ public class Version implements Comparable<Version> {
         final int bwcMinor;
         if (this.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
             bwcMajor = major - 1;
-            bwcMinor = 0; // TODO we have to move this to the latest released minor of the last major but for now we just keep
+            bwcMinor = 4;
         } else {
             bwcMajor = major;
             bwcMinor = 0;

@@ -306,7 +290,8 @@ public class Version implements Comparable<Version> {

     /**
      * Returns the minimum created index version that this version supports. Indices created with lower versions
-     * can't be used with this version.
+     * can't be used with this version. This should also be used for file based serialization backwards compatibility ie. on serialization
+     * code that is used to read / write file formats like transaction logs, cluster state, and index metadata.
      */
     public Version minimumIndexCompatibilityVersion() {
         final int bwcMajor;
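The numeric IDs above pack major, minor, revision, and build into decimal fields, so `V_5_3_0_ID = 5030099` reads as 5.3.0 with the `99` suffix these GA constants use. A small sketch, assuming only that layout (the class is illustrative, not part of the commit):

```java
public final class VersionIdDemo {
    public static void main(String[] args) {
        int id = 5030099;                  // V_5_3_0_ID from the hunk above
        int major = id / 1_000_000;        // 5
        int minor = (id / 10_000) % 100;   // 3
        int revision = (id / 100) % 100;   // 0
        int build = id % 100;              // 99
        System.out.println(major + "." + minor + "." + revision + " (build " + build + ")");
    }
}
```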
@@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

 import java.io.IOException;
+import java.util.Arrays;

 /**
  * Used to keep track of original indices within internal (e.g. shard level) requests

@@ -64,4 +65,12 @@ public final class OriginalIndices implements IndicesRequest {
         out.writeStringArrayNullable(originalIndices.indices);
         originalIndices.indicesOptions.writeIndicesOptions(out);
     }
+
+    @Override
+    public String toString() {
+        return "OriginalIndices{" +
+            "indices=" + Arrays.toString(indices) +
+            ", indicesOptions=" + indicesOptions +
+            '}';
+    }
 }

@@ -249,8 +249,8 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
     }

     private void checkVersion(Version version) {
-        if (version.before(Version.V_5_2_0_UNRELEASED)) {
-            throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0_UNRELEASED +
+        if (version.before(Version.V_5_2_0)) {
+            throw new IllegalArgumentException("cannot explain shards in a mixed-cluster with pre-" + Version.V_5_2_0 +
                 " nodes, node version [" + version + "]");
         }
     }

@@ -67,12 +67,20 @@ final class SettingsUpdater {
             .transientSettings(transientSettings.build());

         ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
-        boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
+        boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings())
+            || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
         if (updatedReadOnly) {
             blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
         } else {
             blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
         }
+        boolean updatedReadOnlyAllowDelete = MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.persistentSettings())
+            || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(metaData.transientSettings());
+        if (updatedReadOnlyAllowDelete) {
+            blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+        } else {
+            blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
+        }
         ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build();
         Settings settings = build.metaData().settings();
         // now we try to apply things and if they are invalid we fail

@@ -67,12 +67,15 @@ public class TransportClusterUpdateSettingsAction extends
     @Override
     protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) {
         // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it
-        if ((request.transientSettings().isEmpty() &&
-                request.persistentSettings().size() == 1 &&
-                MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) ||
-                (request.persistentSettings().isEmpty() && request.transientSettings().size() == 1 &&
-                MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings()))) {
-            return null;
+        if (request.transientSettings().size() + request.persistentSettings().size() == 1) {
+            // only one setting
+            if (MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())
+                    || MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())
+                    || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.exists(request.transientSettings())
+                    || MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.exists(request.persistentSettings())) {
+                // one of the settings above as the only setting in the request means - resetting the block!
+                return null;
+            }
         }
         return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
     }
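The rewritten `checkBlock` above is what keeps a blocked cluster recoverable: when the request carries exactly one setting and that setting is one of the block toggles, the METADATA_WRITE block is bypassed so the block can be switched off again. A hedged sketch of the request shape that branch is meant to let through (`Settings.builder()` is the real API; the demo class is illustrative):

```java
import org.elasticsearch.common.settings.Settings;

public final class ResetBlockDemo {
    public static void main(String[] args) {
        // A request carrying exactly one setting: turning the block off again.
        Settings persistent = Settings.builder()
            .put("cluster.blocks.read_only_allow_delete", false)
            .build();
        // One setting in total, and its key is a block toggle, so the
        // checkBlock() above returns null instead of a ClusterBlockException.
        System.out.println(persistent.size() == 1); // true
    }
}
```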
@@ -134,7 +134,7 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
         routing = in.readOptionalString();
         preference = in.readOptionalString();

-        if (in.getVersion().onOrBefore(Version.V_5_1_1_UNRELEASED)) {
+        if (in.getVersion().onOrBefore(Version.V_5_1_1)) {
             //types
             in.readStringArray();
         }

@@ -153,7 +153,7 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
         out.writeOptionalString(routing);
         out.writeOptionalString(preference);

-        if (out.getVersion().onOrBefore(Version.V_5_1_1_UNRELEASED)) {
+        if (out.getVersion().onOrBefore(Version.V_5_1_1)) {
             //types
             out.writeStringArray(Strings.EMPTY_ARRAY);
         }

@@ -73,7 +73,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
         for (int i = 0; i < nodes.length; i++) {
             nodes[i] = new DiscoveryNode(in);
         }
-        if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
             int size = in.readVInt();
             indicesAndFilters = new HashMap<>();
             for (int i = 0; i < size; i++) {

@@ -95,7 +95,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
         for (DiscoveryNode node : nodes) {
             node.writeTo(out);
         }
-        if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
             out.writeVInt(indicesAndFilters.size());
             for (Map.Entry<String, AliasFilter> entry : indicesAndFilters.entrySet()) {
                 out.writeString(entry.getKey());

@@ -59,7 +59,7 @@ public class GetStoredScriptResponse extends ActionResponse implements ToXConten
         super.readFrom(in);

         if (in.readBoolean()) {
-            if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+            if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
                 source = new StoredScriptSource(in);
             } else {
                 source = new StoredScriptSource(in.readString());

@@ -78,7 +78,7 @@ public class GetStoredScriptResponse extends ActionResponse implements ToXConten
         } else {
             out.writeBoolean(true);

-            if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+            if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
                 source.writeTo(out);
             } else {
                 out.writeString(source.getCode());

@@ -123,7 +123,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR

         id = in.readOptionalString();
         content = in.readBytesReference();
-        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
             xContentType = XContentType.readFrom(in);
         } else {
             xContentType = XContentFactory.xContentType(content);

@@ -137,7 +137,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
         out.writeString(lang == null ? "" : lang);
         out.writeOptionalString(id);
         out.writeBytesReference(content);
-        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
             xContentType.writeTo(out);
         }
     }

@@ -117,7 +117,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
         startOffset = in.readInt();
         endOffset = in.readInt();
         position = in.readVInt();
-        if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
             Integer len = in.readOptionalVInt();
             if (len != null) {
                 positionLength = len;

@@ -135,7 +135,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
         out.writeInt(startOffset);
         out.writeInt(endOffset);
         out.writeVInt(position);
-        if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
             out.writeOptionalVInt(positionLength > 1 ? positionLength : null);
         }
         out.writeOptionalString(type);

@@ -78,7 +78,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete

     @Override
     protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) {
-        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
+        return state.blocks().indicesAllowReleaseResources(indexNameExpressionResolver.concreteIndexNames(state, request));
     }

     @Override

@@ -62,7 +62,10 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
         if (globalBlock != null) {
             return globalBlock;
         }
-        if (request.settings().size() == 1 && IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())) {
+        if (request.settings().size() == 1 && // we have to allow resetting these settings otherwise users can't unblock an index
+            IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings())
+            || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())
+            || IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.exists(request.settings())) {
             return null;
         }
         return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));

@@ -76,7 +76,7 @@ public class QueryExplanation implements Streamable {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         index = in.readString();
-        if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
             shard = in.readInt();
         } else {
             shard = RANDOM_SHARD;

@@ -89,7 +89,7 @@ public class QueryExplanation implements Streamable {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(index);
-        if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
             out.writeInt(shard);
         }
         out.writeBoolean(valid);

@@ -154,7 +154,7 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
         }
         explain = in.readBoolean();
         rewrite = in.readBoolean();
-        if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
             allShards = in.readBoolean();
         }
     }

@@ -169,7 +169,7 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
         }
         out.writeBoolean(explain);
         out.writeBoolean(rewrite);
-        if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
             out.writeBoolean(allShards);
         }
     }

@@ -37,7 +37,6 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.seqno.SequenceNumbersService;
 import org.elasticsearch.rest.RestStatus;

@@ -421,7 +420,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         id = in.readVInt();
-        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
             opType = OpType.fromId(in.readByte());
         } else {
             opType = OpType.fromString(in.readString());

@@ -448,7 +447,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(id);
-        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
             out.writeByte(opType.getId());
         } else {
             out.writeString(opType.getLowercase());

@@ -415,10 +415,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
     }

     static {
-        assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_5_0_0) == false:
+        assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_6_0_0_alpha1_UNRELEASED) == false:
             "Remove logic handling NoOp result from primary response; see TODO in replicaItemExecutionMode" +
                 " as the current minimum compatible version [" +
-                Version.CURRENT.minimumCompatibilityVersion() + "] is after 5.0";
+                Version.CURRENT.minimumCompatibilityVersion() + "] is after 6.0";
     }

     /**

@@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashSet;
+import java.util.Objects;
 import java.util.Set;

 import static org.elasticsearch.common.xcontent.ObjectParser.fromList;

@@ -78,6 +79,8 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
         super.readFrom(in);
         fields = in.readStringArray();
         if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+            indices = in.readStringArray();
+            indicesOptions = IndicesOptions.readIndicesOptions(in);
             mergeResults = in.readBoolean();
         } else {
             mergeResults = true;

@@ -89,6 +92,8 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
         super.writeTo(out);
         out.writeStringArray(fields);
         if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+            out.writeStringArray(indices);
+            indicesOptions.writeIndicesOptions(out);
             out.writeBoolean(mergeResults);
         }
     }

@@ -118,12 +123,12 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
      * The list of indices to lookup
      */
     public FieldCapabilitiesRequest indices(String... indices) {
-        this.indices = indices;
+        this.indices = Objects.requireNonNull(indices, "indices must not be null");
         return this;
     }

     public FieldCapabilitiesRequest indicesOptions(IndicesOptions indicesOptions) {
-        this.indicesOptions = indicesOptions;
+        this.indicesOptions = Objects.requireNonNull(indicesOptions, "indices options must not be null");
         return this;
     }

@@ -26,6 +26,7 @@ import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.CountDown;

@@ -72,7 +73,14 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
         final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(request.indicesOptions(),
             request.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
         final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-        final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices);
+        final String[] concreteIndices;
+        if (remoteClusterIndices.isEmpty() == false && localIndices.indices().length == 0) {
+            // in the case we have one or more remote indices but no local we don't expand to all local indices and just do remote
+            // indices
+            concreteIndices = Strings.EMPTY_ARRAY;
+        } else {
+            concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices);
+        }
         final int totalNumRequest = concreteIndices.length + remoteClusterIndices.size();
         final CountDown completionCounter = new CountDown(totalNumRequest);
         final List<FieldCapabilitiesIndexResponse> indexResponses = Collections.synchronizedList(new ArrayList<>());
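The new branch above guards a subtle expansion rule: an empty local index list normally means "all local indices", which would be wrong when the request only named remote indices. A self-contained sketch of that decision with plain stand-ins (nothing here is the real resolver API):

```java
import java.util.Collections;
import java.util.Map;

public final class LocalExpansionDemo {
    static String[] pickLocalIndices(Map<String, String[]> remoteClusterIndices, String[] localIndices) {
        if (remoteClusterIndices.isEmpty() == false && localIndices.length == 0) {
            return new String[0]; // remote-only request: don't expand "" to every local index
        }
        return new String[] {"index-1", "index-2"}; // stand-in for concreteIndexNames(...)
    }

    public static void main(String[] args) {
        Map<String, String[]> remote = Collections.singletonMap("cluster_one", new String[] {"logs-*"});
        System.out.println(pickLocalIndices(remote, new String[0]).length);                 // 0
        System.out.println(pickLocalIndices(Collections.emptyMap(), new String[0]).length); // 2
    }
}
```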
@@ -323,14 +323,14 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         out.writeLong(sumTotalTermFreq);
         out.writeBoolean(isSearchable);
         out.writeBoolean(isAggregatable);
-        if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
             out.writeBoolean(hasMinMax);
             if (hasMinMax) {
                 writeMinMax(out);
             }
         } else {
             assert hasMinMax : "cannot serialize null min/max fieldstats in a mixed-cluster " +
-                "with pre-" + Version.V_5_2_0_UNRELEASED + " nodes, remote version [" + out.getVersion() + "]";
+                "with pre-" + Version.V_5_2_0 + " nodes, remote version [" + out.getVersion() + "]";
             writeMinMax(out);
         }
     }

@@ -705,7 +705,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
         boolean isSearchable = in.readBoolean();
         boolean isAggregatable = in.readBoolean();
         boolean hasMinMax = true;
-        if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
            hasMinMax = in.readBoolean();
         }
         switch (type) {

@@ -93,7 +93,7 @@ public class FieldStatsResponse extends BroadcastResponse {
         for (Map.Entry<String, Map<String, FieldStats>> entry1 : indicesMergedFieldStats.entrySet()) {
             out.writeString(entry1.getKey());
             int size = entry1.getValue().size();
-            if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) {
+            if (out.getVersion().before(Version.V_5_2_0)) {
                 // filter fieldstats without min/max information
                 for (FieldStats stats : entry1.getValue().values()) {
                     if (stats.hasMinMax() == false) {

@@ -103,7 +103,7 @@ public class FieldStatsResponse extends BroadcastResponse {
             }
             out.writeVInt(size);
             for (Map.Entry<String, FieldStats> entry2 : entry1.getValue().entrySet()) {
-                if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
+                if (entry2.getValue().hasMinMax() || out.getVersion().onOrAfter(Version.V_5_2_0)) {
                     out.writeString(entry2.getKey());
                     entry2.getValue().writeTo(out);
                 }

@@ -68,7 +68,7 @@ public class FieldStatsShardResponse extends BroadcastShardResponse {
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         final Map<String, FieldStats<?> > stats;
-        if (out.getVersion().before(Version.V_5_2_0_UNRELEASED)) {
+        if (out.getVersion().before(Version.V_5_2_0)) {
             /**
              * FieldStats with null min/max are not (de)serializable in versions prior to {@link Version.V_5_2_0_UNRELEASED}
              */

@@ -534,7 +534,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         pipeline = in.readOptionalString();
         isRetry = in.readBoolean();
         autoGeneratedTimestamp = in.readLong();
-        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
             contentType = in.readOptionalWriteable(XContentType::readFrom);
         } else {
             contentType = XContentFactory.xContentType(source);

@@ -558,7 +558,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         out.writeBytesReference(source);
         out.writeByte(opType.getId());
         // ES versions below 5.1.2 don't know about resolveVersionDefaults but resolve the version eagerly (which messes with validation).
-        if (out.getVersion().before(Version.V_5_1_2_UNRELEASED)) {
+        if (out.getVersion().before(Version.V_5_1_2)) {
             out.writeLong(resolveVersionDefaults());
         } else {
             out.writeLong(version);

@@ -567,7 +567,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         out.writeOptionalString(pipeline);
         out.writeBoolean(isRetry);
         out.writeLong(autoGeneratedTimestamp);
-        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
             out.writeOptionalWriteable(contentType);
         }
     }

@@ -80,7 +80,7 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
         super.readFrom(in);
         id = in.readString();
         source = in.readBytesReference();
-        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
             xContentType = XContentType.readFrom(in);
         } else {
             xContentType = XContentFactory.xContentType(source);

@@ -92,7 +92,7 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
         super.writeTo(out);
         out.writeString(id);
         out.writeBytesReference(source);
-        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
             xContentType.writeTo(out);
         }
     }

@@ -103,7 +103,7 @@ public class SimulatePipelineRequest extends ActionRequest {
         id = in.readOptionalString();
         verbose = in.readBoolean();
         source = in.readBytesReference();
-        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
             xContentType = XContentType.readFrom(in);
         } else {
             xContentType = XContentFactory.xContentType(source);

@@ -116,7 +116,7 @@ public class SimulatePipelineRequest extends ActionRequest {
         out.writeOptionalString(id);
         out.writeBoolean(verbose);
         out.writeBytesReference(source);
-        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
             xContentType.writeTo(out);
         }
     }

@@ -64,7 +64,7 @@ final class ExpandSearchPhase extends SearchPhase {

     @Override
     public void run() throws IOException {
-        if (isCollapseRequest()) {
+        if (isCollapseRequest() && searchResponse.getHits().getHits().length > 0) {
             SearchRequest searchRequest = context.getRequest();
             CollapseBuilder collapseBuilder = searchRequest.source().collapse();
             MultiSearchRequest multiRequest = new MultiSearchRequest();

@@ -120,7 +120,7 @@ public class SearchTransportService extends AbstractComponent {
         // this used to be the QUERY_AND_FETCH which doesn't exists anymore.
         final boolean fetchDocuments = request.numberOfShards() == 1;
         Supplier<SearchPhaseResult> supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
-        if (connection.getVersion().before(Version.V_5_3_0_UNRELEASED) && fetchDocuments) {
+        if (connection.getVersion().before(Version.V_5_3_0) && fetchDocuments) {
             // this is a BWC layer for pre 5.3 indices
             if (request.scroll() != null) {
                 /**

@@ -499,7 +499,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i

         if (in.readBoolean()) {
             doc = in.readBytesReference();
-            if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+            if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
                 xContentType = XContentType.readFrom(in);
             } else {
                 xContentType = XContentFactory.xContentType(doc);

@@ -544,7 +544,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
         out.writeBoolean(doc != null);
         if (doc != null) {
             out.writeBytesReference(doc);
-            if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
+            if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
                 xContentType.writeTo(out);
             }
         }

@@ -30,7 +30,7 @@ public interface NamedDiff<T extends Diffable<T>> extends Diff<T>, NamedWriteabl
      * The minimal version of the recipient this custom object can be sent to
      */
     default Version getMinimalSupportedVersion() {
-        return Version.CURRENT.minimumCompatibilityVersion();
+        return Version.CURRENT.minimumIndexCompatibilityVersion();
     }

 }

@@ -30,6 +30,6 @@ public interface NamedDiffable<T> extends Diffable<T>, NamedWriteable {
      * The minimal version of the recipient this custom object can be sent to
      */
     default Version getMinimalSupportedVersion() {
-        return Version.CURRENT.minimumCompatibilityVersion();
+        return Version.CURRENT.minimumIndexCompatibilityVersion();
     }
 }

@@ -40,7 +40,7 @@ public class SnapshotDeletionsInProgress extends AbstractNamedDiffable<Custom> i

     public static final String TYPE = "snapshot_deletions";
     // the version where SnapshotDeletionsInProgress was introduced
-    public static final Version VERSION_INTRODUCED = Version.V_5_2_0_UNRELEASED;
+    public static final Version VERSION_INTRODUCED = Version.V_5_2_0;

     // the list of snapshot deletion request entries
     private final List<Entry> entries;

@@ -51,7 +51,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
     // a snapshot in progress from a pre 5.2.x node
     public static final long UNDEFINED_REPOSITORY_STATE_ID = -2L;
     // the version where repository state ids were introduced
-    private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0_UNRELEASED;
+    private static final Version REPOSITORY_ID_INTRODUCED_VERSION = Version.V_5_2_0;

     @Override
     public boolean equals(Object o) {

@@ -19,6 +19,7 @@

 package org.elasticsearch.cluster.block;

+import org.elasticsearch.Version;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;

@@ -43,18 +44,22 @@ public class ClusterBlock implements Streamable, ToXContent {

     private boolean disableStatePersistence = false;

+    private boolean allowReleaseResources;
+
     private RestStatus status;

     ClusterBlock() {
     }

-    public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, RestStatus status, EnumSet<ClusterBlockLevel> levels) {
+    public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, boolean allowReleaseResources, RestStatus status,
+                        EnumSet<ClusterBlockLevel> levels) {
         this.id = id;
         this.description = description;
         this.retryable = retryable;
         this.disableStatePersistence = disableStatePersistence;
         this.status = status;
         this.levels = levels;
+        this.allowReleaseResources = allowReleaseResources;
     }

     public int id() {

@@ -127,12 +132,17 @@ public class ClusterBlock implements Streamable, ToXContent {
         final int len = in.readVInt();
         ArrayList<ClusterBlockLevel> levels = new ArrayList<>(len);
         for (int i = 0; i < len; i++) {
-            levels.add(ClusterBlockLevel.fromId(in.readVInt()));
+            levels.add(in.readEnum(ClusterBlockLevel.class));
         }
         this.levels = EnumSet.copyOf(levels);
         retryable = in.readBoolean();
         disableStatePersistence = in.readBoolean();
         status = RestStatus.readFrom(in);
+        if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+            allowReleaseResources = in.readBoolean();
+        } else {
+            allowReleaseResources = false;
+        }
     }

     @Override

@@ -141,11 +151,14 @@ public class ClusterBlock implements Streamable, ToXContent {
         out.writeString(description);
         out.writeVInt(levels.size());
         for (ClusterBlockLevel level : levels) {
-            out.writeVInt(level.id());
+            out.writeEnum(level);
         }
         out.writeBoolean(retryable);
         out.writeBoolean(disableStatePersistence);
         RestStatus.writeTo(out, status);
+        if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+            out.writeBoolean(allowReleaseResources);
+        }
     }

     @Override

@@ -176,4 +189,8 @@ public class ClusterBlock implements Streamable, ToXContent {
     public int hashCode() {
         return id;
     }
+
+    public boolean isAllowReleaseResources() {
+        return allowReleaseResources;
+    }
 }

@@ -23,34 +23,11 @@ package org.elasticsearch.cluster.block;
 import java.util.EnumSet;

 public enum ClusterBlockLevel {
-    READ(0),
-    WRITE(1),
-    METADATA_READ(2),
-    METADATA_WRITE(3);
+    READ,
+    WRITE,
+    METADATA_READ,
+    METADATA_WRITE;

-    public static final EnumSet<ClusterBlockLevel> ALL = EnumSet.of(READ, WRITE, METADATA_READ, METADATA_WRITE);
+    public static final EnumSet<ClusterBlockLevel> ALL = EnumSet.allOf(ClusterBlockLevel.class);
     public static final EnumSet<ClusterBlockLevel> READ_WRITE = EnumSet.of(READ, WRITE);
-
-    private final int id;
-
-    ClusterBlockLevel(int id) {
-        this.id = id;
-    }
-
-    public int id() {
-        return this.id;
-    }
-
-    static ClusterBlockLevel fromId(int id) {
-        if (id == 0) {
-            return READ;
-        } else if (id == 1) {
-            return WRITE;
-        } else if (id == 2) {
-            return METADATA_READ;
-        } else if (id == 3) {
-            return METADATA_WRITE;
-        }
-        throw new IllegalArgumentException("No cluster block level matching [" + id + "]");
-    }
 }
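`writeEnum`/`readEnum` above serialize the constant's ordinal, which is why the hand-rolled id plumbing could be deleted: declaration order is now the wire format, so the constants must not be reordered. A plain-JDK sketch of roughly what that boils down to (ES writes a vint rather than a single byte):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public final class EnumWireDemo {
    enum Level { READ, WRITE, METADATA_READ, METADATA_WRITE }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeByte(Level.METADATA_WRITE.ordinal()); // roughly what writeEnum does
        }
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            Level level = Level.values()[in.readByte()];   // roughly what readEnum does
            System.out.println(level);                     // METADATA_WRITE
        }
    }
}
```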
@@ -70,11 +70,11 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
     }

     public Set<ClusterBlock> global(ClusterBlockLevel level) {
-        return levelHolders[level.id()].global();
+        return levelHolders[level.ordinal()].global();
     }

     public ImmutableOpenMap<String, Set<ClusterBlock>> indices(ClusterBlockLevel level) {
-        return levelHolders[level.id()].indices();
+        return levelHolders[level.ordinal()].indices();
     }

     private Set<ClusterBlock> blocksForIndex(ClusterBlockLevel level, String index) {

@@ -97,7 +97,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
                     .collect(toSet())));
             }

-            levelHolders[level.id()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
+            levelHolders[level.ordinal()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
         }
         return levelHolders;
     }

@@ -203,6 +203,26 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
         return new ClusterBlockException(unmodifiableSet(blocks.collect(toSet())));
     }

+    /**
+     * Returns <code>true</code> iff non of the given have a {@link ClusterBlockLevel#METADATA_WRITE} in place where the
+     * {@link ClusterBlock#isAllowReleaseResources()} returns <code>false</code>. This is used in places where resources will be released
+     * like the deletion of an index to free up resources on nodes.
+     * @param indices the indices to check
+     */
+    public ClusterBlockException indicesAllowReleaseResources(String[] indices) {
+        final Function<String, Stream<ClusterBlock>> blocksForIndexAtLevel = index ->
+            blocksForIndex(ClusterBlockLevel.METADATA_WRITE, index).stream();
+        Stream<ClusterBlock> blocks = concat(
+            global(ClusterBlockLevel.METADATA_WRITE).stream(),
+            Stream.of(indices).flatMap(blocksForIndexAtLevel)).filter(clusterBlock -> clusterBlock.isAllowReleaseResources() == false);
+        Set<ClusterBlock> clusterBlocks = unmodifiableSet(blocks.collect(toSet()));
+        if (clusterBlocks.isEmpty()) {
+            return null;
+        }
+        return new ClusterBlockException(clusterBlocks);
+    }
+
+
     @Override
     public String toString() {
         if (global.isEmpty() && indices().isEmpty()) {

@@ -270,8 +290,6 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {

     static class ImmutableLevelHolder {

-        static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(emptySet(), ImmutableOpenMap.of());
-
         private final Set<ClusterBlock> global;
         private final ImmutableOpenMap<String, Set<ClusterBlock>> indices;

@@ -314,30 +332,31 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
     }

     public Builder addBlocks(IndexMetaData indexMetaData) {
+        String indexName = indexMetaData.getIndex().getName();
         if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
-            addIndexBlock(indexMetaData.getIndex().getName(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
+            addIndexBlock(indexName, MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
         }
         if (IndexMetaData.INDEX_READ_ONLY_SETTING.get(indexMetaData.getSettings())) {
-            addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
+            addIndexBlock(indexName, IndexMetaData.INDEX_READ_ONLY_BLOCK);
         }
         if (IndexMetaData.INDEX_BLOCKS_READ_SETTING.get(indexMetaData.getSettings())) {
-            addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_BLOCK);
+            addIndexBlock(indexName, IndexMetaData.INDEX_READ_BLOCK);
         }
         if (IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.get(indexMetaData.getSettings())) {
-            addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_WRITE_BLOCK);
+            addIndexBlock(indexName, IndexMetaData.INDEX_WRITE_BLOCK);
         }
         if (IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.get(indexMetaData.getSettings())) {
-            addIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_METADATA_BLOCK);
+            addIndexBlock(indexName, IndexMetaData.INDEX_METADATA_BLOCK);
         }
+        if (IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING.get(indexMetaData.getSettings())) {
+            addIndexBlock(indexName, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK);
+        }
         return this;
     }

     public Builder updateBlocks(IndexMetaData indexMetaData) {
-        removeIndexBlock(indexMetaData.getIndex().getName(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
-        removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
-        removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_READ_BLOCK);
-        removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_WRITE_BLOCK);
-        removeIndexBlock(indexMetaData.getIndex().getName(), IndexMetaData.INDEX_METADATA_BLOCK);
+        // let's remove all blocks for this index and add them back -- no need to remove all individual blocks....
+        indices.remove(indexMetaData.getIndex().getName());
         return addBlocks(indexMetaData);
     }

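`indicesAllowReleaseResources` above only rejects an operation if some applicable METADATA_WRITE block did not opt in via `isAllowReleaseResources()`; that is what lets index deletion proceed under the new allow-delete blocks. A hedged sketch of that filter with plain stand-in objects:

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public final class AllowReleaseDemo {
    static final class Block {
        final String name;
        final boolean allowReleaseResources;
        Block(String name, boolean allowReleaseResources) {
            this.name = name;
            this.allowReleaseResources = allowReleaseResources;
        }
        @Override
        public String toString() {
            return name;
        }
    }

    public static void main(String[] args) {
        List<Block> metadataWriteBlocks = Arrays.asList(
            new Block("cluster read-only / allow delete (api)", true), // opts in: deletes may proceed
            new Block("index metadata (api)", false));                 // does not opt in
        // Only blocks that do NOT allow releasing resources still forbid the delete.
        List<Block> stillBlocking = metadataWriteBlocks.stream()
            .filter(b -> b.allowReleaseResources == false)
            .collect(Collectors.toList());
        System.out.println(stillBlocking); // non-empty -> exception; empty -> delete proceeds
    }
}
```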
@ -131,10 +131,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
return proto;
|
||||
}
|
||||
|
||||
public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
|
||||
public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ));
|
||||
public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE));
|
||||
public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ));
|
||||
public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
|
||||
public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ));
|
||||
public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE));
|
||||
public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ));
|
||||
public static final ClusterBlock INDEX_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(12, "index read-only / allow delete (api)", false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE));
|
||||
|
||||
public enum State {
|
||||
OPEN((byte) 0),
|
||||
|
@ -212,6 +213,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING =
|
||||
Setting.boolSetting(SETTING_BLOCKS_METADATA, false, Property.Dynamic, Property.IndexScope);
|
||||
|
||||
public static final String SETTING_READ_ONLY_ALLOW_DELETE = "index.blocks.read_only_allow_delete";
|
||||
public static final Setting<Boolean> INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING =
|
||||
Setting.boolSetting(SETTING_READ_ONLY_ALLOW_DELETE, false, Property.Dynamic, Property.IndexScope);
|
||||
|
||||
public static final String SETTING_VERSION_CREATED = "index.version.created";
|
||||
public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
|
||||
public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
|
||||
|
|
|
@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
|
|||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.cluster.Diffable;
|
||||
import org.elasticsearch.cluster.DiffableUtils;
|
||||
|
@ -119,7 +118,14 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
|
|||
public static final Setting<Boolean> SETTING_READ_ONLY_SETTING =
|
||||
Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope);
|
||||
|
||||
public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
|
||||
public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false,
|
||||
false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
|
||||
|
||||
public static final Setting<Boolean> SETTING_READ_ONLY_ALLOW_DELETE_SETTING =
|
||||
Setting.boolSetting("cluster.blocks.read_only_allow_delete", false, Property.Dynamic, Property.NodeScope);
|
||||
|
||||
public static final ClusterBlock CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(13, "cluster read-only / allow delete (api)",
|
||||
false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
|
||||
|
||||
public static final MetaData EMPTY_META_DATA = builder().build();
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ import java.util.Set;
|
|||
*/
|
||||
public class MetaDataIndexStateService extends AbstractComponent {
|
||||
|
||||
public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE);
|
||||
public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE);
|
||||
|
||||
private final ClusterService clusterService;
|
||||
|
||||
|
|
|
@ -230,6 +230,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
|
||||
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
|
||||
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings);
|
||||
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, openSettings);
|
||||
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings);
|
||||
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings);
|
||||
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings);
|
||||
|
|
|
@ -34,7 +34,6 @@ import java.io.IOException;
|
|||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
|
@ -222,7 +221,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
|
|||
this.ephemeralId = in.readString().intern();
|
||||
this.hostName = in.readString().intern();
|
||||
this.hostAddress = in.readString().intern();
|
||||
if (in.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
|
||||
if (in.getVersion().after(Version.V_5_0_2)) {
|
||||
this.address = new TransportAddress(in);
|
||||
} else {
|
||||
// we need to do this to preserve the host information during pinging and joining of a master. Since the version of the
|
||||
|
|
|
@ -47,6 +47,9 @@ public class DiscoveryNodeFilters {
|
|||
Map<String, String> settingsMap = settings.getAsMap();
|
||||
for (Map.Entry<String, String> entry : settingsMap.entrySet()) {
|
||||
String propertyKey = entry.getKey();
|
||||
if (entry.getValue() == null) {
|
||||
continue; // this setting gets reset
|
||||
}
|
||||
if ("_ip".equals(propertyKey) || "_host_ip".equals(propertyKey) || "_publish_ip".equals(propertyKey)) {
|
||||
for (String value : Strings.tokenizeToStringArray(entry.getValue(), ",")) {
|
||||
if (InetAddresses.isInetAddress(value) == false) {
|
||||
|
|
|
@ -80,7 +80,7 @@ public class NodeAllocationResult implements ToXContent, Writeable, Comparable<N
|
|||
public NodeAllocationResult(StreamInput in) throws IOException {
|
||||
node = new DiscoveryNode(in);
|
||||
shardStoreInfo = in.readOptionalWriteable(ShardStoreInfo::new);
|
||||
if (in.getVersion().before(Version.V_5_2_1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_5_2_1)) {
|
||||
canAllocateDecision = Decision.readFrom(in);
|
||||
} else {
|
||||
canAllocateDecision = in.readOptionalWriteable(Decision::readFrom);
|
||||
|
@ -93,7 +93,7 @@ public class NodeAllocationResult implements ToXContent, Writeable, Comparable<N
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
node.writeTo(out);
|
||||
out.writeOptionalWriteable(shardStoreInfo);
|
||||
if (out.getVersion().before(Version.V_5_2_1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_2_1)) {
|
||||
if (canAllocateDecision == null) {
|
||||
Decision.NO.writeTo(out);
|
||||
} else {
|
||||
|
|
|
@ -187,6 +187,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING,
|
||||
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
|
||||
MetaData.SETTING_READ_ONLY_SETTING,
|
||||
MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING,
|
||||
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
|
||||
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
|
||||
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
|
||||
|
|
|
@ -74,6 +74,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
|
|||
IndexMetaData.INDEX_BLOCKS_READ_SETTING,
|
||||
IndexMetaData.INDEX_BLOCKS_WRITE_SETTING,
|
||||
IndexMetaData.INDEX_BLOCKS_METADATA_SETTING,
|
||||
IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING,
|
||||
IndexMetaData.INDEX_PRIORITY_SETTING,
|
||||
IndexMetaData.INDEX_DATA_PATH_SETTING,
|
||||
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
|
||||
|
|
|

@@ -74,8 +74,8 @@ public final class TransportAddress implements Writeable {
}

/**
* Read from a stream and use the {@code hostString} when creating the InetAddress if the input comes from a version prior
* {@link Version#V_5_0_3_UNRELEASED} as the hostString was not serialized
* Read from a stream and use the {@code hostString} when creating the InetAddress if the input comes from a version on or prior
* {@link Version#V_5_0_2} as the hostString was not serialized
*/
public TransportAddress(StreamInput in, @Nullable String hostString) throws IOException {
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // bwc layer for 5.x where we had more than one transport address

@@ -88,7 +88,7 @@ public final class TransportAddress implements Writeable {
final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
in.readFully(a);
final InetAddress inetAddress;
if (in.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
if (in.getVersion().after(Version.V_5_0_2)) {
String host = in.readString(); // the host string was serialized so we can ignore the passed in version
inetAddress = InetAddress.getByAddress(host, a);
} else {

@@ -107,7 +107,7 @@ public final class TransportAddress implements Writeable {
byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
out.writeByte((byte) bytes.length); // 1 byte
out.write(bytes, 0, bytes.length);
if (out.getVersion().onOrAfter(Version.V_5_0_3_UNRELEASED)) {
if (out.getVersion().after(Version.V_5_0_2)) {
out.writeString(address.getHostString());
}
// don't serialize scope ids over the network!!!!
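
The TransportAddress hunks above are one instance of the wire-compatibility pattern that recurs throughout this merge: a new field is written only when the receiving node's version can read it, and readers substitute a default otherwise. A minimal sketch of the pattern, assuming the StreamInput/StreamOutput API shown in these hunks (the MyAddress class and hostName field are illustrative stand-ins, not part of this diff):

    // Sketch only: the enclosing type and its fields are hypothetical.
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(address);                      // field every version understands
        if (out.getVersion().after(Version.V_5_0_2)) {
            out.writeString(hostName);                 // only nodes newer than 5.0.2 read this
        }
    }

    MyAddress(StreamInput in) throws IOException {
        address = in.readString();
        if (in.getVersion().after(Version.V_5_0_2)) {
            hostName = in.readString();
        } else {
            hostName = address;                        // default when reading from an old node
        }
    }

Writer and reader must gate on exactly the same version constant, which is why so many hunks below flip the same V_5_x_y_UNRELEASED constants to their released forms in matched read/write pairs.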

@@ -22,6 +22,7 @@ package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.Node;

import java.util.Arrays;

@@ -79,6 +80,33 @@ public class EsExecutors {
return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy(), contextHolder);
}

/**
* Return a new executor that will automatically adjust the queue size based on queue throughput.
*
* @param size number of fixed threads to use for executing tasks
* @param initialQueueCapacity initial size of the executor queue
* @param minQueueSize minimum queue size that the queue can be adjusted to
* @param maxQueueSize maximum queue size that the queue can be adjusted to
* @param frameSize number of tasks during which stats are collected before adjusting queue size
*/
public static EsThreadPoolExecutor newAutoQueueFixed(String name, int size, int initialQueueCapacity, int minQueueSize,
int maxQueueSize, int frameSize, TimeValue targetedResponseTime,
ThreadFactory threadFactory, ThreadContext contextHolder) {
if (initialQueueCapacity == minQueueSize && initialQueueCapacity == maxQueueSize) {
return newFixed(name, size, initialQueueCapacity, threadFactory, contextHolder);
}

if (initialQueueCapacity <= 0) {
throw new IllegalArgumentException("initial queue capacity for [" + name + "] executor must be positive, got: " +
initialQueueCapacity);
}
ResizableBlockingQueue<Runnable> queue =
new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), initialQueueCapacity);
return new QueueResizingEsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS,
queue, minQueueSize, maxQueueSize, TimedRunnable::new, frameSize, targetedResponseTime, threadFactory,
new EsAbortPolicy(), contextHolder);
}

private static final ExecutorService DIRECT_EXECUTOR_SERVICE = new AbstractExecutorService() {

@Override
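
For context, a caller would construct the new auto-resizing executor roughly as follows; the pool name and sizing values are illustrative, not taken from this change:

    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory("search");
    ThreadContext contextHolder = new ThreadContext(Settings.EMPTY);
    // Eight fixed threads; the queue starts at 500 slots and may be resized
    // between 100 and 2000, re-evaluated after every 2000 completed tasks,
    // targeting an average response time of one second.
    EsThreadPoolExecutor executor = EsExecutors.newAutoQueueFixed(
            "search", 8, 500, 100, 2000, 2000,
            TimeValue.timeValueSeconds(1), threadFactory, contextHolder);

Note the early return in the method above: when min, max, and initial capacity coincide there is nothing to resize, so it falls back to a plain fixed executor.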

@@ -37,7 +37,7 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
/**
* Name used in error reporting.
*/
private final String name;
protected final String name;

EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, ThreadContext contextHolder) {

@@ -0,0 +1,234 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.util.concurrent;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ResizableBlockingQueue;

import java.util.Locale;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Stream;

/**
* An extension to thread pool executor, which automatically adjusts the queue size of the
* {@code ResizableBlockingQueue} according to Little's Law.
*/
public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecutor {

private static final Logger logger =
ESLoggerFactory.getLogger(QueueResizingEsThreadPoolExecutor.class);

private final Function<Runnable, Runnable> runnableWrapper;
private final ResizableBlockingQueue<Runnable> workQueue;
private final int tasksPerFrame;
private final int minQueueSize;
private final int maxQueueSize;
private final long targetedResponseTimeNanos;
// The amount the queue size is adjusted by for each calculation
private static final int QUEUE_ADJUSTMENT_AMOUNT = 50;

private final AtomicLong totalTaskNanos = new AtomicLong(0);
private final AtomicInteger taskCount = new AtomicInteger(0);

private long startNs;

QueueResizingEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit,
ResizableBlockingQueue<Runnable> workQueue, int minQueueSize, int maxQueueSize,
Function<Runnable, Runnable> runnableWrapper, final int tasksPerFrame,
TimeValue targetedResponseTime, ThreadFactory threadFactory, XRejectedExecutionHandler handler,
ThreadContext contextHolder) {
super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit,
workQueue, threadFactory, handler, contextHolder);
this.runnableWrapper = runnableWrapper;
this.workQueue = workQueue;
this.tasksPerFrame = tasksPerFrame;
this.startNs = System.nanoTime();
this.minQueueSize = minQueueSize;
this.maxQueueSize = maxQueueSize;
this.targetedResponseTimeNanos = targetedResponseTime.getNanos();
logger.debug("thread pool [{}] will adjust queue by [{}] when determining automatic queue size",
name, QUEUE_ADJUSTMENT_AMOUNT);
}

@Override
protected void doExecute(final Runnable command) {
// we are submitting a task, it has not yet started running (because super.execute() has not
// been called), but it could be immediately run, or run at a later time. We need the time
// this task entered the queue, which we get by creating a TimedRunnable, which starts the
// clock as soon as it is created.
super.doExecute(this.runnableWrapper.apply(command));
}

/**
* Calculate task rate (λ), for a fixed number of tasks and time it took those tasks to be measured
*
* @param totalNumberOfTasks total number of tasks that were measured
* @param totalFrameFrameNanos nanoseconds during which the tasks were received
* @return the rate of tasks in the system
*/
static double calculateLambda(final int totalNumberOfTasks, final long totalFrameFrameNanos) {
assert totalFrameFrameNanos > 0 : "cannot calculate for instantaneous tasks";
assert totalNumberOfTasks > 0 : "cannot calculate for no tasks";
// There is no set execution time, instead we adjust the time window based on the
// number of completed tasks, so there is no background thread required to update the
// queue size at a regular interval. This means we need to calculate our λ by the
// total runtime, rather than a fixed interval.

// λ = total tasks divided by measurement time
return (double) totalNumberOfTasks / totalFrameFrameNanos;
}

/**
* Calculate Little's Law (L), which is the "optimal" queue size for a particular task rate (lambda) and targeted response time.
*
* @param lambda the arrival rate of tasks in nanoseconds
* @param targetedResponseTimeNanos nanoseconds for the average targeted response rate of requests
* @return the optimal queue size for the given task rate and targeted response time
*/
static int calculateL(final double lambda, final long targetedResponseTimeNanos) {
assert targetedResponseTimeNanos > 0 : "cannot calculate for instantaneous requests";
// L = λ * W
return Math.toIntExact((long)(lambda * targetedResponseTimeNanos));
}

/**
* Returns the current queue capacity
*/
public int getCurrentCapacity() {
return workQueue.capacity();
}

@Override
protected void afterExecute(Runnable r, Throwable t) {
super.afterExecute(r, t);
// A task has been completed, it has left the building. We should now be able to get the
// total time as a combination of the time in the queue and time spent running the task. We
// only want runnables that did not throw errors though, because they could be fast-failures
// that throw off our timings, so only check when t is null.
assert r instanceof TimedRunnable : "expected only TimedRunnables in queue";
final long taskNanos = ((TimedRunnable) r).getTotalNanos();
final long totalNanos = totalTaskNanos.addAndGet(taskNanos);
if (taskCount.incrementAndGet() == this.tasksPerFrame) {
final long endTimeNs = System.nanoTime();
final long totalRuntime = endTimeNs - this.startNs;
// Reset the start time for all tasks. At first glance this appears to need to be
// volatile, since we are reading from a different thread when it is set, but it
// is protected by the taskCount memory barrier.
// See: https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html
startNs = endTimeNs;

// Calculate the new desired queue size
try {
final double lambda = calculateLambda(tasksPerFrame, totalNanos);
final int desiredQueueSize = calculateL(lambda, targetedResponseTimeNanos);
if (logger.isDebugEnabled()) {
final long avgTaskTime = totalNanos / tasksPerFrame;
logger.debug("[{}]: there were [{}] tasks in [{}], avg task time: [{}], [{} tasks/s], " +
"optimal queue is [{}]",
name,
tasksPerFrame,
TimeValue.timeValueNanos(totalRuntime),
TimeValue.timeValueNanos(avgTaskTime),
String.format(Locale.ROOT, "%.2f", lambda * TimeValue.timeValueSeconds(1).nanos()),
desiredQueueSize);
}

final int oldCapacity = workQueue.capacity();

// Adjust the queue size towards the desired capacity using an adjust of
// QUEUE_ADJUSTMENT_AMOUNT (either up or down), keeping in mind the min and max
// values the queue size can have.
final int newCapacity =
workQueue.adjustCapacity(desiredQueueSize, QUEUE_ADJUSTMENT_AMOUNT, minQueueSize, maxQueueSize);
if (oldCapacity != newCapacity && logger.isDebugEnabled()) {
logger.debug("adjusted [{}] queue size by [{}], old capacity: [{}], new capacity: [{}]", name,
newCapacity > oldCapacity ? QUEUE_ADJUSTMENT_AMOUNT : -QUEUE_ADJUSTMENT_AMOUNT,
oldCapacity, newCapacity);
}
} catch (ArithmeticException e) {
// There was an integer overflow, so just log about it, rather than adjust the queue size
logger.warn((Supplier<?>) () -> new ParameterizedMessage(
"failed to calculate optimal queue size for [{}] thread pool, " +
"total frame time [{}ns], tasks [{}], task execution time [{}ns]",
name, totalRuntime, tasksPerFrame, totalNanos),
e);
} finally {
// Finally, decrement the task count and time back to their starting values. We
// do this at the end so there is no concurrent adjustments happening. We also
// decrement them instead of resetting them back to zero, as resetting them back
// to zero causes operations that came in during the adjustment to be uncounted
int tasks = taskCount.addAndGet(-this.tasksPerFrame);
assert tasks >= 0 : "tasks should never be negative, got: " + tasks;

if (tasks >= this.tasksPerFrame) {
// Start over, because we can potentially reach a "never adjusting" state,
//
// consider the following:
// - If the frame window is 10, and there are 10 tasks, then an adjustment will begin. (taskCount == 10)
// - Prior to the adjustment being done, 15 more tasks come in, the taskCount is now 25
// - Adjustment happens and we decrement the tasks by 10, taskCount is now 15
// - Since taskCount will now be incremented forever, it will never be 10 again,
// so there will be no further adjustments
logger.debug("[{}]: too many incoming tasks while queue size adjustment occurs, resetting measurements to 0", name);
totalTaskNanos.getAndSet(0);
taskCount.getAndSet(0);
startNs = System.nanoTime();
} else {
// Do a regular adjustment
totalTaskNanos.addAndGet(-totalNanos);
}
}
}
}

@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append(getClass().getSimpleName()).append('[');
b.append(name).append(", ");

@SuppressWarnings("rawtypes")
ResizableBlockingQueue queue = (ResizableBlockingQueue) getQueue();

b.append("queue capacity = ").append(getCurrentCapacity()).append(", ");
b.append("min queue capacity = ").append(minQueueSize).append(", ");
b.append("max queue capacity = ").append(maxQueueSize).append(", ");
b.append("frame size = ").append(tasksPerFrame).append(", ");
b.append("targeted response rate = ").append(TimeValue.timeValueNanos(targetedResponseTimeNanos)).append(", ");
b.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", ");
/*
* ThreadPoolExecutor has some nice information in its toString but we
* can't get at it easily without just getting the toString.
*/
b.append(super.toString()).append(']');
return b.toString();
}
}
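
A worked example of the two calculations above, with illustrative numbers: if one frame of 2000 tasks accumulates 20 seconds of combined queue-plus-execution time, calculateLambda yields λ = 2000 / 2e10 ns = 1e-7 tasks per nanosecond (100 tasks per second); with a targeted response time W of one second, calculateL returns L = λ × W = 100, so the queue is nudged toward a capacity of 100. Both methods are package-private statics, so the following would run from a test in the same package:

    long totalNanos = TimeValue.timeValueSeconds(20).nanos();  // one frame took 20s in total
    double lambda = calculateLambda(2000, totalNanos);         // 1e-7 tasks per nanosecond
    int optimal = calculateL(lambda, TimeValue.timeValueSeconds(1).nanos());
    assert optimal == 100;                                     // L = λ * W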

@@ -0,0 +1,80 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.util.concurrent;

import java.util.concurrent.BlockingQueue;
import org.elasticsearch.common.SuppressForbidden;

/**
* Extends the {@code SizeBlockingQueue} to add the {@code adjustCapacity} method, which will adjust
* the capacity by a certain amount towards a maximum or minimum.
*/
final class ResizableBlockingQueue<E> extends SizeBlockingQueue<E> {

private volatile int capacity;

ResizableBlockingQueue(BlockingQueue<E> queue, int initialCapacity) {
super(queue, initialCapacity);
this.capacity = initialCapacity;
}

@SuppressForbidden(reason = "optimalCapacity is non-negative, therefore the difference cannot be < -Integer.MAX_VALUE")
private int getChangeAmount(int optimalCapacity) {
assert optimalCapacity >= 0 : "optimal capacity should always be positive, got: " + optimalCapacity;
return Math.abs(optimalCapacity - this.capacity);
}

@Override
public int capacity() {
return this.capacity;
}

@Override
public int remainingCapacity() {
return Math.max(0, this.capacity());
}

/** Resize the limit for the queue, returning the new size limit */
public synchronized int adjustCapacity(int optimalCapacity, int adjustmentAmount, int minCapacity, int maxCapacity) {
assert adjustmentAmount > 0 : "adjustment amount should be a positive value";
assert optimalCapacity >= 0 : "desired capacity cannot be negative";
assert minCapacity >= 0 : "cannot have min capacity smaller than 0";
assert maxCapacity >= minCapacity : "cannot have max capacity smaller than min capacity";

if (optimalCapacity == capacity) {
// Yahtzee!
return this.capacity;
}

if (optimalCapacity > capacity + adjustmentAmount) {
// adjust up
final int newCapacity = Math.min(maxCapacity, capacity + adjustmentAmount);
this.capacity = newCapacity;
return newCapacity;
} else if (optimalCapacity < capacity - adjustmentAmount) {
// adjust down
final int newCapacity = Math.max(minCapacity, capacity - adjustmentAmount);
this.capacity = newCapacity;
return newCapacity;
} else {
return this.capacity;
}
}
}
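
Note that adjustCapacity moves at most one adjustmentAmount step per call rather than jumping straight to the optimum, and holds still once the optimum is within one step. With illustrative numbers (the class is package-private, so this runs from the same package):

    ResizableBlockingQueue<Runnable> queue =
            new ResizableBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), 500);
    queue.adjustCapacity(700, 50, 100, 2000);   // optimum far above: capacity 500 -> 550
    queue.adjustCapacity(700, 50, 100, 2000);   // still far above: 550 -> 600
    queue.adjustCapacity(620, 50, 100, 2000);   // 620 is within 50 of 600: stays at 600

The min and max bounds (100 and 2000 here) clamp the walk at either end, matching the minQueueSize/maxQueueSize arguments threaded through from newAutoQueueFixed.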

@@ -131,7 +131,7 @@ public class SizeBlockingQueue<E> extends AbstractQueue<E> implements BlockingQu
@Override
public boolean offer(E e) {
int count = size.incrementAndGet();
if (count > capacity) {
if (count > capacity()) {
size.decrementAndGet();
return false;
}

@@ -168,7 +168,7 @@ public class SizeBlockingQueue<E> extends AbstractQueue<E> implements BlockingQu

@Override
public int remainingCapacity() {
return capacity - size.get();
return capacity() - size.get();
}

@Override

@@ -0,0 +1,56 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.util.concurrent;

/**
* A class used to wrap a {@code Runnable} that allows capturing the time of the task from creation
* through execution.
*/
class TimedRunnable implements Runnable {
private final Runnable original;
private final long creationTimeNanos;
private long finishTimeNanos = -1;

TimedRunnable(Runnable original) {
this.original = original;
this.creationTimeNanos = System.nanoTime();
}

@Override
public void run() {
try {
original.run();
} finally {
finishTimeNanos = System.nanoTime();
}
}

/**
* Return the time since this task was created until it finished running.
* If the task is still running or has not yet been run, returns -1.
*/
long getTotalNanos() {
if (finishTimeNanos == -1) {
// There must have been an exception thrown, the total time is unknown (-1)
return -1;
}
return finishTimeNanos - creationTimeNanos;
}
}
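
Because the clock starts in the constructor rather than in run(), getTotalNanos() measures queue wait plus execution time, which is exactly the per-task response time that the executor's Little's Law calculation consumes. A minimal illustration (the task body and executor are hypothetical):

    TimedRunnable timed = new TimedRunnable(() -> doSomeWork());  // clock starts here
    executor.execute(timed);                                      // may sit queued first
    // ... after the task has completed:
    long w = timed.getTotalNanos();  // queue time + run time, or -1 if it never finished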

@@ -37,8 +37,8 @@ import java.util.EnumSet;
public class DiscoverySettings extends AbstractComponent {

public static final int NO_MASTER_BLOCK_ID = 2;
public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
public static final ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
/**
* sets the timeout for a complete publishing cycle, including both sending and committing. the master
* will continue to process the next cluster state update after this time has elapsed
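
Both block constants gain a new boolean argument (the added false); the changed ClusterBlock constructor itself is not shown in this excerpt, but the extra flag appears to distinguish blocks that still let resource-releasing requests (such as deletes) through from those, like these no-master blocks, that do not.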

@@ -223,7 +223,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA
final String name = stateFile.getFileName().toString();
if (name.startsWith("metadata-")) {
throw new IllegalStateException("Detected pre 0.19 metadata file please upgrade to a version before "
+ Version.CURRENT.minimumCompatibilityVersion()
+ Version.CURRENT.minimumIndexCompatibilityVersion()
+ " first to upgrade state structures - metadata found: [" + stateFile.getParent().toAbsolutePath());
}
}

@@ -294,7 +294,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateA
try (DirectoryStream<Path> stream = Files.newDirectoryStream(stateLocation, "shards-*")) {
for (Path stateFile : stream) {
throw new IllegalStateException("Detected pre 0.19 shard state file please upgrade to a version before "
+ Version.CURRENT.minimumCompatibilityVersion()
+ Version.CURRENT.minimumIndexCompatibilityVersion()
+ " first to upgrade state structures - shard state found: [" + stateFile.getParent().toAbsolutePath());
}
}

@@ -65,7 +65,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING =
Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope);

public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, false, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);

public static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET = TimeValue.timeValueMinutes(5);

@@ -246,9 +246,14 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
// automatically generate a UID for the metadata if we need to
metaDataBuilder.generateClusterUuidIfNeeded();

if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings()) || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) {
if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings())
|| MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
if (MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(recoveredState.metaData().settings())
|| MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.get(currentState.metaData().settings())) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);
}

for (IndexMetaData indexMetaData : recoveredState.metaData()) {
metaDataBuilder.put(indexMetaData, false);

@@ -146,9 +146,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), xContentRegistry, similarityService,
mapperRegistry,
// we parse all percolator queries as they would be parsed on shard 0
() -> newQueryShardContext(0, null, () -> {
throw new IllegalArgumentException("Percolator queries are not allowed to use the current timestamp");
}));
() -> newQueryShardContext(0, null, System::currentTimeMillis));
this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService);
if (indexSettings.getIndexSortConfig().hasIndexSort()) {
// we delay the actual creation of the sort order for this index because the mapping has not been merged yet.

@@ -213,7 +213,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
name = in.readOptionalString();
nestedPath = in.readOptionalString();
parentChildType = in.readOptionalString();
if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
ignoreUnmapped = in.readBoolean();
}
from = in.readVInt();

@@ -254,7 +254,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
out.writeOptionalString(name);
out.writeOptionalString(nestedPath);
out.writeOptionalString(parentChildType);
if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
out.writeBoolean(ignoreUnmapped);
}
out.writeVInt(from);

@@ -228,7 +228,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
type = in.readOptionalString();
if (in.readBoolean()) {
doc = (BytesReference) in.readGenericValue();
if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType = XContentType.readFrom(in);
} else {
xContentType = XContentFactory.xContentType(doc);

@@ -250,7 +250,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
out.writeBoolean(doc != null);
if (doc != null) {
out.writeGenericValue(doc);
if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
} else {

@@ -212,11 +212,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
autoGeneratePhraseQueries = in.readBoolean();
allowLeadingWildcard = in.readOptionalBoolean();
analyzeWildcard = in.readOptionalBoolean();
if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().before(Version.V_5_1_1)) {
in.readBoolean(); // lowercase_expanded_terms
}
enablePositionIncrements = in.readBoolean();
if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().before(Version.V_5_1_1)) {
in.readString(); // locale
}
fuzziness = new Fuzziness(in);

@@ -232,7 +232,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
timeZone = in.readOptionalTimeZone();
escape = in.readBoolean();
maxDeterminizedStates = in.readVInt();
if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
splitOnWhitespace = in.readBoolean();
useAllFields = in.readOptionalBoolean();
} else {

@@ -256,11 +256,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
out.writeBoolean(this.autoGeneratePhraseQueries);
out.writeOptionalBoolean(this.allowLeadingWildcard);
out.writeOptionalBoolean(this.analyzeWildcard);
if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().before(Version.V_5_1_1)) {
out.writeBoolean(true); // lowercase_expanded_terms
}
out.writeBoolean(this.enablePositionIncrements);
if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().before(Version.V_5_1_1)) {
out.writeString(Locale.ROOT.toLanguageTag()); // locale
}
this.fuzziness.writeTo(out);

@@ -276,7 +276,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
out.writeOptionalTimeZone(timeZone);
out.writeBoolean(this.escape);
out.writeVInt(this.maxDeterminizedStates);
if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeBoolean(this.splitOnWhitespace);
out.writeOptionalBoolean(this.useAllFields);
}

@@ -112,7 +112,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder<RangeQueryBuilder> i
if (formatString != null) {
format = Joda.forPattern(formatString);
}
if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
String relationString = in.readOptionalString();
if (relationString != null) {
relation = ShapeRelation.getRelationByName(relationString);

@@ -133,7 +133,7 @@ public class RangeQueryBuilder extends AbstractQueryBuilder<RangeQueryBuilder> i
formatString = this.format.format();
}
out.writeOptionalString(formatString);
if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
String relationString = null;
if (this.relation != null) {
relationString = this.relation.getRelationName();

@@ -157,19 +157,19 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
flags = in.readInt();
analyzer = in.readOptionalString();
defaultOperator = Operator.readFromStream(in);
if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().before(Version.V_5_1_1)) {
in.readBoolean(); // lowercase_expanded_terms
}
settings.lenient(in.readBoolean());
if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
this.lenientSet = in.readBoolean();
}
settings.analyzeWildcard(in.readBoolean());
if (in.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().before(Version.V_5_1_1)) {
in.readString(); // locale
}
minimumShouldMatch = in.readOptionalString();
if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
settings.quoteFieldSuffix(in.readOptionalString());
useAllFields = in.readOptionalBoolean();
}

@@ -186,19 +186,19 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
out.writeInt(flags);
out.writeOptionalString(analyzer);
defaultOperator.writeTo(out);
if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().before(Version.V_5_1_1)) {
out.writeBoolean(true); // lowercase_expanded_terms
}
out.writeBoolean(settings.lenient());
if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeBoolean(lenientSet);
}
out.writeBoolean(settings.analyzeWildcard());
if (out.getVersion().before(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().before(Version.V_5_1_1)) {
out.writeString(Locale.ROOT.toLanguageTag()); // locale
}
out.writeOptionalString(minimumShouldMatch);
if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeOptionalString(settings.quoteFieldSuffix());
out.writeOptionalBoolean(useAllFields);
}

@@ -19,7 +19,6 @@

package org.elasticsearch.index.refresh;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -106,20 +105,14 @@ public class RefreshStats implements Streamable, ToXContent {
public void readFrom(StreamInput in) throws IOException {
total = in.readVLong();
totalTimeInMillis = in.readVLong();
if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
listeners = in.readVInt();
} else {
listeners = 0;
}
listeners = in.readVInt();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(total);
out.writeVLong(totalTimeInMillis);
if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
out.writeVInt(listeners);
}
out.writeVInt(listeners);
}

@Override

@@ -402,7 +402,7 @@ public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScr
retryBackoffInitialTime = new TimeValue(in);
maxRetries = in.readVInt();
requestsPerSecond = in.readFloat();
if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
slices = in.readVInt();
} else {
slices = 1;

@@ -421,12 +421,12 @@ public abstract class AbstractBulkByScrollRequest<Self extends AbstractBulkByScr
retryBackoffInitialTime.writeTo(out);
out.writeVInt(maxRetries);
out.writeFloat(requestsPerSecond);
if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeVInt(slices);
} else {
if (slices > 1) {
throw new IllegalArgumentException("Attempting to send sliced reindex-style request to a node that doesn't support "
+ "it. Version is [" + out.getVersion() + "] but must be [" + Version.V_5_1_1_UNRELEASED + "]");
+ "it. Version is [" + out.getVersion() + "] but must be [" + Version.V_5_1_1 + "]");
}
}
}

@@ -189,7 +189,7 @@ public abstract class BulkByScrollTask extends CancellableTask {
}

public Status(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
sliceId = in.readOptionalVInt();
} else {
sliceId = null;

@@ -207,7 +207,7 @@ public abstract class BulkByScrollTask extends CancellableTask {
requestsPerSecond = in.readFloat();
reasonCancelled = in.readOptionalString();
throttledUntil = new TimeValue(in);
if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new));
} else {
sliceStatuses = emptyList();

@@ -216,7 +216,7 @@ public abstract class BulkByScrollTask extends CancellableTask {

@Override
public void writeTo(StreamOutput out) throws IOException {
if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeOptionalVInt(sliceId);
}
out.writeVLong(total);

@@ -232,7 +232,7 @@ public abstract class BulkByScrollTask extends CancellableTask {
out.writeFloat(requestsPerSecond);
out.writeOptionalString(reasonCancelled);
throttledUntil.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeVInt(sliceStatuses.size());
for (StatusOrException sliceStatus : sliceStatuses) {
out.writeOptionalWriteable(sliceStatus);

@@ -90,7 +90,7 @@ public class RemoteInfo implements Writeable {
headers.put(in.readString(), in.readString());
}
this.headers = unmodifiableMap(headers);
if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
socketTimeout = new TimeValue(in);
connectTimeout = new TimeValue(in);
} else {

@@ -112,7 +112,7 @@ public class RemoteInfo implements Writeable {
out.writeString(header.getKey());
out.writeString(header.getValue());
}
if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
socketTimeout.writeTo(out);
connectTimeout.writeTo(out);
}

@@ -805,10 +805,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
final boolean force = request.force();
logger.trace("flush with {}", request);
/*
* We allow flushes while recovery since we allow operations to happen while recovering and
* we want to keep the translog under control (up to deletes, which we do not GC). Yet, we
* do not use flush internally to clear deletes and flush the index writer since we use
* Engine#writeIndexingBuffer for this now.
* We allow flushes while recovery since we allow operations to happen while recovering and we want to keep the translog under
* control (up to deletes, which we do not GC). Yet, we do not use flush internally to clear deletes and flush the index writer
* since we use Engine#writeIndexingBuffer for this now.
*/
verifyNotClosed();
final Engine engine = getEngine();

@@ -1316,8 +1315,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}

/**
* Tests whether or not the translog should be flushed. This test is based on the current size
* of the translog compared to the configured flush threshold size.
* Tests whether or not the translog should be flushed. This test is based on the current size of the translog compared to the
* configured flush threshold size.
*
* @return {@code true} if the translog should be flushed
*/

@@ -1335,9 +1334,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}

/**
* Tests whether or not the translog generation should be rolled to a new generation. This test
* is based on the size of the current generation compared to the configured generation
* threshold size.
* Tests whether or not the translog generation should be rolled to a new generation. This test is based on the size of the current
* generation compared to the configured generation threshold size.
*
* @return {@code true} if the current generation should be rolled to a new generation
*/

@@ -1919,21 +1917,19 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private final AtomicBoolean flushOrRollRunning = new AtomicBoolean();

/**
* Schedules a flush or translog generation roll if needed but will not schedule more than one
* concurrently. The operation will be executed asynchronously on the flush thread pool.
* Schedules a flush or translog generation roll if needed but will not schedule more than one concurrently. The operation will be
* executed asynchronously on the flush thread pool.
*/
public void afterWriteOperation() {
if (shouldFlush() || shouldRollTranslogGeneration()) {
if (flushOrRollRunning.compareAndSet(false, true)) {
/*
* We have to check again since otherwise there is a race when a thread passes the
* first check next to another thread which performs the operation quickly enough to
* finish before the current thread could flip the flag. In that situation, we have
* an extra operation.
* We have to check again since otherwise there is a race when a thread passes the first check next to another thread which
* performs the operation quickly enough to finish before the current thread could flip the flag. In that situation, we
* have an extra operation.
*
* Additionally, a flush implicitly executes a translog generation roll so if we
* execute a flush then we do not need to check if we should roll the translog
* generation.
* Additionally, a flush implicitly executes a translog generation roll so if we execute a flush then we do not need to
* check if we should roll the translog generation.
*/
if (shouldFlush()) {
logger.debug("submitting async flush request");

@@ -278,22 +278,6 @@ public final class AnalysisModule {
* version uses a set of English stop words that are in
* lucene-analyzers-common so "stop" is defined in the analysis-common
* module. */

// Add token filters declared in PreBuiltTokenFilters until they have all been migrated
for (PreBuiltTokenFilters preBuilt : PreBuiltTokenFilters.values()) {
switch (preBuilt) {
case LOWERCASE:
// This has been migrated but has to stick around until PreBuiltTokenizers is removed.
continue;
default:
if (CachingStrategy.ONE != preBuilt.getCachingStrategy()) {
throw new UnsupportedOperationException("shim not available for " + preBuilt.getCachingStrategy());
}
String name = preBuilt.name().toLowerCase(Locale.ROOT);
preConfiguredTokenFilters.register(name, PreConfiguredTokenFilter.singleton(name, preBuilt.isMultiTermAware(),
tokenStream -> preBuilt.create(tokenStream, Version.CURRENT)));
}
}

for (AnalysisPlugin plugin: plugins) {
for (PreConfiguredTokenFilter filter : plugin.getPreConfiguredTokenFilters()) {

@@ -20,38 +20,10 @@ package org.elasticsearch.indices.analysis;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
import org.apache.lucene.analysis.ar.ArabicStemFilter;
import org.apache.lucene.analysis.br.BrazilianStemFilter;
import org.apache.lucene.analysis.cjk.CJKBigramFilter;
import org.apache.lucene.analysis.cjk.CJKWidthFilter;
import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.apache.lucene.analysis.cz.CzechStemFilter;
import org.apache.lucene.analysis.de.GermanNormalizationFilter;
import org.apache.lucene.analysis.de.GermanStemFilter;
import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.hi.HindiNormalizationFilter;
import org.apache.lucene.analysis.in.IndicNormalizationFilter;
import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;
import org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilter;
import org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter;
import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.tr.ApostropheFilter;
import org.apache.lucene.analysis.util.ElisionFilter;
import org.elasticsearch.Version;
import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory;
import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
import org.tartarus.snowball.ext.DutchStemmer;
import org.tartarus.snowball.ext.FrenchStemmer;

import java.util.Locale;

@@ -66,229 +38,7 @@ public enum PreBuiltTokenFilters {
protected boolean isMultiTermAware() {
return true;
}
},

// Extended Token Filters
ELISION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new ElisionFilter(tokenStream, FrenchAnalyzer.DEFAULT_ARTICLES);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

ARABIC_STEM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new ArabicStemFilter(tokenStream);
}
},

BRAZILIAN_STEM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new BrazilianStemFilter(tokenStream);
}
},

CZECH_STEM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new CzechStemFilter(tokenStream);
}
},

DUTCH_STEM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new SnowballFilter(tokenStream, new DutchStemmer());
}
},

FRENCH_STEM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new SnowballFilter(tokenStream, new FrenchStemmer());
}
},

GERMAN_STEM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new GermanStemFilter(tokenStream);
}
},

RUSSIAN_STEM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new SnowballFilter(tokenStream, "Russian");
}
},

KEYWORD_REPEAT(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new KeywordRepeatFilter(tokenStream);
}
},

ARABIC_NORMALIZATION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new ArabicNormalizationFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

PERSIAN_NORMALIZATION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new PersianNormalizationFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

TYPE_AS_PAYLOAD(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new TypeAsPayloadTokenFilter(tokenStream);
}
},

SHINGLE(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new ShingleFilter(tokenStream);
}
},

GERMAN_NORMALIZATION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new GermanNormalizationFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

HINDI_NORMALIZATION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new HindiNormalizationFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

INDIC_NORMALIZATION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new IndicNormalizationFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

SORANI_NORMALIZATION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new SoraniNormalizationFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

SCANDINAVIAN_NORMALIZATION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new ScandinavianNormalizationFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

SCANDINAVIAN_FOLDING(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new ScandinavianFoldingFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

APOSTROPHE(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new ApostropheFilter(tokenStream);
}
},

CJK_WIDTH(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new CJKWidthFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

DECIMAL_DIGIT(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new DecimalDigitFilter(tokenStream);
}
@Override
protected boolean isMultiTermAware() {
return true;
}
},

CJK_BIGRAM(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new CJKBigramFilter(tokenStream);
}
},

DELIMITED_PAYLOAD_FILTER(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new DelimitedPayloadTokenFilter(tokenStream, DelimitedPayloadTokenFilterFactory.DEFAULT_DELIMITER, DelimitedPayloadTokenFilterFactory.DEFAULT_ENCODER);
}
},

LIMIT(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new LimitTokenCountFilter(tokenStream, LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS);
}
},

;
};

protected boolean isMultiTermAware() {
return false;

@@ -118,7 +118,7 @@ public final class PipelineConfiguration extends AbstractDiffable<PipelineConfig
}

public static PipelineConfiguration readFrom(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
return new PipelineConfiguration(in.readString(), in.readBytesReference(), XContentType.readFrom(in));
} else {
final String id = in.readString();

@@ -135,7 +135,7 @@ public final class PipelineConfiguration extends AbstractDiffable<PipelineConfig
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeBytesReference(config);
if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
xContentType.writeTo(out);
}
}

@@ -52,7 +52,7 @@ public class OsStats implements Writeable, ToXContent {
this.cpu = new Cpu(in);
this.mem = new Mem(in);
this.swap = new Swap(in);
if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
this.cgroup = in.readOptionalWriteable(Cgroup::new);
} else {
this.cgroup = null;

@@ -65,7 +65,7 @@ public class OsStats implements Writeable, ToXContent {
cpu.writeTo(out);
mem.writeTo(out);
swap.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
out.writeOptionalWriteable(cgroup);
}
}

@@ -81,7 +81,7 @@ public class PluginInfo implements Writeable, ToXContent {
this.description = in.readString();
this.version = in.readString();
this.classname = in.readString();
if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
hasNativeController = in.readBoolean();
} else {
hasNativeController = false;

@@ -94,7 +94,7 @@ public class PluginInfo implements Writeable, ToXContent {
out.writeString(description);
out.writeString(version);
out.writeString(classname);
if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
out.writeBoolean(hasNativeController);
}
}
@@ -479,7 +479,7 @@ public enum RestStatus {
     * is considered to be temporary. If the request that received this status code was the result of a user action,
     * the request MUST NOT be repeated until it is requested by a separate user action.
     */
    INSUFFICIENT_STORAGE(506);
    INSUFFICIENT_STORAGE(507);

    private static final Map<Integer, RestStatus> CODE_TO_STATUS;
    static {
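
The hunk cuts off just as the static initializer opens. For orientation, 507 is the RFC 4918 code for Insufficient Storage (506 belongs to Variant Also Negotiates), and a code-to-enum map like CODE_TO_STATUS is conventionally filled by walking the enum values once. A sketch with illustrative names, not the verbatim source:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

enum StatusSketch {
    OK(200),
    INSUFFICIENT_STORAGE(507); // the corrected code from the hunk above

    private static final Map<Integer, StatusSketch> CODE_TO_STATUS;
    static {
        Map<Integer, StatusSketch> byCode = new HashMap<>();
        for (StatusSketch status : values()) {
            byCode.put(status.code, status);
        }
        CODE_TO_STATUS = Collections.unmodifiableMap(byCode);
    }

    private final int code;

    StatusSketch(int code) {
        this.code = code;
    }

    static StatusSketch fromCode(int code) {
        return CODE_TO_STATUS.get(code);
    }
}
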
@@ -23,7 +23,6 @@ import org.apache.lucene.search.Scorer;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.search.lookup.LeafDocLookup;
import org.elasticsearch.search.lookup.LeafFieldsLookup;
import org.elasticsearch.search.lookup.LeafIndexLookup;
import org.elasticsearch.search.lookup.LeafSearchLookup;
import org.elasticsearch.search.lookup.SourceLookup;

@@ -87,13 +86,6 @@ public abstract class AbstractSearchScript extends AbstractExecutableScript impl
        return lookup.source();
    }

    /**
     * Allows to access statistics on terms and fields.
     */
    protected final LeafIndexLookup indexLookup() {
        return lookup.indexLookup();
    }

    /**
     * Allows to access the *stored* fields.
     */
@@ -19,9 +19,12 @@

package org.elasticsearch.script;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.lookup.SearchLookup;

@@ -41,6 +44,11 @@ public class NativeScriptEngine extends AbstractComponent implements ScriptEngin

    public NativeScriptEngine(Settings settings, Map<String, NativeScriptFactory> scripts) {
        super(settings);
        if (scripts.isEmpty() == false) {
            Logger logger = Loggers.getLogger(ScriptModule.class);
            DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
            deprecationLogger.deprecated("Native scripts are deprecated. Use a custom ScriptEngine to write scripts in java.");
        }
        this.scripts = unmodifiableMap(scripts);
    }

@@ -31,7 +31,9 @@ import java.util.Map;
 * @see AbstractSearchScript
 * @see AbstractLongSearchScript
 * @see AbstractDoubleSearchScript
 * @deprecated Create a {@link ScriptEngine} instead of using native scripts
 */
@Deprecated
public interface NativeScriptFactory {

    /**
@@ -485,7 +485,7 @@ public final class Script implements ToXContentObject, Writeable {
    public Script(StreamInput in) throws IOException {
        // Version 5.3 allows lang to be an optional parameter for stored scripts and expects
        // options to be null for stored and file scripts.
        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
            this.type = ScriptType.readFrom(in);
            this.lang = in.readOptionalString();
            this.idOrCode = in.readString();
@@ -496,7 +496,7 @@ public final class Script implements ToXContentObject, Writeable {
        // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential
        // for more options than just XContentType. Reorders the read in contents to be in
        // same order as the constructor.
        } else if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
        } else if (in.getVersion().onOrAfter(Version.V_5_1_1)) {
            this.type = ScriptType.readFrom(in);
            this.lang = in.readString();

@@ -554,7 +554,7 @@ public final class Script implements ToXContentObject, Writeable {
    public void writeTo(StreamOutput out) throws IOException {
        // Version 5.3+ allows lang to be an optional parameter for stored scripts and expects
        // options to be null for stored and file scripts.
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
            type.writeTo(out);
            out.writeOptionalString(lang);
            out.writeString(idOrCode);
@@ -565,7 +565,7 @@ public final class Script implements ToXContentObject, Writeable {
        // Version 5.1 to 5.3 (exclusive) requires all Script members to be non-null and supports the potential
        // for more options than just XContentType. Reorders the written out contents to be in
        // same order as the constructor.
        } else if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
        } else if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
            type.writeTo(out);

            if (lang == null) {
@@ -329,7 +329,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont
        // Split the id to find the language then use StoredScriptSource to parse the
        // expected BytesReference after which a new StoredScriptSource is created
        // with the appropriate language and options.
        if (in.getVersion().before(Version.V_5_3_0_UNRELEASED)) {
        if (in.getVersion().before(Version.V_5_3_0)) {
            int split = id.indexOf('#');

            if (split == -1) {
@@ -353,7 +353,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont
    public void writeTo(StreamOutput out) throws IOException {
        // Version 5.3+ will output the contents of the scripts' Map using
        // StoredScriptSource to store the language, code, and options.
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
            out.writeVInt(scripts.size());

            for (Map.Entry<String, StoredScriptSource> entry : scripts.entrySet()) {
@@ -365,7 +365,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
     * only the code parameter will be read in as a bytes reference.
     */
    public StoredScriptSource(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
            this.lang = in.readString();
            this.code = in.readString();
            @SuppressWarnings("unchecked")
@@ -385,7 +385,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
     */
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
            out.writeString(lang);
            out.writeString(code);
            @SuppressWarnings("unchecked")
@@ -140,6 +140,9 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder<RangeAggregati
            AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
        // We need to call processRanges here so they are parsed before we make the decision of whether to cache the request
        Range[] ranges = processRanges(context, config);
        if (ranges.length == 0) {
            throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
        }
        return new RangeAggregatorFactory(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
                metaData);
    }
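
The new guard rejects an empty ranges list when the factory is built, instead of quietly constructing an aggregation that can never match; the same check is added to the date_range, geo_distance, and ip_range builders in the hunks that follow. A test-style sketch of the observable behaviour (expectThrows is the usual Lucene/ES test helper; buildFactory stands in for the innerBuild call and is hypothetical):

RangeAggregationBuilder prices = new RangeAggregationBuilder("prices");
// deliberately no prices.addRange(...) calls
IllegalArgumentException e =
        expectThrows(IllegalArgumentException.class, () -> buildFactory(prices));
assertEquals("No [ranges] specified for the [prices] aggregation", e.getMessage());
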
@@ -283,9 +283,12 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder<DateRangeA
    @Override
    protected DateRangeAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
            AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
        // We need to call processRanges here so they are parsed and we know whether `now` has been used before we make
        // the decision of whether to cache the request
        Range[] ranges = processRanges(context, config);
        if (ranges.length == 0) {
            throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
        }
        return new DateRangeAggregatorFactory(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder,
                metaData);
    }
@@ -384,6 +384,9 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde
            ValuesSourceConfig<ValuesSource.GeoPoint> config, AggregatorFactory<?> parent, Builder subFactoriesBuilder)
            throws IOException {
        Range[] ranges = this.ranges.toArray(new Range[this.range().size()]);
        if (ranges.length == 0) {
            throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
        }
        return new GeoDistanceRangeAggregatorFactory(name, config, origin, ranges, unit, distanceType, keyed, context, parent,
                subFactoriesBuilder, metaData);
    }
@@ -369,6 +369,9 @@ public final class IpRangeAggregationBuilder
            AggregatorFactory<?> parent, Builder subFactoriesBuilder)
            throws IOException {
        List<BinaryRangeAggregator.Range> ranges = new ArrayList<>();
        if (this.ranges.size() == 0) {
            throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation");
        }
        for (Range range : this.ranges) {
            ranges.add(new BinaryRangeAggregator.Range(range.key, toBytesRef(range.from), toBytesRef(range.to)));
        }
@@ -64,8 +64,8 @@ public class IncludeExclude implements Writeable, ToXContent {
    public static final ParseField PARTITION_FIELD = new ParseField("partition");
    public static final ParseField NUM_PARTITIONS_FIELD = new ParseField("num_partitions");
    // Needed to add this seed for a deterministic term hashing policy
    // otherwise tests fail to get expected results and worse, shards
    // can disagree on which terms hash to the required partition.
    private static final int HASH_PARTITIONING_SEED = 31;

    // for parsing purposes only
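
The fixed seed matters because partition routing must be a pure function of the term: every shard has to agree on which partition a term belongs to, or include/exclude partitioning returns inconsistent results. A sketch of deterministic partition routing in the spirit of the code above (murmurhash3_x86_32 is the Lucene helper; the surrounding class and method names are illustrative):

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;

final class PartitionRoutingSketch {
    private static final int HASH_PARTITIONING_SEED = 31;

    // Same term + same seed => same partition on every shard.
    static int partitionForTerm(BytesRef term, int numPartitions) {
        int hash = StringHelper.murmurhash3_x86_32(term, HASH_PARTITIONING_SEED);
        return Math.floorMod(hash, numPartitions);
    }
}
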
@@ -427,7 +427,7 @@ public class IncludeExclude implements Writeable, ToXContent {
        } else {
            excludeValues = null;
        }
        if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
        if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
            incNumPartitions = in.readVInt();
            incZeroBasedPartition = in.readVInt();
        } else {
@@ -460,7 +460,7 @@ public class IncludeExclude implements Writeable, ToXContent {
                out.writeBytesRef(value);
            }
        }
        if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
            out.writeVInt(incNumPartitions);
            out.writeVInt(incZeroBasedPartition);
        }
@@ -220,7 +220,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
        profile = in.readBoolean();
        searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new);
        sliceBuilder = in.readOptionalWriteable(SliceBuilder::new);
        if (in.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
            collapse = in.readOptionalWriteable(CollapseBuilder::new);
        }
    }
@@ -271,7 +271,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
        out.writeBoolean(profile);
        out.writeOptionalWriteable(searchAfterBuilder);
        out.writeOptionalWriteable(sliceBuilder);
        if (out.getVersion().onOrAfter(Version.V_5_3_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
            out.writeOptionalWriteable(collapse);
        }
    }
@@ -128,14 +128,14 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
        order(in.readOptionalWriteable(Order::readFromStream));
        highlightFilter(in.readOptionalBoolean());
        forceSource(in.readOptionalBoolean());
        if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
        if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
            boundaryScannerType(in.readOptionalWriteable(BoundaryScannerType::readFromStream));
        }
        boundaryMaxScan(in.readOptionalVInt());
        if (in.readBoolean()) {
            boundaryChars(in.readString().toCharArray());
        }
        if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
        if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
            if (in.readBoolean()) {
                boundaryScannerLocale(in.readString());
            }
@@ -167,7 +167,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
        out.writeOptionalWriteable(order);
        out.writeOptionalBoolean(highlightFilter);
        out.writeOptionalBoolean(forceSource);
        if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
            out.writeOptionalWriteable(boundaryScannerType);
        }
        out.writeOptionalVInt(boundaryMaxScan);
@@ -176,7 +176,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
        if (hasBounaryChars) {
            out.writeString(String.valueOf(boundaryChars));
        }
        if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
            boolean hasBoundaryScannerLocale = boundaryScannerLocale != null;
            out.writeBoolean(hasBoundaryScannerLocale);
            if (hasBoundaryScannerLocale) {
@@ -54,7 +54,7 @@ public final class AliasFilter implements Writeable {

    public AliasFilter(StreamInput input) throws IOException {
        aliases = input.readStringArray();
        if (input.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
        if (input.getVersion().onOrAfter(Version.V_5_1_1)) {
            filter = input.readOptionalNamedWriteable(QueryBuilder.class);
            reparseAliases = false;
        } else {
@@ -90,7 +90,7 @@ public final class AliasFilter implements Writeable {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeStringArray(aliases);
        if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_1_1)) {
            out.writeOptionalNamedWriteable(filter);
        }
    }
@@ -181,7 +181,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
        source = in.readOptionalWriteable(SearchSourceBuilder::new);
        types = in.readStringArray();
        aliasFilter = new AliasFilter(in);
        if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
        if (in.getVersion().onOrAfter(Version.V_5_2_0)) {
            indexBoost = in.readFloat();
        } else {
            // Nodes < 5.2.0 don't send index boost. Read it from source.
@@ -209,7 +209,7 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
        out.writeOptionalWriteable(source);
        out.writeStringArray(types);
        aliasFilter.writeTo(out);
        if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
        if (out.getVersion().onOrAfter(Version.V_5_2_0)) {
            out.writeFloat(indexBoost);
        }
        if (!asKey) {
@@ -1,132 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.lookup;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IntsRefBuilder;

import java.io.IOException;
import java.util.Iterator;

/*
 * Can iterate over the positions of a term an arbitrary number of times.
 * */
public class CachedPositionIterator extends PositionIterator {

    public CachedPositionIterator(IndexFieldTerm indexFieldTerm) {
        super(indexFieldTerm);
    }

    // all payloads of the term in the current document in one bytes array.
    // payloadStarts and payloadLength mark the start and end of one payload.
    final BytesRefBuilder payloads = new BytesRefBuilder();

    final IntsRefBuilder payloadsLengths = new IntsRefBuilder();

    final IntsRefBuilder payloadsStarts = new IntsRefBuilder();

    final IntsRefBuilder positions = new IntsRefBuilder();

    final IntsRefBuilder startOffsets = new IntsRefBuilder();

    final IntsRefBuilder endOffsets = new IntsRefBuilder();

    final BytesRef payload = new BytesRef();

    @Override
    public Iterator<TermPosition> reset() {
        return new Iterator<TermPosition>() {
            private int pos = 0;
            private final TermPosition termPosition = new TermPosition();

            @Override
            public boolean hasNext() {
                return pos < freq;
            }

            @Override
            public TermPosition next() {
                termPosition.position = positions.intAt(pos);
                termPosition.startOffset = startOffsets.intAt(pos);
                termPosition.endOffset = endOffsets.intAt(pos);
                termPosition.payload = payload;
                payload.bytes = payloads.bytes();
                payload.offset = payloadsStarts.intAt(pos);
                payload.length = payloadsLengths.intAt(pos);
                pos++;
                return termPosition;
            }

            @Override
            public void remove() {
            }
        };
    }


    private void record() throws IOException {
        TermPosition termPosition;
        for (int i = 0; i < freq; i++) {
            termPosition = super.next();
            positions.setIntAt(i, termPosition.position);
            addPayload(i, termPosition.payload);
            startOffsets.setIntAt(i, termPosition.startOffset);
            endOffsets.setIntAt(i, termPosition.endOffset);
        }
    }
    private void ensureSize(int freq) {
        if (freq == 0) {
            return;
        }
        startOffsets.grow(freq);
        endOffsets.grow(freq);
        positions.grow(freq);
        payloadsLengths.grow(freq);
        payloadsStarts.grow(freq);
        payloads.grow(freq * 8); // this is just a guess....

    }

    private void addPayload(int i, BytesRef currPayload) {
        if (currPayload != null) {
            payloadsLengths.setIntAt(i, currPayload.length);
            payloadsStarts.setIntAt(i, i == 0 ? 0 : payloadsStarts.intAt(i - 1) + payloadsLengths.intAt(i - 1));
            payloads.grow(payloadsStarts.intAt(i) + currPayload.length);
            System.arraycopy(currPayload.bytes, currPayload.offset, payloads.bytes(), payloadsStarts.intAt(i), currPayload.length);
        } else {
            payloadsLengths.setIntAt(i, 0);
            payloadsStarts.setIntAt(i, i == 0 ? 0 : payloadsStarts.intAt(i - 1) + payloadsLengths.intAt(i - 1));
        }
    }


    @Override
    public void nextDoc() throws IOException {
        super.nextDoc();
        ensureSize(freq);
        record();
    }

    @Override
    public TermPosition next() {
        throw new UnsupportedOperationException();
    }
}
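
The class above avoids one allocation per payload by packing every payload of the current document into a single growable byte buffer; payloadsStarts and payloadsLengths record each payload's slice, and next() merely repoints a shared BytesRef at the right slice. A standalone sketch of that flat-buffer layout:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IntsRefBuilder;

final class FlatPayloadBufferSketch {
    private final BytesRefBuilder bytes = new BytesRefBuilder();
    private final IntsRefBuilder starts = new IntsRefBuilder();
    private final IntsRefBuilder lengths = new IntsRefBuilder();

    // Append payload i; its slice starts where the previous slice ended.
    void add(int i, BytesRef payload) {
        starts.grow(i + 1);
        lengths.grow(i + 1);
        int start = i == 0 ? 0 : starts.intAt(i - 1) + lengths.intAt(i - 1);
        starts.setIntAt(i, start);
        lengths.setIntAt(i, payload.length);
        bytes.grow(start + payload.length);
        System.arraycopy(payload.bytes, payload.offset, bytes.bytes(), start, payload.length);
    }

    // Read payload i back as a zero-copy view into the shared buffer.
    BytesRef view(int i) {
        return new BytesRef(bytes.bytes(), starts.intAt(i), lengths.intAt(i));
    }
}
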
@@ -1,128 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.lookup;

import org.apache.lucene.search.CollectionStatistics;
import org.elasticsearch.common.util.MinimalMap;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * Script interface to all information regarding a field.
 * */
public class IndexField extends MinimalMap<String, IndexFieldTerm> {

    /*
     * TermsInfo Objects that represent the Terms are stored in this map when
     * requested. Information such as frequency, doc frequency and positions
     * information can be retrieved from the TermInfo objects in this map.
     */
    private final Map<String, IndexFieldTerm> terms = new HashMap<>();

    // the name of this field
    private final String fieldName;

    /*
     * This holds the current reader. We need it to populate the field
     * statistics. We just delegate all requests there
     */
    private final LeafIndexLookup indexLookup;

    /*
     * General field statistics such as number of documents containing the
     * field.
     */
    private final CollectionStatistics fieldStats;

    /*
     * Represents a field in a document. Can be used to return information on
     * statistics of this field. Information on specific terms in this field can
     * be accessed by calling get(String term).
     */
    public IndexField(String fieldName, LeafIndexLookup indexLookup) throws IOException {

        assert fieldName != null;
        this.fieldName = fieldName;

        assert indexLookup != null;
        this.indexLookup = indexLookup;

        fieldStats = this.indexLookup.getIndexSearcher().collectionStatistics(fieldName);
    }

    /* get number of documents containing the field */
    public long docCount() throws IOException {
        return fieldStats.docCount();
    }

    /* get sum of the number of words over all documents that were indexed */
    public long sumttf() throws IOException {
        return fieldStats.sumTotalTermFreq();
    }

    /*
     * get the sum of doc frequencies over all words that appear in any document
     * that has the field.
     */
    public long sumdf() throws IOException {
        return fieldStats.sumDocFreq();
    }

    // TODO: might be good to get the field lengths here somewhere?

    /*
     * Returns a TermInfo object that can be used to access information on
     * specific terms. flags can be set as described in TermInfo.
     *
     * TODO: here might be potential for running time improvement? If we knew in
     * advance which terms are requested, we could provide an array which the
     * user could then iterate over.
     */
    public IndexFieldTerm get(Object key, int flags) {
        String termString = (String) key;
        IndexFieldTerm indexFieldTerm = terms.get(termString);
        // see if we initialized already...
        if (indexFieldTerm == null) {
            indexFieldTerm = new IndexFieldTerm(termString, fieldName, indexLookup, flags);
            terms.put(termString, indexFieldTerm);
        }
        indexFieldTerm.validateFlags(flags);
        return indexFieldTerm;
    }

    /*
     * Returns a TermInfo object that can be used to access information on
     * specific terms. flags can be set as described in TermInfo.
     */
    @Override
    public IndexFieldTerm get(Object key) {
        // per default, do not initialize any positions info
        return get(key, IndexLookup.FLAG_FREQUENCIES);
    }

    public void setDocIdInTerms(int docId) {
        for (IndexFieldTerm ti : terms.values()) {
            ti.setDocument(docId);
        }
    }

}
@@ -1,298 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.lookup;

import org.apache.lucene.index.Fields;
import org.apache.lucene.index.FilterLeafReader.FilterPostingsEnum;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;

import java.io.IOException;
import java.util.Iterator;

/**
 * Holds all information on a particular term in a field.
 * */
public class IndexFieldTerm implements Iterable<TermPosition> {

    // The posting list for this term. Is null if the term or field does not
    // exist.
    PostingsEnum postings;

    // Stores if positions, offsets and payloads are requested.
    private final int flags;

    private final String fieldName;

    private final String term;

    private final PositionIterator iterator;

    // for lucene calls
    private final Term identifier;

    private final TermStatistics termStats;

    // get the document frequency of the term
    public long df() throws IOException {
        return termStats.docFreq();
    }

    // get the total term frequency of the term, that is, how often does the
    // term appear in any document?
    public long ttf() throws IOException {
        return termStats.totalTermFreq();
    }

    // when the reader changes, we have to get the posting list for this term
    // and reader
    private void setReader(LeafReader reader) {
        try {
            postings = getPostings(convertToLuceneFlags(flags), reader);

            if (postings == null) {
                // no term or field for this segment, fake out the postings...
                final DocIdSetIterator empty = DocIdSetIterator.empty();
                postings = new PostingsEnum() {
                    @Override
                    public int docID() {
                        return empty.docID();
                    }

                    @Override
                    public int nextDoc() throws IOException {
                        return empty.nextDoc();
                    }

                    @Override
                    public int advance(int target) throws IOException {
                        return empty.advance(target);
                    }

                    @Override
                    public long cost() {
                        return empty.cost();
                    }

                    @Override
                    public int freq() throws IOException {
                        return 1;
                    }

                    @Override
                    public int nextPosition() throws IOException {
                        return -1;
                    }

                    @Override
                    public int startOffset() throws IOException {
                        return -1;
                    }

                    @Override
                    public int endOffset() throws IOException {
                        return -1;
                    }

                    @Override
                    public BytesRef getPayload() throws IOException {
                        return null;
                    }
                };
            }
        } catch (IOException e) {
            throw new ElasticsearchException("Unable to get postings for field " + fieldName + " and term " + term, e);
        }

    }

    private int convertToLuceneFlags(int flags) {
        int lucenePositionsFlags = PostingsEnum.NONE;
        lucenePositionsFlags |= (flags & IndexLookup.FLAG_FREQUENCIES) > 0 ? PostingsEnum.FREQS : 0x0;
        lucenePositionsFlags |= (flags & IndexLookup.FLAG_POSITIONS) > 0 ? PostingsEnum.POSITIONS : 0x0;
        lucenePositionsFlags |= (flags & IndexLookup.FLAG_PAYLOADS) > 0 ? PostingsEnum.PAYLOADS : 0x0;
        lucenePositionsFlags |= (flags & IndexLookup.FLAG_OFFSETS) > 0 ? PostingsEnum.OFFSETS : 0x0;
        return lucenePositionsFlags;
    }

    private PostingsEnum getPostings(int luceneFlags, LeafReader reader) throws IOException {
        assert identifier.field() != null;
        assert identifier.bytes() != null;
        final Fields fields = reader.fields();
        PostingsEnum newPostings = null;
        if (fields != null) {
            final Terms terms = fields.terms(identifier.field());
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator();
                if (termsEnum.seekExact(identifier.bytes())) {
                    newPostings = termsEnum.postings(postings, luceneFlags);
                    final Bits liveDocs = reader.getLiveDocs();
                    if (liveDocs != null) {
                        newPostings = new FilterPostingsEnum(newPostings) {
                            private int doNext(int d) throws IOException {
                                for (; d != NO_MORE_DOCS; d = super.nextDoc()) {
                                    if (liveDocs.get(d)) {
                                        return d;
                                    }
                                }
                                return NO_MORE_DOCS;
                            }
                            @Override
                            public int nextDoc() throws IOException {
                                return doNext(super.nextDoc());
                            }
                            @Override
                            public int advance(int target) throws IOException {
                                return doNext(super.advance(target));
                            }
                        };
                    }
                }
            }
        }
        return newPostings;
    }

    private int freq = 0;

    public void setDocument(int docId) {
        assert (postings != null);
        try {
            // we try to advance to the current document.
            int currentDocPos = postings.docID();
            if (currentDocPos < docId) {
                currentDocPos = postings.advance(docId);
            }
            if (currentDocPos == docId) {
                freq = postings.freq();
            } else {
                freq = 0;
            }
            iterator.nextDoc();
        } catch (IOException e) {
            throw new ElasticsearchException("While trying to initialize term positions in IndexFieldTerm.setNextDoc() ", e);
        }
    }

    public IndexFieldTerm(String term, String fieldName, LeafIndexLookup indexLookup, int flags) {
        assert fieldName != null;
        this.fieldName = fieldName;
        assert term != null;
        this.term = term;
        assert indexLookup != null;
        identifier = new Term(fieldName, (String) term);
        this.flags = flags;
        boolean doRecord = ((flags & IndexLookup.FLAG_CACHE) > 0);
        if (!doRecord) {
            iterator = new PositionIterator(this);
        } else {
            iterator = new CachedPositionIterator(this);
        }
        setReader(indexLookup.getReader());
        setDocument(indexLookup.getDocId());
        try {
            termStats = indexLookup.getIndexSearcher().termStatistics(identifier,
                    TermContext.build(indexLookup.getReaderContext(), identifier));
        } catch (IOException e) {
            throw new ElasticsearchException("Cannot get term statistics: ", e);
        }
    }

    public int tf() throws IOException {
        return freq;
    }

    @Override
    public Iterator<TermPosition> iterator() {
        return iterator.reset();
    }

    /*
     * A user might decide inside a script to call get with _POSITIONS and then
     * a second time with _PAYLOADS. If the positions were recorded but the
     * payloads were not, the user will not have access to them. Therefore, throw
     * exception here explaining how to call get().
     */
    public void validateFlags(int flags2) {
        if ((this.flags & flags2) < flags2) {
            throw new ElasticsearchException("You must call get with all required flags! Instead of " + getCalledStatement(flags2)
                    + "call " + getCallStatement(flags2 | this.flags) + " once");
        }
    }

    private String getCalledStatement(int flags2) {
        String calledFlagsCall1 = getFlagsString(flags);
        String calledFlagsCall2 = getFlagsString(flags2);
        String callStatement1 = getCallStatement(calledFlagsCall1);
        String callStatement2 = getCallStatement(calledFlagsCall2);
        return " " + callStatement1 + " and " + callStatement2 + " ";
    }

    private String getCallStatement(String calledFlags) {
        return "_index['" + this.fieldName + "'].get('" + this.term + "', " + calledFlags + ")";
    }

    private String getFlagsString(int flags2) {
        String flagsString = null;
        if ((flags2 & IndexLookup.FLAG_FREQUENCIES) != 0) {
            flagsString = anddToFlagsString(flagsString, "_FREQUENCIES");
        }
        if ((flags2 & IndexLookup.FLAG_POSITIONS) != 0) {
            flagsString = anddToFlagsString(flagsString, "_POSITIONS");
        }
        if ((flags2 & IndexLookup.FLAG_OFFSETS) != 0) {
            flagsString = anddToFlagsString(flagsString, "_OFFSETS");
        }
        if ((flags2 & IndexLookup.FLAG_PAYLOADS) != 0) {
            flagsString = anddToFlagsString(flagsString, "_PAYLOADS");
        }
        if ((flags2 & IndexLookup.FLAG_CACHE) != 0) {
            flagsString = anddToFlagsString(flagsString, "_CACHE");
        }
        return flagsString;
    }

    private String anddToFlagsString(String flagsString, String flag) {
        if (flagsString != null) {
            flagsString += " | ";
        } else {
            flagsString = "";
        }
        flagsString += flag;
        return flagsString;
    }

    private String getCallStatement(int flags2) {
        String calledFlags = getFlagsString(flags2);
        String callStatement = getCallStatement(calledFlags);
        return " " + callStatement + " ";

    }

}
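
Together with IndexField above, this class is the surface behind the deprecated _index script variable. A hedged usage sketch; the lookup parameter is assumed to be a LeafIndexLookup on which setDocument(...) has already been called, and the field/term names are illustrative:

import java.io.IOException;

final class IndexLookupUsageSketch {
    static void inspectTerm(LeafIndexLookup lookup) throws IOException {
        IndexField body = lookup.get("body");
        // The first get() for a term fixes its flags, so request everything needed up front.
        IndexFieldTerm quick = body.get("quick",
                IndexLookup.FLAG_FREQUENCIES | IndexLookup.FLAG_POSITIONS);

        long df = quick.df();    // shard-wide: number of documents containing the term
        long ttf = quick.ttf();  // shard-wide: total occurrences of the term
        int tf = quick.tf();     // occurrences within the current document

        for (TermPosition position : quick) {
            System.out.println(position.position
                    + " [" + position.startOffset + ", " + position.endOffset + ")");
        }
    }
}
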
@@ -1,74 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.search.lookup;

import org.apache.lucene.index.LeafReaderContext;

import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.unmodifiableMap;

public class IndexLookup {
    public static final Map<String, Object> NAMES;
    static {
        Map<String, Object> names = new HashMap<>();
        names.put("_FREQUENCIES", IndexLookup.FLAG_FREQUENCIES);
        names.put("_POSITIONS", IndexLookup.FLAG_POSITIONS);
        names.put("_OFFSETS", IndexLookup.FLAG_OFFSETS);
        names.put("_PAYLOADS", IndexLookup.FLAG_PAYLOADS);
        names.put("_CACHE", IndexLookup.FLAG_CACHE);
        NAMES = unmodifiableMap(names);
    }
    /**
     * Flag to pass to {@link IndexField#get(Object, int)} if you require
     * offsets in the returned {@link IndexFieldTerm}.
     */
    public static final int FLAG_OFFSETS = 2;

    /**
     * Flag to pass to {@link IndexField#get(Object, int)} if you require
     * payloads in the returned {@link IndexFieldTerm}.
     */
    public static final int FLAG_PAYLOADS = 4;

    /**
     * Flag to pass to {@link IndexField#get(Object, int)} if you require
     * frequencies in the returned {@link IndexFieldTerm}. Frequencies might be
     * returned anyway for some lucene codecs even if this flag is not set.
     */
    public static final int FLAG_FREQUENCIES = 8;

    /**
     * Flag to pass to {@link IndexField#get(Object, int)} if you require
     * positions in the returned {@link IndexFieldTerm}.
     */
    public static final int FLAG_POSITIONS = 16;

    /**
     * Flag to pass to {@link IndexField#get(Object, int)} if you require
     * the position iterator of the returned {@link IndexFieldTerm} to be
     * cached so that it can be iterated over more than once.
     */
    public static final int FLAG_CACHE = 32;

    public static LeafIndexLookup getLeafIndexLookup(LeafReaderContext context) {
        return new LeafIndexLookup(context);
    }

}
@@ -1,199 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.search.lookup;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.MinimalMap;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class LeafIndexLookup extends MinimalMap<String, IndexField> {

    // Current reader from which we can get the term vectors. No info on term
    // and field statistics.
    private final LeafReader reader;

    // The parent reader from which we can get proper field and term
    // statistics
    private final IndexReader parentReader;

    // we need this later to get the field and term statistics of the shard
    private final IndexSearcher indexSearcher;

    // current docId
    private int docId = -1;

    // stores the objects that are used in the script. we maintain this map
    // because we do not want to re-initialize the objects each time a field is
    // accessed
    private final Map<String, IndexField> indexFields = new HashMap<>();

    // number of documents per shard. cached here because the computation is
    // expensive
    private int numDocs = -1;

    // the maximum doc number of the shard.
    private int maxDoc = -1;

    // number of deleted documents per shard. cached here because the
    // computation is expensive
    private int numDeletedDocs = -1;

    private boolean deprecationEmitted = false;

    private void logDeprecation() {
        if (deprecationEmitted == false) {
            Logger logger = Loggers.getLogger(getClass());
            DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
            deprecationLogger.deprecated("Using _index is deprecated. Create a custom ScriptEngine to access index internals.");
            deprecationEmitted = true;
        }
    }

    public int numDocs() {
        logDeprecation();
        if (numDocs == -1) {
            numDocs = parentReader.numDocs();
        }
        return numDocs;
    }

    public int maxDoc() {
        logDeprecation();
        if (maxDoc == -1) {
            maxDoc = parentReader.maxDoc();
        }
        return maxDoc;
    }

    public int numDeletedDocs() {
        logDeprecation();
        if (numDeletedDocs == -1) {
            numDeletedDocs = parentReader.numDeletedDocs();
        }
        return numDeletedDocs;
    }

    public LeafIndexLookup(LeafReaderContext ctx) {
        reader = ctx.reader();
        parentReader = ReaderUtil.getTopLevelContext(ctx).reader();
        indexSearcher = new IndexSearcher(parentReader);
        indexSearcher.setQueryCache(null);
    }

    public void setDocument(int docId) {
        if (this.docId == docId) { // if we are called with the same docId,
            // nothing to do
            return;
        }
        // We assume that docs are processed in ascending order of id. If this
        // is not the case, we would have to re initialize all posting lists in
        // IndexFieldTerm. TODO: Instead of assert we could also call
        // setReaderInFields(); here?
        if (this.docId > docId) {
            // This might happen if the same SearchLookup is used in different
            // phases, such as score and fetch phase.
            // In this case we do not want to re initialize posting list etc.
            // because we do not even know if term and field statistics will be
            // needed in this new phase.
            // Therefore we just remove all IndexFieldTerms.
            indexFields.clear();
        }
        this.docId = docId;
        setNextDocIdInFields();
    }

    protected void setNextDocIdInFields() {
        for (IndexField stat : indexFields.values()) {
            stat.setDocIdInTerms(this.docId);
        }
    }

    /*
     * TODO: here might be potential for running time improvement? If we knew in
     * advance which terms are requested, we could provide an array which the
     * user could then iterate over.
     */
    @Override
    public IndexField get(Object key) {
        logDeprecation();
        String stringField = (String) key;
        IndexField indexField = indexFields.get(key);
        if (indexField == null) {
            try {
                indexField = new IndexField(stringField, this);
                indexFields.put(stringField, indexField);
            } catch (IOException e) {
                throw new ElasticsearchException(e);
            }
        }
        return indexField;
    }

    /*
     * Get the lucene term vectors. See
     * https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html
     * *
     */
    public Fields termVectors() throws IOException {
        logDeprecation();
        assert reader != null;
        return reader.getTermVectors(docId);
    }

    LeafReader getReader() {
        logDeprecation();
        return reader;
    }

    public int getDocId() {
        logDeprecation();
        return docId;
    }

    public IndexReader getParentReader() {
        logDeprecation();
        if (parentReader == null) {
            return reader;
        }
        return parentReader;
    }

    public IndexSearcher getIndexSearcher() {
        logDeprecation();
        return indexSearcher;
    }

    public IndexReaderContext getReaderContext() {
        logDeprecation();
        return getParentReader().getContext();
    }
}
@@ -35,24 +35,20 @@ public class LeafSearchLookup {
    final LeafDocLookup docMap;
    final SourceLookup sourceLookup;
    final LeafFieldsLookup fieldsLookup;
    final LeafIndexLookup indexLookup;
    final Map<String, Object> asMap;

    public LeafSearchLookup(LeafReaderContext ctx, LeafDocLookup docMap, SourceLookup sourceLookup,
            LeafFieldsLookup fieldsLookup, LeafIndexLookup indexLookup, Map<String, Object> topLevelMap) {
            LeafFieldsLookup fieldsLookup) {
        this.ctx = ctx;
        this.docMap = docMap;
        this.sourceLookup = sourceLookup;
        this.fieldsLookup = fieldsLookup;
        this.indexLookup = indexLookup;

        Map<String, Object> asMap = new HashMap<>(topLevelMap.size() + 5);
        asMap.putAll(topLevelMap);
        Map<String, Object> asMap = new HashMap<>(4);
        asMap.put("doc", docMap);
        asMap.put("_doc", docMap);
        asMap.put("_source", sourceLookup);
        asMap.put("_fields", fieldsLookup);
        asMap.put("_index", indexLookup);
        this.asMap = unmodifiableMap(asMap);
    }

@@ -64,10 +60,6 @@ public class LeafSearchLookup {
        return this.sourceLookup;
    }

    public LeafIndexLookup indexLookup() {
        return this.indexLookup;
    }

    public LeafFieldsLookup fields() {
        return this.fieldsLookup;
    }
@@ -80,6 +72,5 @@ public class LeafSearchLookup {
        docMap.setDocument(docId);
        sourceLookup.setSegmentAndDocument(ctx, docId);
        fieldsLookup.setDocument(docId);
        indexLookup.setDocument(docId);
    }
}
@@ -1,87 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.lookup;

import org.apache.lucene.index.PostingsEnum;
import org.elasticsearch.ElasticsearchException;

import java.io.IOException;
import java.util.Iterator;

public class PositionIterator implements Iterator<TermPosition> {

    private boolean resetted = false;

    protected IndexFieldTerm indexFieldTerm;

    protected int freq = -1;

    // current position of iterator
    private int currentPos;

    protected final TermPosition termPosition = new TermPosition();

    private PostingsEnum postings;

    public PositionIterator(IndexFieldTerm indexFieldTerm) {
        this.indexFieldTerm = indexFieldTerm;
    }

    @Override
    public void remove() {
        throw new UnsupportedOperationException("Cannot remove anything from TermPosition iterator.");
    }

    @Override
    public boolean hasNext() {
        return currentPos < freq;
    }


    @Override
    public TermPosition next() {
        try {
            termPosition.position = postings.nextPosition();
            termPosition.startOffset = postings.startOffset();
            termPosition.endOffset = postings.endOffset();
            termPosition.payload = postings.getPayload();
        } catch (IOException ex) {
            throw new ElasticsearchException("can not advance iterator", ex);
        }
        currentPos++;
        return termPosition;
    }

    public void nextDoc() throws IOException {
        resetted = false;
        currentPos = 0;
        freq = indexFieldTerm.tf();
        postings = indexFieldTerm.postings;
    }

    public Iterator<TermPosition> reset() {
        if (resetted) {
            throw new ElasticsearchException(
                    "Cannot iterate twice! If you want to iterate more than once, add _CACHE explicitly.");
        }
        resetted = true;
        return this;
    }
}
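
reset() deliberately enforces single-pass iteration: without caching, positions come straight from a live PostingsEnum, so walking them consumes them. A caller-side sketch of the contract (field is assumed to be an IndexField positioned on the current document):

final class IterateTwiceSketch {
    static void walk(IndexField field) {
        // Request _CACHE up front: the first get() for a term fixes its flags.
        IndexFieldTerm cached = field.get("quick",
                IndexLookup.FLAG_POSITIONS | IndexLookup.FLAG_CACHE);
        for (TermPosition p : cached) { /* first pass */ }
        for (TermPosition p : cached) { /* second pass is fine: positions were recorded */ }

        // Without FLAG_CACHE the second loop above would throw:
        // "Cannot iterate twice! If you want to iterate more than once, add _CACHE explicitly."
    }
}
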
@@ -42,9 +42,7 @@ public class SearchLookup {
        return new LeafSearchLookup(context,
                docMap.getLeafDocLookup(context),
                sourceLookup,
                fieldsLookup.getLeafFieldsLookup(context),
                IndexLookup.getLeafIndexLookup(context),
                IndexLookup.NAMES);
                fieldsLookup.getLeafFieldsLookup(context));
    }

    public DocLookup doc() {
@@ -1,58 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.lookup;

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;

public class TermPosition {

    public int position = -1;
    public int startOffset = -1;
    public int endOffset = -1;
    public BytesRef payload;
    private CharsRefBuilder spare = new CharsRefBuilder();

    public String payloadAsString() {
        if (payload != null && payload.length != 0) {
            spare.copyUTF8Bytes(payload);
            return spare.toString();
        } else {
            return null;
        }
    }

    public float payloadAsFloat(float defaultMissing) {
        if (payload != null && payload.length != 0) {
            return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
        } else {
            return defaultMissing;
        }
    }

    public int payloadAsInt(int defaultMissing) {
        if (payload != null && payload.length != 0) {
            return PayloadHelper.decodeInt(payload.bytes, payload.offset);
        } else {
            return defaultMissing;
        }
    }
}
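
PayloadHelper's encoders are symmetric with the decoders used above, so a numeric payload attached at analysis time round-trips through payloadAsFloat()/payloadAsInt(). A small sketch using the TermPosition class above:

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.util.BytesRef;

final class PayloadRoundTripSketch {
    public static void main(String[] args) {
        TermPosition pos = new TermPosition();
        pos.payload = new BytesRef(PayloadHelper.encodeFloat(2.5f));
        System.out.println(pos.payloadAsFloat(0f)); // prints 2.5
    }
}
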
@@ -69,7 +69,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
    private static final String TOTAL_SHARDS = "total_shards";
    private static final String SUCCESSFUL_SHARDS = "successful_shards";

    private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0_UNRELEASED;
    private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0;
    public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0_UNRELEASED;

    private static final Comparator<SnapshotInfo> COMPARATOR =
@@ -0,0 +1,166 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.threadpool;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.SizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.node.Node;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.common.util.concurrent.QueueResizingEsThreadPoolExecutor;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadFactory;

/**
 * A builder for executors that automatically adjust the queue length as needed, depending on
 * Little's Law. See https://en.wikipedia.org/wiki/Little's_law for more information.
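 * <p>
 * Little's law states that L = lambda * W: with tasks arriving at lambda tasks per second and a
 * targeted response time of W seconds, the executor sizes its queue toward L = lambda * W pending
 * tasks (for example, 100 tasks/second against the default 1s target gives a queue of about 100),
 * clamped between the configured min_queue_size and max_queue_size.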
|
||||
*/
|
||||
public final class AutoQueueAdjustingExecutorBuilder extends ExecutorBuilder<AutoQueueAdjustingExecutorBuilder.AutoExecutorSettings> {

    private final Setting<Integer> sizeSetting;
    private final Setting<Integer> queueSizeSetting;
    private final Setting<Integer> minQueueSizeSetting;
    private final Setting<Integer> maxQueueSizeSetting;
    private final Setting<TimeValue> targetedResponseTimeSetting;
    private final Setting<Integer> frameSizeSetting;

    AutoQueueAdjustingExecutorBuilder(final Settings settings, final String name, final int size,
                                      final int initialQueueSize, final int minQueueSize,
                                      final int maxQueueSize, final int frameSize) {
        super(name);
        final String prefix = "thread_pool." + name;
        final String sizeKey = settingsKey(prefix, "size");
        this.sizeSetting =
            new Setting<>(
                sizeKey,
                s -> Integer.toString(size),
                s -> Setting.parseInt(s, 1, applyHardSizeLimit(settings, name), sizeKey),
                Setting.Property.NodeScope);
        final String queueSizeKey = settingsKey(prefix, "queue_size");
        final String minSizeKey = settingsKey(prefix, "min_queue_size");
        final String maxSizeKey = settingsKey(prefix, "max_queue_size");
        final String frameSizeKey = settingsKey(prefix, "auto_queue_frame_size");
        final String targetedResponseTimeKey = settingsKey(prefix, "target_response_time");
        this.targetedResponseTimeSetting = Setting.timeSetting(targetedResponseTimeKey, TimeValue.timeValueSeconds(1),
                TimeValue.timeValueMillis(10), Setting.Property.NodeScope);
        this.queueSizeSetting = Setting.intSetting(queueSizeKey, initialQueueSize, Setting.Property.NodeScope);
        // These temp settings are used only to validate the min and max settings below,
        // since each of those bounds depends on the other's configured value
        Setting<Integer> tempMaxQueueSizeSetting = Setting.intSetting(maxSizeKey, maxQueueSize, Setting.Property.NodeScope);
        Setting<Integer> tempMinQueueSizeSetting = Setting.intSetting(minSizeKey, minQueueSize, Setting.Property.NodeScope);

        this.minQueueSizeSetting = new Setting<>(
                minSizeKey,
                (s) -> Integer.toString(minQueueSize),
                (s) -> Setting.parseInt(s, 0, tempMaxQueueSizeSetting.get(settings), minSizeKey),
                Setting.Property.NodeScope);
        this.maxQueueSizeSetting = new Setting<>(
                maxSizeKey,
                (s) -> Integer.toString(maxQueueSize),
                (s) -> Setting.parseInt(s, tempMinQueueSizeSetting.get(settings), Integer.MAX_VALUE, maxSizeKey),
                Setting.Property.NodeScope);
        this.frameSizeSetting = Setting.intSetting(frameSizeKey, frameSize, 100, Setting.Property.NodeScope);
    }
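
    // For example, with name = "search" the constructor above defines the setting keys
    // thread_pool.search.size, thread_pool.search.queue_size, thread_pool.search.min_queue_size,
    // thread_pool.search.max_queue_size, thread_pool.search.auto_queue_frame_size,
    // and thread_pool.search.target_response_time.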

    @Override
    public List<Setting<?>> getRegisteredSettings() {
        return Arrays.asList(sizeSetting, queueSizeSetting, minQueueSizeSetting,
                maxQueueSizeSetting, frameSizeSetting, targetedResponseTimeSetting);
    }

    @Override
    AutoExecutorSettings getSettings(Settings settings) {
        final String nodeName = Node.NODE_NAME_SETTING.get(settings);
        final int size = sizeSetting.get(settings);
        final int initialQueueSize = queueSizeSetting.get(settings);
        final int minQueueSize = minQueueSizeSetting.get(settings);
        final int maxQueueSize = maxQueueSizeSetting.get(settings);
        final int frameSize = frameSizeSetting.get(settings);
        final TimeValue targetedResponseTime = targetedResponseTimeSetting.get(settings);
        return new AutoExecutorSettings(nodeName, size, initialQueueSize, minQueueSize, maxQueueSize, frameSize, targetedResponseTime);
    }

    @Override
    ThreadPool.ExecutorHolder build(final AutoExecutorSettings settings,
                                    final ThreadContext threadContext) {
        int size = settings.size;
        int initialQueueSize = settings.initialQueueSize;
        int minQueueSize = settings.minQueueSize;
        int maxQueueSize = settings.maxQueueSize;
        int frameSize = settings.frameSize;
        TimeValue targetedResponseTime = settings.targetedResponseTime;
        final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(EsExecutors.threadName(settings.nodeName, name()));
        final ExecutorService executor =
            EsExecutors.newAutoQueueFixed(name(), size, initialQueueSize, minQueueSize,
                    maxQueueSize, frameSize, targetedResponseTime, threadFactory, threadContext);
        // TODO: in a subsequent change we hope to extend ThreadPool.Info to be more specific for the thread pool type
        final ThreadPool.Info info =
            new ThreadPool.Info(name(), ThreadPool.ThreadPoolType.FIXED_AUTO_QUEUE_SIZE,
                    size, size, null, new SizeValue(initialQueueSize));
        return new ThreadPool.ExecutorHolder(executor, info);
    }

    @Override
    String formatInfo(ThreadPool.Info info) {
        return String.format(
                Locale.ROOT,
                "name [%s], size [%d], queue size [%s]",
                info.getName(),
                info.getMax(),
                info.getQueueSize() == null ? "unbounded" : info.getQueueSize());
    }

    static final class AutoExecutorSettings extends ExecutorBuilder.ExecutorSettings {

        private final int size;
        private final int initialQueueSize;
        private final int minQueueSize;
        private final int maxQueueSize;
        private final int frameSize;
        private final TimeValue targetedResponseTime;

        AutoExecutorSettings(final String nodeName, final int size, final int initialQueueSize,
                             final int minQueueSize, final int maxQueueSize, final int frameSize,
                             final TimeValue targetedResponseTime) {
            super(nodeName);
            this.size = size;
            this.initialQueueSize = initialQueueSize;
            this.minQueueSize = minQueueSize;
            this.maxQueueSize = maxQueueSize;
            this.frameSize = frameSize;
            this.targetedResponseTime = targetedResponseTime;
        }

    }

}
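
A minimal usage sketch (an illustration under assumptions, not code from this diff): from
within the org.elasticsearch.threadpool package, the builder for a hypothetical "search"
pool could be constructed and its executor built from node settings like so:

    Settings settings = Settings.builder()
            .put("node.name", "node-1")
            .put("thread_pool.search.target_response_time", "2s")
            .build();
    AutoQueueAdjustingExecutorBuilder builder =
            new AutoQueueAdjustingExecutorBuilder(settings, "search", 7, 1000, 100, 2000, 2000);
    List<Setting<?>> registered = builder.getRegisteredSettings(); // to be registered with the node
    ThreadPool.ExecutorHolder holder = builder.build(builder.getSettings(settings), new ThreadContext(settings));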

@@ -21,6 +21,7 @@ package org.elasticsearch.threadpool;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;

import java.util.List;

@@ -46,6 +47,14 @@ public abstract class ExecutorBuilder<U extends ExecutorBuilder.ExecutorSettings
        return String.join(".", prefix, key);
    }

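    // Cap the bulk and index pools at one thread more than the available processors,
    // e.g. at most 9 threads on an 8-processor node; other pools get no hard cap here.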
    protected int applyHardSizeLimit(final Settings settings, final String name) {
        if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
            return 1 + EsExecutors.numberOfProcessors(settings);
        } else {
            return Integer.MAX_VALUE;
        }
    }

    /**
     * The list of settings this builder will register.
     *

@@ -76,14 +76,6 @@ public final class FixedExecutorBuilder extends ExecutorBuilder<FixedExecutorBui
                Setting.intSetting(queueSizeKey, queueSize, Setting.Property.NodeScope);
    }

    private int applyHardSizeLimit(final Settings settings, final String name) {
        if (name.equals(ThreadPool.Names.BULK) || name.equals(ThreadPool.Names.INDEX)) {
            return 1 + EsExecutors.numberOfProcessors(settings);
        } else {
            return Integer.MAX_VALUE;
        }
    }

    @Override
    public List<Setting<?>> getRegisteredSettings() {
        return Arrays.asList(sizeSetting, queueSizeSetting);

Some files were not shown because too many files have changed in this diff.