Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-02-05 20:48:22 +00:00)

Commit d5d28420b6: Merge remote-tracking branch 'upstream/master' into index-lifecycle
@ -103,27 +103,6 @@
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngine.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]LiveVersionMap.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]get[/\\]ShardGetService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentFieldMappers.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentParser.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DynamicTemplate.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldNamesFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldTypeLookup.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]GeoShapeFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]IdFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]IndexFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MappedFieldType.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ObjectMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]RootObjectMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]RoutingFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]SourceFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]TypeFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]VersionFieldMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]merge[/\\]MergeStats.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryValidationException.java" checks="LineLength" />
@ -229,29 +208,6 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PreBuiltAnalyzerTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngineMergeIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngineTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldTypeTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CopyToMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DocumentParserTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]DynamicMappingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ExternalMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ExternalMetadataMapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]FieldNamesFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]GeoPointFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]GeoShapeFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]IdFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]IndexFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperServiceTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MultiFieldCopyToMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MultiFieldTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]NestedObjectMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]NullValueObjectMappingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ObjectMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]PathMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]RoutingFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]SourceFieldMapperTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]UpdateMappingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoolQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoostingQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoDistanceQueryBuilderTests.java" checks="LineLength" />
@ -44,7 +44,7 @@ public class GraphClient {
    public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest,
                                              RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, GraphRequestConverters::explore,
            options, GraphExploreResponse::fromXContext, emptySet());
            options, GraphExploreResponse::fromXContent, emptySet());
    }

    /**
@ -57,7 +57,7 @@ public class GraphClient {
                                     RequestOptions options,
                                     ActionListener<GraphExploreResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, GraphRequestConverters::explore,
            options, GraphExploreResponse::fromXContext, listener, emptySet());
            options, GraphExploreResponse::fromXContent, listener, emptySet());
    }

}
@ -20,6 +20,8 @@
package org.elasticsearch.client;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.security.AuthenticateRequest;
import org.elasticsearch.client.security.AuthenticateResponse;
import org.elasticsearch.client.security.ChangePasswordRequest;
import org.elasticsearch.client.security.ClearRolesCacheRequest;
import org.elasticsearch.client.security.ClearRolesCacheResponse;
@ -210,6 +212,32 @@ public final class SecurityClient {
            EmptyResponse::fromXContent, listener, emptySet());
    }

    /**
     * Authenticate the current user and return all the information about the authenticated user.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html">
     * the docs</a> for more.
     *
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return the response from the authenticate user call
     */
    public AuthenticateResponse authenticate(RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options,
            AuthenticateResponse::fromXContent, emptySet());
    }

    /**
     * Authenticate the current user asynchronously and return all the information about the authenticated user.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-authenticate.html">
     * the docs</a> for more.
     *
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener the listener to be notified upon request completion
     */
    public void authenticateAsync(RequestOptions options, ActionListener<AuthenticateResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options,
            AuthenticateResponse::fromXContent, listener, emptySet());
    }

    /**
     * Clears the native roles cache for a set of roles.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-clear-role-cache.html">
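For readers skimming this diff, the following is a minimal usage sketch (not part of the commit) for the two methods added above. It assumes an already-configured RestHighLevelClient named client; the call pattern mirrors the documentation test added later in this change (SecurityDocumentationIT#testAuthenticate).

import java.io.IOException;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.security.AuthenticateResponse;
import org.elasticsearch.client.security.user.User;

public class AuthenticateUsageSketch {

    // Blocking variant: returns the effective user and whether it is enabled.
    static void authenticateBlocking(RestHighLevelClient client) throws IOException {
        AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT);
        User user = response.getUser();
        System.out.println(user.username() + " enabled=" + response.enabled());
    }

    // Non-blocking variant: the listener is invoked with the response or the failure.
    static void authenticateNonBlocking(RestHighLevelClient client) {
        client.security().authenticateAsync(RequestOptions.DEFAULT, new ActionListener<AuthenticateResponse>() {
            @Override
            public void onResponse(AuthenticateResponse response) {
                System.out.println("authenticated as " + response.getUser().username());
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        });
    }
}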
@ -47,7 +47,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
 *
 * @see GraphExploreRequest
 */
public class GraphExploreResponse implements ToXContentObject {

    private long tookInMillis;
    private boolean timedOut = false;
@ -94,14 +94,30 @@ public class GraphExploreResponse implements ToXContentObject {
        return connections.values();
    }

    public Collection<ConnectionId> getConnectionIds() {
        return connections.keySet();
    }

    public Connection getConnection(ConnectionId connectionId) {
        return connections.get(connectionId);
    }

    public Collection<Vertex> getVertices() {
        return vertices.values();
    }

    public Collection<VertexId> getVertexIds() {
        return vertices.keySet();
    }

    public Vertex getVertex(VertexId id) {
        return vertices.get(id);
    }

    public boolean isReturnDetailedInfo() {
        return returnDetailedInfo;
    }

    private static final ParseField TOOK = new ParseField("took");
    private static final ParseField TIMED_OUT = new ParseField("timed_out");
    private static final ParseField VERTICES = new ParseField("vertices");
@ -190,7 +206,7 @@ public class GraphExploreResponse implements ToXContentObject {
        PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES);
    }

    public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException {
    public static GraphExploreResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }

@ -220,6 +220,14 @@ public class Vertex implements ToXContentFragment {
        this.term = term;
    }

    public String getField() {
        return field;
    }

    public String getTerm() {
        return term;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
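A short, illustrative sketch (not part of the commit) of how the accessors added to GraphExploreResponse and Vertex above can be used. It assumes it lives in the same package as those classes, since their package path is not visible in this diff.

public class GraphExploreResponseWalkSketch {

    // Walks a response obtained from GraphClient#explore and prints each vertex.
    static void printResults(GraphExploreResponse response) {
        for (Vertex vertex : response.getVertices()) {
            // getField()/getTerm() are the Vertex getters introduced in this commit.
            System.out.println(vertex.getField() + ":" + vertex.getTerm());
        }
        System.out.println("vertices: " + response.getVertexIds().size()
                + ", connections: " + response.getConnectionIds().size());
    }
}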
@ -84,14 +84,13 @@ public class StartBasicResponse {
                    }
                }
                return new Tuple<>(message, acknowledgeMessages);
            },
            new ParseField("acknowledge"));
        }, new ParseField("acknowledge"));
    }

    private Map<String, String[]> acknowledgeMessages;
    private String acknowledgeMessage;

    enum Status {
    public enum Status {
        GENERATED_BASIC(true, null, RestStatus.OK),
        ALREADY_USING_BASIC(false, "Operation failed: Current license is basic.", RestStatus.FORBIDDEN),
        NEED_ACKNOWLEDGEMENT(false, "Operation failed: Needs acknowledgement.", RestStatus.OK);
@ -141,6 +140,10 @@ public class StartBasicResponse {
        this.acknowledgeMessage = acknowledgeMessage;
    }

    public Status getStatus() {
        return status;
    }

    public boolean isAcknowledged() {
        return status != StartBasicResponse.Status.NEED_ACKNOWLEDGEMENT;
    }
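Since this change makes Status public and adds getStatus(), callers can now branch on the outcome of a start-basic request. The sketch below is illustrative only and assumes it sits in the same package as StartBasicResponse; how the response instance is obtained (e.g. from a license client call) is not shown in this diff.

public class StartBasicStatusSketch {

    static void report(StartBasicResponse response) {
        if (response.isAcknowledged()) {
            // GENERATED_BASIC or ALREADY_USING_BASIC
            System.out.println("start basic acknowledged, status=" + response.getStatus());
        } else {
            // NEED_ACKNOWLEDGEMENT: the response carries a message explaining what would change
            System.out.println("needs acknowledgement: " + response.getAcknowledgeMessage());
        }
    }
}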
@ -18,17 +18,12 @@
|
||||
*/
|
||||
package org.elasticsearch.client.migration;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* Indicates the type of the upgrade required for the index
|
||||
*/
|
||||
public enum UpgradeActionRequired implements Writeable {
|
||||
public enum UpgradeActionRequired {
|
||||
NOT_APPLICABLE, // Indicates that the check is not applicable to this index type, the next check will be performed
|
||||
UP_TO_DATE, // Indicates that the check finds this index to be up to date - no additional checks are required
|
||||
REINDEX, // The index should be reindexed
|
||||
@ -38,15 +33,6 @@ public enum UpgradeActionRequired implements Writeable {
|
||||
return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT));
|
||||
}
|
||||
|
||||
public static UpgradeActionRequired readFromStream(StreamInput in) throws IOException {
|
||||
return in.readEnum(UpgradeActionRequired.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeEnum(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return name().toLowerCase(Locale.ROOT);
|
||||
|
@ -17,10 +17,25 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.migration;
|
||||
package org.elasticsearch.client.security;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.Validatable;
|
||||
|
||||
/**
|
||||
* Empty request object required to make the authenticate call. The authenticate call
|
||||
* retrieves metadata about the authenticated user.
|
||||
*/
|
||||
public final class AuthenticateRequest implements Validatable {
|
||||
|
||||
public static final AuthenticateRequest INSTANCE = new AuthenticateRequest();
|
||||
|
||||
private AuthenticateRequest() {
|
||||
}
|
||||
|
||||
public Request getRequest() {
|
||||
return new Request(HttpGet.METHOD_NAME, "/_xpack/security/_authenticate");
|
||||
}
|
||||
|
||||
public class IndexUpgradeInfoResponseTests extends ESTestCase {
|
||||
// TODO: add to cross XPack-HLRC serialization test
|
||||
}
|
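A tiny sketch (not part of the commit) showing what the singleton AuthenticateRequest above translates to on the wire; the getMethod()/getEndpoint() accessors of the low-level Request class are assumed to be available as elsewhere in the client.

import org.elasticsearch.client.Request;
import org.elasticsearch.client.security.AuthenticateRequest;

public class AuthenticateRequestSketch {

    public static void main(String[] args) {
        Request lowLevelRequest = AuthenticateRequest.INSTANCE.getRequest();
        // Per getRequest() above this prints: GET /_xpack/security/_authenticate
        System.out.println(lowLevelRequest.getMethod() + " " + lowLevelRequest.getEndpoint());
    }
}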
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.security;
|
||||
|
||||
import org.elasticsearch.client.security.user.User;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
|
||||
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
|
||||
|
||||
/**
|
||||
* The response for the authenticate call. The response contains two fields: a
|
||||
* user field and a boolean flag signaling if the user is enabled or not. The
|
||||
* user object contains all user metadata which Elasticsearch uses to map roles,
|
||||
* etc.
|
||||
*/
|
||||
public final class AuthenticateResponse {
|
||||
|
||||
static final ParseField USERNAME = new ParseField("username");
|
||||
static final ParseField ROLES = new ParseField("roles");
|
||||
static final ParseField METADATA = new ParseField("metadata");
|
||||
static final ParseField FULL_NAME = new ParseField("full_name");
|
||||
static final ParseField EMAIL = new ParseField("email");
|
||||
static final ParseField ENABLED = new ParseField("enabled");
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static final ConstructingObjectParser<AuthenticateResponse, Void> PARSER = new ConstructingObjectParser<>(
|
||||
"client_security_authenticate_response",
|
||||
a -> new AuthenticateResponse(new User((String) a[0], ((List<String>) a[1]), (Map<String, Object>) a[2],
|
||||
(String) a[3], (String) a[4]), (Boolean) a[5]));
|
||||
static {
|
||||
PARSER.declareString(constructorArg(), USERNAME);
|
||||
PARSER.declareStringArray(constructorArg(), ROLES);
|
||||
PARSER.<Map<String, Object>>declareObject(constructorArg(), (parser, c) -> parser.map(), METADATA);
|
||||
PARSER.declareStringOrNull(optionalConstructorArg(), FULL_NAME);
|
||||
PARSER.declareStringOrNull(optionalConstructorArg(), EMAIL);
|
||||
PARSER.declareBoolean(constructorArg(), ENABLED);
|
||||
}
|
||||
|
||||
private final User user;
|
||||
private final boolean enabled;
|
||||
|
||||
public AuthenticateResponse(User user, boolean enabled) {
|
||||
this.user = user;
|
||||
this.enabled = enabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The effective user. This is the authenticated user, or, when
|
||||
* submitting requests on behalf of other users, it is the
|
||||
* impersonated user.
|
||||
*/
|
||||
public User getUser() {
|
||||
return user;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return whether the user is enabled or not
|
||||
*/
|
||||
public boolean enabled() {
|
||||
return enabled;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
final AuthenticateResponse that = (AuthenticateResponse) o;
|
||||
return user.equals(that.user) && enabled == that.enabled;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(user, enabled);
|
||||
}
|
||||
|
||||
public static AuthenticateResponse fromXContent(XContentParser parser) throws IOException {
|
||||
return PARSER.parse(parser, null);
|
||||
}
|
||||
|
||||
}
|
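The ConstructingObjectParser above consumes a body with the username, roles, metadata, full_name, email and enabled fields. The following sketch (not part of the commit) feeds a hand-written JSON document through fromXContent; the XContent plumbing (XContentType, NamedXContentRegistry, DeprecationHandler) is assumed to be available from the same codebase.

import java.io.IOException;

import org.elasticsearch.client.security.AuthenticateResponse;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class AuthenticateResponseParsingSketch {

    public static void main(String[] args) throws IOException {
        String json = "{\"username\":\"test_user\",\"roles\":[\"superuser\"],\"metadata\":{},"
                + "\"full_name\":null,\"email\":null,\"enabled\":true}";
        try (XContentParser parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            AuthenticateResponse response = AuthenticateResponse.fromXContent(parser);
            // Prints: test_user enabled=true
            System.out.println(response.getUser().username() + " enabled=" + response.enabled());
        }
    }
}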
@ -0,0 +1,135 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.security.user;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
|
||||
/**
|
||||
* An authenticated user
|
||||
*/
|
||||
public final class User {
|
||||
|
||||
private final String username;
|
||||
private final Collection<String> roles;
|
||||
private final Map<String, Object> metadata;
|
||||
@Nullable private final String fullName;
|
||||
@Nullable private final String email;
|
||||
|
||||
public User(String username, Collection<String> roles, Map<String, Object> metadata, @Nullable String fullName,
|
||||
@Nullable String email) {
|
||||
Objects.requireNonNull(username, "`username` cannot be null");
|
||||
Objects.requireNonNull(roles, "`roles` cannot be null. Pass an empty collection instead.");
|
||||
Objects.requireNonNull(metadata, "`metadata` cannot be null. Pass an empty map instead.");
|
||||
this.username = username;
|
||||
this.roles = roles;
|
||||
this.metadata = Collections.unmodifiableMap(metadata);
|
||||
this.fullName = fullName;
|
||||
this.email = email;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The principal of this user - effectively serving as the
|
||||
* unique identity of the user. Can never be {@code null}.
|
||||
*/
|
||||
public String username() {
|
||||
return this.username;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The roles this user is associated with. The roles are
|
||||
* identified by their unique names and each represents a
|
||||
* set of permissions. Can never be {@code null}.
|
||||
*/
|
||||
public Collection<String> roles() {
|
||||
return this.roles;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The metadata that is associated with this user. Can never be {@code null}.
|
||||
*/
|
||||
public Map<String, Object> metadata() {
|
||||
return metadata;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The full name of this user. May be {@code null}.
|
||||
*/
|
||||
public @Nullable String fullName() {
|
||||
return fullName;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The email of this user. May be {@code null}.
|
||||
*/
|
||||
public @Nullable String email() {
|
||||
return email;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
sb.append("User[username=").append(username);
|
||||
sb.append(",roles=[").append(Strings.collectionToCommaDelimitedString(roles)).append("]");
|
||||
sb.append(",metadata=").append(metadata);
|
||||
sb.append(",fullName=").append(fullName);
|
||||
sb.append(",email=").append(email);
|
||||
sb.append("]");
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o instanceof User == false) {
|
||||
return false;
|
||||
}
|
||||
|
||||
final User user = (User) o;
|
||||
|
||||
if (!username.equals(user.username)) {
|
||||
return false;
|
||||
}
|
||||
if (!roles.equals(user.roles)) {
|
||||
return false;
|
||||
}
|
||||
if (!metadata.equals(user.metadata)) {
|
||||
return false;
|
||||
}
|
||||
if (fullName != null ? !fullName.equals(user.fullName) : user.fullName != null) {
|
||||
return false;
|
||||
}
|
||||
return !(email != null ? !email.equals(user.email) : user.email != null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(username, roles, metadata, fullName, email);
|
||||
}
|
||||
|
||||
}
|
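A minimal construction sketch (not part of the commit) for the client-side User added above: roles and metadata must be non-null (pass empty collections), while fullName and email may be null.

import java.util.Collections;

import org.elasticsearch.client.security.user.User;

public class UserConstructionSketch {

    public static void main(String[] args) {
        User user = new User("test_user", Collections.singletonList("superuser"),
                Collections.emptyMap(), null, null);
        // toString() above yields: User[username=test_user,roles=[superuser],metadata={},fullName=null,email=null]
        System.out.println(user);
    }
}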
@ -21,9 +21,6 @@ package org.elasticsearch.client.xpack;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
@ -252,7 +249,7 @@ public class XPackInfoResponse implements ToXContentObject {
|
||||
}
|
||||
}
|
||||
|
||||
public static class BuildInfo implements ToXContentObject, Writeable {
|
||||
public static class BuildInfo implements ToXContentObject {
|
||||
private final String hash;
|
||||
private final String timestamp;
|
||||
|
||||
@ -261,16 +258,6 @@ public class XPackInfoResponse implements ToXContentObject {
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
public BuildInfo(StreamInput input) throws IOException {
|
||||
this(input.readString(), input.readString());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput output) throws IOException {
|
||||
output.writeString(hash);
|
||||
output.writeString(timestamp);
|
||||
}
|
||||
|
||||
public String getHash() {
|
||||
return hash;
|
||||
}
|
||||
@ -309,7 +296,7 @@ public class XPackInfoResponse implements ToXContentObject {
|
||||
}
|
||||
}
|
||||
|
||||
public static class FeatureSetsInfo implements ToXContentObject, Writeable {
|
||||
public static class FeatureSetsInfo implements ToXContentObject {
|
||||
private final Map<String, FeatureSet> featureSets;
|
||||
|
||||
public FeatureSetsInfo(Set<FeatureSet> featureSets) {
|
||||
@ -320,24 +307,6 @@ public class XPackInfoResponse implements ToXContentObject {
|
||||
this.featureSets = Collections.unmodifiableMap(map);
|
||||
}
|
||||
|
||||
public FeatureSetsInfo(StreamInput in) throws IOException {
|
||||
int size = in.readVInt();
|
||||
Map<String, FeatureSet> featureSets = new HashMap<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
FeatureSet featureSet = new FeatureSet(in);
|
||||
featureSets.put(featureSet.name, featureSet);
|
||||
}
|
||||
this.featureSets = Collections.unmodifiableMap(featureSets);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(featureSets.size());
|
||||
for (FeatureSet featureSet : featureSets.values()) {
|
||||
featureSet.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
public Map<String, FeatureSet> getFeatureSets() {
|
||||
return featureSets;
|
||||
}
|
||||
@ -365,7 +334,7 @@ public class XPackInfoResponse implements ToXContentObject {
|
||||
return builder.endObject();
|
||||
}
|
||||
|
||||
public static class FeatureSet implements ToXContentObject, Writeable {
|
||||
public static class FeatureSet implements ToXContentObject {
|
||||
private final String name;
|
||||
@Nullable private final String description;
|
||||
private final boolean available;
|
||||
@ -381,19 +350,6 @@ public class XPackInfoResponse implements ToXContentObject {
|
||||
this.nativeCodeInfo = nativeCodeInfo;
|
||||
}
|
||||
|
||||
public FeatureSet(StreamInput in) throws IOException {
|
||||
this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), in.readMap());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
out.writeOptionalString(description);
|
||||
out.writeBoolean(available);
|
||||
out.writeBoolean(enabled);
|
||||
out.writeMap(nativeCodeInfo);
|
||||
}
|
||||
|
||||
public String name() {
|
||||
return name;
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ public class XPackUsageResponse {
|
||||
|
||||
private final Map<String, Map<String, Object>> usages;
|
||||
|
||||
private XPackUsageResponse(Map<String, Map<String, Object>> usages) throws IOException {
|
||||
private XPackUsageResponse(Map<String, Map<String, Object>> usages) {
|
||||
this.usages = usages;
|
||||
}
|
||||
|
||||
|
@ -84,16 +84,42 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes the provided request using either the sync method or its async
|
||||
* variant, both provided as functions. This variant is used when the call does
|
||||
* not have a request object (only headers and the request path).
|
||||
*/
|
||||
protected static <Resp> Resp execute(SyncMethodNoRequest<Resp> syncMethodNoRequest, AsyncMethodNoRequest<Resp> asyncMethodNoRequest,
|
||||
RequestOptions requestOptions) throws IOException {
|
||||
if (randomBoolean()) {
|
||||
return syncMethodNoRequest.execute(requestOptions);
|
||||
} else {
|
||||
PlainActionFuture<Resp> future = PlainActionFuture.newFuture();
|
||||
asyncMethodNoRequest.execute(requestOptions, future);
|
||||
return future.actionGet();
|
||||
}
|
||||
}
|
||||
|
||||
@FunctionalInterface
|
||||
protected interface SyncMethod<Request, Response> {
|
||||
Response execute(Request request, RequestOptions options) throws IOException;
|
||||
}
|
||||
|
||||
@FunctionalInterface
|
||||
protected interface SyncMethodNoRequest<Response> {
|
||||
Response execute(RequestOptions options) throws IOException;
|
||||
}
|
||||
|
||||
@FunctionalInterface
|
||||
protected interface AsyncMethod<Request, Response> {
|
||||
void execute(Request request, RequestOptions options, ActionListener<Response> listener);
|
||||
}
|
||||
|
||||
@FunctionalInterface
|
||||
protected interface AsyncMethodNoRequest<Response> {
|
||||
void execute(RequestOptions options, ActionListener<Response> listener);
|
||||
}
|
||||
|
||||
private static class HighLevelClient extends RestHighLevelClient {
|
||||
private HighLevelClient(RestClient restClient) {
|
||||
super(restClient, (client) -> {}, Collections.emptyList());
|
||||
|
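As a concrete illustration of the new no-request execute() overload above, the sketch below (not part of the commit) shows the shape of a test that randomly exercises either the sync or the async variant; it mirrors what SecurityIT#testAuthenticate does further down in this change.

package org.elasticsearch.client;

import java.io.IOException;

import org.elasticsearch.client.security.AuthenticateResponse;

public class AuthenticateExecuteSketchIT extends ESRestHighLevelClientTestCase {

    public void testAuthenticateSyncOrAsync() throws IOException {
        SecurityClient securityClient = highLevelClient().security();
        // Randomly dispatches to authenticate(RequestOptions) or authenticateAsync(RequestOptions, ActionListener).
        AuthenticateResponse response =
                execute(securityClient::authenticate, securityClient::authenticateAsync, RequestOptions.DEFAULT);
        assertNotNull(response.getUser());
    }
}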
@ -733,7 +733,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
methods.containsKey(apiName.substring(0, apiName.length() - 6)));
|
||||
assertThat("async method [" + method + "] should return void", method.getReturnType(), equalTo(Void.TYPE));
|
||||
assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length);
|
||||
if (apiName.equals("security.get_ssl_certificates_async")) {
|
||||
if (apiName.equals("security.authenticate_async") || apiName.equals("security.get_ssl_certificates_async")) {
|
||||
assertEquals(2, method.getParameterTypes().length);
|
||||
assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class));
|
||||
assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class));
|
||||
@ -758,7 +758,8 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
|
||||
assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length);
|
||||
//a few methods don't accept a request object as argument
|
||||
if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates")) {
|
||||
if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates")
|
||||
|| apiName.equals("security.authenticate")) {
|
||||
assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length);
|
||||
assertThat("the parameter to method [" + method + "] is the wrong type",
|
||||
method.getParameterTypes()[0], equalTo(RequestOptions.class));
|
||||
|
@ -0,0 +1,108 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.client.methods.HttpDelete;
|
||||
import org.elasticsearch.ElasticsearchStatusException;
|
||||
import org.elasticsearch.client.security.AuthenticateResponse;
|
||||
import org.elasticsearch.client.security.PutUserRequest;
|
||||
import org.elasticsearch.client.security.PutUserResponse;
|
||||
import org.elasticsearch.client.security.RefreshPolicy;
|
||||
import org.elasticsearch.common.CharArrays;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Base64;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.contains;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
|
||||
public class SecurityIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
public void testAuthenticate() throws Exception {
|
||||
final SecurityClient securityClient = highLevelClient().security();
|
||||
// test fixture: put enabled user
|
||||
final PutUserRequest putUserRequest = randomPutUserRequest(true);
|
||||
final PutUserResponse putUserResponse = execute(putUserRequest, securityClient::putUser, securityClient::putUserAsync);
|
||||
assertThat(putUserResponse.isCreated(), is(true));
|
||||
|
||||
// authenticate correctly
|
||||
final String basicAuthHeader = basicAuthHeader(putUserRequest.getUsername(), putUserRequest.getPassword());
|
||||
final AuthenticateResponse authenticateResponse = execute(securityClient::authenticate, securityClient::authenticateAsync,
|
||||
authorizationRequestOptions(basicAuthHeader));
|
||||
|
||||
assertThat(authenticateResponse.getUser().username(), is(putUserRequest.getUsername()));
|
||||
if (putUserRequest.getRoles().isEmpty()) {
|
||||
assertThat(authenticateResponse.getUser().roles(), is(empty()));
|
||||
} else {
|
||||
assertThat(authenticateResponse.getUser().roles(), contains(putUserRequest.getRoles().toArray()));
|
||||
}
|
||||
assertThat(authenticateResponse.getUser().metadata(), is(putUserRequest.getMetadata()));
|
||||
assertThat(authenticateResponse.getUser().fullName(), is(putUserRequest.getFullName()));
|
||||
assertThat(authenticateResponse.getUser().email(), is(putUserRequest.getEmail()));
|
||||
assertThat(authenticateResponse.enabled(), is(true));
|
||||
|
||||
// delete user
|
||||
final Request deleteUserRequest = new Request(HttpDelete.METHOD_NAME, "/_xpack/security/user/" + putUserRequest.getUsername());
|
||||
highLevelClient().getLowLevelClient().performRequest(deleteUserRequest);
|
||||
|
||||
// authentication no longer works
|
||||
ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> execute(securityClient::authenticate,
|
||||
securityClient::authenticateAsync, authorizationRequestOptions(basicAuthHeader)));
|
||||
assertThat(e.getMessage(), containsString("unable to authenticate user [" + putUserRequest.getUsername() + "]"));
|
||||
}
|
||||
|
||||
private static PutUserRequest randomPutUserRequest(boolean enabled) {
|
||||
final String username = randomAlphaOfLengthBetween(1, 4);
|
||||
final char[] password = randomAlphaOfLengthBetween(6, 10).toCharArray();
|
||||
final List<String> roles = Arrays.asList(generateRandomStringArray(3, 3, false, true));
|
||||
final String fullName = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3));
|
||||
final String email = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3));
|
||||
final Map<String, Object> metadata;
|
||||
metadata = new HashMap<>();
|
||||
if (randomBoolean()) {
|
||||
metadata.put("string", null);
|
||||
} else {
|
||||
metadata.put("string", randomAlphaOfLengthBetween(0, 4));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
metadata.put("string_list", null);
|
||||
} else {
|
||||
metadata.put("string_list", Arrays.asList(generateRandomStringArray(4, 4, false, true)));
|
||||
}
|
||||
return new PutUserRequest(username, password, roles, fullName, email, enabled, metadata, RefreshPolicy.IMMEDIATE);
|
||||
}
|
||||
|
||||
private static String basicAuthHeader(String username, char[] password) {
|
||||
final String concat = new StringBuilder().append(username).append(':').append(password).toString();
|
||||
final byte[] concatBytes = CharArrays.toUtf8Bytes(concat.toCharArray());
|
||||
return "Basic " + Base64.getEncoder().encodeToString(concatBytes);
|
||||
}
|
||||
|
||||
private static RequestOptions authorizationRequestOptions(String authorizationHeader) {
|
||||
final RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
|
||||
builder.addHeader("Authorization", authorizationHeader);
|
||||
return builder.build();
|
||||
}
|
||||
}
|
@ -29,6 +29,7 @@ import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.security.AuthenticateResponse;
|
||||
import org.elasticsearch.client.security.ChangePasswordRequest;
|
||||
import org.elasticsearch.client.security.ClearRolesCacheRequest;
|
||||
import org.elasticsearch.client.security.ClearRolesCacheResponse;
|
||||
@ -50,10 +51,11 @@ import org.elasticsearch.client.security.PutRoleMappingResponse;
|
||||
import org.elasticsearch.client.security.PutUserRequest;
|
||||
import org.elasticsearch.client.security.PutUserResponse;
|
||||
import org.elasticsearch.client.security.RefreshPolicy;
|
||||
import org.elasticsearch.client.security.support.CertificateInfo;
|
||||
import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression;
|
||||
import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression;
|
||||
import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression;
|
||||
import org.elasticsearch.client.security.user.User;
|
||||
import org.elasticsearch.client.security.support.CertificateInfo;
|
||||
import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.hamcrest.Matchers;
|
||||
@ -67,13 +69,14 @@ import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.hamcrest.Matchers.contains;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.isIn;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@ -379,6 +382,51 @@ public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testAuthenticate() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::authenticate-execute
|
||||
AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT);
|
||||
//end::authenticate-execute
|
||||
|
||||
//tag::authenticate-response
|
||||
User user = response.getUser(); // <1>
|
||||
boolean enabled = response.enabled(); // <2>
|
||||
//end::authenticate-response
|
||||
|
||||
assertThat(user.username(), is("test_user"));
|
||||
assertThat(user.roles(), contains(new String[] {"superuser"}));
|
||||
assertThat(user.fullName(), nullValue());
|
||||
assertThat(user.email(), nullValue());
|
||||
assertThat(user.metadata().isEmpty(), is(true));
|
||||
assertThat(enabled, is(true));
|
||||
}
|
||||
|
||||
{
|
||||
// tag::authenticate-execute-listener
|
||||
ActionListener<AuthenticateResponse> listener = new ActionListener<AuthenticateResponse>() {
|
||||
@Override
|
||||
public void onResponse(AuthenticateResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::authenticate-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
// tag::authenticate-execute-async
|
||||
client.security().authenticateAsync(RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::authenticate-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testClearRolesCache() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
@ -81,7 +81,7 @@ public class GraphExploreResponseTests extends AbstractXContentTestCase<GraphExp
|
||||
|
||||
@Override
|
||||
protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException {
|
||||
return GraphExploreResponse.fromXContext(parser);
|
||||
return GraphExploreResponse.fromXContent(parser);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1,103 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.license;
|
||||
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.client.common.ProtocolUtils;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
|
||||
public class StartBasicResponseTests extends ESTestCase {
|
||||
|
||||
public void testFromXContent() throws Exception {
|
||||
StartBasicResponse.Status status = randomFrom(StartBasicResponse.Status.values());
|
||||
|
||||
boolean acknowledged = status != StartBasicResponse.Status.NEED_ACKNOWLEDGEMENT;
|
||||
String acknowledgeMessage = null;
|
||||
Map<String, String[]> ackMessages = Collections.emptyMap();
|
||||
if (status != StartBasicResponse.Status.GENERATED_BASIC) {
|
||||
acknowledgeMessage = randomAlphaOfLength(10);
|
||||
ackMessages = randomAckMessages();
|
||||
}
|
||||
|
||||
final StartBasicResponse startBasicResponse = new StartBasicResponse(status, ackMessages, acknowledgeMessage);
|
||||
|
||||
XContentType xContentType = randomFrom(XContentType.values());
|
||||
XContentBuilder builder = XContentFactory.contentBuilder(xContentType);
|
||||
|
||||
toXContent(startBasicResponse, builder);
|
||||
|
||||
final StartBasicResponse response = StartBasicResponse.fromXContent(createParser(builder));
|
||||
assertThat(response.isAcknowledged(), equalTo(acknowledged));
|
||||
assertThat(response.isBasicStarted(), equalTo(status.isBasicStarted()));
|
||||
assertThat(response.getAcknowledgeMessage(), equalTo(acknowledgeMessage));
|
||||
assertThat(ProtocolUtils.equals(response.getAcknowledgeMessages(), ackMessages), equalTo(true));
|
||||
}
|
||||
|
||||
private static void toXContent(StartBasicResponse response, XContentBuilder builder) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field("acknowledged", response.isAcknowledged());
|
||||
if (response.isBasicStarted()) {
|
||||
builder.field("basic_was_started", true);
|
||||
} else {
|
||||
builder.field("basic_was_started", false);
|
||||
builder.field("error_message", response.getErrorMessage());
|
||||
}
|
||||
if (response.getAcknowledgeMessages().isEmpty() == false) {
|
||||
builder.startObject("acknowledge");
|
||||
builder.field("message", response.getAcknowledgeMessage());
|
||||
for (Map.Entry<String, String[]> entry : response.getAcknowledgeMessages().entrySet()) {
|
||||
builder.startArray(entry.getKey());
|
||||
for (String message : entry.getValue()) {
|
||||
builder.value(message);
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
private static Map<String, String[]> randomAckMessages() {
|
||||
int nFeatures = randomIntBetween(1, 5);
|
||||
|
||||
Map<String, String[]> ackMessages = new HashMap<>();
|
||||
|
||||
for (int i = 0; i < nFeatures; i++) {
|
||||
String feature = randomAlphaOfLengthBetween(9, 15);
|
||||
int nMessages = randomIntBetween(1, 5);
|
||||
String[] messages = new String[nMessages];
|
||||
for (int j = 0; j < nMessages; j++) {
|
||||
messages[j] = randomAlphaOfLengthBetween(10, 30);
|
||||
}
|
||||
ackMessages.put(feature, messages);
|
||||
}
|
||||
|
||||
return ackMessages;
|
||||
}
|
||||
|
||||
}
|
@ -23,8 +23,6 @@ import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
public class IndexUpgradeInfoRequestTests extends ESTestCase {
|
||||
|
||||
// TODO: add to cross XPack-HLRC serialization test
|
||||
|
||||
public void testNullIndices() {
|
||||
expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null));
|
||||
expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null));
|
||||
|
@ -0,0 +1,128 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.security;
|
||||
|
||||
import org.elasticsearch.client.security.user.User;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.EqualsHashCodeTestUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
|
||||
|
||||
public class AuthenticateResponseTests extends ESTestCase {
|
||||
|
||||
public void testFromXContent() throws IOException {
|
||||
xContentTester(
|
||||
this::createParser,
|
||||
this::createTestInstance,
|
||||
this::toXContent,
|
||||
AuthenticateResponse::fromXContent)
|
||||
.supportsUnknownFields(false)
|
||||
.test();
|
||||
}
|
||||
|
||||
public void testEqualsAndHashCode() {
|
||||
final AuthenticateResponse response = createTestInstance();
EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, this::copy,
|
||||
this::mutate);
|
||||
}
|
||||
|
||||
protected AuthenticateResponse createTestInstance() {
|
||||
final String username = randomAlphaOfLengthBetween(1, 4);
|
||||
final List<String> roles = Arrays.asList(generateRandomStringArray(4, 4, false, true));
|
||||
final Map<String, Object> metadata;
|
||||
metadata = new HashMap<>();
|
||||
if (randomBoolean()) {
|
||||
metadata.put("string", null);
|
||||
} else {
|
||||
metadata.put("string", randomAlphaOfLengthBetween(0, 4));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
metadata.put("string_list", null);
|
||||
} else {
|
||||
metadata.put("string_list", Arrays.asList(generateRandomStringArray(4, 4, false, true)));
|
||||
}
|
||||
final String fullName = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 4));
|
||||
final String email = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 4));
|
||||
final boolean enabled = randomBoolean();
|
||||
return new AuthenticateResponse(new User(username, roles, metadata, fullName, email), enabled);
|
||||
}
|
||||
|
||||
private void toXContent(AuthenticateResponse response, XContentBuilder builder) throws IOException {
|
||||
final User user = response.getUser();
|
||||
final boolean enabled = response.enabled();
|
||||
builder.startObject();
|
||||
builder.field(AuthenticateResponse.USERNAME.getPreferredName(), user.username());
|
||||
builder.field(AuthenticateResponse.ROLES.getPreferredName(), user.roles());
|
||||
builder.field(AuthenticateResponse.METADATA.getPreferredName(), user.metadata());
|
||||
if (user.fullName() != null) {
|
||||
builder.field(AuthenticateResponse.FULL_NAME.getPreferredName(), user.fullName());
|
||||
}
|
||||
if (user.email() != null) {
|
||||
builder.field(AuthenticateResponse.EMAIL.getPreferredName(), user.email());
|
||||
}
|
||||
builder.field(AuthenticateResponse.ENABLED.getPreferredName(), enabled);
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
private AuthenticateResponse copy(AuthenticateResponse response) {
|
||||
final User originalUser = response.getUser();
|
||||
final User copyUser = new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), originalUser.fullName(),
|
||||
originalUser.email());
|
||||
return new AuthenticateResponse(copyUser, response.enabled());
|
||||
}
|
||||
|
||||
private AuthenticateResponse mutate(AuthenticateResponse response) {
|
||||
final User originalUser = response.getUser();
|
||||
switch (randomIntBetween(1, 6)) {
|
||||
case 1:
|
||||
return new AuthenticateResponse(new User(originalUser.username() + "wrong", originalUser.roles(), originalUser.metadata(),
|
||||
originalUser.fullName(), originalUser.email()), response.enabled());
|
||||
case 2:
|
||||
final Collection<String> wrongRoles = new ArrayList<>(originalUser.roles());
|
||||
wrongRoles.add(randomAlphaOfLengthBetween(1, 4));
|
||||
return new AuthenticateResponse(new User(originalUser.username(), wrongRoles, originalUser.metadata(),
|
||||
originalUser.fullName(), originalUser.email()), response.enabled());
|
||||
case 3:
|
||||
final Map<String, Object> wrongMetadata = new HashMap<>(originalUser.metadata());
|
||||
wrongMetadata.put("wrong_string", randomAlphaOfLengthBetween(0, 4));
|
||||
return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), wrongMetadata,
|
||||
originalUser.fullName(), originalUser.email()), response.enabled());
|
||||
case 4:
|
||||
return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(),
|
||||
originalUser.fullName() + "wrong", originalUser.email()), response.enabled());
|
||||
case 5:
|
||||
return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(),
|
||||
originalUser.fullName(), originalUser.email() + "wrong"), response.enabled());
|
||||
case 6:
|
||||
return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(),
|
||||
originalUser.fullName(), originalUser.email()), !response.enabled());
|
||||
}
|
||||
throw new IllegalStateException("Bad random number");
|
||||
}
|
||||
}
|
@ -0,0 +1,116 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.xpack;
|
||||
|
||||
import org.elasticsearch.client.license.LicenseStatus;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse.BuildInfo;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse.LicenseInfo;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
public class XPackInfoResponseTests extends AbstractXContentTestCase<XPackInfoResponse> {
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return true;
|
||||
}
|
||||
|
||||
protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException {
|
||||
return XPackInfoResponse.fromXContent(parser);
|
||||
}
|
||||
|
||||
protected Predicate<String> getRandomFieldsExcludeFilter() {
|
||||
return path -> path.equals("features")
|
||||
|| (path.startsWith("features") && path.endsWith("native_code_info"));
|
||||
}
|
||||
|
||||
protected ToXContent.Params getToXContentParams() {
|
||||
Map<String, String> params = new HashMap<>();
|
||||
if (randomBoolean()) {
|
||||
params.put("human", randomBoolean() ? "true" : "false");
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
params.put("categories", "_none");
|
||||
}
|
||||
return new ToXContent.MapParams(params);
|
||||
}
|
||||
|
||||
protected XPackInfoResponse createTestInstance() {
|
||||
return new XPackInfoResponse(
|
||||
randomBoolean() ? null : randomBuildInfo(),
|
||||
randomBoolean() ? null : randomLicenseInfo(),
|
||||
randomBoolean() ? null : randomFeatureSetsInfo());
|
||||
}
|
||||
|
||||
private BuildInfo randomBuildInfo() {
|
||||
return new BuildInfo(
|
||||
randomAlphaOfLength(10),
|
||||
randomAlphaOfLength(15));
|
||||
}
|
||||
|
||||
private LicenseInfo randomLicenseInfo() {
|
||||
return new LicenseInfo(
|
||||
randomAlphaOfLength(10),
|
||||
randomAlphaOfLength(4),
|
||||
randomAlphaOfLength(5),
|
||||
randomFrom(LicenseStatus.values()),
|
||||
randomLong());
|
||||
}
|
||||
|
||||
private FeatureSetsInfo randomFeatureSetsInfo() {
|
||||
int size = between(0, 10);
|
||||
Set<FeatureSet> featureSets = new HashSet<>(size);
|
||||
while (featureSets.size() < size) {
|
||||
featureSets.add(randomFeatureSet());
|
||||
}
|
||||
return new FeatureSetsInfo(featureSets);
|
||||
}
|
||||
|
||||
private FeatureSet randomFeatureSet() {
|
||||
return new FeatureSet(
|
||||
randomAlphaOfLength(5),
|
||||
randomBoolean() ? null : randomAlphaOfLength(20),
|
||||
randomBoolean(),
|
||||
randomBoolean(),
|
||||
randomNativeCodeInfo());
|
||||
}
|
||||
|
||||
private Map<String, Object> randomNativeCodeInfo() {
|
||||
if (randomBoolean()) {
|
||||
return null;
|
||||
}
|
||||
int size = between(0, 10);
|
||||
Map<String, Object> nativeCodeInfo = new HashMap<>(size);
|
||||
while (nativeCodeInfo.size() < size) {
|
||||
nativeCodeInfo.put(randomAlphaOfLength(5), randomAlphaOfLength(5));
|
||||
}
|
||||
return nativeCodeInfo;
|
||||
}
|
||||
}
|
docs/java-rest/high-level/security/authenticate.asciidoc (new file, 66 lines added)
@ -0,0 +1,66 @@
|
||||
|
||||
--
|
||||
:api: authenticate
|
||||
:response: AuthenticateResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Authenticate API
|
||||
|
||||
[id="{upid}-{api}-sync"]
|
||||
==== Execution
|
||||
|
||||
Authenticating and retrieving information about a user can be performed
|
||||
using the `security().authenticate()` method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
This method does not require a request object. The client waits for the
|
||||
+{response}+ to be returned before continuing with code execution.
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ contains two fields. The `user` field, accessed with
`getUser`, contains all the information about the authenticated user. The other
field, `enabled`, indicates whether this user is usable or has been temporarily
deactivated.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> `getUser` retrieves the `User` instance containing the information,
|
||||
see {javadoc-client}/security/user/User.html.
|
||||
<2> `enabled` tells if this user is usable or is deactivated.
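As a rough illustration of the same flow (not the tested snippet above; it assumes
an already configured `RestHighLevelClient` named `client`):

[source,java]
--------------------------------------------------
AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT);
User user = response.getUser();            // information about the authenticated user
String username = user.username();
Collection<String> roles = user.roles();
boolean enabled = response.enabled();      // false if the user has been deactivated
--------------------------------------------------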
|
||||
|
||||
[id="{upid}-{api}-async"]
|
||||
==== Asynchronous Execution
|
||||
|
||||
This request can also be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `ActionListener` to use when the execution completes. This method does
|
||||
not require a request object.
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once the request
|
||||
has completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution completed successfully or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for a +{response}+ looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution completed successfully. The response is
|
||||
provided as an argument.
|
||||
<2> Called in case of a failure. The exception is provided as an argument.
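Putting the pieces together, a listener along these lines could be passed to the
asynchronous variant (a sketch; `client` is assumed to be an already configured
`RestHighLevelClient`):

[source,java]
--------------------------------------------------
ActionListener<AuthenticateResponse> listener = new ActionListener<AuthenticateResponse>() {
    @Override
    public void onResponse(AuthenticateResponse response) {
        // inspect response.getUser() and response.enabled() here
    }

    @Override
    public void onFailure(Exception e) {
        // handle the failure
    }
};
client.security().authenticateAsync(RequestOptions.DEFAULT, listener);
--------------------------------------------------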
|
||||
|
@ -327,6 +327,7 @@ The Java High Level REST Client supports the following Security APIs:
|
||||
* <<java-rest-high-security-change-password>>
|
||||
* <<java-rest-high-security-delete-role>>
|
||||
* <<{upid}-clear-roles-cache>>
|
||||
* <<{upid}-authenticate>>
|
||||
* <<java-rest-high-security-get-certificates>>
|
||||
* <<java-rest-high-security-put-role-mapping>>
|
||||
* <<java-rest-high-security-get-role-mappings>>
|
||||
@ -339,6 +340,7 @@ include::security/disable-user.asciidoc[]
|
||||
include::security/change-password.asciidoc[]
|
||||
include::security/delete-role.asciidoc[]
|
||||
include::security/clear-roles-cache.asciidoc[]
|
||||
include::security/authenticate.asciidoc[]
|
||||
include::security/get-certificates.asciidoc[]
|
||||
include::security/put-role-mapping.asciidoc[]
|
||||
include::security/get-role-mappings.asciidoc[]
|
||||
@ -386,4 +388,4 @@ don't leak into the rest of the documentation.
|
||||
:response!:
|
||||
:doc-tests-file!:
|
||||
:upid!:
|
||||
--
|
||||
--
|
||||
|
@ -46,7 +46,7 @@ the request URL.
|
||||
PUT /seats
|
||||
{
|
||||
"mappings": {
|
||||
"_doc": {
|
||||
"seat": {
|
||||
"properties": {
|
||||
"theatre": { "type": "keyword" },
|
||||
"play": { "type": "text" },
|
||||
|
@ -1,12 +1,129 @@
|
||||
[[search-aggregations-bucket-datehistogram-aggregation]]
|
||||
=== Date Histogram Aggregation
|
||||
|
||||
A multi-bucket aggregation similar to the <<search-aggregations-bucket-histogram-aggregation,histogram>> except it can
|
||||
only be applied on date values. Since dates are represented in Elasticsearch internally as long values, it is possible
|
||||
to use the normal `histogram` on dates as well, though accuracy will be compromised. The reason for this is in the fact
|
||||
that time based intervals are not fixed (think of leap years and on the number of days in a month). For this reason,
|
||||
we need special support for time based data. From a functionality perspective, this histogram supports the same features
|
||||
as the normal <<search-aggregations-bucket-histogram-aggregation,histogram>>. The main difference is that the interval can be specified by date/time expressions.
|
||||
This multi-bucket aggregation is similar to the normal
|
||||
<<search-aggregations-bucket-histogram-aggregation,histogram>>, but it can
|
||||
only be used with date values. Because dates are represented internally in
|
||||
Elasticsearch as long values, it is possible, but not as accurate, to use the
|
||||
normal `histogram` on dates as well. The main difference between the two APIs is
|
||||
that here the interval can be specified using date/time expressions. Time-based
|
||||
data requires special support because time-based intervals are not always a
|
||||
fixed length.
|
||||
|
||||
==== Setting intervals
|
||||
|
||||
There seems to be no limit to the creativity we humans apply to setting our
|
||||
clocks and calendars. We've invented leap years and leap seconds, standard and
|
||||
daylight savings times, and timezone offsets of 30 or 45 minutes rather than a
|
||||
full hour. While these creations help keep us in sync with the cosmos and our
|
||||
environment, they can make specifying time intervals accurately a real challenge.
|
||||
The only universal truth our researchers have yet to disprove is that a
|
||||
millisecond is always the same duration, and a second is always 1000 milliseconds.
|
||||
Beyond that, things get complicated.
|
||||
|
||||
Generally speaking, when you specify a single time unit, such as 1 hour or 1 day, you
|
||||
are working with a _calendar interval_, but multiples, such as 6 hours or 3 days, are
|
||||
_fixed-length intervals_.
|
||||
|
||||
For example, a specification of 1 day (1d) from now is a calendar interval that
|
||||
means "at
|
||||
this exact time tomorrow" no matter the length of the day. A change to or from
|
||||
daylight savings time that results in a 23 or 25 hour day is compensated for and the
|
||||
specification of "this exact time tomorrow" is maintained. But if you specify 2 or
|
||||
more days, each day must be of the same fixed duration (24 hours). In this case, if
|
||||
the specified interval includes the change to or from daylight savings time, the
|
||||
interval will end an hour sooner or later than you expect.
|
||||
|
||||
There are similar differences to consider when you specify single versus multiple
|
||||
minutes or hours. Multiple time periods longer than a day are not supported.
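To make the distinction concrete, here is a rough sketch using the Java
aggregation builders (the aggregation names and the `timestamp` field are
placeholders):

[source,java]
--------------------------------------------------
// Calendar interval: one bucket per calendar day, compensating for DST changes
AggregationBuilders.dateHistogram("by_day")
    .field("timestamp")
    .dateHistogramInterval(DateHistogramInterval.DAY);      // "1d"

// Fixed-length interval: each bucket is exactly 2 x 86,400,000 milliseconds
AggregationBuilders.dateHistogram("by_two_days")
    .field("timestamp")
    .dateHistogramInterval(DateHistogramInterval.days(2));  // "2d"
--------------------------------------------------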
|
||||
|
||||
Here are the valid time specifications and their meanings:
|
||||
|
||||
milliseconds (ms) ::
|
||||
Fixed length interval; supports multiples.
|
||||
|
||||
seconds (s) ::
|
||||
1000 milliseconds; fixed length interval (except for the last second of a
|
||||
minute that contains a leap-second, which is 2000ms long); supports multiples.
|
||||
|
||||
minutes (m) ::
|
||||
All minutes begin at 00 seconds.
|
||||
|
||||
* One minute (1m) is the interval between 00 seconds of the first minute and 00
|
||||
seconds of the following minute in the specified timezone, compensating for any
|
||||
intervening leap seconds, so that the number of minutes and seconds past the
|
||||
hour is the same at the start and end.
|
||||
* Multiple minutes (__n__m) are intervals of exactly 60x1000=60,000 milliseconds
|
||||
each.
|
||||
|
||||
hours (h) ::
|
||||
All hours begin at 00 minutes and 00 seconds.
|
||||
|
||||
* One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00
|
||||
minutes of the following hour in the specified timezone, compensating for any
|
||||
intervening leap seconds, so that the number of minutes and seconds past the hour
|
||||
is the same at the start and end.
|
||||
* Multiple hours (__n__h) are intervals of exactly 60x60x1000=3,600,000 milliseconds
|
||||
each.
|
||||
|
||||
days (d) ::
|
||||
All days begin at the earliest possible time, which is usually 00:00:00
|
||||
(midnight).
|
||||
|
||||
* One day (1d) is the interval between the start of the day and the start of
|
||||
the following day in the specified timezone, compensating for any intervening
|
||||
time changes.
|
||||
* Multiple days (__n__d) are intervals of exactly 24x60x60x1000=86,400,000
|
||||
milliseconds each.
|
||||
|
||||
weeks (w) ::
|
||||
|
||||
* One week (1w) is the interval between the start `day_of_week:hour:minute:second` of one week
|
||||
and the same day of the week and time of the following week in the specified
|
||||
timezone.
|
||||
* Multiple weeks (__n__w) are not supported.
|
||||
|
||||
months (M) ::
|
||||
|
||||
* One month (1M) is the interval between the start day of the month and time of
|
||||
day and the same day of the month and time of the following month in the specified
|
||||
timezone, so that the day of the month and time of day are the same at the start
|
||||
and end.
|
||||
* Multiple months (__n__M) are not supported.
|
||||
|
||||
quarters (q) ::
|
||||
|
||||
* One quarter (1q) is the interval between the start day of the month and
|
||||
time of day and the same day of the month and time of day three months later,
|
||||
so that the day of the month and time of day are the same at the start and end. +
|
||||
* Multiple quarters (__n__q) are not supported.
|
||||
|
||||
years (y) ::
|
||||
|
||||
* One year (1y) is the interval between the start day of the month and time of
|
||||
day and the same day of the month and time of day the following year in the
|
||||
specified timezone, so that the date and time are the same at the start and end. +
|
||||
* Multiple years (__n__y) are not supported.
|
||||
|
||||
NOTE: In all cases, when the specified end time does not exist, the actual end
time is the closest available time after the specified end.
|
||||
|
||||
Widely distributed applications must also consider vagaries such as countries that
|
||||
start and stop daylight savings time at 12:01 A.M., so end up with one minute of
|
||||
Sunday followed by an additional 59 minutes of Saturday once a year, and countries
|
||||
that decide to move across the international date line. Situations like
|
||||
that can make irregular timezone offsets seem easy.
|
||||
|
||||
As always, rigorous testing, especially around time-change events, will ensure
|
||||
that your time interval specification is
|
||||
what you intend it to be.
|
||||
|
||||
WARNING: To avoid unexpected results, all connected servers and clients must
sync to a reliable network time service.
|
||||
|
||||
==== Examples
|
||||
|
||||
Requesting bucket intervals of a month.
|
||||
|
||||
@ -27,13 +144,11 @@ POST /sales/_search?size=0
|
||||
// CONSOLE
|
||||
// TEST[setup:sales]
|
||||
|
||||
Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`),
|
||||
`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`)
|
||||
|
||||
Time values can also be specified via abbreviations supported by <<time-units,time units>> parsing.
|
||||
Note that fractional time values are not supported, but you can address this by shifting to another
|
||||
time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger than
|
||||
days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not).
|
||||
You can also specify time values using abbreviations supported by
|
||||
<<time-units,time units>> parsing.
|
||||
Note that fractional time values are not supported, but you can address this by
|
||||
shifting to another
|
||||
time unit (e.g., `1.5h` could instead be specified as `90m`).
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
@ -52,15 +167,16 @@ POST /sales/_search?size=0
|
||||
// CONSOLE
|
||||
// TEST[setup:sales]
|
||||
|
||||
==== Keys
|
||||
===== Keys
|
||||
|
||||
Internally, a date is represented as a 64 bit number representing a timestamp
|
||||
in milliseconds-since-the-epoch. These timestamps are returned as the bucket
|
||||
++key++s. The `key_as_string` is the same timestamp converted to a formatted
|
||||
date string using the format specified with the `format` parameter:
|
||||
in milliseconds-since-the-epoch (01/01/1970 midnight UTC). These timestamps are
returned as the ++key++ name of the bucket. The `key_as_string` is the same
timestamp converted to a formatted date string using the `format` parameter
specification:
|
||||
|
||||
TIP: If no `format` is specified, then it will use the first date
|
||||
<<mapping-date-format,format>> specified in the field mapping.
|
||||
TIP: If you don't specify `format`, the first date
|
||||
<<mapping-date-format,format>> specified in the field mapping is used.
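For reference, the equivalent Java builder call looks roughly like this (a
sketch; the aggregation name and `date` field are placeholders):

[source,java]
--------------------------------------------------
AggregationBuilders.dateHistogram("sales_over_time")
    .field("date")
    .dateHistogramInterval(DateHistogramInterval.MONTH)
    .format("yyyy-MM-dd");   // controls how key_as_string is rendered
--------------------------------------------------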
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
@ -113,15 +229,15 @@ Response:
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
|
||||
|
||||
==== Time Zone
|
||||
===== Timezone
|
||||
|
||||
Date-times are stored in Elasticsearch in UTC. By default, all bucketing and
|
||||
rounding is also done in UTC. The `time_zone` parameter can be used to indicate
|
||||
that bucketing should use a different time zone.
|
||||
rounding is also done in UTC. Use the `time_zone` parameter to indicate
|
||||
that bucketing should use a different timezone.
|
||||
|
||||
Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or
|
||||
`-08:00`) or as a timezone id, an identifier used in the TZ database like
|
||||
`America/Los_Angeles`.
|
||||
You can specify timezones as either an ISO 8601 UTC offset (e.g. `+01:00` or
|
||||
`-08:00`) or as a timezone ID as specified in the IANA timezone database,
|
||||
such as `America/Los_Angeles`.
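In the Java API this corresponds roughly to the following sketch (assuming the
6.x builder, where `timeZone` accepts a joda-time `DateTimeZone`):

[source,java]
--------------------------------------------------
AggregationBuilders.dateHistogram("by_day")
    .field("date")
    .dateHistogramInterval(DateHistogramInterval.DAY)
    .timeZone(DateTimeZone.forID("America/Los_Angeles"));
--------------------------------------------------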
|
||||
|
||||
Consider the following example:
|
||||
|
||||
@ -151,7 +267,7 @@ GET my_index/_search?size=0
|
||||
---------------------------------
|
||||
// CONSOLE
|
||||
|
||||
UTC is used if no time zone is specified, which would result in both of these
|
||||
If you don't specify a timezone, UTC is used. This would result in both of these
|
||||
documents being placed into the same day bucket, which starts at midnight UTC
|
||||
on 1 October 2015:
|
||||
|
||||
@ -174,8 +290,8 @@ on 1 October 2015:
|
||||
---------------------------------
|
||||
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
|
||||
|
||||
If a `time_zone` of `-01:00` is specified, then midnight starts at one hour before
|
||||
midnight UTC:
|
||||
If you specify a `time_zone` of `-01:00`, midnight in that timezone is one hour
|
||||
before midnight UTC:
|
||||
|
||||
[source,js]
|
||||
---------------------------------
|
||||
@ -223,28 +339,27 @@ second document falls into the bucket for 1 October 2015:
|
||||
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
|
||||
|
||||
<1> The `key_as_string` value represents midnight on each day
|
||||
in the specified time zone.
|
||||
in the specified timezone.
|
||||
|
||||
WARNING: When using time zones that follow DST (daylight savings time) changes,
|
||||
buckets close to the moment when those changes happen can have slightly different
|
||||
sizes than would be expected from the used `interval`.
|
||||
sizes than you would expect from the used `interval`.
|
||||
For example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am,
|
||||
clocks were turned forward 1 hour to 3am local time. When using `day` as `interval`,
|
||||
clocks were turned forward 1 hour to 3am local time. If you use `day` as `interval`,
|
||||
the bucket covering that day will only hold data for 23 hours instead of the usual
|
||||
24 hours for other buckets. The same is true for shorter intervals like e.g. 12h.
|
||||
Here, we will have only a 11h bucket on the morning of 27 March when the DST shift
|
||||
24 hours for other buckets. The same is true for shorter intervals, like 12h,
|
||||
where you'll have only an 11h bucket on the morning of 27 March when the DST shift
|
||||
happens.
|
||||
|
||||
===== Offset
|
||||
|
||||
==== Offset
|
||||
|
||||
The `offset` parameter is used to change the start value of each bucket by the
|
||||
Use the `offset` parameter to change the start value of each bucket by the
|
||||
specified positive (`+`) or negative offset (`-`) duration, such as `1h` for
|
||||
an hour, or `1d` for a day. See <<time-units>> for more possible time
|
||||
duration options.
|
||||
|
||||
For instance, when using an interval of `day`, each bucket runs from midnight
|
||||
to midnight. Setting the `offset` parameter to `+6h` would change each bucket
|
||||
For example, when using an interval of `day`, each bucket runs from midnight
|
||||
to midnight. Setting the `offset` parameter to `+6h` changes each bucket
|
||||
to run from 6am to 6am:
|
||||
|
||||
[source,js]
|
||||
@ -301,12 +416,13 @@ documents into buckets starting at 6am:
|
||||
-----------------------------
|
||||
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
|
||||
|
||||
NOTE: The start `offset` of each bucket is calculated after the `time_zone`
|
||||
NOTE: The start `offset` of each bucket is calculated after `time_zone`
|
||||
adjustments have been made.
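In Java builder terms, the offset is applied roughly like this (a sketch; names
are placeholders):

[source,java]
--------------------------------------------------
AggregationBuilders.dateHistogram("by_day_from_6am")
    .field("date")
    .dateHistogramInterval(DateHistogramInterval.DAY)
    .offset("+6h");   // each daily bucket runs from 06:00 to 06:00
--------------------------------------------------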
|
||||
|
||||
==== Keyed Response
|
||||
===== Keyed Response
|
||||
|
||||
Setting the `keyed` flag to `true` will associate a unique string key with each bucket and return the ranges as a hash rather than an array:
|
||||
Setting the `keyed` flag to `true` associates a unique string key with each
|
||||
bucket and returns the ranges as a hash rather than an array:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
@ -358,20 +474,25 @@ Response:
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
|
||||
|
||||
==== Scripts
|
||||
===== Scripts
|
||||
|
||||
Like with the normal <<search-aggregations-bucket-histogram-aggregation,histogram>>, both document level scripts and
|
||||
value level scripts are supported. It is also possible to control the order of the returned buckets using the `order`
|
||||
settings and filter the returned buckets based on a `min_doc_count` setting (by default all buckets between the first
|
||||
bucket that matches documents and the last one are returned). This histogram also supports the `extended_bounds`
|
||||
setting, which enables extending the bounds of the histogram beyond the data itself (to read more on why you'd want to
|
||||
do that please refer to the explanation <<search-aggregations-bucket-histogram-aggregation-extended-bounds,here>>).
|
||||
As with the normal <<search-aggregations-bucket-histogram-aggregation,histogram>>,
|
||||
both document-level scripts and
|
||||
value-level scripts are supported. You can control the order of the returned
|
||||
buckets using the `order`
|
||||
settings and filter the returned buckets based on a `min_doc_count` setting
|
||||
(by default all buckets between the first
|
||||
bucket that matches documents and the last one are returned). This histogram
|
||||
also supports the `extended_bounds`
|
||||
setting, which enables extending the bounds of the histogram beyond the data
|
||||
itself. For more information, see
|
||||
<<search-aggregations-bucket-histogram-aggregation-extended-bounds,`Extended Bounds`>>.
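A rough Java sketch combining a few of these options (the aggregation name and
field are placeholders):

[source,java]
--------------------------------------------------
AggregationBuilders.dateHistogram("sales_over_time")
    .field("date")
    .dateHistogramInterval(DateHistogramInterval.MONTH)
    .order(BucketOrder.key(false))   // newest bucket first
    .minDocCount(1);                 // skip empty buckets
--------------------------------------------------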
|
||||
|
||||
==== Missing value
|
||||
===== Missing value
|
||||
|
||||
The `missing` parameter defines how documents that are missing a value should be treated.
|
||||
By default they will be ignored but it is also possible to treat them as if they
|
||||
had a value.
|
||||
The `missing` parameter defines how to treat documents that are missing a value.
|
||||
By default, they are ignored, but it is also possible to treat them as if they
|
||||
have a value.
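In the Java API this is, roughly (a sketch; the field and replacement value are
placeholders):

[source,java]
--------------------------------------------------
AggregationBuilders.dateHistogram("publications_over_time")
    .field("publish_date")
    .dateHistogramInterval(DateHistogramInterval.YEAR)
    .missing("2000-01-01");   // documents without publish_date fall into this bucket
--------------------------------------------------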
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
@ -391,20 +512,22 @@ POST /sales/_search?size=0
|
||||
// CONSOLE
|
||||
// TEST[setup:sales]
|
||||
|
||||
<1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`.
|
||||
<1> Documents without a value in the `publish_date` field will fall into the
|
||||
same bucket as documents that have the value `2000-01-01`.
|
||||
|
||||
==== Order
|
||||
===== Order
|
||||
|
||||
By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using
|
||||
the `order` setting. Supports the same `order` functionality as the <<search-aggregations-bucket-terms-aggregation-order,`Terms Aggregation`>>.
|
||||
By default the returned buckets are sorted by their `key` ascending, but you can
|
||||
control the order using
|
||||
the `order` setting. This setting supports the same `order` functionality as
|
||||
<<search-aggregations-bucket-terms-aggregation-order,`Terms Aggregation`>>.
|
||||
|
||||
deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys]
|
||||
|
||||
==== Use of a script to aggregate by day of the week
|
||||
===== Using a script to aggregate by day of the week
|
||||
|
||||
There are some cases where date histogram can't help us, like for example, when we need
|
||||
to aggregate the results by day of the week.
|
||||
In this case to overcome the problem, we can use a script that returns the day of the week:
|
||||
When you need to aggregate the results by day of the week, use a script that
|
||||
returns the day of the week:
|
||||
|
||||
|
||||
[source,js]
|
||||
@ -452,5 +575,5 @@ Response:
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
|
||||
|
||||
The response will contain all the buckets having as key the relative day of
|
||||
the week: 1 for Monday, 2 for Tuesday... 7 for Sunday.
|
||||
The response will contain all the buckets having the relative day of
|
||||
the week as key: 1 for Monday, 2 for Tuesday... 7 for Sunday.
|
||||
|
@ -49,11 +49,9 @@ GET _search <4>
|
||||
<3> This document will be indexed, but without indexing the `message` field.
|
||||
<4> Search returns both documents, but only the first is present in the terms aggregation.
|
||||
|
||||
TIP: The `ignore_above` setting is allowed to have different settings for
|
||||
fields of the same name in the same index. Its value can be updated on
|
||||
TIP: The `ignore_above` setting can be updated on
|
||||
existing fields using the <<indices-put-mapping,PUT mapping API>>.
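For example, with the high-level REST client an update along these lines should
work (a sketch; `my_index`, the `_doc` type and the `message` field are
placeholders):

[source,java]
--------------------------------------------------
PutMappingRequest request = new PutMappingRequest("my_index").type("_doc");
request.source(
    "{ \"properties\": { \"message\": { \"type\": \"keyword\", \"ignore_above\": 20 } } }",
    XContentType.JSON);
client.indices().putMapping(request, RequestOptions.DEFAULT);
--------------------------------------------------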
|
||||
|
||||
|
||||
This option is also useful for protecting against Lucene's term byte-length
|
||||
limit of `32766`.
|
||||
|
||||
|
@ -55,7 +55,6 @@ PUT my_index/_doc/match_value
|
||||
Fields referred to in a percolator query must *already* exist in the mapping
|
||||
associated with the index used for percolation. In order to make sure these fields exist,
|
||||
add or update a mapping via the <<indices-create-index,create index>> or <<indices-put-mapping,put mapping>> APIs.
|
||||
Fields referred to in a percolator query may exist in any type of the index containing the `percolator` field type.
|
||||
|
||||
=====================================
|
||||
|
||||
|
@ -152,6 +152,15 @@ PUT _cluster/settings
|
||||
by default, but they can selectively be made optional by setting this setting
|
||||
to `true`.
|
||||
|
||||
`cluster.remote.${cluster_alias}.transport.ping_schedule`::
|
||||
|
||||
Sets the time interval between regular application-level ping messages that
|
||||
are sent to ensure that transport connections to nodes belonging to remote
|
||||
clusters are kept alive. If set to `-1`, application-level ping messages to
|
||||
this remote cluster are not sent. If unset, application-level ping messages
|
||||
are sent according to the global `transport.ping_schedule` setting, which
|
||||
defaults to `-1`, meaning that pings are not sent.
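Assuming the setting can be updated dynamically, it could be applied through the
cluster settings API with the Java client along these lines (a sketch;
`cluster_one` is a hypothetical remote cluster alias):

[source,java]
--------------------------------------------------
ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
request.persistentSettings(Settings.builder()
    .put("cluster.remote.cluster_one.transport.ping_schedule", "30s"));
client.cluster().putSettings(request, RequestOptions.DEFAULT);
--------------------------------------------------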
|
||||
|
||||
[float]
|
||||
[[retrieve-remote-clusters-info]]
|
||||
=== Retrieving remote clusters info
|
||||
|
@ -46,9 +46,9 @@ between all nodes. Defaults to `false`.
|
||||
|
||||
|`transport.ping_schedule` | Schedule a regular application-level ping message
|
||||
to ensure that transport connections between nodes are kept alive. Defaults to
|
||||
`5s` in the transport client and `-1` (disabled) elsewhere. It is preferable to
|
||||
correctly configure TCP keep-alives instead of using this feature, because TCP
|
||||
keep-alives apply to all kinds of long-lived connection and not just to
|
||||
`5s` in the transport client and `-1` (disabled) elsewhere. It is preferable
|
||||
to correctly configure TCP keep-alives instead of using this feature, because
|
||||
TCP keep-alives apply to all kinds of long-lived connections and not just to
|
||||
transport connections.
|
||||
|
||||
|=======================================================================
|
||||
|
Binary image file changed (not shown): 78 KiB before, 175 KiB after.
@ -208,10 +208,10 @@ not. The number value is of type float.
|
||||
[[function-random]]
|
||||
==== Random
|
||||
|
||||
The `random_score` generates scores that are uniformly distributed in [0, 1[.
|
||||
By default, it uses the internal Lucene doc ids as a source of randomness,
|
||||
which is very efficient but unfortunately not reproducible since documents might
|
||||
be renumbered by merges.
|
||||
The `random_score` generates scores that are uniformly distributed from 0 up to
|
||||
but not including 1. By default, it uses the internal Lucene doc ids as a
|
||||
source of randomness, which is very efficient but unfortunately not
|
||||
reproducible since documents might be renumbered by merges.
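For reproducible scores (see the `seed` and `field` parameters described next),
the Java query builders can be combined roughly as follows (a sketch):

[source,java]
--------------------------------------------------
QueryBuilders.functionScoreQuery(
    QueryBuilders.matchAllQuery(),
    ScoreFunctionBuilders.randomFunction()
        .seed(314159265)
        .setField("_seq_no"));   // a seed plus a field makes scores reproducible
--------------------------------------------------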
|
||||
|
||||
In case you want scores to be reproducible, it is possible to provide a `seed`
|
||||
and `field`. The final score will then be computed based on this seed, the
|
||||
|
@ -30,6 +30,7 @@ Rules for the `index` parameter:
|
||||
or using `_all`, is not permitted
|
||||
- Multiple non-rollup indices may be specified
|
||||
- Only one rollup index may be specified. If more than one are supplied an exception will be thrown
|
||||
- Index patterns may be used, but if they match more than one rollup index an exception will be thrown.
|
||||
|
||||
==== Request Body
|
||||
|
||||
|
@ -21,6 +21,7 @@ follows:
|
||||
or using `_all`, is not permitted
|
||||
- Multiple non-rollup indices may be specified
|
||||
- Only one rollup index may be specified. If more than one are supplied an exception will be thrown
|
||||
- Index patterns may be used, but if they match more than one rollup index an exception will be thrown.
|
||||
|
||||
This limitation is driven by the logic that decides which jobs are the "best" for any given query. If you have ten jobs stored in a single
|
||||
index, which cover the source data with varying degrees of completeness and different intervals, the query needs to determine which set
|
||||
|
@ -3,10 +3,10 @@
|
||||
[[xpack-sql]]
|
||||
= SQL access
|
||||
|
||||
:sql-tests: {xes-repo-dir}/../../qa/sql
|
||||
:sql-tests: {xes-repo-dir}/../../plugin/sql/qa
|
||||
:sql-specs: {sql-tests}/src/main/resources
|
||||
:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc
|
||||
:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/qa/sql/security
|
||||
:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc
|
||||
:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/sql/qa/security
|
||||
:es-sql: Elasticsearch SQL
|
||||
|
||||
[partintro]
|
||||
|
@ -82,21 +82,21 @@ public class URLRepository extends BlobStoreRepository {
|
||||
NamedXContentRegistry namedXContentRegistry) {
|
||||
super(metadata, environment.settings(), namedXContentRegistry);
|
||||
|
||||
if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(settings) == false) {
|
||||
if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) {
|
||||
throw new RepositoryException(metadata.name(), "missing url");
|
||||
}
|
||||
this.environment = environment;
|
||||
supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings);
|
||||
urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{});
|
||||
supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(environment.settings());
|
||||
urlWhiteList = ALLOWED_URLS_SETTING.get(environment.settings()).toArray(new URIPattern[]{});
|
||||
basePath = BlobPath.cleanPath();
|
||||
url = URL_SETTING.exists(metadata.settings())
|
||||
? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings);
|
||||
? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(environment.settings());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BlobStore createBlobStore() {
|
||||
URL normalizedURL = checkURL(url);
|
||||
return new URLBlobStore(settings, normalizedURL);
|
||||
return new URLBlobStore(environment.settings(), normalizedURL);
|
||||
}
|
||||
|
||||
// only use for testing
|
||||
|
@ -93,6 +93,7 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic
|
||||
}
|
||||
}
|
||||
|
||||
private final Settings settings;
|
||||
private final AzureComputeService azureComputeService;
|
||||
private TransportService transportService;
|
||||
private NetworkService networkService;
|
||||
@ -108,6 +109,7 @@ public class AzureUnicastHostsProvider extends AbstractComponent implements Unic
|
||||
public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService,
|
||||
TransportService transportService, NetworkService networkService) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.azureComputeService = azureComputeService;
|
||||
this.transportService = transportService;
|
||||
this.networkService = networkService;
|
||||
|
@ -94,6 +94,7 @@ public class GceInstancesServiceImpl extends AbstractComponent implements GceIns
|
||||
return instances;
|
||||
}
|
||||
|
||||
private final Settings settings;
|
||||
private Compute client;
|
||||
private TimeValue refreshInterval = null;
|
||||
private long lastRefresh;
|
||||
@ -108,6 +109,7 @@ public class GceInstancesServiceImpl extends AbstractComponent implements GceIns
|
||||
|
||||
public GceInstancesServiceImpl(Settings settings) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.validateCerts = GCE_VALIDATE_CERTIFICATES.get(settings);
|
||||
this.project = resolveProject();
|
||||
this.zones = resolveZones();
|
||||
|
@ -44,11 +44,14 @@ public class GceMetadataService extends AbstractLifecycleComponent {
|
||||
public static final Setting<String> GCE_HOST =
|
||||
new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), Setting.Property.NodeScope);
|
||||
|
||||
private final Settings settings;
|
||||
|
||||
/** Global instance of the HTTP transport. */
|
||||
private HttpTransport gceHttpTransport;
|
||||
|
||||
public GceMetadataService(Settings settings) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
}
|
||||
|
||||
protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurityException, IOException {
|
||||
|
@ -58,6 +58,7 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas
|
||||
private static final String TERMINATED = "TERMINATED";
|
||||
}
|
||||
|
||||
private final Settings settings;
|
||||
private final GceInstancesService gceInstancesService;
|
||||
private TransportService transportService;
|
||||
private NetworkService networkService;
|
||||
@ -74,6 +75,7 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas
|
||||
TransportService transportService,
|
||||
NetworkService networkService) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.gceInstancesService = gceInstancesService;
|
||||
this.transportService = transportService;
|
||||
this.networkService = networkService;
|
||||
|
@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
@ -55,6 +56,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository {
|
||||
byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic);
|
||||
static final Setting<String> CLIENT_NAME = new Setting<>("client", "default", Function.identity());
|
||||
|
||||
private final Settings settings;
|
||||
private final GoogleCloudStorageService storageService;
|
||||
private final BlobPath basePath;
|
||||
private final boolean compress;
|
||||
@ -66,6 +68,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository {
|
||||
NamedXContentRegistry namedXContentRegistry,
|
||||
GoogleCloudStorageService storageService) {
|
||||
super(metadata, environment.settings(), namedXContentRegistry);
|
||||
this.settings = environment.settings();
|
||||
this.storageService = storageService;
|
||||
|
||||
String basePath = BASE_PATH.get(metadata.settings());
|
||||
|
@ -148,6 +148,8 @@ class S3Repository extends BlobStoreRepository {
|
||||
*/
|
||||
static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");
|
||||
|
||||
private final Settings settings;
|
||||
|
||||
private final S3Service service;
|
||||
|
||||
private final String bucket;
|
||||
@ -178,6 +180,7 @@ class S3Repository extends BlobStoreRepository {
|
||||
final NamedXContentRegistry namedXContentRegistry,
|
||||
final S3Service service) {
|
||||
super(metadata, settings, namedXContentRegistry);
|
||||
this.settings = settings;
|
||||
this.service = service;
|
||||
|
||||
// Parse and validate the user's S3 Storage Class setting
|
||||
|
@ -82,6 +82,9 @@
|
||||
|
||||
---
|
||||
"Find a task result record from the old cluster":
|
||||
- skip:
|
||||
features: headers
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: .tasks
|
||||
|
@ -58,7 +58,7 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler {
|
||||
Setting.boolSetting("test.setting.not_deprecated", false,
|
||||
Setting.Property.NodeScope, Setting.Property.Dynamic);
|
||||
|
||||
private static final Map<String, Setting<?>> SETTINGS;
|
||||
private static final Map<String, Setting<?>> SETTINGS_MAP;
|
||||
|
||||
static {
|
||||
Map<String, Setting<?>> settingsMap = new HashMap<>(3);
|
||||
@ -67,14 +67,17 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler {
|
||||
settingsMap.put(TEST_DEPRECATED_SETTING_TRUE2.getKey(), TEST_DEPRECATED_SETTING_TRUE2);
|
||||
settingsMap.put(TEST_NOT_DEPRECATED_SETTING.getKey(), TEST_NOT_DEPRECATED_SETTING);
|
||||
|
||||
SETTINGS = Collections.unmodifiableMap(settingsMap);
|
||||
SETTINGS_MAP = Collections.unmodifiableMap(settingsMap);
|
||||
}
|
||||
|
||||
public static final String DEPRECATED_ENDPOINT = "[/_test_cluster/deprecated_settings] exists for deprecated tests";
|
||||
public static final String DEPRECATED_USAGE = "[deprecated_settings] usage is deprecated. use [settings] instead";
|
||||
|
||||
private final Settings settings;
|
||||
|
||||
public TestDeprecationHeaderRestAction(Settings settings, RestController controller) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
|
||||
controller.registerAsDeprecatedHandler(RestRequest.Method.GET, "/_test_cluster/deprecated_settings", this,
|
||||
DEPRECATED_ENDPOINT, deprecationLogger);
|
||||
@ -107,7 +110,7 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler {
|
||||
|
||||
builder.startObject().startArray("settings");
|
||||
for (String setting : settings) {
|
||||
builder.startObject().field(setting, SETTINGS.get(setting).getRaw(this.settings)).endObject();
|
||||
builder.startObject().field(setting, SETTINGS_MAP.get(setting).getRaw(this.settings)).endObject();
|
||||
}
|
||||
builder.endArray().endObject();
|
||||
channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
|
||||
|
@ -75,7 +75,7 @@
|
||||
"Deprecated _source_include and _source_exclude":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
version: " - 6.5.99"
|
||||
reason: _source_include and _source_exclude are deprecated from 6.6.0
|
||||
features: "warnings"
|
||||
|
||||
|
@ -79,6 +79,7 @@ import java.util.function.Function;
|
||||
*/
|
||||
public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRequest, AnalyzeResponse> {
|
||||
|
||||
private final Settings settings;
|
||||
private final IndicesService indicesService;
|
||||
private final Environment environment;
|
||||
|
||||
@ -88,6 +89,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, Environment environment) {
|
||||
super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
|
||||
AnalyzeRequest::new, ThreadPool.Names.ANALYZE);
|
||||
this.settings = settings;
|
||||
this.indicesService = indicesService;
|
||||
this.environment = environment;
|
||||
}
|
||||
|
@ -99,7 +99,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TransportRequestOptions transportOptions() {
|
||||
protected TransportRequestOptions transportOptions(Settings settings) {
|
||||
return BulkAction.INSTANCE.transportOptions(settings);
|
||||
}
|
||||
|
||||
|
@ -34,21 +34,22 @@ import org.elasticsearch.transport.TransportService;
|
||||
|
||||
public class TransportMainAction extends HandledTransportAction<MainRequest, MainResponse> {
|
||||
|
||||
private final String nodeName;
|
||||
private final ClusterService clusterService;
|
||||
|
||||
@Inject
|
||||
public TransportMainAction(Settings settings, TransportService transportService,
|
||||
ActionFilters actionFilters, ClusterService clusterService) {
|
||||
super(settings, MainAction.NAME, transportService, actionFilters, MainRequest::new);
|
||||
this.nodeName = Node.NODE_NAME_SETTING.get(settings);
|
||||
this.clusterService = clusterService;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(Task task, MainRequest request, ActionListener<MainResponse> listener) {
|
||||
ClusterState clusterState = clusterService.state();
|
||||
assert Node.NODE_NAME_SETTING.exists(settings);
|
||||
listener.onResponse(
|
||||
new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(),
|
||||
new MainResponse(nodeName, Version.CURRENT, clusterState.getClusterName(),
|
||||
clusterState.metaData().clusterUUID(), Build.CURRENT));
|
||||
}
|
||||
}
|
||||
|
@ -147,7 +147,7 @@ public abstract class TransportReplicationAction<
|
||||
this.transportReplicaAction = actionName + "[r]";
|
||||
registerRequestHandlers(actionName, transportService, request, replicaRequest, executor);
|
||||
|
||||
this.transportOptions = transportOptions();
|
||||
this.transportOptions = transportOptions(settings);
|
||||
|
||||
this.syncGlobalCheckpointAfterOperation = syncGlobalCheckpointAfterOperation;
|
||||
}
|
||||
@ -231,7 +231,7 @@ public abstract class TransportReplicationAction<
|
||||
return true;
|
||||
}
|
||||
|
||||
protected TransportRequestOptions transportOptions() {
|
||||
protected TransportRequestOptions transportOptions(Settings settings) {
|
||||
return TransportRequestOptions.EMPTY;
|
||||
}
|
||||
|
||||
|
@ -344,12 +344,14 @@ import java.util.Map;
|
||||
|
||||
public abstract class AbstractClient extends AbstractComponent implements Client {
|
||||
|
||||
protected final Settings settings;
|
||||
private final ThreadPool threadPool;
|
||||
private final Admin admin;
|
||||
private final ThreadedActionListener.Wrapper threadedWrapper;
|
||||
|
||||
public AbstractClient(Settings settings, ThreadPool threadPool) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.threadPool = threadPool;
|
||||
this.admin = new Admin(this);
|
||||
this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool);
|
||||
|
@ -130,15 +130,15 @@ final class TransportClientNodesService extends AbstractComponent implements Clo
|
||||
this.threadPool = threadPool;
|
||||
this.minCompatibilityVersion = Version.CURRENT.minimumCompatibilityVersion();
|
||||
|
||||
this.nodesSamplerInterval = TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
|
||||
this.pingTimeout = TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();
|
||||
this.ignoreClusterName = TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);
|
||||
this.nodesSamplerInterval = TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(settings);
|
||||
this.pingTimeout = TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.get(settings).millis();
|
||||
this.ignoreClusterName = TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(settings);
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
|
||||
}
|
||||
|
||||
if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
|
||||
if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(settings)) {
|
||||
this.nodesSampler = new SniffNodesSampler();
|
||||
} else {
|
||||
this.nodesSampler = new SimpleNodeSampler();
|
||||
|
@ -106,6 +106,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
||||
|
||||
public static final int MAX_INDEX_NAME_BYTES = 255;
|
||||
|
||||
private final Settings settings;
|
||||
private final ClusterService clusterService;
|
||||
private final IndicesService indicesService;
|
||||
private final AllocationService allocationService;
|
||||
@ -128,6 +129,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
||||
final NamedXContentRegistry xContentRegistry,
|
||||
final boolean forbidPrivateIndexSettings) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.clusterService = clusterService;
|
||||
this.indicesService = indicesService;
|
||||
this.allocationService = allocationService;
|
||||
|
@ -48,7 +48,7 @@ import static java.util.stream.Collectors.toSet;
|
||||
* Deletes indices.
|
||||
*/
|
||||
public class MetaDataDeleteIndexService extends AbstractComponent {
|
||||
|
||||
private final Settings settings;
|
||||
private final ClusterService clusterService;
|
||||
|
||||
private final AllocationService allocationService;
|
||||
@ -56,6 +56,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
|
||||
@Inject
|
||||
public MetaDataDeleteIndexService(Settings settings, ClusterService clusterService, AllocationService allocationService) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.clusterService = clusterService;
|
||||
this.allocationService = allocationService;
|
||||
}
|
||||
|
@ -53,6 +53,7 @@ import java.util.function.UnaryOperator;
|
||||
*/
|
||||
public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||
|
||||
private final Settings settings;
|
||||
private final NamedXContentRegistry xContentRegistry;
|
||||
private final MapperRegistry mapperRegistry;
|
||||
private final IndexScopedSettings indexScopedSettings;
|
||||
@ -62,6 +63,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||
IndexScopedSettings indexScopedSettings,
|
||||
Collection<UnaryOperator<IndexMetaData>> indexMetaDataUpgraders) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.xContentRegistry = xContentRegistry;
|
||||
this.mapperRegistry = mapperRegistry;
|
||||
this.indexScopedSettings = indexScopedSettings;
|
||||
|
@ -73,8 +73,11 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
|
||||
Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1,
|
||||
Property.Dynamic, Property.NodeScope);
|
||||
|
||||
private final Settings settings;
|
||||
|
||||
public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings);
|
||||
clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit);
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
|
||||
addListener(localNodeMasterListeners);
|
||||
threadPoolExecutor = EsExecutors.newSinglePrioritizing(
|
||||
nodeName + "/" + CLUSTER_UPDATE_THREAD_NAME,
|
||||
daemonThreadFactory(settings, CLUSTER_UPDATE_THREAD_NAME),
|
||||
daemonThreadFactory(nodeName, CLUSTER_UPDATE_THREAD_NAME),
|
||||
threadPool.getThreadContext(),
|
||||
threadPool.scheduler());
|
||||
}
|
||||
|
@ -55,6 +55,11 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
public static final org.elasticsearch.common.settings.Setting.AffixSetting<String> USER_DEFINED_META_DATA =
|
||||
Setting.prefixKeySetting("cluster.metadata.", (key) -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
|
||||
|
||||
/**
|
||||
* The node's settings.
|
||||
*/
|
||||
private final Settings settings;
|
||||
|
||||
private final ClusterName clusterName;
|
||||
|
||||
private final OperationRouting operationRouting;
|
||||
@ -65,6 +70,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
|
||||
public ClusterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.nodeName = Node.NODE_NAME_SETTING.get(settings);
|
||||
this.masterService = new MasterService(nodeName, settings, threadPool);
|
||||
this.operationRouting = new OperationRouting(settings, clusterSettings);
|
||||
@ -199,6 +205,9 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
return clusterSettings;
|
||||
}
|
||||
|
||||
/**
|
||||
* The node's settings.
|
||||
*/
|
||||
public Settings getSettings() {
|
||||
return settings;
|
||||
}
|
||||
|
@ -109,7 +109,7 @@ public class MasterService extends AbstractLifecycleComponent {
|
||||
Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting");
|
||||
threadPoolExecutor = EsExecutors.newSinglePrioritizing(
|
||||
nodeName + "/" + MASTER_UPDATE_THREAD_NAME,
|
||||
daemonThreadFactory(settings, MASTER_UPDATE_THREAD_NAME),
|
||||
daemonThreadFactory(nodeName, MASTER_UPDATE_THREAD_NAME),
|
||||
threadPool.getThreadContext(),
|
||||
threadPool.scheduler());
|
||||
taskBatcher = new Batcher(logger, threadPoolExecutor);
|
||||
|
@ -26,10 +26,8 @@ import org.elasticsearch.common.settings.Settings;
|
||||
public abstract class AbstractComponent {
|
||||
|
||||
protected final Logger logger;
|
||||
protected final Settings settings;
|
||||
|
||||
public AbstractComponent(Settings settings) {
|
||||
this.logger = LogManager.getLogger(getClass());
|
||||
this.settings = settings;
|
||||
}
|
||||
}
|
||||
|
@ -49,15 +49,17 @@ import java.util.stream.Collectors;
|
||||
*/
|
||||
public abstract class AbstractScopedSettings extends AbstractComponent {
|
||||
public static final String ARCHIVED_SETTINGS_PREFIX = "archived.";
|
||||
private Settings lastSettingsApplied = Settings.EMPTY;
|
||||
private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$");
|
||||
private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$");
|
||||
private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+[*](?:[.][-\\w]+)+$");
|
||||
|
||||
private final Settings settings;
|
||||
private final List<SettingUpdater<?>> settingUpdaters = new CopyOnWriteArrayList<>();
|
||||
private final Map<String, Setting<?>> complexMatchers;
|
||||
private final Map<String, Setting<?>> keySettings;
|
||||
private final Map<Setting<?>, SettingUpgrader<?>> settingUpgraders;
|
||||
private final Setting.Property scope;
|
||||
private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$");
|
||||
private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$");
|
||||
private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+[*](?:[.][-\\w]+)+$");
|
||||
private Settings lastSettingsApplied;
|
||||
|
||||
protected AbstractScopedSettings(
|
||||
final Settings settings,
|
||||
@ -65,6 +67,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
|
||||
final Set<SettingUpgrader<?>> settingUpgraders,
|
||||
final Setting.Property scope) {
|
||||
super(settings);
|
||||
this.settings = settings;
|
||||
this.lastSettingsApplied = Settings.EMPTY;
|
||||
|
||||
this.settingUpgraders =
|
||||
@ -105,6 +108,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
|
||||
|
||||
protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, AbstractScopedSettings other) {
|
||||
super(nodeSettings);
|
||||
this.settings = nodeSettings;
|
||||
this.lastSettingsApplied = scopeSettings;
|
||||
this.scope = other.scope;
|
||||
complexMatchers = other.complexMatchers;
|
||||
|
@ -293,6 +293,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
||||
RemoteClusterService.SEARCH_REMOTE_NODE_ATTRIBUTE,
|
||||
RemoteClusterService.ENABLE_REMOTE_CLUSTERS,
|
||||
RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS,
|
||||
RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE,
|
||||
TransportService.TRACE_LOG_EXCLUDE_SETTING,
|
||||
TransportService.TRACE_LOG_INCLUDE_SETTING,
|
||||
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
|
||||
|
@ -28,6 +28,7 @@ import java.time.ZoneOffset;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public class DateUtils {
|
||||
public static DateTimeZone zoneIdToDateTimeZone(ZoneId zoneId) {
|
||||
@ -44,6 +45,7 @@ public class DateUtils {
|
||||
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(DateFormatters.class));
|
||||
// pkg private for tests
|
||||
static final Map<String, String> DEPRECATED_SHORT_TIMEZONES;
|
||||
public static final Set<String> DEPRECATED_SHORT_TZ_IDS;
|
||||
static {
|
||||
Map<String, String> tzs = new HashMap<>();
|
||||
tzs.put("EST", "-05:00"); // eastern time without daylight savings
|
||||
@ -52,6 +54,7 @@ public class DateUtils {
|
||||
tzs.put("ROC", "Asia/Taipei");
|
||||
tzs.put("Eire", "Europe/London");
|
||||
DEPRECATED_SHORT_TIMEZONES = Collections.unmodifiableMap(tzs);
|
||||
DEPRECATED_SHORT_TZ_IDS = tzs.keySet();
|
||||
}
|
||||
|
||||
public static ZoneId dateTimeZoneToZoneId(DateTimeZone timeZone) {
|
||||
|
@ -179,6 +179,11 @@ public class EsExecutors {
|
||||
return daemonThreadFactory(threadName(settings, namePrefix));
|
||||
}
|
||||
|
||||
public static ThreadFactory daemonThreadFactory(String nodeName, String namePrefix) {
|
||||
assert nodeName != null && false == nodeName.isEmpty();
|
||||
return daemonThreadFactory(threadName(nodeName, namePrefix));
|
||||
}
|
||||
|
||||
public static ThreadFactory daemonThreadFactory(Settings settings, String ... names) {
|
||||
return daemonThreadFactory(threadName(settings, names));
|
||||
}
|
||||
|
@@ -47,6 +47,7 @@ import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK
*/
public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery {

private final ClusterName clusterName;
protected final TransportService transportService;
private final ClusterApplier clusterApplier;
private volatile ClusterState clusterState;
@@ -54,6 +55,7 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
public SingleNodeDiscovery(final Settings settings, final TransportService transportService,
final MasterService masterService, final ClusterApplier clusterApplier) {
super(Objects.requireNonNull(settings));
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
this.transportService = Objects.requireNonNull(transportService);
masterService.setClusterStateSupplier(() -> clusterState);
this.clusterApplier = clusterApplier;
@@ -114,7 +116,7 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
}

protected ClusterState createInitialState(DiscoveryNode localNode) {
ClusterState.Builder builder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings));
ClusterState.Builder builder = ClusterState.builder(clusterName);
return builder.nodes(DiscoveryNodes.builder().add(localNode)
.localNodeId(localNode.getId())
.masterNodeId(localNode.getId())

@@ -121,6 +121,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
private final NodesFaultDetection nodesFD;
private final PublishClusterStateAction publishClusterState;
private final MembershipAction membership;
private final ClusterName clusterName;
private final ThreadPool threadPool;

private final TimeValue pingTimeout;
@@ -172,7 +173,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
this.maxPingsFromAnotherMaster = MAX_PINGS_FROM_ANOTHER_MASTER_SETTING.get(settings);
this.sendLeaveRequest = SEND_LEAVE_REQUEST_SETTING.get(settings);
this.threadPool = threadPool;
ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
this.committedState = new AtomicReference<>();

this.masterElectionIgnoreNonMasters = MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING.get(settings);
@@ -252,7 +253,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
// set initial state
assert committedState.get() == null;
assert localNode != null;
ClusterState.Builder builder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings));
ClusterState.Builder builder = ClusterState.builder(clusterName);
ClusterState initialState = builder
.blocks(ClusterBlocks.builder()
.addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)

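Both discovery classes above stop re-reading cluster.name from Settings every time they build a ClusterState and instead keep the ClusterName in a final field assigned once in the constructor. A minimal, generic sketch of that read-once-and-reuse pattern, with made-up names rather than the Elasticsearch types:

// Illustration of reading a configuration value once in the constructor
// and reusing the cached field, instead of re-parsing config at every call site.
final class StateFactory {
    private final String clusterName;

    StateFactory(java.util.Properties config) {
        // read once...
        this.clusterName = config.getProperty("cluster.name", "elasticsearch");
    }

    String describeInitialState(String localNodeId) {
        // ...reuse afterwards.
        return "cluster [" + clusterName + "], master/local node [" + localNodeId + "]";
    }
}
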
@@ -23,7 +23,6 @@ import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -153,7 +152,7 @@ public class Gateway extends AbstractComponent {
clusterSettings.upgradeSettings(metaDataBuilder.transientSettings()),
e -> logUnknownSetting("transient", e),
(e, ex) -> logInvalidSetting("transient", e, ex)));
ClusterState.Builder builder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings));
ClusterState.Builder builder = ClusterState.builder(clusterService.getClusterName());
builder.metaData(metaDataBuilder);
return builder;
}

@@ -101,22 +101,22 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
this.clusterService = clusterService;
this.threadPool = threadPool;
// allow to control a delay of when indices will get created
this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);
this.expectedNodes = EXPECTED_NODES_SETTING.get(settings);
this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings);
this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(settings);

if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
if (RECOVER_AFTER_TIME_SETTING.exists(settings)) {
recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(settings);
} else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
} else {
recoverAfterTime = null;
}
this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(settings);
this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(settings);
// default the recover after master nodes to the minimum master nodes in the discovery
if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(settings)) {
recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(settings);
} else {
// TODO: change me once the minimum_master_nodes is changed too
recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);

@@ -67,6 +67,7 @@ public class TransportNodesListGatewayStartedShards extends
TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> {

public static final String ACTION_NAME = "internal:gateway/local/started_shards";
private final Settings settings;
private final NodeEnvironment nodeEnv;
private final IndicesService indicesService;
private final NamedXContentRegistry namedXContentRegistry;
@@ -78,6 +79,7 @@ public class TransportNodesListGatewayStartedShards extends
NamedXContentRegistry namedXContentRegistry) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STARTED, NodeGatewayStartedShards.class);
this.settings = settings;
this.nodeEnv = env;
this.indicesService = indicesService;
this.namedXContentRegistry = namedXContentRegistry;

@@ -63,7 +63,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT;

public abstract class AbstractHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport {

protected final Settings settings;
public final HttpHandlingSettings handlingSettings;
protected final NetworkService networkService;
protected final BigArrays bigArrays;
@@ -84,6 +84,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo
protected AbstractHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool,
NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) {
super(settings);
this.settings = settings;
this.networkService = networkService;
this.bigArrays = bigArrays;
this.threadPool = threadPool;

@@ -117,7 +117,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
public static class TypeParser implements Mapper.TypeParser {

@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext)
throws MapperParsingException {
CompletionFieldMapper.Builder builder = new CompletionFieldMapper.Builder(name);
NamedAnalyzer indexAnalyzer = null;
NamedAnalyzer searchAnalyzer = null;
@@ -368,7 +369,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
*/
public Builder maxInputLength(int maxInputLength) {
if (maxInputLength <= 0) {
throw new IllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]");
throw new IllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName()
+ " must be > 0 but was [" + maxInputLength + "]");
}
this.maxInputLength = maxInputLength;
return this;
@@ -400,13 +402,15 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
completionFieldType.setContextMappings(contextMappings);
completionFieldType.setPreservePositionIncrements(preservePositionIncrements);
completionFieldType.setPreserveSep(preserveSeparators);
return new CompletionFieldMapper(name, this.fieldType, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo, maxInputLength);
return new CompletionFieldMapper(name, this.fieldType, context.indexSettings(),
multiFieldsBuilder.build(this, context), copyTo, maxInputLength);
}
}

private int maxInputLength;

public CompletionFieldMapper(String simpleName, MappedFieldType fieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo, int maxInputLength) {
public CompletionFieldMapper(String simpleName, MappedFieldType fieldType, Settings indexSettings,
MultiFields multiFields, CopyTo copyTo, int maxInputLength) {
super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, multiFields, copyTo);
this.maxInputLength = maxInputLength;
}
@@ -506,7 +510,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
* "STRING" - interpreted as the field value (input)
* "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT }
*/
private void parse(ParseContext parseContext, Token token, XContentParser parser, Map<String, CompletionInputMetaData> inputMap) throws IOException {
private void parse(ParseContext parseContext, Token token,
XContentParser parser, Map<String, CompletionInputMetaData> inputMap) throws IOException {
String currentFieldName = null;
if (token == Token.VALUE_STRING) {
inputMap.put(parser.text(), new CompletionInputMetaData(parser.text(), Collections.emptyMap(), 1));
@@ -518,7 +523,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
if (token == Token.FIELD_NAME) {
currentFieldName = parser.currentName();
if (!ALLOWED_CONTENT_FIELD_NAMES.contains(currentFieldName)) {
throw new IllegalArgumentException("unknown field name [" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES);
throw new IllegalArgumentException("unknown field name [" + currentFieldName
+ "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES);
}
} else if (currentFieldName != null) {
if (Fields.CONTENT_FIELD_NAME_INPUT.equals(currentFieldName)) {
@@ -529,7 +535,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
if (token == Token.VALUE_STRING) {
inputs.add(parser.text());
} else {
throw new IllegalArgumentException("input array must have string values, but was [" + token.name() + "]");
throw new IllegalArgumentException("input array must have string values, but was ["
+ token.name() + "]");
}
}
} else {
@@ -552,8 +559,10 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
} else {
throw new IllegalArgumentException("weight must be a number or string, but was [" + token.name() + "]");
}
if (weightValue.longValue() < 0 || weightValue.longValue() > Integer.MAX_VALUE) { // always parse a long to make sure we don't get overflow
throw new IllegalArgumentException("weight must be in the interval [0..2147483647], but was [" + weightValue.longValue() + "]");
// always parse a long to make sure we don't get overflow
if (weightValue.longValue() < 0 || weightValue.longValue() > Integer.MAX_VALUE) {
throw new IllegalArgumentException("weight must be in the interval [0..2147483647], but was ["
+ weightValue.longValue() + "]");
}
weight = weightValue.intValue();
} else if (Fields.CONTENT_FIELD_NAME_CONTEXTS.equals(currentFieldName)) {
@@ -587,7 +596,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
}
}
} else {
throw new ParsingException(parser.getTokenLocation(), "failed to parse [" + parser.currentName() + "]: expected text or object, but got " + token.name());
throw new ParsingException(parser.getTokenLocation(), "failed to parse [" + parser.currentName()
+ "]: expected text or object, but got " + token.name());
}
}

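The weight-parsing hunk above keeps the suggestion weight as a long and only narrows it to int after checking the [0..Integer.MAX_VALUE] range, so an out-of-range JSON number is rejected instead of silently overflowing. A self-contained sketch of that parse-wide-then-narrow check (an illustrative method, not the mapper code):

// Parse a suggestion weight that may arrive as a string.
// Working on a long first avoids silent int overflow for values like "3000000000".
static int parseWeight(String raw) {
    final long value;
    try {
        value = Long.parseLong(raw);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("weight must be a number, but was [" + raw + "]", e);
    }
    if (value < 0 || value > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("weight must be in the interval [0..2147483647], but was [" + value + "]");
    }
    return (int) value; // safe narrowing after the range check
}
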
@@ -72,8 +72,9 @@ public class DocumentMapper implements ToXContentFragment {
this.rootObjectMapper = builder.build(builderContext);

final String type = rootObjectMapper.name();
DocumentMapper existingMapper = mapperService.documentMapper(type);
for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : mapperService.mapperRegistry.getMetadataMapperParsers().entrySet()) {
final DocumentMapper existingMapper = mapperService.documentMapper(type);
final Map<String, TypeParser> metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers();
for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : metadataMapperParsers.entrySet()) {
final String name = entry.getKey();
final MetadataFieldMapper existingMetadataMapper = existingMapper == null
? null

@@ -107,7 +107,8 @@ final class DocumentParser {
}

if (Objects.equals(source.type(), docMapper.type()) == false) {
throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + docMapper.type() + "]");
throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type ["
+ docMapper.type() + "]");
}
}

@@ -136,7 +137,8 @@ final class DocumentParser {
// empty doc, we can handle it...
return true;
} else if (token != XContentParser.Token.FIELD_NAME) {
throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist");
throw new MapperParsingException("Malformed content, after first object, either the type field"
+ " or the actual properties should exist");
}
}
return false;
@@ -355,7 +357,8 @@ final class DocumentParser {

String currentFieldName = parser.currentName();
if (token.isValue()) {
throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value");
throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName
+ "] as object, but found a concrete value");
}

ObjectMapper.Nested nested = mapper.nested();
@@ -379,7 +382,8 @@ final class DocumentParser {
}
}

private static void innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser, String currentFieldName, XContentParser.Token token) throws IOException {
private static void innerParseObject(ParseContext context, ObjectMapper mapper, XContentParser parser,
String currentFieldName, XContentParser.Token token) throws IOException {
while (token != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.START_OBJECT) {
parseObject(context, mapper, currentFieldName);
@@ -388,12 +392,14 @@ final class DocumentParser {
} else if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
if (MapperService.isMetadataField(context.path().pathAsText(currentFieldName))) {
throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside a document. Use the index API request parameters.");
throw new MapperParsingException("Field [" + currentFieldName + "] is a metadata field and cannot be added inside"
+ " a document. Use the index API request parameters.");
}
} else if (token == XContentParser.Token.VALUE_NULL) {
parseNullValue(context, mapper, currentFieldName);
} else if (token == null) {
throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?");
throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName
+ "] as object, but got EOF, has a concrete value been provided to it?");
} else if (token.isValue()) {
parseValue(context, mapper, currentFieldName, token);
}
@@ -558,7 +564,8 @@ final class DocumentParser {
}
}

private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException {
private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapper,
String lastFieldName, String arrayFieldName) throws IOException {
XContentParser parser = context.parser();
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
@@ -571,16 +578,19 @@ final class DocumentParser {
} else if (token == XContentParser.Token.VALUE_NULL) {
parseNullValue(context, mapper, lastFieldName);
} else if (token == null) {
throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?");
throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName
+ "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?");
} else {
parseValue(context, mapper, lastFieldName, token);
}
}
}

private static void parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException {
private static void parseValue(final ParseContext context, ObjectMapper parentMapper,
String currentFieldName, XContentParser.Token token) throws IOException {
if (currentFieldName == null) {
throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]");
throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with"
+ " no field associated with it, current value [" + context.parser().textOrNull() + "]");
}

final String[] paths = splitAndValidatePath(currentFieldName);
@@ -609,7 +619,8 @@ final class DocumentParser {
}
}

private static Mapper.Builder<?,?> createBuilderFromFieldType(final ParseContext context, MappedFieldType fieldType, String currentFieldName) {
private static Mapper.Builder<?,?> createBuilderFromFieldType(final ParseContext context,
MappedFieldType fieldType, String currentFieldName) {
Mapper.Builder builder = null;
if (fieldType instanceof TextFieldType) {
builder = context.root().findTemplateBuilder(context, currentFieldName, "text", XContentFieldType.STRING);
@@ -671,7 +682,9 @@ final class DocumentParser {
return builder;
}

private static Mapper.Builder<?,?> createBuilderFromDynamicValue(final ParseContext context, XContentParser.Token token, String currentFieldName) throws IOException {
private static Mapper.Builder<?,?> createBuilderFromDynamicValue(final ParseContext context,
XContentParser.Token token,
String currentFieldName) throws IOException {
if (token == XContentParser.Token.VALUE_STRING) {
String text = context.parser().text();

@@ -771,10 +784,12 @@ final class DocumentParser {
}
}
// TODO how do we identify dynamically that its a binary value?
throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]");
throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name ["
+ currentFieldName + "]");
}

private static void parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException {
private static void parseDynamicValue(final ParseContext context, ObjectMapper parentMapper,
String currentFieldName, XContentParser.Token token) throws IOException {
ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context);
if (dynamic == ObjectMapper.Dynamic.STRICT) {
throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName);
@@ -885,8 +900,8 @@ final class DocumentParser {
context.path());
mapper = (ObjectMapper) builder.build(builderContext);
if (mapper.nested() != ObjectMapper.Nested.NO) {
throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i])
+ "]) through `copy_to` or dots in field names");
throw new MapperParsingException("It is forbidden to create dynamic nested objects (["
+ context.path().pathAsText(paths[i]) + "]) through `copy_to` or dots in field names");
}
context.addDynamicMapper(mapper);
break;

@@ -228,7 +228,8 @@ public class DynamicTemplate implements ToXContentObject {
try {
matchType.matches(regex, "");
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType + "] is invalid. Cannot create dynamic template [" + name + "].", e);
throw new IllegalArgumentException("Pattern [" + regex + "] of type [" + matchType
+ "] is invalid. Cannot create dynamic template [" + name + "].", e);
}
}
}
@@ -320,14 +321,16 @@ public class DynamicTemplate implements ToXContentObject {
private Map<String, Object> processMap(Map<String, Object> map, String name, String dynamicType) {
Map<String, Object> processedMap = new HashMap<>();
for (Map.Entry<String, Object> entry : map.entrySet()) {
String key = entry.getKey().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType);
String key = entry.getKey().replace("{name}", name).replace("{dynamic_type}", dynamicType)
.replace("{dynamicType}", dynamicType);
Object value = entry.getValue();
if (value instanceof Map) {
value = processMap((Map<String, Object>) value, name, dynamicType);
} else if (value instanceof List) {
value = processList((List) value, name, dynamicType);
} else if (value instanceof String) {
value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType);
value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType)
.replace("{dynamicType}", dynamicType);
}
processedMap.put(key, value);
}
@@ -342,7 +345,9 @@ public class DynamicTemplate implements ToXContentObject {
} else if (value instanceof List) {
value = processList((List) value, name, dynamicType);
} else if (value instanceof String) {
value = value.toString().replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType);
value = value.toString().replace("{name}", name)
.replace("{dynamic_type}", dynamicType)
.replace("{dynamicType}", dynamicType);
}
processedList.add(value);
}

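The processMap/processList hunks above only re-wrap the calls that substitute the {name}, {dynamic_type} and {dynamicType} placeholders throughout a nested mapping definition. A compact, hedged sketch of that kind of recursive substitution over plain Java collections (illustrative class, not the DynamicTemplate code itself):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Recursively replaces {name} and {dynamic_type} placeholders in a nested map/list structure.
final class PlaceholderResolver {
    @SuppressWarnings("unchecked")
    static Object resolve(Object node, String name, String dynamicType) {
        if (node instanceof Map) {
            Map<String, Object> out = new HashMap<>();
            for (Map.Entry<String, Object> e : ((Map<String, Object>) node).entrySet()) {
                out.put(substitute(e.getKey(), name, dynamicType), resolve(e.getValue(), name, dynamicType));
            }
            return out;
        } else if (node instanceof List) {
            List<Object> out = new ArrayList<>();
            for (Object item : (List<Object>) node) {
                out.add(resolve(item, name, dynamicType));
            }
            return out;
        } else if (node instanceof String) {
            return substitute((String) node, name, dynamicType);
        }
        return node; // numbers, booleans and null pass through unchanged
    }

    private static String substitute(String s, String name, String dynamicType) {
        return s.replace("{name}", name).replace("{dynamic_type}", dynamicType).replace("{dynamicType}", dynamicType);
    }
}
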
@@ -91,7 +91,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
// can happen when an existing type on the same index has disabled indexing
// since we inherit the default field type from the first mapper that is
// created on an index
throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index");
throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types"
+ " of the same index");
}
fieldType.setIndexOptions(options);
}
@@ -227,7 +228,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
protected MultiFields multiFields;
protected CopyTo copyTo;

protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
protected FieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName);
assert indexSettings != null;
this.indexCreatedVersion = Version.indexCreated(indexSettings);
@@ -325,7 +327,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
if (mergeWith instanceof FieldMapper) {
mergedType = ((FieldMapper) mergeWith).contentType();
}
throw new IllegalArgumentException("mapper [" + fieldType().name() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
throw new IllegalArgumentException("mapper [" + fieldType().name() + "] of different type, current_type [" + contentType()
+ "], merged_type [" + mergedType + "]");
}
FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
multiFields = multiFields.merge(fieldMergeWith.multiFields);
@@ -414,12 +417,13 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
}
} else {
boolean hasDefaultIndexAnalyzer = fieldType().indexAnalyzer().name().equals("default");
boolean hasDifferentSearchAnalyzer = fieldType().searchAnalyzer().name().equals(fieldType().indexAnalyzer().name()) == false;
boolean hasDifferentSearchQuoteAnalyzer = fieldType().searchAnalyzer().name().equals(fieldType().searchQuoteAnalyzer().name()) == false;
final String searchAnalyzerName = fieldType().searchAnalyzer().name();
boolean hasDifferentSearchAnalyzer = searchAnalyzerName.equals(fieldType().indexAnalyzer().name()) == false;
boolean hasDifferentSearchQuoteAnalyzer = searchAnalyzerName.equals(fieldType().searchQuoteAnalyzer().name()) == false;
if (includeDefaults || hasDefaultIndexAnalyzer == false || hasDifferentSearchAnalyzer || hasDifferentSearchQuoteAnalyzer) {
builder.field("analyzer", fieldType().indexAnalyzer().name());
if (includeDefaults || hasDifferentSearchAnalyzer || hasDifferentSearchQuoteAnalyzer) {
builder.field("search_analyzer", fieldType().searchAnalyzer().name());
builder.field("search_analyzer", searchAnalyzerName);
if (includeDefaults || hasDifferentSearchQuoteAnalyzer) {
builder.field("search_quote_analyzer", fieldType().searchQuoteAnalyzer().name());
}
@@ -521,7 +525,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
}

public void parse(FieldMapper mainField, ParseContext context) throws IOException {
// TODO: multi fields are really just copy fields, we just need to expose "sub fields" or something that can be part of the mappings
// TODO: multi fields are really just copy fields, we just need to expose "sub fields" or something that can be part
// of the mappings
if (mappers.isEmpty()) {
return;
}

@@ -104,7 +104,8 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {

public static class TypeParser implements MetadataFieldMapper.TypeParser {
@Override
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException {
Builder builder = new Builder(parserContext.mapperService().fullName(NAME));

for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {

@@ -211,7 +211,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
builder.fieldType().setTreeLevels(Integer.parseInt(fieldNode.toString()));
iterator.remove();
} else if (Names.TREE_PRESISION.equals(fieldName)) {
builder.fieldType().setPrecisionInMeters(DistanceUnit.parse(fieldNode.toString(), DistanceUnit.DEFAULT, DistanceUnit.DEFAULT));
builder.fieldType().setPrecisionInMeters(DistanceUnit.parse(fieldNode.toString(),
DistanceUnit.DEFAULT, DistanceUnit.DEFAULT));
iterator.remove();
} else if (Names.DISTANCE_ERROR_PCT.equals(fieldName)) {
builder.fieldType().setDistanceErrorPct(Double.parseDouble(fieldNode.toString()));
@@ -229,7 +230,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
builder.coerce(XContentMapValues.nodeBooleanValue(fieldNode, name + "." + Names.COERCE));
iterator.remove();
} else if (GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName().equals(fieldName)) {
builder.ignoreZValue(XContentMapValues.nodeBooleanValue(fieldNode, name + "." + GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName()));
builder.ignoreZValue(XContentMapValues.nodeBooleanValue(fieldNode,
name + "." + GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName()));
iterator.remove();
} else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) {
pointsOnly = XContentMapValues.nodeBooleanValue(fieldNode, name + "." + Names.STRATEGY_POINTS_ONLY);
@@ -314,11 +316,14 @@ public class GeoShapeFieldMapper extends FieldMapper {
// must be by the time freeze is called.
SpatialPrefixTree prefixTree;
if ("geohash".equals(tree)) {
prefixTree = new GeohashPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.GEOHASH_LEVELS, true));
prefixTree = new GeohashPrefixTree(ShapeBuilder.SPATIAL_CONTEXT,
getLevels(treeLevels, precisionInMeters, Defaults.GEOHASH_LEVELS, true));
} else if ("legacyquadtree".equals(tree)) {
prefixTree = new QuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false));
prefixTree = new QuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT,
getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false));
} else if ("quadtree".equals(tree)) {
prefixTree = new PackedQuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT, getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false));
prefixTree = new PackedQuadPrefixTree(ShapeBuilder.SPATIAL_CONTEXT,
getLevels(treeLevels, precisionInMeters, Defaults.QUADTREE_LEVELS, false));
} else {
throw new IllegalArgumentException("Unknown prefix tree type [" + tree + "]");
}
@@ -503,8 +508,9 @@ public class GeoShapeFieldMapper extends FieldMapper {
}
return;
} else if (shape instanceof Point == false) {
throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " +
((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found");
throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a "
+ ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass())
+ " was found");
}
}
indexShape(context, shape);

@@ -84,7 +84,8 @@ public class IdFieldMapper extends MetadataFieldMapper {

public static class TypeParser implements MetadataFieldMapper.TypeParser {
@Override
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException {
throw new MapperParsingException(NAME + " is not configurable");
}

@@ -157,7 +158,8 @@ public class IdFieldMapper extends MetadataFieldMapper {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
final IndexFieldData<?> fieldData = fieldDataBuilder.build(indexSettings, fieldType, cache, breakerService, mapperService);
final IndexFieldData<?> fieldData = fieldDataBuilder.build(indexSettings, fieldType, cache,
breakerService, mapperService);
return new IndexFieldData<AtomicFieldData>() {

@Override
@@ -182,7 +184,8 @@ public class IdFieldMapper extends MetadataFieldMapper {

@Override
public SortField sortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) {
XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested);
XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue,
sortMode, nested);
return new SortField(getFieldName(), source, reverse);
}

@@ -79,7 +79,8 @@ public class IndexFieldMapper extends MetadataFieldMapper {

public static class TypeParser implements MetadataFieldMapper.TypeParser {
@Override
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException {
throw new MapperParsingException(NAME + " is not configurable");
}

@@ -131,7 +132,8 @@ public class IndexFieldMapper extends MetadataFieldMapper {
if (isSameIndex(value, context.getFullyQualifiedIndex().getName())) {
return Queries.newMatchAllQuery();
} else {
return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.index().getName() + " vs. " + value);
return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.index().getName()
+ " vs. " + value);
}
}

@@ -138,9 +138,11 @@ public abstract class MappedFieldType extends FieldType {
/** Checks this type is the same type as other. Adds a conflict if they are different. */
private void checkTypeName(MappedFieldType other) {
if (typeName().equals(other.typeName()) == false) {
throw new IllegalArgumentException("mapper [" + name + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]");
throw new IllegalArgumentException("mapper [" + name + "] cannot be changed from type [" + typeName()
+ "] to [" + other.typeName() + "]");
} else if (getClass() != other.getClass()) {
throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + other.getClass().getSimpleName());
throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and "
+ other.getClass().getSimpleName());
}
}

@@ -338,31 +340,38 @@ public abstract class MappedFieldType extends FieldType {
}

public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
throw new IllegalArgumentException("Can only use fuzzy queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
throw new IllegalArgumentException("Can only use fuzzy queries on keyword and text fields - not on [" + name
+ "] which is of type [" + typeName() + "]");
}

public Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) {
throw new QueryShardException(context, "Can only use prefix queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
throw new QueryShardException(context, "Can only use prefix queries on keyword and text fields - not on [" + name
+ "] which is of type [" + typeName() + "]");
}

public Query wildcardQuery(String value,
@Nullable MultiTermQuery.RewriteMethod method,
QueryShardContext context) {
throw new QueryShardException(context, "Can only use wildcard queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
throw new QueryShardException(context, "Can only use wildcard queries on keyword and text fields - not on [" + name
+ "] which is of type [" + typeName() + "]");
}

public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) {
throw new QueryShardException(context, "Can only use regexp queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]");
public Query regexpQuery(String value, int flags, int maxDeterminizedStates, @Nullable MultiTermQuery.RewriteMethod method,
QueryShardContext context) {
throw new QueryShardException(context, "Can only use regexp queries on keyword and text fields - not on [" + name
+ "] which is of type [" + typeName() + "]");
}

public abstract Query existsQuery(QueryShardContext context);

public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException {
throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]");
throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name
+ "] which is of type [" + typeName() + "]");
}

public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException {
throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]");
throw new IllegalArgumentException("Can only use phrase queries on text fields - not on [" + name
+ "] which is of type [" + typeName() + "]");
}

/**

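The MappedFieldType hunks above only re-wrap the error messages of the default query builders, which reject query types a field does not support; concrete field types override the ones they can handle. A generic, hedged sketch of that base-class-rejects / subclass-overrides pattern, using made-up class names rather than the Lucene and Elasticsearch types:

// Base type rejects everything; concrete field types override what they support.
abstract class FieldTypeSketch {
    private final String name;

    FieldTypeSketch(String name) {
        this.name = name;
    }

    String prefixQuery(String prefix) {
        throw new IllegalArgumentException(
            "Can only use prefix queries on keyword and text fields - not on [" + name + "]");
    }
}

final class KeywordFieldTypeSketch extends FieldTypeSketch {
    KeywordFieldTypeSketch(String name) {
        super(name);
    }

    @Override
    String prefixQuery(String prefix) {
        return "prefix(" + prefix + "*)"; // stand-in for building a real query object
    }
}
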
@@ -195,7 +195,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
* Update mapping by only merging the metadata that is different between received and stored entries
*/
public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException {
assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index() + " but was " + newIndexMetaData.getIndex();
assert newIndexMetaData.getIndex().equals(index()) : "index mismatch: expected " + index()
+ " but was " + newIndexMetaData.getIndex();
// go over and add the relevant mappings (or update them)
Set<String> existingMappers = new HashSet<>();
if (mapper != null) {
@@ -227,15 +228,16 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
} else if (logger.isTraceEnabled()) {
logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
} else {
logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index(), op, mappingType);
logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)",
index(), op, mappingType);
}

// refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
// mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
// merge version of it, which it does when refreshing the mappings), and warn log it.
if (documentMapper(mappingType).mappingSource().equals(incomingMappingSource) == false) {
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index(), mappingType,
incomingMappingSource, documentMapper(mappingType).mappingSource());
logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}",
index(), mappingType, incomingMappingSource, documentMapper(mappingType).mappingSource());

requireRefresh = true;
}
@@ -287,7 +289,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
Map<String, CompressedXContent> mappingSourcesCompressed = new LinkedHashMap<>(mappings.size());
for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
try {
mappingSourcesCompressed.put(entry.getKey(), new CompressedXContent(Strings.toString(XContentFactory.jsonBuilder().map(entry.getValue()))));
mappingSourcesCompressed.put(entry.getKey(), new CompressedXContent(Strings.toString(
XContentFactory.jsonBuilder().map(entry.getValue()))));
} catch (Exception e) {
throw new MapperParsingException("Failed to parse mapping [{}]: {}", e, entry.getKey(), e.getMessage());
}
@@ -304,7 +307,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
return internalMerge(Collections.singletonMap(type, mappingSource), reason).get(type);
}

private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean onlyUpdateIfNeeded) {
private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData,
MergeReason reason, boolean onlyUpdateIfNeeded) {
Map<String, CompressedXContent> map = new LinkedHashMap<>();
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
@@ -379,10 +383,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
throw new InvalidTypeNameException("mapping type name is empty");
}
if (type.length() > 255) {
throw new InvalidTypeNameException("mapping type name [" + type + "] is too long; limit is length 255 but was [" + type.length() + "]");
throw new InvalidTypeNameException("mapping type name [" + type + "] is too long; limit is length 255 but was ["
+ type.length() + "]");
}
if (type.charAt(0) == '_' && SINGLE_MAPPING_NAME.equals(type) == false) {
throw new InvalidTypeNameException("mapping type name [" + type + "] can't start with '_' unless it is called [" + SINGLE_MAPPING_NAME + "]");
throw new InvalidTypeNameException("mapping type name [" + type + "] can't start with '_' unless it is called ["
+ SINGLE_MAPPING_NAME + "]");
}
if (type.contains("#")) {
throw new InvalidTypeNameException("mapping type name [" + type + "] should not include '#' in it");
@@ -395,8 +401,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
}

private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
DocumentMapper mapper, MergeReason reason) {
private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper,
@Nullable String defaultMappingSource, DocumentMapper mapper,
MergeReason reason) {
boolean hasNested = this.hasNested;
Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
FieldTypeLookup fieldTypes = this.fieldTypes;
@@ -418,7 +425,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
{
if (mapper != null && this.mapper != null && Objects.equals(this.mapper.type(), mapper.type()) == false) {
throw new IllegalArgumentException(
"Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + Arrays.asList(this.mapper.type(), mapper.type()));
"Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: "
+ Arrays.asList(this.mapper.type(), mapper.type()));
}
}

@@ -475,7 +483,8 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
// deserializing cluster state that was sent by the master node,
// this check will be skipped.
// Also, don't take metadata mappers into account for the field limit check
checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length + fieldAliasMappers.size() );
checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length
+ fieldAliasMappers.size() );
}

results.put(newMapper.type(), newMapper);
@@ -562,14 +571,16 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
}
if (actualNestedFields > allowedNestedFields) {
throw new IllegalArgumentException("Limit of nested fields [" + allowedNestedFields + "] in index [" + index().getName() + "] has been exceeded");
throw new IllegalArgumentException("Limit of nested fields [" + allowedNestedFields + "] in index [" + index().getName()
+ "] has been exceeded");
}
}

private void checkTotalFieldsLimit(long totalMappers) {
long allowedTotalFields = indexSettings.getValue(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING);
if (allowedTotalFields < totalMappers) {
throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields + "] in index [" + index().getName() + "] has been exceeded");
throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields + "] in index [" + index().getName()
+ "] has been exceeded");
}
}

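Several of the MapperService hunks above re-wrap the messages of the nested-field and total-field limit checks, which reject a mapping update once the merged mapping would exceed a configured budget. A rough, self-contained sketch of such a guard (hypothetical names; in Elasticsearch the limit comes from index settings such as index.mapping.total_fields.limit):

// Illustrative guard: reject a mapping update once it would exceed a configured field budget.
final class FieldLimitCheck {
    private final long allowedTotalFields;

    FieldLimitCheck(long allowedTotalFields) {
        this.allowedTotalFields = allowedTotalFields;
    }

    void check(String indexName, long totalMappers) {
        if (allowedTotalFields < totalMappers) {
            throw new IllegalArgumentException("Limit of total fields [" + allowedTotalFields
                + "] in index [" + indexName + "] has been exceeded");
        }
    }

    public static void main(String[] args) {
        new FieldLimitCheck(1000).check("my-index", 999);  // passes
        new FieldLimitCheck(1000).check("my-index", 1001); // throws
    }
}
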
@ -48,7 +48,8 @@ public final class Mapping implements ToXContentFragment {
|
||||
final Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap;
|
||||
final Map<String, Object> meta;
|
||||
|
||||
public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) {
|
||||
public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper,
|
||||
MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) {
|
||||
this.indexCreated = indexCreated;
|
||||
this.metadataMappers = metadataMappers;
|
||||
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap = new HashMap<>();
|
||||
|
@ -33,7 +33,8 @@ public abstract class MetadataFieldMapper extends FieldMapper {
|
||||
public interface TypeParser extends Mapper.TypeParser {
|
||||
|
||||
@Override
|
||||
MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException;
|
||||
MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
|
||||
ParserContext parserContext) throws MapperParsingException;
|
||||
|
||||
/**
|
||||
* Get the default {@link MetadataFieldMapper} to use, if nothing had to be parsed.
|
||||
|
@ -173,7 +173,8 @@ public class ObjectMapper extends Mapper implements Cloneable {
|
||||
return builder;
|
||||
}
|
||||
|
||||
protected static boolean parseObjectOrDocumentTypeProperties(String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) {
|
||||
protected static boolean parseObjectOrDocumentTypeProperties(String fieldName, Object fieldNode, ParserContext parserContext,
|
||||
ObjectMapper.Builder builder) {
|
||||
if (fieldName.equals("dynamic")) {
|
||||
String value = fieldNode.toString();
|
||||
if (value.equalsIgnoreCase("strict")) {
|
||||
@ -215,7 +216,8 @@ public class ObjectMapper extends Mapper implements Cloneable {
|
||||
} else if (type.equals(NESTED_CONTENT_TYPE)) {
|
||||
nested = true;
|
||||
} else {
|
||||
throw new MapperParsingException("Trying to parse an object but has a different type [" + type + "] for [" + name + "]");
|
||||
throw new MapperParsingException("Trying to parse an object but has a different type [" + type
|
||||
+ "] for [" + name + "]");
|
||||
}
|
||||
}
|
||||
fieldNode = node.get("include_in_parent");
|
||||
@ -433,7 +435,8 @@ public class ObjectMapper extends Mapper implements Cloneable {
|
||||
@Override
|
||||
public ObjectMapper merge(Mapper mergeWith) {
|
||||
if (!(mergeWith instanceof ObjectMapper)) {
|
||||
throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
|
||||
throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name()
|
||||
+ "] with an object mapping [" + name() + "]");
|
||||
}
|
||||
ObjectMapper mergeWithObject = (ObjectMapper) mergeWith;
|
||||
ObjectMapper merged = clone();
|
||||
@ -522,7 +525,8 @@ public class ObjectMapper extends Mapper implements Cloneable {
|
||||
if (nested.isIncludeInRoot()) {
|
||||
builder.field("include_in_root", true);
|
||||
}
|
||||
} else if (mappers.isEmpty() && custom == null) { // only write the object content type if there are no properties, otherwise, it is automatically detected
|
||||
} else if (mappers.isEmpty() && custom == null) {
|
||||
// only write the object content type if there are no properties, otherwise, it is automatically detected
|
||||
builder.field("type", CONTENT_TYPE);
|
||||
}
|
||||
if (dynamic != null) {
|
||||
|
@ -55,7 +55,8 @@ public class RootObjectMapper extends ObjectMapper {
|
||||
public static class Builder extends ObjectMapper.Builder<Builder, RootObjectMapper> {
|
||||
|
||||
protected Explicit<DynamicTemplate[]> dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false);
|
||||
protected Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters = new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false);
|
||||
protected Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters =
|
||||
new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false);
|
||||
protected Explicit<Boolean> dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false);
|
||||
protected Explicit<Boolean> numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false);
|
||||
|
||||
|
@ -82,7 +82,8 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
|
||||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
|
||||
ParserContext parserContext) throws MapperParsingException {
|
||||
Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry = iterator.next();
|
||||
|
@ -107,7 +107,8 @@ public class SourceFieldMapper extends MetadataFieldMapper {
|
||||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
|
||||
ParserContext parserContext) throws MapperParsingException {
|
||||
Builder builder = new Builder();
|
||||
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
|
@ -79,7 +79,8 @@ public class TypeFieldMapper extends MetadataFieldMapper {
|
||||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
|
||||
ParserContext parserContext) throws MapperParsingException {
|
||||
throw new MapperParsingException(NAME + " is not configurable");
|
||||
}
|
||||
|
||||
@ -161,7 +162,8 @@ public class TypeFieldMapper extends MetadataFieldMapper {
|
||||
@Override
|
||||
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) {
|
||||
deprecationLogger.deprecatedAndMaybeLog("range_single_type",
|
||||
"Running [range] query on [_type] field for an index with a single type. As types are deprecated, this functionality will be removed in future releases.");
|
||||
"Running [range] query on [_type] field for an index with a single type."
|
||||
+ " As types are deprecated, this functionality will be removed in future releases.");
|
||||
Query result = new MatchAllDocsQuery();
|
||||
String type = context.getMapperService().documentMapper().type();
|
||||
if (type != null) {
|
||||
|
@ -58,7 +58,8 @@ public class VersionFieldMapper extends MetadataFieldMapper {
|
||||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?, ?> parse(String name, Map<String, Object> node,
|
||||
ParserContext parserContext) throws MapperParsingException {
|
||||
throw new MapperParsingException(NAME + " is not configurable");
|
||||
}
|
||||
|
||||
|
@@ -103,8 +103,8 @@ public class IndexingMemoryController extends AbstractComponent implements Index
        // null means we used the default (10%)
        if (indexingBufferSetting == null || indexingBufferSetting.endsWith("%")) {
            // We only apply the min/max when % value was used for the index buffer:
            ByteSizeValue minIndexingBuffer = MIN_INDEX_BUFFER_SIZE_SETTING.get(this.settings);
            ByteSizeValue maxIndexingBuffer = MAX_INDEX_BUFFER_SIZE_SETTING.get(this.settings);
            ByteSizeValue minIndexingBuffer = MIN_INDEX_BUFFER_SIZE_SETTING.get(settings);
            ByteSizeValue maxIndexingBuffer = MAX_INDEX_BUFFER_SIZE_SETTING.get(settings);
            if (indexingBuffer.getBytes() < minIndexingBuffer.getBytes()) {
                indexingBuffer = minIndexingBuffer;
            }
@@ -114,9 +114,9 @@ public class IndexingMemoryController extends AbstractComponent implements Index
        }
        this.indexingBuffer = indexingBuffer;

        this.inactiveTime = SHARD_INACTIVE_TIME_SETTING.get(this.settings);
        this.inactiveTime = SHARD_INACTIVE_TIME_SETTING.get(settings);
        // we need to have this relatively small to free up heap quickly enough
        this.interval = SHARD_MEMORY_INTERVAL_TIME_SETTING.get(this.settings);
        this.interval = SHARD_MEMORY_INTERVAL_TIME_SETTING.get(settings);

        this.statusChecker = new ShardsIndicesStatusChecker();

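The IndexingMemoryController hunk above only clamps the shared indexing buffer between the configured minimum and maximum when the buffer size was given as a percentage of the heap (or left at the 10% default). Below is a minimal, self-contained sketch of that clamping logic using plain long byte counts instead of Elasticsearch's ByteSizeValue; the class and method names are illustrative only, not part of the diff.

// Illustrative sketch only; mirrors the clamping shown in the hunk above.
final class IndexingBufferSketch {
    static long clampIndexingBuffer(String indexingBufferSetting, long indexingBuffer,
                                    long minIndexingBuffer, long maxIndexingBuffer) {
        // null means the default (10% of heap) was used; min/max only apply to %-based values
        if (indexingBufferSetting == null || indexingBufferSetting.endsWith("%")) {
            if (indexingBuffer < minIndexingBuffer) {
                indexingBuffer = minIndexingBuffer;
            }
            if (indexingBuffer > maxIndexingBuffer) {
                indexingBuffer = maxIndexingBuffer;
            }
        }
        return indexingBuffer;
    }
}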
@@ -173,6 +173,10 @@ public class IndicesService extends AbstractLifecycleComponent
        }
    }

    /**
     * The node's settings.
     */
    private final Settings settings;
    private final PluginsService pluginsService;
    private final NodeEnvironment nodeEnv;
    private final NamedXContentRegistry xContentRegistry;
@@ -215,6 +219,7 @@ public class IndicesService extends AbstractLifecycleComponent
                          Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders,
                          Map<String, Function<IndexSettings, IndexStore>> indexStoreFactories) {
        super(settings);
        this.settings = settings;
        this.threadPool = threadPool;
        this.pluginsService = pluginsService;
        this.nodeEnv = nodeEnv;
@@ -483,7 +488,7 @@ public class IndicesService extends AbstractLifecycleComponent
                                  IndicesFieldDataCache indicesFieldDataCache,
                                  List<IndexEventListener> builtInListeners,
                                  IndexingOperationListener... indexingOperationListeners) throws IOException {
        final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopedSettings);
        final IndexSettings idxSettings = new IndexSettings(indexMetaData, settings, indexScopedSettings);
        // we ignore private settings since they are not registered settings
        indexScopedSettings.validate(indexMetaData.getSettings(), true, true, true);
        logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]",
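Many hunks in this diff apply the same refactoring: a component gains its own Settings field and initializes it from the constructor argument that is already forwarded to super(settings). Here is a minimal sketch of that pattern with hypothetical stand-in classes; only the shape of the change comes from the diff, and BaseComponent and SettingsLike are not Elasticsearch types.

// Illustrative sketch of the recurring pattern in this diff: a component captures
// the Settings it receives instead of relying on a field inherited from its base class.
class SettingsLike { /* placeholder for org.elasticsearch.common.settings.Settings */ }

class BaseComponent {
    BaseComponent(SettingsLike settings) { /* base-class bookkeeping */ }
}

class SomeService extends BaseComponent {
    private final SettingsLike settings;   // the field added by this kind of change

    SomeService(SettingsLike settings) {
        super(settings);                   // still forwarded to the base class
        this.settings = settings;          // kept locally for later use
    }
}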
@@ -107,6 +107,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
    private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {
    };

    private final Settings settings;
    // a list of shards that failed during recovery
    // we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
    final ConcurrentMap<ShardId, ShardRouting> failedShardsCache = ConcurrentCollections.newConcurrentMap();
@@ -156,6 +157,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
                                      PrimaryReplicaSyncer primaryReplicaSyncer,
                                      Consumer<ShardId> globalCheckpointSyncer) {
        super(settings);
        this.settings = settings;
        this.buildInIndexListener =
                Arrays.asList(
                        peerRecoverySourceService,
@@ -172,7 +174,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
        this.repositoriesService = repositoriesService;
        this.primaryReplicaSyncer = primaryReplicaSyncer;
        this.globalCheckpointSyncer = globalCheckpointSyncer;
        this.sendRefreshMapping = this.settings.getAsBoolean("indices.cluster.send_refresh_mapping", true);
        this.sendRefreshMapping = settings.getAsBoolean("indices.cluster.send_refresh_mapping", true);
    }

    @Override
@@ -78,6 +78,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
        Property.NodeScope);
    public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists";
    private static final EnumSet<IndexShardState> ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED);
    private final Settings settings;
    private final IndicesService indicesService;
    private final ClusterService clusterService;
    private final TransportService transportService;
@@ -92,6 +93,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
    public IndicesStore(Settings settings, IndicesService indicesService,
                        ClusterService clusterService, TransportService transportService, ThreadPool threadPool) {
        super(settings);
        this.settings = settings;
        this.indicesService = indicesService;
        this.clusterService = clusterService;
        this.transportService = transportService;
@@ -66,6 +66,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T

    public static final String ACTION_NAME = "internal:cluster/nodes/indices/shard/store";

    private final Settings settings;
    private final IndicesService indicesService;
    private final NodeEnvironment nodeEnv;
    private final NamedXContentRegistry namedXContentRegistry;
@@ -77,6 +78,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
                                                ActionFilters actionFilters, NamedXContentRegistry namedXContentRegistry) {
        super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
            Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STORE, NodeStoreFilesMetaData.class);
        this.settings = settings;
        this.indicesService = indicesService;
        this.nodeEnv = nodeEnv;
        this.namedXContentRegistry = namedXContentRegistry;
@@ -46,7 +46,7 @@ import java.io.Closeable;
import java.io.IOException;

public class NodeService extends AbstractComponent implements Closeable {

    private final Settings settings;
    private final ThreadPool threadPool;
    private final MonitorService monitorService;
    private final TransportService transportService;
@@ -69,6 +69,7 @@ public class NodeService extends AbstractComponent implements Closeable {
                SettingsFilter settingsFilter, ResponseCollectorService responseCollectorService,
                SearchTransportService searchTransportService) {
        super(settings);
        this.settings = settings;
        this.threadPool = threadPool;
        this.monitorService = monitorService;
        this.transportService = transportService;
@@ -72,6 +72,7 @@ import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory;

public class PluginsService extends AbstractComponent {

    private final Settings settings;
    private final Path configPath;

    /**
@@ -79,6 +80,7 @@ public class PluginsService extends AbstractComponent {
     */
    private final List<Tuple<PluginInfo, Plugin>> plugins;
    private final PluginsAndModules info;

    public static final Setting<List<String>> MANDATORY_SETTING =
        Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), Property.NodeScope);

@@ -99,7 +101,7 @@ public class PluginsService extends AbstractComponent {
     */
    public PluginsService(Settings settings, Path configPath, Path modulesDirectory, Path pluginsDirectory, Collection<Class<? extends Plugin>> classpathPlugins) {
        super(settings);

        this.settings = settings;
        this.configPath = configPath;

        List<Tuple<PluginInfo, Plugin>> pluginsLoaded = new ArrayList<>();
@@ -204,6 +204,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp

    private static final String DATA_BLOB_PREFIX = "__";

    private final Settings settings;

    private final RateLimiter snapshotRateLimiter;

    private final RateLimiter restoreRateLimiter;
@@ -234,10 +236,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     * Constructs new BlobStoreRepository
     *
     * @param metadata The metadata for this repository including name and settings
     * @param globalSettings Settings for the node this repository object is created on
     * @param settings Settings for the node this repository object is created on
     */
    protected BlobStoreRepository(RepositoryMetaData metadata, Settings globalSettings, NamedXContentRegistry namedXContentRegistry) {
        super(globalSettings);
    protected BlobStoreRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry) {
        super(settings);
        this.settings = settings;
        this.metadata = metadata;
        this.namedXContentRegistry = namedXContentRegistry;
        snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB));
@@ -99,10 +99,10 @@ public class FsRepository extends BlobStoreRepository {
        if (CHUNK_SIZE_SETTING.exists(metadata.settings())) {
            this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
        } else {
            this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings);
            this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings());
        }
        this.compress = COMPRESS_SETTING.exists(metadata.settings())
            ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings);
            ? COMPRESS_SETTING.get(metadata.settings()) : REPOSITORIES_COMPRESS_SETTING.get(environment.settings());
        this.basePath = BlobPath.cleanPath();
    }

@@ -110,7 +110,7 @@ public class FsRepository extends BlobStoreRepository {
    protected BlobStore createBlobStore() throws Exception {
        final String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
        final Path locationFile = environment.resolveRepoFile(location);
        return new FsBlobStore(settings, locationFile);
        return new FsBlobStore(environment.settings(), locationFile);
    }

    @Override
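The FsRepository hunks prefer a value from the repository's own metadata settings and fall back to a node-level default, now read through environment.settings() rather than the inherited settings field. A small sketch of that lookup-with-fallback idea follows, using plain maps; the keys and the default value are illustrative assumptions, and the real code goes through Elasticsearch's Setting infrastructure.

import java.util.Map;

// Illustrative sketch only: per-repository value with a node-level fallback,
// mirroring CHUNK_SIZE_SETTING vs. REPOSITORIES_CHUNK_SIZE_SETTING above.
final class RepositorySettingSketch {
    static String chunkSize(Map<String, String> repositorySettings, Map<String, String> nodeSettings) {
        // Prefer the value defined on the repository itself...
        if (repositorySettings.containsKey("chunk_size")) {
            return repositorySettings.get("chunk_size");
        }
        // ...otherwise fall back to the node-wide default (key and default are hypothetical).
        return nodeSettings.getOrDefault("repositories.fs.chunk_size", "unbounded");
    }
}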
@@ -43,12 +43,14 @@ import java.util.Set;

public class RestClusterGetSettingsAction extends BaseRestHandler {

    private final Settings settings;
    private final ClusterSettings clusterSettings;
    private final SettingsFilter settingsFilter;

    public RestClusterGetSettingsAction(Settings settings, RestController controller, ClusterSettings clusterSettings,
                                        SettingsFilter settingsFilter) {
        super(settings);
        this.settings = settings;
        this.clusterSettings = clusterSettings;
        controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this);
        this.settingsFilter = settingsFilter;
@@ -109,6 +109,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
    public static final Setting<List<String>> CONTEXTS_ALLOWED_SETTING =
        Setting.listSetting("script.allowed_contexts", Collections.emptyList(), Function.identity(), Setting.Property.NodeScope);

    private final Settings settings;
    private final Set<String> typesAllowed;
    private final Set<String> contextsAllowed;

@@ -128,8 +129,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust

    public ScriptService(Settings settings, Map<String, ScriptEngine> engines, Map<String, ScriptContext<?>> contexts) {
        super(settings);

        Objects.requireNonNull(settings);
        this.settings = Objects.requireNonNull(settings);
        this.engines = Objects.requireNonNull(engines);
        this.contexts = Objects.requireNonNull(contexts);

@@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
@@ -182,13 +183,14 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
                         ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase,
                         ResponseCollectorService responseCollectorService) {
        super(clusterService.getSettings());
        Settings settings = clusterService.getSettings();
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.indicesService = indicesService;
        this.scriptService = scriptService;
        this.responseCollectorService = responseCollectorService;
        this.bigArrays = bigArrays;
        this.queryPhase = new QueryPhase(settings);
        this.queryPhase = new QueryPhase(clusterService.getSettings());
        this.fetchPhase = fetchPhase;
        this.multiBucketConsumerService = new MultiBucketConsumerService(clusterService, settings);

@@ -18,8 +18,8 @@
 */
package org.elasticsearch.transport;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -67,16 +67,15 @@ public class ConnectionManager implements Closeable {
    private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener();

    public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool) {
        this(settings, transport, threadPool, ConnectionProfile.buildDefaultConnectionProfile(settings));
        this(settings, transport, threadPool, TcpTransport.PING_SCHEDULE.get(settings));
    }

    public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, ConnectionProfile defaultProfile) {
    public ConnectionManager(Settings settings, Transport transport, ThreadPool threadPool, TimeValue pingSchedule) {
        this.transport = transport;
        this.threadPool = threadPool;
        this.pingSchedule = TcpTransport.PING_SCHEDULE.get(settings);
        this.defaultProfile = defaultProfile;
        this.pingSchedule = pingSchedule;
        this.defaultProfile = ConnectionProfile.buildDefaultConnectionProfile(settings);
        this.lifecycle.moveToStarted();

        if (pingSchedule.millis() > 0) {
            threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, new ScheduledPing());
        }
@@ -252,6 +251,10 @@ public class ConnectionManager implements Closeable {
        }
    }

    TimeValue getPingSchedule() {
        return pingSchedule;
    }

    private class ScheduledPing extends AbstractLifecycleRunnable {

        private ScheduledPing() {
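In the ConnectionManager hunks, the convenience constructor now resolves the keep-alive interval from TcpTransport.PING_SCHEDULE and the main constructor takes an explicit TimeValue pingSchedule, scheduling the background ping only when the interval is positive. Below is a rough sketch of that "schedule only if configured" idea using JDK types; the class and its names are illustrative assumptions, not the Elasticsearch API.

import java.time.Duration;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative sketch: only start the periodic keep-alive ping when a positive
// interval was configured, mirroring the pingSchedule handling above.
final class PingSchedulerSketch {
    private final Duration pingSchedule;
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    PingSchedulerSketch(Duration pingSchedule) {
        this.pingSchedule = pingSchedule;
        if (pingSchedule.toMillis() > 0) {
            // schedule the first ping; a real implementation would reschedule itself after each run
            scheduler.schedule(this::sendPing, pingSchedule.toMillis(), TimeUnit.MILLISECONDS);
        }
    }

    private void sendPing() {
        // send a lightweight keep-alive message to connected nodes
    }
}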
Some files were not shown because too many files have changed in this diff