Merge commit '3522b9084b611c89ec4f06c1863542883840ed0e' into zen2
commit 0b4a6ae97c
@@ -701,22 +701,4 @@
|
|||
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]s3[/\\]MockDefaultS3OutputStream.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]s3[/\\]TestAmazonS3.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]store-smb[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]SmbDirectoryWrapper.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]MockInternalClusterInfoService.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]TestShardRouting.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]ModuleTestCase.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]script[/\\]NativeSignificanceScoreScriptWithParams.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]BackgroundIndexer.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CorruptionUtils.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESIntegTestCase.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]IndexSettingsModule.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalTestCluster.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]MockIndexEventListener.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]discovery[/\\]ClusterDiscoveryConfiguration.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]disruption[/\\]IntermittentLongGCDisruption.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]disruption[/\\]SlowClusterStateProcessing.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]AssertingSearcher.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]MockEngineSupport.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
|
||||
</suppressions>
|
||||
|
|
|
@@ -28,6 +28,7 @@ import org.apache.http.entity.ByteArrayEntity;
|
|||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.client.RequestConverters.EndpointBuilder;
|
||||
import org.elasticsearch.client.ml.CloseJobRequest;
|
||||
import org.elasticsearch.client.ml.DeleteCalendarRequest;
|
||||
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.DeleteForecastRequest;
|
||||
import org.elasticsearch.client.ml.DeleteJobRequest;
|
||||
|
@@ -372,4 +373,15 @@ final class MLRequestConverters {
|
|||
request.setEntity(createEntity(getCalendarsRequest, REQUEST_BODY_CONTENT_TYPE));
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request deleteCalendar(DeleteCalendarRequest deleteCalendarRequest) {
|
||||
String endpoint = new EndpointBuilder()
|
||||
.addPathPartAsIs("_xpack")
|
||||
.addPathPartAsIs("ml")
|
||||
.addPathPartAsIs("calendars")
|
||||
.addPathPart(deleteCalendarRequest.getCalendarId())
|
||||
.build();
|
||||
Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
|
||||
return request;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.ml.CloseJobRequest;
|
||||
import org.elasticsearch.client.ml.CloseJobResponse;
|
||||
import org.elasticsearch.client.ml.DeleteCalendarRequest;
|
||||
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.DeleteForecastRequest;
|
||||
import org.elasticsearch.client.ml.DeleteJobRequest;
|
||||
|
@@ -910,4 +911,44 @@ public final class MachineLearningClient {
|
|||
listener,
|
||||
Collections.emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the given Machine Learning Calendar
|
||||
* <p>
|
||||
* For additional info see
|
||||
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html">
|
||||
* ML Delete calendar documentation</a>
|
||||
*
|
||||
* @param request The request to delete the calendar
|
||||
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @return action acknowledgement
|
||||
* @throws IOException when there is a serialization issue sending the request or receiving the response
|
||||
*/
|
||||
public AcknowledgedResponse deleteCalendar(DeleteCalendarRequest request, RequestOptions options) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(request,
|
||||
MLRequestConverters::deleteCalendar,
|
||||
options,
|
||||
AcknowledgedResponse::fromXContent,
|
||||
Collections.emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes the given Machine Learning Calendar asynchronously and notifies the listener on completion
|
||||
* <p>
|
||||
* For additional info see
|
||||
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-calendar.html">
|
||||
* ML Delete calendar documentation</a>
|
||||
*
|
||||
* @param request The request to delete the calendar
|
||||
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener Listener to be notified upon request completion
|
||||
*/
|
||||
public void deleteCalendarAsync(DeleteCalendarRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(request,
|
||||
MLRequestConverters::deleteCalendar,
|
||||
options,
|
||||
AcknowledgedResponse::fromXContent,
|
||||
listener,
|
||||
Collections.emptySet());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,65 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.ml;
|
||||
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Request to delete a Machine Learning Calendar
|
||||
*/
|
||||
public class DeleteCalendarRequest extends ActionRequest {
|
||||
|
||||
private final String calendarId;
|
||||
|
||||
/**
|
||||
* The constructor requires a single calendar id.
|
||||
* @param calendarId The calendar to delete. Must be {@code non-null}
|
||||
*/
|
||||
public DeleteCalendarRequest(String calendarId) {
|
||||
this.calendarId = Objects.requireNonNull(calendarId, "[calendar_id] must not be null");
|
||||
}
|
||||
|
||||
public String getCalendarId() {
|
||||
return calendarId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(calendarId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || getClass() != obj.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
DeleteCalendarRequest other = (DeleteCalendarRequest) obj;
|
||||
return Objects.equals(calendarId, other.calendarId);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,341 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
|
||||
public class ActionStatus {
|
||||
|
||||
private final AckStatus ackStatus;
|
||||
@Nullable private final Execution lastExecution;
|
||||
@Nullable private final Execution lastSuccessfulExecution;
|
||||
@Nullable private final Throttle lastThrottle;
|
||||
|
||||
public ActionStatus(AckStatus ackStatus,
|
||||
@Nullable Execution lastExecution,
|
||||
@Nullable Execution lastSuccessfulExecution,
|
||||
@Nullable Throttle lastThrottle) {
|
||||
this.ackStatus = ackStatus;
|
||||
this.lastExecution = lastExecution;
|
||||
this.lastSuccessfulExecution = lastSuccessfulExecution;
|
||||
this.lastThrottle = lastThrottle;
|
||||
}
|
||||
|
||||
public AckStatus ackStatus() {
|
||||
return ackStatus;
|
||||
}
|
||||
|
||||
public Execution lastExecution() {
|
||||
return lastExecution;
|
||||
}
|
||||
|
||||
public Execution lastSuccessfulExecution() {
|
||||
return lastSuccessfulExecution;
|
||||
}
|
||||
|
||||
public Throttle lastThrottle() {
|
||||
return lastThrottle;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
ActionStatus that = (ActionStatus) o;
|
||||
|
||||
return Objects.equals(ackStatus, that.ackStatus) &&
|
||||
Objects.equals(lastExecution, that.lastExecution) &&
|
||||
Objects.equals(lastSuccessfulExecution, that.lastSuccessfulExecution) &&
|
||||
Objects.equals(lastThrottle, that.lastThrottle);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(ackStatus, lastExecution, lastSuccessfulExecution, lastThrottle);
|
||||
}
|
||||
|
||||
public static ActionStatus parse(String actionId, XContentParser parser) throws IOException {
|
||||
AckStatus ackStatus = null;
|
||||
Execution lastExecution = null;
|
||||
Execution lastSuccessfulExecution = null;
|
||||
Throttle lastThrottle = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.ACK_STATUS.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
ackStatus = AckStatus.parse(actionId, parser);
|
||||
} else if (Field.LAST_EXECUTION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
lastExecution = Execution.parse(actionId, parser);
|
||||
} else if (Field.LAST_SUCCESSFUL_EXECUTION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
lastSuccessfulExecution = Execution.parse(actionId, parser);
|
||||
} else if (Field.LAST_THROTTLE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
lastThrottle = Throttle.parse(actionId, parser);
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (ackStatus == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}]",
|
||||
actionId, Field.ACK_STATUS.getPreferredName());
|
||||
}
|
||||
return new ActionStatus(ackStatus, lastExecution, lastSuccessfulExecution, lastThrottle);
|
||||
}
|
||||
|
||||
public static class AckStatus {
|
||||
|
||||
public enum State {
|
||||
AWAITS_SUCCESSFUL_EXECUTION,
|
||||
ACKABLE,
|
||||
ACKED;
|
||||
}
|
||||
|
||||
private final DateTime timestamp;
|
||||
private final State state;
|
||||
|
||||
public AckStatus(DateTime timestamp, State state) {
|
||||
this.timestamp = timestamp.toDateTime(DateTimeZone.UTC);
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
public DateTime timestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public State state() {
|
||||
return state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
AckStatus ackStatus = (AckStatus) o;
|
||||
|
||||
return Objects.equals(timestamp, ackStatus.timestamp) && Objects.equals(state, ackStatus.state);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, state);
|
||||
}
|
||||
|
||||
public static AckStatus parse(String actionId, XContentParser parser) throws IOException {
|
||||
DateTime timestamp = null;
|
||||
State state = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = WatchStatusDateParser.parseDate(parser.text());
|
||||
} else if (Field.ACK_STATUS_STATE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
state = State.valueOf(parser.text().toUpperCase(Locale.ROOT));
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (timestamp == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.ACK_STATUS.getPreferredName(), Field.TIMESTAMP.getPreferredName());
|
||||
}
|
||||
if (state == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.ACK_STATUS.getPreferredName(), Field.ACK_STATUS_STATE.getPreferredName());
|
||||
}
|
||||
return new AckStatus(timestamp, state);
|
||||
}
|
||||
}
|
||||
|
||||
public static class Execution {
|
||||
|
||||
public static Execution successful(DateTime timestamp) {
|
||||
return new Execution(timestamp, true, null);
|
||||
}
|
||||
|
||||
public static Execution failure(DateTime timestamp, String reason) {
|
||||
return new Execution(timestamp, false, reason);
|
||||
}
|
||||
|
||||
private final DateTime timestamp;
|
||||
private final boolean successful;
|
||||
private final String reason;
|
||||
|
||||
private Execution(DateTime timestamp, boolean successful, String reason) {
|
||||
this.timestamp = timestamp.toDateTime(DateTimeZone.UTC);
|
||||
this.successful = successful;
|
||||
this.reason = reason;
|
||||
}
|
||||
|
||||
public DateTime timestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public boolean successful() {
|
||||
return successful;
|
||||
}
|
||||
|
||||
public String reason() {
|
||||
return reason;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
Execution execution = (Execution) o;
|
||||
|
||||
return Objects.equals(successful, execution.successful) &&
|
||||
Objects.equals(timestamp, execution.timestamp) &&
|
||||
Objects.equals(reason, execution.reason);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, successful, reason);
|
||||
}
|
||||
|
||||
public static Execution parse(String actionId, XContentParser parser) throws IOException {
|
||||
DateTime timestamp = null;
|
||||
Boolean successful = null;
|
||||
String reason = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = WatchStatusDateParser.parseDate(parser.text());
|
||||
} else if (Field.EXECUTION_SUCCESSFUL.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
successful = parser.booleanValue();
|
||||
} else if (Field.REASON.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
reason = parser.text();
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (timestamp == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_EXECUTION.getPreferredName(), Field.TIMESTAMP.getPreferredName());
|
||||
}
|
||||
if (successful == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_EXECUTION.getPreferredName(), Field.EXECUTION_SUCCESSFUL.getPreferredName());
|
||||
}
|
||||
if (successful) {
|
||||
return successful(timestamp);
|
||||
}
|
||||
if (reason == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field for unsuccessful" +
|
||||
" execution [{}.{}]", actionId, Field.LAST_EXECUTION.getPreferredName(), Field.REASON.getPreferredName());
|
||||
}
|
||||
return failure(timestamp, reason);
|
||||
}
|
||||
}
|
||||
|
||||
public static class Throttle {
|
||||
|
||||
private final DateTime timestamp;
|
||||
private final String reason;
|
||||
|
||||
public Throttle(DateTime timestamp, String reason) {
|
||||
this.timestamp = timestamp.toDateTime(DateTimeZone.UTC);
|
||||
this.reason = reason;
|
||||
}
|
||||
|
||||
public DateTime timestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public String reason() {
|
||||
return reason;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
Throttle throttle = (Throttle) o;
|
||||
return Objects.equals(timestamp, throttle.timestamp) && Objects.equals(reason, throttle.reason);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, reason);
|
||||
}
|
||||
|
||||
public static Throttle parse(String actionId, XContentParser parser) throws IOException {
|
||||
DateTime timestamp = null;
|
||||
String reason = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = WatchStatusDateParser.parseDate(parser.text());
|
||||
} else if (Field.REASON.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
reason = parser.text();
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (timestamp == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_THROTTLE.getPreferredName(), Field.TIMESTAMP.getPreferredName());
|
||||
}
|
||||
if (reason == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_THROTTLE.getPreferredName(), Field.REASON.getPreferredName());
|
||||
}
|
||||
return new Throttle(timestamp, reason);
|
||||
}
|
||||
}
|
||||
|
||||
private interface Field {
|
||||
ParseField ACK_STATUS = new ParseField("ack");
|
||||
ParseField ACK_STATUS_STATE = new ParseField("state");
|
||||
ParseField LAST_EXECUTION = new ParseField("last_execution");
|
||||
ParseField LAST_SUCCESSFUL_EXECUTION = new ParseField("last_successful_execution");
|
||||
ParseField EXECUTION_SUCCESSFUL = new ParseField("successful");
|
||||
ParseField LAST_THROTTLE = new ParseField("last_throttle");
|
||||
ParseField TIMESTAMP = new ParseField("timestamp");
|
||||
ParseField REASON = new ParseField("reason");
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
public enum ExecutionState {
|
||||
|
||||
// the condition of the watch was not met
|
||||
EXECUTION_NOT_NEEDED,
|
||||
|
||||
// Execution has been throttled due to time-based throttling - this might only affect a single action though
|
||||
THROTTLED,
|
||||
|
||||
// Execution has been throttled due to ack-based throttling/muting of an action - this might only affect a single action though
|
||||
ACKNOWLEDGED,
|
||||
|
||||
// regular execution
|
||||
EXECUTED,
|
||||
|
||||
// an error in the condition or the execution of the input
|
||||
FAILED,
|
||||
|
||||
// a rejection due to a filled up threadpool
|
||||
THREADPOOL_REJECTION,
|
||||
|
||||
// the execution was scheduled, but in between the watch was deleted
|
||||
NOT_EXECUTED_WATCH_MISSING,
|
||||
|
||||
// even though the execution was scheduled, it was not executed, because the watch was already queued in the thread pool
|
||||
NOT_EXECUTED_ALREADY_QUEUED,
|
||||
|
||||
// this can happen when a watch was executed, but not completely finished (the triggered watch entry was not deleted), and then
|
||||
// watcher is restarted (manually or due to host switch) - the triggered watch will be executed but the history entry already
|
||||
// exists
|
||||
EXECUTED_MULTIPLE_TIMES;
|
||||
|
||||
public String id() {
|
||||
return name().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
|
||||
public static ExecutionState resolve(String id) {
|
||||
return valueOf(id.toUpperCase(Locale.ROOT));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return id();
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,233 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
import static org.elasticsearch.client.watcher.WatchStatusDateParser.parseDate;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
import static org.joda.time.DateTimeZone.UTC;
|
||||
|
||||
public class WatchStatus {
|
||||
|
||||
private final State state;
|
||||
|
||||
private final ExecutionState executionState;
|
||||
private final DateTime lastChecked;
|
||||
private final DateTime lastMetCondition;
|
||||
private final long version;
|
||||
private final Map<String, ActionStatus> actions;
|
||||
|
||||
public WatchStatus(long version,
|
||||
State state,
|
||||
ExecutionState executionState,
|
||||
DateTime lastChecked,
|
||||
DateTime lastMetCondition,
|
||||
Map<String, ActionStatus> actions) {
|
||||
this.version = version;
|
||||
this.lastChecked = lastChecked;
|
||||
this.lastMetCondition = lastMetCondition;
|
||||
this.actions = actions;
|
||||
this.state = state;
|
||||
this.executionState = executionState;
|
||||
}
|
||||
|
||||
public State state() {
|
||||
return state;
|
||||
}
|
||||
|
||||
public boolean checked() {
|
||||
return lastChecked != null;
|
||||
}
|
||||
|
||||
public DateTime lastChecked() {
|
||||
return lastChecked;
|
||||
}
|
||||
|
||||
public DateTime lastMetCondition() {
|
||||
return lastMetCondition;
|
||||
}
|
||||
|
||||
public ActionStatus actionStatus(String actionId) {
|
||||
return actions.get(actionId);
|
||||
}
|
||||
|
||||
public long version() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public ExecutionState getExecutionState() {
|
||||
return executionState;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
WatchStatus that = (WatchStatus) o;
|
||||
|
||||
return Objects.equals(lastChecked, that.lastChecked) &&
|
||||
Objects.equals(lastMetCondition, that.lastMetCondition) &&
|
||||
Objects.equals(version, that.version) &&
|
||||
Objects.equals(executionState, that.executionState) &&
|
||||
Objects.equals(actions, that.actions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(lastChecked, lastMetCondition, actions, version, executionState);
|
||||
}
|
||||
|
||||
public static WatchStatus parse(XContentParser parser) throws IOException {
|
||||
State state = null;
|
||||
ExecutionState executionState = null;
|
||||
DateTime lastChecked = null;
|
||||
DateTime lastMetCondition = null;
|
||||
Map<String, ActionStatus> actions = null;
|
||||
long version = -1;
|
||||
|
||||
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.STATE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
try {
|
||||
state = State.parse(parser);
|
||||
} catch (ElasticsearchParseException e) {
|
||||
throw new ElasticsearchParseException("could not parse watch status. failed to parse field [{}]",
|
||||
e, currentFieldName);
|
||||
}
|
||||
} else if (Field.VERSION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
version = parser.longValue();
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a long " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.LAST_CHECKED.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
lastChecked = parseDate(currentFieldName, parser);
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a date " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.LAST_MET_CONDITION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
lastMetCondition = parseDate(currentFieldName, parser);
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a date " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.EXECUTION_STATE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
executionState = ExecutionState.resolve(parser.text());
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a string " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.ACTIONS.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
actions = new HashMap<>();
|
||||
if (token == XContentParser.Token.START_OBJECT) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else {
|
||||
ActionStatus actionStatus = ActionStatus.parse(currentFieldName, parser);
|
||||
actions.put(currentFieldName, actionStatus);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to be an object, " +
|
||||
"found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
|
||||
actions = actions == null ? emptyMap() : unmodifiableMap(actions);
|
||||
return new WatchStatus(version, state, executionState, lastChecked, lastMetCondition, actions);
|
||||
}
|
||||
|
||||
public static class State {
|
||||
|
||||
private final boolean active;
|
||||
private final DateTime timestamp;
|
||||
|
||||
public State(boolean active, DateTime timestamp) {
|
||||
this.active = active;
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
public boolean isActive() {
|
||||
return active;
|
||||
}
|
||||
|
||||
public DateTime getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public static State parse(XContentParser parser) throws IOException {
|
||||
if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
|
||||
throw new ElasticsearchParseException("expected an object but found [{}] instead", parser.currentToken());
|
||||
}
|
||||
boolean active = true;
|
||||
DateTime timestamp = DateTime.now(UTC);
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.ACTIVE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
active = parser.booleanValue();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = parseDate(currentFieldName, parser);
|
||||
}
|
||||
}
|
||||
return new State(active, timestamp);
|
||||
}
|
||||
}
|
||||
|
||||
public interface Field {
|
||||
ParseField STATE = new ParseField("state");
|
||||
ParseField ACTIVE = new ParseField("active");
|
||||
ParseField TIMESTAMP = new ParseField("timestamp");
|
||||
ParseField LAST_CHECKED = new ParseField("last_checked");
|
||||
ParseField LAST_MET_CONDITION = new ParseField("last_met_condition");
|
||||
ParseField ACTIONS = new ParseField("actions");
|
||||
ParseField VERSION = new ParseField("version");
|
||||
ParseField EXECUTION_STATE = new ParseField("execution_state");
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.mapper.DateFieldMapper;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public final class WatchStatusDateParser {
|
||||
|
||||
private static final FormatDateTimeFormatter FORMATTER = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER;
|
||||
|
||||
private WatchStatusDateParser() {
|
||||
// Prevent instantiation.
|
||||
}
|
||||
|
||||
public static DateTime parseDate(String fieldName, XContentParser parser) throws IOException {
|
||||
XContentParser.Token token = parser.currentToken();
|
||||
if (token == XContentParser.Token.VALUE_NUMBER) {
|
||||
return new DateTime(parser.longValue(), DateTimeZone.UTC);
|
||||
}
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
DateTime dateTime = parseDate(parser.text());
|
||||
return dateTime.toDateTime(DateTimeZone.UTC);
|
||||
}
|
||||
if (token == XContentParser.Token.VALUE_NULL) {
|
||||
return null;
|
||||
}
|
||||
throw new ElasticsearchParseException("could not parse date/time. expected date field [{}] " +
|
||||
"to be either a number or a string but found [{}] instead", fieldName, token);
|
||||
}
|
||||
|
||||
public static DateTime parseDate(String text) {
|
||||
return FORMATTER.parser().parseDateTime(text);
|
||||
}
|
||||
}
|
|
@@ -24,6 +24,7 @@ import org.apache.http.client.methods.HttpGet;
|
|||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.elasticsearch.client.ml.CloseJobRequest;
|
||||
import org.elasticsearch.client.ml.DeleteCalendarRequest;
|
||||
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.DeleteForecastRequest;
|
||||
import org.elasticsearch.client.ml.DeleteJobRequest;
|
||||
|
@@ -438,6 +439,13 @@ public class MLRequestConvertersTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testDeleteCalendar() {
|
||||
DeleteCalendarRequest deleteCalendarRequest = new DeleteCalendarRequest(randomAlphaOfLength(10));
|
||||
Request request = MLRequestConverters.deleteCalendar(deleteCalendarRequest);
|
||||
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_xpack/ml/calendars/" + deleteCalendarRequest.getCalendarId(), request.getEndpoint());
|
||||
}
|
||||
|
||||
private static Job createValidJob(String jobId) {
|
||||
AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList(
|
||||
Detector.builder().setFunction("count").build()));
|
||||
|
|
|
@@ -25,6 +25,7 @@ import org.elasticsearch.action.get.GetResponse;
|
|||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.ml.CloseJobRequest;
|
||||
import org.elasticsearch.client.ml.CloseJobResponse;
|
||||
import org.elasticsearch.client.ml.DeleteCalendarRequest;
|
||||
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.DeleteForecastRequest;
|
||||
import org.elasticsearch.client.ml.DeleteJobRequest;
|
||||
|
@@ -517,6 +518,24 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
|
|||
assertEquals(calendar1, getCalendarsResponse.calendars().get(0));
|
||||
}
|
||||
|
||||
public void testDeleteCalendar() throws IOException {
|
||||
Calendar calendar = CalendarTests.testInstance();
|
||||
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
|
||||
execute(new PutCalendarRequest(calendar), machineLearningClient::putCalendar,
|
||||
machineLearningClient::putCalendarAsync);
|
||||
|
||||
AcknowledgedResponse response = execute(new DeleteCalendarRequest(calendar.getId()),
|
||||
machineLearningClient::deleteCalendar,
|
||||
machineLearningClient::deleteCalendarAsync);
|
||||
assertTrue(response.isAcknowledged());
|
||||
|
||||
// calendar is missing
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
|
||||
() -> execute(new DeleteCalendarRequest(calendar.getId()), machineLearningClient::deleteCalendar,
|
||||
machineLearningClient::deleteCalendarAsync));
|
||||
assertThat(exception.status().getStatus(), equalTo(404));
|
||||
}
|
||||
|
||||
public static String randomValidJobId() {
|
||||
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray());
|
||||
return generator.ofCodePointsLength(random(), 10, 10);
|
||||
|
|
|
@@ -34,6 +34,7 @@ import org.elasticsearch.client.RequestOptions;
|
|||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.ml.CloseJobRequest;
|
||||
import org.elasticsearch.client.ml.CloseJobResponse;
|
||||
import org.elasticsearch.client.ml.DeleteCalendarRequest;
|
||||
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.DeleteForecastRequest;
|
||||
import org.elasticsearch.client.ml.DeleteJobRequest;
|
||||
|
@@ -1591,4 +1592,50 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeleteCalendar() throws IOException, InterruptedException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
Calendar calendar = new Calendar("holidays", Collections.singletonList("job_1"), "A calendar for public holidays");
|
||||
PutCalendarRequest putCalendarRequest = new PutCalendarRequest(calendar);
|
||||
client.machineLearning().putCalendar(putCalendarRequest, RequestOptions.DEFAULT);
|
||||
|
||||
//tag::x-pack-ml-delete-calendar-request
|
||||
DeleteCalendarRequest request = new DeleteCalendarRequest("holidays"); // <1>
|
||||
//end::x-pack-ml-delete-calendar-request
|
||||
|
||||
//tag::x-pack-ml-delete-calendar-execute
|
||||
AcknowledgedResponse response = client.machineLearning().deleteCalendar(request, RequestOptions.DEFAULT);
|
||||
//end::x-pack-ml-delete-calendar-execute
|
||||
|
||||
//tag::x-pack-ml-delete-calendar-response
|
||||
boolean isAcknowledged = response.isAcknowledged(); // <1>
|
||||
//end::x-pack-ml-delete-calendar-response
|
||||
|
||||
assertTrue(isAcknowledged);
|
||||
|
||||
// tag::x-pack-ml-delete-calendar-listener
|
||||
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-ml-delete-calendar-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-ml-delete-calendar-execute-async
|
||||
client.machineLearning().deleteCalendarAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-ml-delete-calendar-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.ml;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
|
||||
public class DeleteCalendarRequestTests extends ESTestCase {
|
||||
|
||||
public void testWithNullId() {
|
||||
NullPointerException ex = expectThrows(NullPointerException.class, () -> new DeleteCalendarRequest(null));
|
||||
assertEquals("[calendar_id] must not be null", ex.getMessage());
|
||||
}
|
||||
|
||||
public void testEqualsAndHash() {
|
||||
String id1 = randomAlphaOfLength(8);
|
||||
String id2 = id1 + "_a";
|
||||
assertThat(new DeleteCalendarRequest(id1), equalTo(new DeleteCalendarRequest(id1)));
|
||||
assertThat(new DeleteCalendarRequest(id1).hashCode(), equalTo(new DeleteCalendarRequest(id1).hashCode()));
|
||||
assertThat(new DeleteCalendarRequest(id1), not(equalTo(new DeleteCalendarRequest(id2))));
|
||||
assertThat(new DeleteCalendarRequest(id1).hashCode(), not(equalTo(new DeleteCalendarRequest(id2).hashCode())));
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,174 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.XContentTestUtils;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
public class WatchStatusTests extends ESTestCase {
|
||||
|
||||
public void testBasicParsing() throws IOException {
|
||||
int expectedVersion = randomIntBetween(0, 100);
|
||||
ExecutionState expectedExecutionState = randomFrom(ExecutionState.values());
|
||||
boolean expectedActive = randomBoolean();
|
||||
ActionStatus.AckStatus.State expectedAckState = randomFrom(ActionStatus.AckStatus.State.values());
|
||||
|
||||
XContentBuilder builder = createTestXContent(expectedVersion, expectedExecutionState,
|
||||
expectedActive, expectedAckState);
|
||||
BytesReference bytes = BytesReference.bytes(builder);
|
||||
|
||||
WatchStatus watchStatus = parse(builder.contentType(), bytes);
|
||||
|
||||
assertEquals(expectedVersion, watchStatus.version());
|
||||
assertEquals(expectedExecutionState, watchStatus.getExecutionState());
|
||||
|
||||
assertEquals(new DateTime(1432663467763L, DateTimeZone.UTC), watchStatus.lastChecked());
|
||||
assertEquals(DateTime.parse("2015-05-26T18:04:27.763Z"), watchStatus.lastMetCondition());
|
||||
|
||||
WatchStatus.State watchState = watchStatus.state();
|
||||
assertEquals(expectedActive, watchState.isActive());
|
||||
assertEquals(DateTime.parse("2015-05-26T18:04:27.723Z"), watchState.getTimestamp());
|
||||
|
||||
ActionStatus actionStatus = watchStatus.actionStatus("test_index");
|
||||
assertNotNull(actionStatus);
|
||||
|
||||
ActionStatus.AckStatus ackStatus = actionStatus.ackStatus();
|
||||
assertEquals(DateTime.parse("2015-05-26T18:04:27.763Z"), ackStatus.timestamp());
|
||||
assertEquals(expectedAckState, ackStatus.state());
|
||||
|
||||
ActionStatus.Execution lastExecution = actionStatus.lastExecution();
|
||||
assertEquals(DateTime.parse("2015-05-25T18:04:27.733Z"), lastExecution.timestamp());
|
||||
assertFalse(lastExecution.successful());
|
||||
assertEquals("failed to send email", lastExecution.reason());
|
||||
|
||||
ActionStatus.Execution lastSuccessfulExecution = actionStatus.lastSuccessfulExecution();
|
||||
assertEquals(DateTime.parse("2015-05-25T18:04:27.773Z"), lastSuccessfulExecution.timestamp());
|
||||
assertTrue(lastSuccessfulExecution.successful());
|
||||
assertNull(lastSuccessfulExecution.reason());
|
||||
|
||||
ActionStatus.Throttle lastThrottle = actionStatus.lastThrottle();
|
||||
assertEquals(DateTime.parse("2015-04-25T18:05:23.445Z"), lastThrottle.timestamp());
|
||||
assertEquals("throttling interval is set to [5 seconds] ...", lastThrottle.reason());
|
||||
}
|
||||
|
||||
public void testParsingWithUnknownKeys() throws IOException {
|
||||
int expectedVersion = randomIntBetween(0, 100);
|
||||
ExecutionState expectedExecutionState = randomFrom(ExecutionState.values());
|
||||
boolean expectedActive = randomBoolean();
|
||||
ActionStatus.AckStatus.State expectedAckState = randomFrom(ActionStatus.AckStatus.State.values());
|
||||
|
||||
XContentBuilder builder = createTestXContent(expectedVersion, expectedExecutionState,
|
||||
expectedActive, expectedAckState);
|
||||
BytesReference bytes = BytesReference.bytes(builder);
|
||||
|
||||
Predicate<String> excludeFilter = field -> field.equals("actions");
|
||||
BytesReference bytesWithRandomFields = XContentTestUtils.insertRandomFields(
|
||||
builder.contentType(), bytes, excludeFilter, random());
|
||||
|
||||
WatchStatus watchStatus = parse(builder.contentType(), bytesWithRandomFields);
|
||||
|
||||
assertEquals(expectedVersion, watchStatus.version());
|
||||
assertEquals(expectedExecutionState, watchStatus.getExecutionState());
|
||||
}
|
||||
|
||||
public void testOptionalFieldsParsing() throws IOException {
|
||||
XContentType contentType = randomFrom(XContentType.values());
|
||||
XContentBuilder builder = XContentFactory.contentBuilder(contentType).startObject()
|
||||
.field("version", 42)
|
||||
.startObject("actions")
|
||||
.startObject("test_index")
|
||||
.startObject("ack")
|
||||
.field("timestamp", "2015-05-26T18:04:27.763Z")
|
||||
.field("state", "ackable")
|
||||
.endObject()
|
||||
.startObject("last_execution")
|
||||
.field("timestamp", "2015-05-25T18:04:27.733Z")
|
||||
.field("successful", false)
|
||||
.field("reason", "failed to send email")
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject();
|
||||
BytesReference bytes = BytesReference.bytes(builder);
|
||||
|
||||
WatchStatus watchStatus = parse(builder.contentType(), bytes);
|
||||
|
||||
assertEquals(42, watchStatus.version());
|
||||
assertNull(watchStatus.getExecutionState());
|
||||
assertFalse(watchStatus.checked());
|
||||
}
|
||||
|
||||
private XContentBuilder createTestXContent(int version,
|
||||
ExecutionState executionState,
|
||||
boolean active,
|
||||
ActionStatus.AckStatus.State ackState) throws IOException {
|
||||
XContentType contentType = randomFrom(XContentType.values());
|
||||
return XContentFactory.contentBuilder(contentType).startObject()
|
||||
.field("version", version)
|
||||
.field("execution_state", executionState)
|
||||
.field("last_checked", 1432663467763L)
|
||||
.field("last_met_condition", "2015-05-26T18:04:27.763Z")
|
||||
.startObject("state")
|
||||
.field("active", active)
|
||||
.field("timestamp", "2015-05-26T18:04:27.723Z")
|
||||
.endObject()
|
||||
.startObject("actions")
|
||||
.startObject("test_index")
|
||||
.startObject("ack")
|
||||
.field("timestamp", "2015-05-26T18:04:27.763Z")
|
||||
.field("state", ackState)
|
||||
.endObject()
|
||||
.startObject("last_execution")
|
||||
.field("timestamp", "2015-05-25T18:04:27.733Z")
|
||||
.field("successful", false)
|
||||
.field("reason", "failed to send email")
|
||||
.endObject()
|
||||
.startObject("last_successful_execution")
|
||||
.field("timestamp", "2015-05-25T18:04:27.773Z")
|
||||
.field("successful", true)
|
||||
.endObject()
|
||||
.startObject("last_throttle")
|
||||
.field("timestamp", "2015-04-25T18:05:23.445Z")
|
||||
.field("reason", "throttling interval is set to [5 seconds] ...")
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject();
|
||||
}
|
||||
|
||||
private WatchStatus parse(XContentType contentType, BytesReference bytes) throws IOException {
|
||||
XContentParser parser = XContentFactory.xContent(contentType)
|
||||
.createParser(NamedXContentRegistry.EMPTY, null, bytes.streamInput());
|
||||
parser.nextToken();
|
||||
|
||||
return WatchStatus.parse(parser);
|
||||
}
|
||||
}
|
|
@@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.test.rest;
|
||||
|
||||
import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
|
||||
import org.hamcrest.Matcher;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.BufferedReader;
|
||||
|
@@ -29,9 +30,16 @@ import java.nio.file.Path;
|
|||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
|
||||
@Override
|
||||
protected BufferedReader openReader(Path logFile) throws IOException {
|
||||
protected Matcher<String> nodeNameMatcher() {
|
||||
return is("node-0");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BufferedReader openReader(Path logFile) {
|
||||
return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
|
||||
try {
|
||||
return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
|
||||
|
|
|
@@ -0,0 +1,59 @@
|
|||
[[java-rest-high-x-pack-ml-delete-calendar]]
|
||||
=== Delete Calendar API
|
||||
Delete a {ml} calendar.
|
||||
The API accepts a `DeleteCalendarRequest` and responds
|
||||
with an `AcknowledgedResponse` object.
|
||||
|
||||
[[java-rest-high-x-pack-ml-delete-calendar-request]]
|
||||
==== Delete Calendar Request
|
||||
|
||||
A `DeleteCalendarRequest` object requires a non-null `calendarId`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
---------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-request]
|
||||
---------------------------------------------------
|
||||
<1> Constructing a new request referencing an existing Calendar
|
||||
|
||||
[[java-rest-high-x-pack-ml-delete-calendar-response]]
|
||||
==== Delete Calendar Response
|
||||
|
||||
The returned `AcknowledgedResponse` object indicates whether the request was acknowledged:
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
---------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-response]
|
||||
---------------------------------------------------
|
||||
<1> `isAcknowledged` indicates whether the deletion request was acknowledged
|
||||
|
||||
[[java-rest-high-x-pack-ml-delete-calendar-execution]]
|
||||
==== Execution
|
||||
The request can be executed through the `MachineLearningClient` contained
|
||||
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-x-pack-ml-delete-calendar-async]]
|
||||
==== Delete Calendar Asynchronously
|
||||
|
||||
This request can also be made asynchronously.
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
---------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-execute-async]
|
||||
---------------------------------------------------
|
||||
<1> The `DeleteCalendarRequest` to execute and the `ActionListener` to alert on completion or error.
|
||||
|
||||
The deletion request returns immediately. Once the request completes, the `ActionListener` is
called back using either the `onResponse` or the `onFailure` method. The latter indicates that a
failure occurred while making the request.
|
||||
|
||||
A typical listener for a `DeleteCalendarRequest` could be defined as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
---------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-calendar-listener]
|
||||
---------------------------------------------------
|
||||
<1> The action to be taken when it is completed
|
||||
<2> What to do when a failure occurs
|
|
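For a concrete picture of the calls involved, the following is a minimal, hypothetical sketch.
The calendar id `"holidays"`, the `mlClient` parameter, and the exact method names are
assumptions for illustration rather than content of the tagged snippets above, and imports are
omitted to match the style of the included examples:

["source","java"]
---------------------------------------------------
// `mlClient` is the MachineLearningClient described in the Execution section above.
void deleteHolidaysCalendar(MachineLearningClient mlClient) throws IOException {
    DeleteCalendarRequest request = new DeleteCalendarRequest("holidays"); // id of an existing calendar

    // Synchronous execution: the response only carries the acknowledgement flag
    AcknowledgedResponse response = mlClient.deleteCalendar(request, RequestOptions.DEFAULT);
    boolean acknowledged = response.isAcknowledged();

    // Asynchronous execution: the listener is called back on completion or on error
    mlClient.deleteCalendarAsync(request, RequestOptions.DEFAULT, new ActionListener<AcknowledgedResponse>() {
        @Override
        public void onResponse(AcknowledgedResponse response) {
            // the deletion was acknowledged
        }

        @Override
        public void onFailure(Exception e) {
            // the request could not be completed
        }
    });
}
---------------------------------------------------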
@ -233,6 +233,7 @@ The Java High Level REST Client supports the following Machine Learning APIs:
* <<java-rest-high-x-pack-ml-get-categories>>
* <<java-rest-high-x-pack-ml-get-calendars>>
* <<java-rest-high-x-pack-ml-put-calendar>>
* <<java-rest-high-x-pack-ml-delete-calendar>>

include::ml/put-job.asciidoc[]
include::ml/get-job.asciidoc[]

@ -255,6 +256,7 @@ include::ml/get-influencers.asciidoc[]
include::ml/get-categories.asciidoc[]
include::ml/get-calendars.asciidoc[]
include::ml/put-calendar.asciidoc[]
include::ml/delete-calendar.asciidoc[]

== Migration APIs
|
||||
|
||||
|
|
|
@ -113,4 +113,12 @@ And it'd respond:
// TESTRESPONSE

<1> The stemmer has also emitted a token `home` at position 1, but because it is a
duplicate of this token it has been removed from the token stream

NOTE: The synonym and synonym_graph filters use their preceding analysis chain to
parse and analyse their synonym lists, and ignore any token filters in the chain
that produce multiple tokens at the same position. This means that any filters
within the multiplexer will be ignored for the purpose of synonyms. If you want to
use filters contained within the multiplexer for parsing synonyms (for example, to
apply stemming to the synonym lists), then you should append the synonym filter
to the relevant multiplexer filter list, as sketched below.
|
|
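For example, a minimal sketch of such a configuration, expressed with the Java `Settings`
builder and mirroring the `SynonymsAnalysisTests` change further down in this commit (the
filter names `my_english`, `synonyms`, and `stem_repeat` are illustrative):

["source","java"]
---------------------------------------------------
Settings settings = Settings.builder()
    .put("index.analysis.filter.my_english.type", "stemmer")
    .put("index.analysis.filter.my_english.language", "porter2")
    .put("index.analysis.filter.synonyms.type", "synonym")
    .putList("index.analysis.filter.synonyms.synonyms", "programmer, developer")
    .put("index.analysis.filter.stem_repeat.type", "multiplexer")
    // the synonym filter is appended to the multiplexer's filter list, so the
    // synonym list is parsed with the stemmed form of each token taken into account
    .putList("index.analysis.filter.stem_repeat.filters", "my_english, synonyms")
    .build();
---------------------------------------------------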
@ -236,29 +236,36 @@ If everything goes well with installation, you should see a bunch of messages th
|
|||
|
||||
["source","sh",subs="attributes,callouts"]
|
||||
--------------------------------------------------
|
||||
[2016-09-16T14:17:51,251][INFO ][o.e.n.Node ] [] initializing ...
|
||||
[2016-09-16T14:17:51,329][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [317.7gb], net total_space [453.6gb], spins? [no], types [ext4]
|
||||
[2016-09-16T14:17:51,330][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] heap size [1.9gb], compressed ordinary object pointers [true]
|
||||
[2016-09-16T14:17:51,333][INFO ][o.e.n.Node ] [6-bjhwl] node name [6-bjhwl] derived from node ID; set [node.name] to override
|
||||
[2016-09-16T14:17:51,334][INFO ][o.e.n.Node ] [6-bjhwl] version[{version}], pid[21261], build[f5daa16/2016-09-16T09:12:24.346Z], OS[Linux/4.4.0-36-generic/amd64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_60/25.60-b23]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [aggs-matrix-stats]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [ingest-common]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-expression]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-mustache]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-painless]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [percolator]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [reindex]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty3]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty4]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded plugin [mapper-murmur3]
|
||||
[2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] initialized
|
||||
[2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] starting ...
|
||||
[2016-09-16T14:17:53,671][INFO ][o.e.t.TransportService ] [6-bjhwl] publish_address {192.168.8.112:9300}, bound_addresses {{192.168.8.112:9300}
|
||||
[2016-09-16T14:17:53,676][WARN ][o.e.b.BootstrapCheck ] [6-bjhwl] max virtual memory areas vm.max_map_count [65530] likely too low, increase to at least [262144]
|
||||
[2016-09-16T14:17:56,718][INFO ][o.e.c.s.ClusterService ] [6-bjhwl] new_master {6-bjhwl}{6-bjhwl4TkajjoD2oEipnQ}{8m3SNKoFR6yQl1I0JUfPig}{192.168.8.112}{192.168.8.112:9300}, reason: zen-disco-elected-as-master ([0] nodes joined)
|
||||
[2016-09-16T14:17:56,731][INFO ][o.e.h.HttpServer ] [6-bjhwl] publish_address {192.168.8.112:9200}, bound_addresses {[::1]:9200}, {192.168.8.112:9200}
|
||||
[2016-09-16T14:17:56,732][INFO ][o.e.g.GatewayService ] [6-bjhwl] recovered [0] indices into cluster_state
|
||||
[2016-09-16T14:17:56,748][INFO ][o.e.n.Node ] [6-bjhwl] started
|
||||
[2018-09-13T12:20:01,766][INFO ][o.e.e.NodeEnvironment ] [localhost.localdomain] using [1] data paths, mounts [[/home (/dev/mapper/fedora-home)]], net usable_space [335.3gb], net total_space [410.3gb], types [ext4]
|
||||
[2018-09-13T12:20:01,772][INFO ][o.e.e.NodeEnvironment ] [localhost.localdomain] heap size [990.7mb], compressed ordinary object pointers [true]
|
||||
[2018-09-13T12:20:01,774][INFO ][o.e.n.Node ] [localhost.localdomain] node name [localhost.localdomain], node ID [B0aEHNagTiWx7SYj-l4NTw]
|
||||
[2018-09-13T12:20:01,775][INFO ][o.e.n.Node ] [localhost.localdomain] version[{version}], pid[13030], build[oss/zip/77fc20e/2018-09-13T15:37:57.478402Z], OS[Linux/4.16.11-100.fc26.x86_64/amd64], JVM["Oracle Corporation"/OpenJDK 64-Bit Server VM/10/10+46]
|
||||
[2018-09-13T12:20:01,775][INFO ][o.e.n.Node ] [localhost.localdomain] JVM arguments [-Xms1g, -Xmx1g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.io.tmpdir=/tmp/elasticsearch.LN1ctLCi, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Djava.locale.providers=COMPAT, -XX:UseAVX=2, -Dio.netty.allocator.type=unpooled, -Des.path.home=/home/manybubbles/Workspaces/Elastic/master/elasticsearch/qa/unconfigured-node-name/build/cluster/integTestCluster node0/elasticsearch-7.0.0-alpha1-SNAPSHOT, -Des.path.conf=/home/manybubbles/Workspaces/Elastic/master/elasticsearch/qa/unconfigured-node-name/build/cluster/integTestCluster node0/elasticsearch-7.0.0-alpha1-SNAPSHOT/config, -Des.distribution.flavor=oss, -Des.distribution.type=zip]
|
||||
[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [aggs-matrix-stats]
|
||||
[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [analysis-common]
|
||||
[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [ingest-common]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-expression]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-mustache]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-painless]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [mapper-extras]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [parent-join]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [percolator]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [rank-eval]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [reindex]
|
||||
[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [repository-url]
|
||||
[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [transport-netty4]
|
||||
[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] no plugins loaded
|
||||
[2018-09-13T12:20:04,657][INFO ][o.e.d.DiscoveryModule ] [localhost.localdomain] using discovery type [zen]
|
||||
[2018-09-13T12:20:05,006][INFO ][o.e.n.Node ] [localhost.localdomain] initialized
|
||||
[2018-09-13T12:20:05,007][INFO ][o.e.n.Node ] [localhost.localdomain] starting ...
|
||||
[2018-09-13T12:20:05,202][INFO ][o.e.t.TransportService ] [localhost.localdomain] publish_address {127.0.0.1:9300}, bound_addresses {[::1]:9300}, {127.0.0.1:9300}
|
||||
[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
|
||||
[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
|
||||
[2018-09-13T12:20:08,355][INFO ][o.e.c.s.MasterService ] [localhost.localdomain] zen-disco-elected-as-master ([0] nodes joined)[, ], reason: master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]}
|
||||
[2018-09-13T12:20:08,360][INFO ][o.e.c.s.ClusterApplierService] [localhost.localdomain] master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]}, reason: apply cluster state (from master [master {localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test} committed version [1] source [zen-disco-elected-as-master ([0] nodes joined)[, ]]])
|
||||
[2018-09-13T12:20:08,384][INFO ][o.e.h.n.Netty4HttpServerTransport] [localhost.localdomain] publish_address {127.0.0.1:9200}, bound_addresses {[::1]:9200}, {127.0.0.1:9200}
|
||||
[2018-09-13T12:20:08,384][INFO ][o.e.n.Node ] [localhost.localdomain] started
|
||||
|
||||
--------------------------------------------------
|
||||
|
||||
Without going into too much detail, we can see that our node named "6-bjhwl" (which will be a different set of characters in your case) has started and elected itself as a master in a single cluster. Don't worry for now about what being a master means. The important thing is that we have started one node within one cluster.
|
||||
|
|
|
@ -2,6 +2,12 @@

=== Settings changes

==== The default for `node.name` is now the hostname

`node.name` now defaults to the hostname at the time when Elasticsearch
is started. Previously the default node name was the first eight characters
of the node id. It can still be configured explicitly in `elasticsearch.yml`.

==== Percolator

* The deprecated `index.percolator.map_unmapped_fields_as_string` setting has been removed in favour of
|
||||
|
|
|
@ -0,0 +1,489 @@
[role="xpack"]
[testenv="basic"]
[[ml-find-file-structure]]
=== Find File Structure API
++++
<titleabbrev>Find File Structure</titleabbrev>
++++

experimental[]

Finds the structure of a text file. The text file must contain data that is
suitable to be ingested into {es}.

==== Request

`POST _xpack/ml/find_file_structure`


==== Description

This API provides a starting point for ingesting data into {es} in a format that
is suitable for subsequent use with other {ml} functionality.

Unlike other {es} endpoints, the data that is posted to this endpoint does not
need to be UTF-8 encoded and in JSON format. It must, however, be text; binary
file formats are not currently supported.

The response from the API contains:

* A couple of messages from the beginning of the file.
* Statistics that reveal the most common values for all fields detected within
the file and basic numeric statistics for numeric fields.
* Information about the structure of the file, which is useful when you write
ingest configurations to index the file contents.
* Appropriate mappings for an {es} index, which you could use to ingest the file
contents.

All this information can be calculated by the structure finder with no guidance.
However, you can optionally override some of the decisions about the file
structure by specifying one or more query parameters.

Details of the output can be seen in the
<<ml-find-file-structure-examples,examples>>.

If the structure finder produces unexpected results for a particular file,
specify the `explain` query parameter. It causes an `explanation` to appear in
the response, which should help in determining why the returned structure was
chosen.
|
||||
|
||||
==== Query Parameters
|
||||
|
||||
`charset`::
|
||||
(string) The file's character set. It must be a character set that is supported
|
||||
by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or
|
||||
`EUC-JP`. If this parameter is not specified, the structure finder chooses an
|
||||
appropriate character set.
|
||||
|
||||
`column_names`::
|
||||
(string) If you have set `format` to `delimited`, you can specify the column names
|
||||
in a comma-separated list. If this parameter is not specified, the structure
|
||||
finder uses the column names from the header row of the file. If the file does
|
||||
not have a header row, columns are named "column1", "column2", "column3", etc.
|
||||
|
||||
`delimiter`::
|
||||
(string) If you have set `format` to `delimited`, you can specify the character used
|
||||
to delimit the values in each row. Only a single character is supported; the
|
||||
delimiter cannot have multiple characters. If this parameter is not specified,
|
||||
the structure finder considers the following possibilities: comma, tab,
|
||||
semi-colon, and pipe (`|`).
|
||||
|
||||
`explain`::
|
||||
(boolean) If this parameter is set to `true`, the response includes a field
|
||||
named `explanation`, which is an array of strings that indicate how the
|
||||
structure finder produced its result. The default value is `false`.
|
||||
|
||||
`format`::
|
||||
(string) The high level structure of the file. Valid values are `json`, `xml`,
|
||||
`delimited`, and `semi_structured_text`. If this parameter is not specified,
|
||||
the structure finder chooses one.
|
||||
|
||||
`grok_pattern`::
|
||||
(string) If you have set `format` to `semi_structured_text`, you can specify a Grok
|
||||
pattern that is used to extract fields from every message in the file. The
|
||||
name of the timestamp field in the Grok pattern must match what is specified
|
||||
in the `timestamp_field` parameter. If that parameter is not specified, the
|
||||
name of the timestamp field in the Grok pattern must match "timestamp". If
|
||||
`grok_pattern` is not specified, the structure finder creates a Grok pattern.
|
||||
|
||||
`has_header_row`::
|
||||
(boolean) If you have set `format` to `delimited`, you can use this parameter to
|
||||
indicate whether the column names are in the first row of the file. If this
|
||||
parameter is not specified, the structure finder guesses based on the similarity of
|
||||
the first row of the file to other rows.
|
||||
|
||||
`lines_to_sample`::
|
||||
(unsigned integer) The number of lines to include in the structural analysis,
|
||||
starting from the beginning of the file. The minimum is 2; the default
|
||||
is 1000. If the value of this parameter is greater than the number of lines in
|
||||
the file, the analysis proceeds (as long as there are at least two lines in the
|
||||
file) for all of the lines. +
|
||||
+
|
||||
--
|
||||
NOTE: The number of lines and the variation of the lines affects the speed of
|
||||
the analysis. For example, if you upload a log file where the first 1000 lines
|
||||
are all variations on the same message, the analysis will find more commonality
|
||||
than would be seen with a bigger sample. If possible, however, it is more
|
||||
efficient to upload a sample file with more variety in the first 1000 lines than
|
||||
to request analysis of 100000 lines to achieve some variety.
|
||||
--
|
||||
|
||||
`quote`::
|
||||
(string) If you have set `format` to `delimited`, you can specify the character used
|
||||
to quote the values in each row if they contain newlines or the delimiter
|
||||
character. Only a single character is supported. If this parameter is not
|
||||
specified, the default value is a double quote (`"`). If your delimited file
|
||||
format does not use quoting, a workaround is to set this argument to a
|
||||
character that does not appear anywhere in the sample.
|
||||
|
||||
`should_trim_fields`::
|
||||
(boolean) If you have set `format` to `delimited`, you can specify whether values
|
||||
between delimiters should have whitespace trimmed from them. If this parameter
|
||||
is not specified and the delimiter is pipe (`|`), the default value is `true`.
|
||||
Otherwise, the default value is `false`.
|
||||
|
||||
`timestamp_field`::
|
||||
(string) The name of the field that contains the primary timestamp of each
|
||||
record in the file. In particular, if the file were ingested into an index,
|
||||
this is the field that would be used to populate the `@timestamp` field. +
|
||||
+
|
||||
--
|
||||
If the `format` is `semi_structured_text`, this field must match the name of the
|
||||
appropriate extraction in the `grok_pattern`. Therefore, for semi-structured
|
||||
file formats, it is best not to specify this parameter unless `grok_pattern` is
|
||||
also specified.
|
||||
|
||||
For structured file formats, if you specify this parameter, the field must exist
|
||||
within the file.
|
||||
|
||||
If this parameter is not specified, the structure finder makes a decision about which
|
||||
field (if any) is the primary timestamp field. For structured file formats, it
|
||||
is not compulsory to have a timestamp in the file.
|
||||
--
|
||||
|
||||
`timestamp_format`::
|
||||
(string) The time format of the timestamp field in the file. +
|
||||
+
|
||||
--
|
||||
NOTE: Currently there is a limitation that this format must be one that the
|
||||
structure finder might choose by itself. The reason for this restriction is that
|
||||
to consistently set all the fields in the response the structure finder needs a
|
||||
corresponding Grok pattern name and simple regular expression for each timestamp
|
||||
format. Therefore, there is little value in specifying this parameter for
|
||||
structured file formats. If you know which field contains your primary timestamp,
|
||||
it is just as good, and less error-prone, to specify `timestamp_field` instead.
|
||||
|
||||
The valuable use case for this parameter is when the format is semi-structured
|
||||
text, there are multiple timestamp formats in the file, and you know which
|
||||
format corresponds to the primary timestamp, but you do not want to specify the
|
||||
full `grok_pattern`.
|
||||
|
||||
If this parameter is not specified, the structure finder chooses the best format from
|
||||
the formats it knows, which are:
|
||||
|
||||
* `dd/MMM/YYYY:HH:mm:ss Z`
|
||||
* `EEE MMM dd HH:mm zzz YYYY`
|
||||
* `EEE MMM dd HH:mm:ss YYYY`
|
||||
* `EEE MMM dd HH:mm:ss zzz YYYY`
|
||||
* `EEE MMM dd YYYY HH:mm zzz`
|
||||
* `EEE MMM dd YYYY HH:mm:ss zzz`
|
||||
* `EEE, dd MMM YYYY HH:mm Z`
|
||||
* `EEE, dd MMM YYYY HH:mm ZZ`
|
||||
* `EEE, dd MMM YYYY HH:mm:ss Z`
|
||||
* `EEE, dd MMM YYYY HH:mm:ss ZZ`
|
||||
* `ISO8601`
|
||||
* `MMM d HH:mm:ss`
|
||||
* `MMM d HH:mm:ss,SSS`
|
||||
* `MMM d YYYY HH:mm:ss`
|
||||
* `MMM dd HH:mm:ss`
|
||||
* `MMM dd HH:mm:ss,SSS`
|
||||
* `MMM dd YYYY HH:mm:ss`
|
||||
* `MMM dd, YYYY K:mm:ss a`
|
||||
* `TAI64N`
|
||||
* `UNIX`
|
||||
* `UNIX_MS`
|
||||
* `YYYY-MM-dd HH:mm:ss`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSS`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSS Z`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSSZ`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSSZZ`
|
||||
* `YYYY-MM-dd HH:mm:ssZ`
|
||||
* `YYYY-MM-dd HH:mm:ssZZ`
|
||||
* `YYYYMMddHHmmss`
|
||||
|
||||
--
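As an illustration of overriding these decisions, the following is a minimal sketch (not taken
from the {es} documentation) that sends the request with the low-level Java REST client. The
sampled NDJSON records and the chosen parameter values are assumptions for the example only:

[source,java]
----
import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class FindFileStructureSketch {

    // `restClient` is assumed to be an already configured low-level RestClient.
    static Response findStructureWithOverrides(RestClient restClient) throws IOException {
        Request request = new Request("POST", "/_xpack/ml/find_file_structure");
        request.addParameter("lines_to_sample", "500"); // analyse at most 500 lines
        request.addParameter("charset", "UTF-8");       // skip character set detection
        request.addParameter("explain", "true");        // ask for the `explanation` array
        // The body is the raw text to analyse, here two NDJSON records.
        request.setJsonEntity(
            "{\"name\": \"Leviathan Wakes\", \"author\": \"James S.A. Corey\"}\n" +
            "{\"name\": \"Hyperion\", \"author\": \"Dan Simmons\"}\n");
        return restClient.performRequest(request);
    }
}
----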
|
||||
|
||||
==== Request Body
|
||||
|
||||
The text file that you want to analyze. It must contain data that is suitable to
|
||||
be ingested into {es}. It does not need to be in JSON format and it does not
|
||||
need to be UTF-8 encoded. The size is limited to the {es} HTTP receive buffer
|
||||
size, which defaults to 100 MB.
|
||||
|
||||
|
||||
==== Authorization
|
||||
|
||||
You must have `monitor_ml`, or `monitor` cluster privileges to use this API.
|
||||
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
|
||||
|
||||
|
||||
[[ml-find-file-structure-examples]]
|
||||
==== Examples
|
||||
|
||||
Suppose you have a newline-delimited JSON file that contains information about
|
||||
some books. You can send the contents to the `find_file_structure` endpoint:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
POST _xpack/ml/find_file_structure
|
||||
{"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561}
|
||||
{"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482}
|
||||
{"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604}
|
||||
{"name": "Dune Messiah", "author": "Frank Herbert", "release_date": "1969-10-15", "page_count": 331}
|
||||
{"name": "Children of Dune", "author": "Frank Herbert", "release_date": "1976-04-21", "page_count": 408}
|
||||
{"name": "God Emperor of Dune", "author": "Frank Herbert", "release_date": "1981-05-28", "page_count": 454}
|
||||
{"name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471}
|
||||
{"name": "Pandora's Star", "author": "Peter F. Hamilton", "release_date": "2004-03-02", "page_count": 768}
|
||||
{"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585}
|
||||
{"name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613}
|
||||
{"name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324}
|
||||
{"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328}
|
||||
{"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227}
|
||||
{"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268}
|
||||
{"name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224}
|
||||
{"name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208}
|
||||
{"name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275}
|
||||
{"name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180}
|
||||
{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470}
|
||||
{"name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271}
|
||||
{"name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311}
|
||||
{"name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335}
|
||||
{"name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304}
|
||||
{"name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288}
|
||||
----
|
||||
// CONSOLE
|
||||
// TEST
|
||||
|
||||
If the request does not encounter errors, you receive the following result:
|
||||
[source,js]
|
||||
----
|
||||
{
|
||||
"num_lines_analyzed" : 24, <1>
|
||||
"num_messages_analyzed" : 24, <2>
|
||||
"sample_start" : "{\"name\": \"Leviathan Wakes\", \"author\": \"James S.A. Corey\", \"release_date\": \"2011-06-02\", \"page_count\": 561}\n{\"name\": \"Hyperion\", \"author\": \"Dan Simmons\", \"release_date\": \"1989-05-26\", \"page_count\": 482}\n", <3>
|
||||
"charset" : "UTF-8", <4>
|
||||
"has_byte_order_marker" : false, <5>
|
||||
"format" : "json", <6>
|
||||
"need_client_timezone" : false, <7>
|
||||
"mappings" : { <8>
|
||||
"author" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"name" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"page_count" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"release_date" : {
|
||||
"type" : "keyword"
|
||||
}
|
||||
},
|
||||
"field_stats" : { <9>
|
||||
"author" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 20,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : "Frank Herbert",
|
||||
"count" : 4
|
||||
},
|
||||
{
|
||||
"value" : "Robert A. Heinlein",
|
||||
"count" : 2
|
||||
},
|
||||
{
|
||||
"value" : "Alastair Reynolds",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Aldous Huxley",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Dan Simmons",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Douglas Adams",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "George Orwell",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Iain M. Banks",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Isaac Asimov",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "James S.A. Corey",
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"name" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 24,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : "1984",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "A Fire Upon the Deep",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Brave New World",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Children of Dune",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Consider Phlebas",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Dune",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Dune Messiah",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Ender's Game",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Fahrenheit 451",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Foundation",
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"page_count" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 24,
|
||||
"min_value" : 180.0,
|
||||
"max_value" : 768.0,
|
||||
"mean_value" : 387.0833333333333,
|
||||
"median_value" : 329.5,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : 180.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 208.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 224.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 227.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 268.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 271.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 275.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 288.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 304.0,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 311.0,
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"release_date" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 20,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : "1985-06-01",
|
||||
"count" : 3
|
||||
},
|
||||
{
|
||||
"value" : "1969-06-01",
|
||||
"count" : 2
|
||||
},
|
||||
{
|
||||
"value" : "1992-06-01",
|
||||
"count" : 2
|
||||
},
|
||||
{
|
||||
"value" : "1932-06-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1951-06-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1953-10-15",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1959-12-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1965-06-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1966-04-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1969-10-15",
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE[s/"sample_start" : ".*",/"sample_start" : "$body.sample_start",/]
|
||||
// The substitution is because the "file" is pre-processed by the test harness,
|
||||
// so the fields may get reordered in the JSON the endpoint sees
|
||||
|
||||
<1> `num_lines_analyzed` indicates how many lines of the file were analyzed.
|
||||
<2> `num_messages_analyzed` indicates how many distinct messages the lines contained.
|
||||
For ND-JSON, this value is the same as `num_lines_analyzed`. For other file
|
||||
formats, messages can span several lines.
|
||||
<3> `sample_start` reproduces the first two messages in the file verbatim. This
|
||||
may help to diagnose parse errors or accidental uploads of the wrong file.
|
||||
<4> `charset` indicates the character encoding used to parse the file.
|
||||
<5> For UTF character encodings, `has_byte_order_marker` indicates whether the
|
||||
file begins with a byte order marker.
|
||||
<6> `format` is one of `json`, `xml`, `delimited` or `semi_structured_text`.
|
||||
<7> If a timestamp format is detected that does not include a timezone,
|
||||
`need_client_timezone` will be `true`. The server that parses the file must
|
||||
therefore be told the correct timezone by the client.
|
||||
<8> `mappings` contains some suitable mappings for an index into which the data
|
||||
could be ingested. In this case, the `release_date` field has been given a
|
||||
`keyword` type as it is not considered specific enough to convert to the
|
||||
`date` type.
|
||||
<9> `field_stats` contains the most common values of each field, plus basic
|
||||
numeric statistics for the numeric `page_count` field. This information
|
||||
may provide clues that the data needs to be cleaned or transformed prior
|
||||
to use by other {ml} functionality.
|
||||
|
|
@ -70,6 +70,12 @@ machine learning APIs and in advanced job configuration options in Kibana.
* <<ml-get-influencer,Get influencers>>
* <<ml-get-record,Get records>>

[float]
[[ml-api-file-structure-endpoint]]
=== File Structure

* <<ml-find-file-structure,Find file structure>>

//ADD
include::post-calendar-event.asciidoc[]
include::put-calendar-job.asciidoc[]

@ -89,6 +95,8 @@ include::delete-forecast.asciidoc[]
include::delete-job.asciidoc[]
include::delete-calendar-job.asciidoc[]
include::delete-snapshot.asciidoc[]
//FIND
include::find-file-structure.asciidoc[]
//FLUSH
include::flush-job.asciidoc[]
//FORECAST

@ -126,3 +134,4 @@ include::update-snapshot.asciidoc[]
//VALIDATE
//include::validate-detector.asciidoc[]
//include::validate-job.asciidoc[]
|
||||
|
||||
|
|
|
@ -1,22 +1,13 @@
[[node.name]]
=== `node.name`

By default, Elasticsearch will use the first seven characters of the randomly
generated UUID as the node id. Note that the node id is persisted and does
not change when a node restarts and therefore the default node name will also
not change.

It is worth configuring a more meaningful name which will also have the
advantage of persisting after restarting the node:
Elasticsearch uses `node.name` as a human readable identifier for a
particular instance of Elasticsearch so it is included in the response
of many APIs. It defaults to the hostname that the machine has when
Elasticsearch starts but can be configured explicitly in
`elasticsearch.yml` as follows:

[source,yaml]
--------------------------------------------------
node.name: prod-data-2
--------------------------------------------------

The `node.name` can also be set to the server's HOSTNAME as follows:

[source,yaml]
--------------------------------------------------
node.name: ${HOSTNAME}
--------------------------------------------------
|
||||
|
|
|
@ -4,10 +4,9 @@
Elasticsearch uses https://logging.apache.org/log4j/2.x/[Log4j 2] for
logging. Log4j 2 can be configured using the log4j2.properties
file. Elasticsearch exposes three properties, `${sys:es.logs.base_path}`,
`${sys:es.logs.cluster_name}`, and `${sys:es.logs.node_name}` (if the node name
is explicitly set via `node.name`) that can be referenced in the configuration
file to determine the location of the log files. The property
`${sys:es.logs.base_path}` will resolve to the log directory,
`${sys:es.logs.cluster_name}`, and `${sys:es.logs.node_name}` that can be
referenced in the configuration file to determine the location of the log
files. The property `${sys:es.logs.base_path}` will resolve to the log directory,
`${sys:es.logs.cluster_name}` will resolve to the cluster name (used as the
prefix of log filenames in the default configuration), and
`${sys:es.logs.node_name}` will resolve to the node name (if the node name is
|
||||
|
|
|
@ -29,33 +29,20 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
|
||||
import org.elasticsearch.index.analysis.ReferringFilterFactory;
|
||||
import org.elasticsearch.index.analysis.CharFilterFactory;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
import org.elasticsearch.index.analysis.TokenizerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
|
||||
public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory implements ReferringFilterFactory {
|
||||
public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory {
|
||||
|
||||
private List<TokenFilterFactory> filters;
|
||||
private List<String> filterNames;
|
||||
private final boolean preserveOriginal;
|
||||
|
||||
private static final TokenFilterFactory IDENTITY_FACTORY = new TokenFilterFactory() {
|
||||
@Override
|
||||
public String name() {
|
||||
return "identity";
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
return tokenStream;
|
||||
}
|
||||
};
|
||||
|
||||
public MultiplexerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException {
|
||||
super(indexSettings, name, settings);
|
||||
this.filterNames = settings.getAsList("filters");
|
||||
|
@ -64,31 +51,56 @@ public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory im
|
|||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
List<Function<TokenStream, TokenStream>> functions = new ArrayList<>();
|
||||
for (TokenFilterFactory tff : filters) {
|
||||
functions.add(tff::create);
|
||||
}
|
||||
return new RemoveDuplicatesTokenFilter(new MultiplexTokenFilter(tokenStream, functions));
|
||||
throw new UnsupportedOperationException("TokenFilterFactory.getChainAwareTokenFilterFactory() must be called first");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setReferences(Map<String, TokenFilterFactory> factories) {
|
||||
filters = new ArrayList<>();
|
||||
public TokenFilterFactory getChainAwareTokenFilterFactory(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters,
|
||||
List<TokenFilterFactory> previousTokenFilters,
|
||||
Function<String, TokenFilterFactory> allFilters) {
|
||||
List<TokenFilterFactory> filters = new ArrayList<>();
|
||||
if (preserveOriginal) {
|
||||
filters.add(IDENTITY_FACTORY);
|
||||
filters.add(IDENTITY_FILTER);
|
||||
}
|
||||
for (String filter : filterNames) {
|
||||
String[] parts = Strings.tokenizeToStringArray(filter, ",");
|
||||
if (parts.length == 1) {
|
||||
filters.add(resolveFilterFactory(factories, parts[0]));
|
||||
TokenFilterFactory factory = resolveFilterFactory(allFilters, parts[0]);
|
||||
factory = factory.getChainAwareTokenFilterFactory(tokenizer, charFilters, previousTokenFilters, allFilters);
|
||||
filters.add(factory);
|
||||
} else {
|
||||
List<TokenFilterFactory> existingChain = new ArrayList<>(previousTokenFilters);
|
||||
List<TokenFilterFactory> chain = new ArrayList<>();
|
||||
for (String subfilter : parts) {
|
||||
chain.add(resolveFilterFactory(factories, subfilter));
|
||||
TokenFilterFactory factory = resolveFilterFactory(allFilters, subfilter);
|
||||
factory = factory.getChainAwareTokenFilterFactory(tokenizer, charFilters, existingChain, allFilters);
|
||||
chain.add(factory);
|
||||
existingChain.add(factory);
|
||||
}
|
||||
filters.add(chainFilters(filter, chain));
|
||||
}
|
||||
}
|
||||
|
||||
return new TokenFilterFactory() {
|
||||
@Override
|
||||
public String name() {
|
||||
return MultiplexerTokenFilterFactory.this.name();
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
List<Function<TokenStream, TokenStream>> functions = new ArrayList<>();
|
||||
for (TokenFilterFactory tff : filters) {
|
||||
functions.add(tff::create);
|
||||
}
|
||||
return new RemoveDuplicatesTokenFilter(new MultiplexTokenFilter(tokenStream, functions));
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenFilterFactory getSynonymFilter() {
|
||||
return IDENTITY_FILTER;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private TokenFilterFactory chainFilters(String name, List<TokenFilterFactory> filters) {
|
||||
|
@ -108,11 +120,12 @@ public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory im
|
|||
};
|
||||
}
|
||||
|
||||
private TokenFilterFactory resolveFilterFactory(Map<String, TokenFilterFactory> factories, String name) {
|
||||
if (factories.containsKey(name) == false) {
|
||||
private TokenFilterFactory resolveFilterFactory(Function<String, TokenFilterFactory> factories, String name) {
|
||||
TokenFilterFactory factory = factories.apply(name);
|
||||
if (factory == null) {
|
||||
throw new IllegalArgumentException("Multiplexing filter [" + name() + "] refers to undefined tokenfilter [" + name + "]");
|
||||
} else {
|
||||
return factories.get(name);
|
||||
return factory;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -24,26 +24,24 @@ import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
|
||||
import org.elasticsearch.index.analysis.ReferringFilterFactory;
|
||||
import org.elasticsearch.index.analysis.CharFilterFactory;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
import org.elasticsearch.index.analysis.TokenizerFactory;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.script.ScriptType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
|
||||
/**
|
||||
* A factory for a conditional token filter that only applies child filters if the underlying token
|
||||
* matches an {@link AnalysisPredicateScript}
|
||||
*/
|
||||
public class ScriptedConditionTokenFilterFactory extends AbstractTokenFilterFactory implements ReferringFilterFactory {
|
||||
public class ScriptedConditionTokenFilterFactory extends AbstractTokenFilterFactory {
|
||||
|
||||
private final AnalysisPredicateScript.Factory factory;
|
||||
private final List<TokenFilterFactory> filters = new ArrayList<>();
|
||||
private final List<String> filterNames;
|
||||
|
||||
ScriptedConditionTokenFilterFactory(IndexSettings indexSettings, String name,
|
||||
|
@ -65,13 +63,43 @@ public class ScriptedConditionTokenFilterFactory extends AbstractTokenFilterFact
|
|||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
Function<TokenStream, TokenStream> filter = in -> {
|
||||
for (TokenFilterFactory tff : filters) {
|
||||
in = tff.create(in);
|
||||
throw new UnsupportedOperationException("getChainAwareTokenFilterFactory should be called first");
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenFilterFactory getChainAwareTokenFilterFactory(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters,
|
||||
List<TokenFilterFactory> previousTokenFilters,
|
||||
Function<String, TokenFilterFactory> allFilters) {
|
||||
List<TokenFilterFactory> filters = new ArrayList<>();
|
||||
List<TokenFilterFactory> existingChain = new ArrayList<>(previousTokenFilters);
|
||||
for (String filter : filterNames) {
|
||||
TokenFilterFactory tff = allFilters.apply(filter);
|
||||
if (tff == null) {
|
||||
throw new IllegalArgumentException("ScriptedConditionTokenFilter [" + name() +
|
||||
"] refers to undefined token filter [" + filter + "]");
|
||||
}
|
||||
tff = tff.getChainAwareTokenFilterFactory(tokenizer, charFilters, existingChain, allFilters);
|
||||
filters.add(tff);
|
||||
existingChain.add(tff);
|
||||
}
|
||||
|
||||
return new TokenFilterFactory() {
|
||||
@Override
|
||||
public String name() {
|
||||
return ScriptedConditionTokenFilterFactory.this.name();
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
Function<TokenStream, TokenStream> filter = in -> {
|
||||
for (TokenFilterFactory tff : filters) {
|
||||
in = tff.create(in);
|
||||
}
|
||||
return in;
|
||||
};
|
||||
return new ScriptedConditionTokenFilter(tokenStream, filter, factory.newInstance());
|
||||
}
|
||||
return in;
|
||||
};
|
||||
return new ScriptedConditionTokenFilter(tokenStream, filter, factory.newInstance());
|
||||
}
|
||||
|
||||
private static class ScriptedConditionTokenFilter extends ConditionalTokenFilter {
|
||||
|
@ -80,29 +108,17 @@ public class ScriptedConditionTokenFilterFactory extends AbstractTokenFilterFact
|
|||
private final AnalysisPredicateScript.Token token;
|
||||
|
||||
ScriptedConditionTokenFilter(TokenStream input, Function<TokenStream, TokenStream> inputFactory,
|
||||
AnalysisPredicateScript script) {
|
||||
AnalysisPredicateScript script) {
|
||||
super(input, inputFactory);
|
||||
this.script = script;
|
||||
this.token = new AnalysisPredicateScript.Token(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean shouldFilter() throws IOException {
|
||||
protected boolean shouldFilter() {
|
||||
token.updatePosition();
|
||||
return script.execute(token);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setReferences(Map<String, TokenFilterFactory> factories) {
|
||||
for (String filter : filterNames) {
|
||||
TokenFilterFactory tff = factories.get(filter);
|
||||
if (tff == null) {
|
||||
throw new IllegalArgumentException("ScriptedConditionTokenFilter [" + name() +
|
||||
"] refers to undefined token filter [" + filter + "]");
|
||||
}
|
||||
filters.add(tff);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
package org.elasticsearch.analysis.common;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.elasticsearch.Version;

@ -117,6 +118,26 @@ public class SynonymsAnalysisTests extends ESTestCase {
        }
    }

    public void testSynonymsWithMultiplexer() throws IOException {
        Settings settings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("path.home", createTempDir().toString())
            .put("index.analysis.filter.synonyms.type", "synonym")
            .putList("index.analysis.filter.synonyms.synonyms", "programmer, developer")
            .put("index.analysis.filter.my_english.type", "stemmer")
            .put("index.analysis.filter.my_english.language", "porter2")
            .put("index.analysis.filter.stem_repeat.type", "multiplexer")
            .putList("index.analysis.filter.stem_repeat.filters", "my_english, synonyms")
            .put("index.analysis.analyzer.synonymAnalyzer.tokenizer", "standard")
            .putList("index.analysis.analyzer.synonymAnalyzer.filter", "lowercase", "stem_repeat")
            .build();
        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
        indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;

        // The multiplexer emits the stemmed token and the stemmed synonym at the same
        // position as the original token, hence the zero position increments below.
        BaseTokenStreamTestCase.assertAnalyzesTo(indexAnalyzers.get("synonymAnalyzer"), "Some developers are odd",
            new String[]{ "some", "developers", "develop", "programm", "are", "odd" },
            new int[]{ 1, 1, 0, 0, 1, 1 });
    }

    private void match(String analyzerName, String source, String target) throws IOException {
        Analyzer analyzer = indexAnalyzers.get(analyzerName).analyzer();
|
||||
|
|
|
@ -63,16 +63,16 @@ public final class Whitelist {
|
|||
/** The {@link List} of all the whitelisted static Painless methods. */
|
||||
public final List<WhitelistMethod> whitelistImportedMethods;
|
||||
|
||||
/** The {@link List} of all the whitelisted Painless bindings. */
|
||||
public final List<WhitelistBinding> whitelistBindings;
|
||||
/** The {@link List} of all the whitelisted Painless class bindings. */
|
||||
public final List<WhitelistClassBinding> whitelistClassBindings;
|
||||
|
||||
/** Standard constructor. All values must be not {@code null}. */
|
||||
public Whitelist(ClassLoader classLoader, List<WhitelistClass> whitelistClasses,
|
||||
List<WhitelistMethod> whitelistImportedMethods, List<WhitelistBinding> whitelistBindings) {
|
||||
List<WhitelistMethod> whitelistImportedMethods, List<WhitelistClassBinding> whitelistClassBindings) {
|
||||
|
||||
this.classLoader = Objects.requireNonNull(classLoader);
|
||||
this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses));
|
||||
this.whitelistImportedMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistImportedMethods));
|
||||
this.whitelistBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistBindings));
|
||||
this.whitelistClassBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistClassBindings));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,23 +23,23 @@ import java.util.List;
|
|||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A binding represents a method call that stores state. Each binding class must have exactly one
|
||||
* public constructor and one public method excluding those inherited directly from {@link Object}.
|
||||
* The canonical type name parameters provided must match those of the constructor and method combined.
|
||||
* The constructor for a binding class will be called when the binding method is called for the first
|
||||
* time at which point state may be stored for the arguments passed into the constructor. The method
|
||||
* for a binding class will be called each time the binding method is called and may use the previously
|
||||
* stored state.
|
||||
* A class binding represents a method call that stores state. Each class binding's Java class must
|
||||
* have exactly one public constructor and one public method excluding those inherited directly
|
||||
* from {@link Object}. The canonical type name parameters provided must match those of the
|
||||
* constructor and method combined. The constructor for a class binding's Java class will be called
|
||||
* when the binding method is called for the first time at which point state may be stored for the
|
||||
* arguments passed into the constructor. The method for a binding class will be called each time
|
||||
* the binding method is called and may use the previously stored state.
|
||||
*/
|
||||
public class WhitelistBinding {
|
||||
public class WhitelistClassBinding {
|
||||
|
||||
/** Information about where this constructor was whitelisted from. */
|
||||
public final String origin;
|
||||
|
||||
/** The Java class name this binding represents. */
|
||||
/** The Java class name this class binding targets. */
|
||||
public final String targetJavaClassName;
|
||||
|
||||
/** The method name for this binding. */
|
||||
/** The method name for this class binding. */
|
||||
public final String methodName;
|
||||
|
||||
/**
|
||||
|
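// Illustrative sketch only (not part of this change set): a hypothetical Java class that
// satisfies the class binding contract described in the comment above. It has exactly one
// public constructor, whose arguments become the stored state, and exactly one public
// method, which is invoked on every use of the binding and may read that state.
public class ExampleBindingTarget {

    private final int offset;

    // Called the first time the bound method is used in a script; the argument is stored as state.
    public ExampleBindingTarget(int offset) {
        this.offset = offset;
    }

    // Called on every use of the bound method, reusing the stored state.
    public int addOffset(int value) {
        return offset + value;
    }
}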
@ -54,7 +54,7 @@ public class WhitelistBinding {
|
|||
public final List<String> canonicalTypeNameParameters;
|
||||
|
||||
/** Standard constructor. All values must be not {@code null}. */
|
||||
public WhitelistBinding(String origin, String targetJavaClassName,
|
||||
public WhitelistClassBinding(String origin, String targetJavaClassName,
|
||||
String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) {
|
||||
|
||||
this.origin = Objects.requireNonNull(origin);
|
|
@ -134,7 +134,7 @@ public final class WhitelistLoader {
|
|||
public static Whitelist loadFromResourceFiles(Class<?> resource, String... filepaths) {
|
||||
List<WhitelistClass> whitelistClasses = new ArrayList<>();
|
||||
List<WhitelistMethod> whitelistStatics = new ArrayList<>();
|
||||
List<WhitelistBinding> whitelistBindings = new ArrayList<>();
|
||||
List<WhitelistClassBinding> whitelistClassBindings = new ArrayList<>();
|
||||
|
||||
// Execute a single pass through the whitelist text files. This will gather all the
|
||||
// constructors, methods, augmented methods, and fields for each whitelisted class.
|
||||
|
@ -292,7 +292,7 @@ public final class WhitelistLoader {
|
|||
whitelistStatics.add(new WhitelistMethod(origin, targetJavaClassName,
|
||||
methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters)));
|
||||
} else if ("bound_to".equals(staticImportType)) {
|
||||
whitelistBindings.add(new WhitelistBinding(origin, targetJavaClassName,
|
||||
whitelistClassBindings.add(new WhitelistClassBinding(origin, targetJavaClassName,
|
||||
methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters)));
|
||||
} else {
|
||||
throw new IllegalArgumentException("invalid static import definition: " +
|
||||
|
@ -392,7 +392,7 @@ public final class WhitelistLoader {
|
|||
|
||||
ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>)resource::getClassLoader);
|
||||
|
||||
return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistBindings);
|
||||
return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistClassBindings);
|
||||
}
|
||||
|
||||
private WhitelistLoader() {}
|
||||
|
|
|
@ -23,7 +23,7 @@ import java.lang.reflect.Constructor;
|
|||
import java.lang.reflect.Method;
|
||||
import java.util.List;
|
||||
|
||||
public class PainlessBinding {
|
||||
public class PainlessClassBinding {
|
||||
|
||||
public final Constructor<?> javaConstructor;
|
||||
public final Method javaMethod;
|
||||
|
@ -31,7 +31,7 @@ public class PainlessBinding {
|
|||
public final Class<?> returnType;
|
||||
public final List<Class<?>> typeParameters;
|
||||
|
||||
PainlessBinding(Constructor<?> javaConstructor, Method javaMethod, Class<?> returnType, List<Class<?>> typeParameters) {
|
||||
PainlessClassBinding(Constructor<?> javaConstructor, Method javaMethod, Class<?> returnType, List<Class<?>> typeParameters) {
|
||||
this.javaConstructor = javaConstructor;
|
||||
this.javaMethod = javaMethod;
|
||||
|
|
@ -38,23 +38,23 @@ public final class PainlessLookup {
|
|||
private final Map<Class<?>, PainlessClass> classesToPainlessClasses;
|
||||
|
||||
private final Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods;
|
||||
private final Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings;
|
||||
private final Map<String, PainlessClassBinding> painlessMethodKeysToPainlessClassBindings;
|
||||
|
||||
PainlessLookup(Map<String, Class<?>> canonicalClassNamesToClasses, Map<Class<?>, PainlessClass> classesToPainlessClasses,
|
||||
Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods,
|
||||
Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings) {
|
||||
Map<String, PainlessClassBinding> painlessMethodKeysToPainlessClassBindings) {
|
||||
|
||||
Objects.requireNonNull(canonicalClassNamesToClasses);
|
||||
Objects.requireNonNull(classesToPainlessClasses);
|
||||
|
||||
Objects.requireNonNull(painlessMethodKeysToImportedPainlessMethods);
|
||||
Objects.requireNonNull(painlessMethodKeysToPainlessBindings);
|
||||
Objects.requireNonNull(painlessMethodKeysToPainlessClassBindings);
|
||||
|
||||
this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses);
|
||||
this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses);
|
||||
|
||||
this.painlessMethodKeysToImportedPainlessMethods = Collections.unmodifiableMap(painlessMethodKeysToImportedPainlessMethods);
|
||||
this.painlessMethodKeysToPainlessBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessBindings);
|
||||
this.painlessMethodKeysToPainlessClassBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessClassBindings);
|
||||
}
|
||||
|
||||
public boolean isValidCanonicalClassName(String canonicalClassName) {
|
||||
|
@ -182,12 +182,12 @@ public final class PainlessLookup {
|
|||
return painlessMethodKeysToImportedPainlessMethods.get(painlessMethodKey);
|
||||
}
|
||||
|
||||
public PainlessBinding lookupPainlessBinding(String methodName, int arity) {
|
||||
public PainlessClassBinding lookupPainlessClassBinding(String methodName, int arity) {
|
||||
Objects.requireNonNull(methodName);
|
||||
|
||||
String painlessMethodKey = buildPainlessMethodKey(methodName, arity);
|
||||
|
||||
return painlessMethodKeysToPainlessBindings.get(painlessMethodKey);
|
||||
return painlessMethodKeysToPainlessClassBindings.get(painlessMethodKey);
|
||||
}
|
||||
|
||||
public PainlessMethod lookupFunctionalInterfacePainlessMethod(Class<?> targetClass) {
|
||||
|
|
|
@ -20,7 +20,7 @@
package org.elasticsearch.painless.lookup;

import org.elasticsearch.painless.spi.Whitelist;
import org.elasticsearch.painless.spi.WhitelistBinding;
import org.elasticsearch.painless.spi.WhitelistClassBinding;
import org.elasticsearch.painless.spi.WhitelistClass;
import org.elasticsearch.painless.spi.WhitelistConstructor;
import org.elasticsearch.painless.spi.WhitelistField;

@ -156,14 +156,14 @@ public final class PainlessLookupBuilder {
}
}

private static class PainlessBindingCacheKey {
private static class PainlessClassBindingCacheKey {

private final Class<?> targetClass;
private final String methodName;
private final Class<?> methodReturnType;
private final List<Class<?>> methodTypeParameters;

private PainlessBindingCacheKey(Class<?> targetClass,
private PainlessClassBindingCacheKey(Class<?> targetClass,
String methodName, Class<?> returnType, List<Class<?>> typeParameters) {

this.targetClass = targetClass;

@ -182,7 +182,7 @@ public final class PainlessLookupBuilder {
return false;
}

PainlessBindingCacheKey that = (PainlessBindingCacheKey)object;
PainlessClassBindingCacheKey that = (PainlessClassBindingCacheKey)object;

return Objects.equals(targetClass, that.targetClass) &&
Objects.equals(methodName, that.methodName) &&

@ -196,10 +196,10 @@ public final class PainlessLookupBuilder {
}
}

private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstructorCache = new HashMap<>();
private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
private static final Map<PainlessBindingCacheKey, PainlessBinding> painlessBindingCache = new HashMap<>();
private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstructorCache = new HashMap<>();
private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
private static final Map<PainlessClassBindingCacheKey, PainlessClassBinding> painlessClassBindingCache = new HashMap<>();

private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$");
private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$");

@ -251,12 +251,12 @@ public final class PainlessLookupBuilder {
whitelistStatic.canonicalTypeNameParameters);
}

for (WhitelistBinding whitelistBinding : whitelist.whitelistBindings) {
origin = whitelistBinding.origin;
painlessLookupBuilder.addPainlessBinding(
whitelist.classLoader, whitelistBinding.targetJavaClassName,
whitelistBinding.methodName, whitelistBinding.returnCanonicalTypeName,
whitelistBinding.canonicalTypeNameParameters);
for (WhitelistClassBinding whitelistClassBinding : whitelist.whitelistClassBindings) {
origin = whitelistClassBinding.origin;
painlessLookupBuilder.addPainlessClassBinding(
whitelist.classLoader, whitelistClassBinding.targetJavaClassName,
whitelistClassBinding.methodName, whitelistClassBinding.returnCanonicalTypeName,
whitelistClassBinding.canonicalTypeNameParameters);
}
}
} catch (Exception exception) {

@ -270,14 +270,14 @@ public final class PainlessLookupBuilder {
private final Map<Class<?>, PainlessClassBuilder> classesToPainlessClassBuilders;

private final Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods;
private final Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings;
private final Map<String, PainlessClassBinding> painlessMethodKeysToPainlessClassBindings;

public PainlessLookupBuilder() {
canonicalClassNamesToClasses = new HashMap<>();
classesToPainlessClassBuilders = new HashMap<>();

painlessMethodKeysToImportedPainlessMethods = new HashMap<>();
painlessMethodKeysToPainlessBindings = new HashMap<>();
painlessMethodKeysToPainlessClassBindings = new HashMap<>();
}

private Class<?> canonicalTypeNameToType(String canonicalTypeName) {

@ -909,8 +909,8 @@ public final class PainlessLookupBuilder {

String painlessMethodKey = buildPainlessMethodKey(methodName, typeParametersSize);

if (painlessMethodKeysToPainlessBindings.containsKey(painlessMethodKey)) {
throw new IllegalArgumentException("imported method and binding cannot have the same name [" + methodName + "]");
if (painlessMethodKeysToPainlessClassBindings.containsKey(painlessMethodKey)) {
throw new IllegalArgumentException("imported method and class binding cannot have the same name [" + methodName + "]");
}

PainlessMethod importedPainlessMethod = painlessMethodKeysToImportedPainlessMethods.get(painlessMethodKey);

@ -945,7 +945,7 @@ public final class PainlessLookupBuilder {
}
}

public void addPainlessBinding(ClassLoader classLoader, String targetJavaClassName,
public void addPainlessClassBinding(ClassLoader classLoader, String targetJavaClassName,
String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) {

Objects.requireNonNull(classLoader);

@ -969,7 +969,7 @@ public final class PainlessLookupBuilder {
Class<?> typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter);

if (typeParameter == null) {
throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for binding " +
throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for class binding " +
"[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
}

@ -979,14 +979,14 @@ public final class PainlessLookupBuilder {
Class<?> returnType = canonicalTypeNameToType(returnCanonicalTypeName);

if (returnType == null) {
throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for binding " +
throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for class binding " +
"[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
}

addPainlessBinding(targetClass, methodName, returnType, typeParameters);
addPainlessClassBinding(targetClass, methodName, returnType, typeParameters);
}

public void addPainlessBinding(Class<?> targetClass, String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
public void addPainlessClassBinding(Class<?> targetClass, String methodName, Class<?> returnType, List<Class<?>> typeParameters) {

Objects.requireNonNull(targetClass);
Objects.requireNonNull(methodName);

@ -994,7 +994,7 @@ public final class PainlessLookupBuilder {
Objects.requireNonNull(typeParameters);

if (targetClass == def.class) {
throw new IllegalArgumentException("cannot add binding as reserved class [" + DEF_CLASS_NAME + "]");
throw new IllegalArgumentException("cannot add class binding as reserved class [" + DEF_CLASS_NAME + "]");
}

String targetCanonicalClassName = typeToCanonicalTypeName(targetClass);

@ -1005,7 +1005,8 @@ public final class PainlessLookupBuilder {
for (Constructor<?> eachJavaConstructor : javaConstructors) {
if (eachJavaConstructor.getDeclaringClass() == targetClass) {
if (javaConstructor != null) {
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple constructors");
throw new IllegalArgumentException(
"class binding [" + targetCanonicalClassName + "] cannot have multiple constructors");
}

javaConstructor = eachJavaConstructor;

@ -1013,7 +1014,7 @@ public final class PainlessLookupBuilder {
}

if (javaConstructor == null) {
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one constructor");
throw new IllegalArgumentException("class binding [" + targetCanonicalClassName + "] must have exactly one constructor");
}

int constructorTypeParametersSize = javaConstructor.getParameterCount();

@ -1023,26 +1024,26 @@ public final class PainlessLookupBuilder {

if (isValidType(typeParameter) == false) {
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
}

Class<?> javaTypeParameter = javaConstructor.getParameterTypes()[typeParameterIndex];

if (isValidType(javaTypeParameter) == false) {
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
}

if (javaTypeParameter != typeToJavaType(typeParameter)) {
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " +
"does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " +
"for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
"for class binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
}
}

if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) {
throw new IllegalArgumentException(
"invalid method name [" + methodName + "] for binding [" + targetCanonicalClassName + "].");
"invalid method name [" + methodName + "] for class binding [" + targetCanonicalClassName + "].");
}

Method[] javaMethods = targetClass.getMethods();

@ -1051,7 +1052,7 @@ public final class PainlessLookupBuilder {
for (Method eachJavaMethod : javaMethods) {
if (eachJavaMethod.getDeclaringClass() == targetClass) {
if (javaMethod != null) {
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple methods");
throw new IllegalArgumentException("class binding [" + targetCanonicalClassName + "] cannot have multiple methods");
}

javaMethod = eachJavaMethod;

@ -1059,7 +1060,7 @@ public final class PainlessLookupBuilder {
}

if (javaMethod == null) {
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one method");
throw new IllegalArgumentException("class binding [" + targetCanonicalClassName + "] must have exactly one method");
}

int methodTypeParametersSize = javaMethod.getParameterCount();

@ -1069,60 +1070,60 @@ public final class PainlessLookupBuilder {

if (isValidType(typeParameter) == false) {
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
}

Class<?> javaTypeParameter = javaMethod.getParameterTypes()[typeParameterIndex];

if (isValidType(javaTypeParameter) == false) {
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
}

if (javaTypeParameter != typeToJavaType(typeParameter)) {
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " +
"does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " +
"for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
"for class binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
}
}

if (javaMethod.getReturnType() != typeToJavaType(returnType)) {
throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " +
"does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " +
"for binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " +
"for class binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " +
typesToCanonicalTypeNames(typeParameters) + "]");
}

String painlessMethodKey = buildPainlessMethodKey(methodName, constructorTypeParametersSize + methodTypeParametersSize);

if (painlessMethodKeysToImportedPainlessMethods.containsKey(painlessMethodKey)) {
throw new IllegalArgumentException("binding and imported method cannot have the same name [" + methodName + "]");
throw new IllegalArgumentException("class binding and imported method cannot have the same name [" + methodName + "]");
}

PainlessBinding painlessBinding = painlessMethodKeysToPainlessBindings.get(painlessMethodKey);
PainlessClassBinding painlessClassBinding = painlessMethodKeysToPainlessClassBindings.get(painlessMethodKey);

if (painlessBinding == null) {
if (painlessClassBinding == null) {
Constructor<?> finalJavaConstructor = javaConstructor;
Method finalJavaMethod = javaMethod;

painlessBinding = painlessBindingCache.computeIfAbsent(
new PainlessBindingCacheKey(targetClass, methodName, returnType, typeParameters),
key -> new PainlessBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters));
painlessClassBinding = painlessClassBindingCache.computeIfAbsent(
new PainlessClassBindingCacheKey(targetClass, methodName, returnType, typeParameters),
key -> new PainlessClassBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters));

painlessMethodKeysToPainlessBindings.put(painlessMethodKey, painlessBinding);
} else if (painlessBinding.javaConstructor.equals(javaConstructor) == false ||
painlessBinding.javaMethod.equals(javaMethod) == false ||
painlessBinding.returnType != returnType ||
painlessBinding.typeParameters.equals(typeParameters) == false) {
throw new IllegalArgumentException("cannot have bindings " +
painlessMethodKeysToPainlessClassBindings.put(painlessMethodKey, painlessClassBinding);
} else if (painlessClassBinding.javaConstructor.equals(javaConstructor) == false ||
painlessClassBinding.javaMethod.equals(javaMethod) == false ||
painlessClassBinding.returnType != returnType ||
painlessClassBinding.typeParameters.equals(typeParameters) == false) {
throw new IllegalArgumentException("cannot have class bindings " +
"[[" + targetCanonicalClassName + "], " +
"[" + methodName + "], " +
"[" + typeToCanonicalTypeName(returnType) + "], " +
typesToCanonicalTypeNames(typeParameters) + "] and " +
"[[" + targetCanonicalClassName + "], " +
"[" + methodName + "], " +
"[" + typeToCanonicalTypeName(painlessBinding.returnType) + "], " +
typesToCanonicalTypeNames(painlessBinding.typeParameters) + "] and " +
"[" + typeToCanonicalTypeName(painlessClassBinding.returnType) + "], " +
typesToCanonicalTypeNames(painlessClassBinding.typeParameters) + "] and " +
"with the same name and arity but different constructors or methods");
}
}

@ -1139,7 +1140,7 @@ public final class PainlessLookupBuilder {
}

return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses,
painlessMethodKeysToImportedPainlessMethods, painlessMethodKeysToPainlessBindings);
painlessMethodKeysToImportedPainlessMethods, painlessMethodKeysToPainlessClassBindings);
}

private void copyPainlessClassMembers() {
@ -24,7 +24,7 @@ import org.elasticsearch.painless.Locals;
import org.elasticsearch.painless.Locals.LocalMethod;
import org.elasticsearch.painless.Location;
import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.lookup.PainlessBinding;
import org.elasticsearch.painless.lookup.PainlessClassBinding;
import org.elasticsearch.painless.lookup.PainlessMethod;
import org.objectweb.asm.Label;
import org.objectweb.asm.Type;

@ -45,9 +45,9 @@ public final class ECallLocal extends AExpression {
private final String name;
private final List<AExpression> arguments;

private LocalMethod method = null;
private PainlessMethod imported = null;
private PainlessBinding binding = null;
private LocalMethod localMethod = null;
private PainlessMethod importedMethod = null;
private PainlessClassBinding classBinding = null;

public ECallLocal(Location location, String name, List<AExpression> arguments) {
super(location);

@ -65,15 +65,15 @@ public final class ECallLocal extends AExpression {

@Override
void analyze(Locals locals) {
method = locals.getMethod(name, arguments.size());
localMethod = locals.getMethod(name, arguments.size());

if (method == null) {
imported = locals.getPainlessLookup().lookupImportedPainlessMethod(name, arguments.size());
if (localMethod == null) {
importedMethod = locals.getPainlessLookup().lookupImportedPainlessMethod(name, arguments.size());

if (imported == null) {
binding = locals.getPainlessLookup().lookupPainlessBinding(name, arguments.size());
if (importedMethod == null) {
classBinding = locals.getPainlessLookup().lookupPainlessClassBinding(name, arguments.size());

if (binding == null) {
if (classBinding == null) {
throw createError(
new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments."));
}

@ -82,15 +82,15 @@ public final class ECallLocal extends AExpression {

List<Class<?>> typeParameters;

if (method != null) {
typeParameters = new ArrayList<>(method.typeParameters);
actual = method.returnType;
} else if (imported != null) {
typeParameters = new ArrayList<>(imported.typeParameters);
actual = imported.returnType;
} else if (binding != null) {
typeParameters = new ArrayList<>(binding.typeParameters);
actual = binding.returnType;
if (localMethod != null) {
typeParameters = new ArrayList<>(localMethod.typeParameters);
actual = localMethod.returnType;
} else if (importedMethod != null) {
typeParameters = new ArrayList<>(importedMethod.typeParameters);
actual = importedMethod.returnType;
} else if (classBinding != null) {
typeParameters = new ArrayList<>(classBinding.typeParameters);
actual = classBinding.returnType;
} else {
throw new IllegalStateException("Illegal tree structure.");
}

@ -111,23 +111,23 @@ public final class ECallLocal extends AExpression {
void write(MethodWriter writer, Globals globals) {
writer.writeDebugInfo(location);

if (method != null) {
if (localMethod != null) {
for (AExpression argument : arguments) {
argument.write(writer, globals);
}

writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString()));
} else if (imported != null) {
writer.invokeStatic(CLASS_TYPE, new Method(localMethod.name, localMethod.methodType.toMethodDescriptorString()));
} else if (importedMethod != null) {
for (AExpression argument : arguments) {
argument.write(writer, globals);
}

writer.invokeStatic(Type.getType(imported.targetClass),
new Method(imported.javaMethod.getName(), imported.methodType.toMethodDescriptorString()));
} else if (binding != null) {
String name = globals.addBinding(binding.javaConstructor.getDeclaringClass());
Type type = Type.getType(binding.javaConstructor.getDeclaringClass());
int javaConstructorParameterCount = binding.javaConstructor.getParameterCount();
writer.invokeStatic(Type.getType(importedMethod.targetClass),
new Method(importedMethod.javaMethod.getName(), importedMethod.methodType.toMethodDescriptorString()));
} else if (classBinding != null) {
String name = globals.addBinding(classBinding.javaConstructor.getDeclaringClass());
Type type = Type.getType(classBinding.javaConstructor.getDeclaringClass());
int javaConstructorParameterCount = classBinding.javaConstructor.getParameterCount();

Label nonNull = new Label();

@ -142,18 +142,18 @@ public final class ECallLocal extends AExpression {
arguments.get(argument).write(writer, globals);
}

writer.invokeConstructor(type, Method.getMethod(binding.javaConstructor));
writer.invokeConstructor(type, Method.getMethod(classBinding.javaConstructor));
writer.putField(CLASS_TYPE, name, type);

writer.mark(nonNull);
writer.loadThis();
writer.getField(CLASS_TYPE, name, type);

for (int argument = 0; argument < binding.javaMethod.getParameterCount(); ++argument) {
for (int argument = 0; argument < classBinding.javaMethod.getParameterCount(); ++argument) {
arguments.get(argument + javaConstructorParameterCount).write(writer, globals);
}

writer.invokeVirtual(type, Method.getMethod(binding.javaMethod));
writer.invokeVirtual(type, Method.getMethod(classBinding.javaMethod));
} else {
throw new IllegalStateException("Illegal tree structure.");
}
@ -70,12 +70,14 @@ bundlePlugin {

additionalTest('testRepositoryCreds'){
include '**/RepositoryCredentialsTests.class'
include '**/S3BlobStoreRepositoryTests.class'
systemProperty 'es.allow_insecure_settings', 'true'
}

test {
// these are tested explicitly in separate test tasks
exclude '**/*CredentialsTests.class'
exclude '**/S3BlobStoreRepositoryTests.class'
}

boolean useFixture = false
@ -19,18 +19,12 @@

package org.elasticsearch.repositories.s3;

import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;

import com.amazonaws.auth.BasicSessionCredentials;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;

@ -38,6 +32,12 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

/**
 * A container for settings used to create an S3 client.
 */

@ -160,19 +160,6 @@ final class S3ClientSettings {
return Collections.unmodifiableMap(clients);
}

static Map<String, S3ClientSettings> overrideCredentials(Map<String, S3ClientSettings> clientsSettings,
BasicAWSCredentials credentials) {
final MapBuilder<String, S3ClientSettings> mapBuilder = new MapBuilder<>();
for (final Map.Entry<String, S3ClientSettings> entry : clientsSettings.entrySet()) {
final S3ClientSettings s3ClientSettings = new S3ClientSettings(credentials, entry.getValue().endpoint,
entry.getValue().protocol, entry.getValue().proxyHost, entry.getValue().proxyPort, entry.getValue().proxyUsername,
entry.getValue().proxyPassword, entry.getValue().readTimeoutMillis, entry.getValue().maxRetries,
entry.getValue().throttleRetries);
mapBuilder.put(entry.getKey(), s3ClientSettings);
}
return mapBuilder.immutableMap();
}

static boolean checkDeprecatedCredentials(Settings repositorySettings) {
if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) {
if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) {

@ -224,25 +211,37 @@ final class S3ClientSettings {

// pkg private for tests
/** Parse settings for a single client. */
static S3ClientSettings getClientSettings(Settings settings, String clientName) {
static S3ClientSettings getClientSettings(final Settings settings, final String clientName) {
final AWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName);
return getClientSettings(settings, clientName, credentials);
}

static S3ClientSettings getClientSettings(final Settings settings, final String clientName, final AWSCredentials credentials) {
try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING);
SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) {
return new S3ClientSettings(
credentials,
getConfigValue(settings, clientName, ENDPOINT_SETTING),
getConfigValue(settings, clientName, PROTOCOL_SETTING),
getConfigValue(settings, clientName, PROXY_HOST_SETTING),
getConfigValue(settings, clientName, PROXY_PORT_SETTING),
proxyUsername.toString(),
proxyPassword.toString(),
(int)getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis(),
getConfigValue(settings, clientName, MAX_RETRIES_SETTING),
getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING)
credentials,
getConfigValue(settings, clientName, ENDPOINT_SETTING),
getConfigValue(settings, clientName, PROTOCOL_SETTING),
getConfigValue(settings, clientName, PROXY_HOST_SETTING),
getConfigValue(settings, clientName, PROXY_PORT_SETTING),
proxyUsername.toString(),
proxyPassword.toString(),
Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()),
getConfigValue(settings, clientName, MAX_RETRIES_SETTING),
getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING)
);
}
}

static S3ClientSettings getClientSettings(final RepositoryMetaData metadata, final AWSCredentials credentials) {
final Settings.Builder builder = Settings.builder();
for (final String key : metadata.settings().keySet()) {
builder.put(PREFIX + "provided" + "." + key, metadata.settings().get(key));
}
return getClientSettings(builder.build(), "provided", credentials);
}

private static <T> T getConfigValue(Settings settings, String clientName,
Setting.AffixSetting<T> clientSetting) {
final Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName);
@ -35,7 +35,6 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;

import java.util.Map;
import java.util.function.Function;

/**

@ -163,6 +162,8 @@ class S3Repository extends BlobStoreRepository {

private final String clientName;

private final AmazonS3Reference reference;

/**
 * Constructs an s3 backed repository
 */

@ -200,21 +201,54 @@ class S3Repository extends BlobStoreRepository {

this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings());

this.clientName = CLIENT_NAME.get(metadata.settings());

logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
"buffer_size [{}], cannedACL [{}], storageClass [{}]",
bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass);

// (repository settings)
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
overrideCredentialsFromClusterState(service);
if (CLIENT_NAME.exists(metadata.settings()) && S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
logger.warn(
"ignoring use of named client [{}] for repository [{}] as insecure credentials were specified",
clientName,
metadata.name());
}

if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
// provided repository settings
deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead "
+ "store these in named clients and the elasticsearch keystore for secure settings.");
final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings());
final S3ClientSettings s3ClientSettings = S3ClientSettings.getClientSettings(metadata, insecureCredentials);
this.reference = new AmazonS3Reference(service.buildClient(s3ClientSettings));
} else {
reference = null;
}

logger.debug(
"using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]",
bucket,
chunkSize,
serverSideEncryption,
bufferSize,
cannedACL,
storageClass);
}

@Override
protected S3BlobStore createBlobStore() {
return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
if (reference != null) {
assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name();
return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass) {
@Override
public AmazonS3Reference clientReference() {
if (reference.tryIncRef()) {
return reference;
} else {
throw new IllegalStateException("S3 client is closed");
}
}
};
} else {
return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
}
}

// only use for testing

@ -244,13 +278,13 @@ class S3Repository extends BlobStoreRepository {
return chunkSize;
}

void overrideCredentialsFromClusterState(final S3Service s3Service) {
deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead "
+ "store these in named clients and the elasticsearch keystore for secure settings.");
final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings());
// hack, but that's ok because the whole if branch should be axed
final Map<String, S3ClientSettings> prevSettings = s3Service.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY));
final Map<String, S3ClientSettings> newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials);
s3Service.refreshAndClearCache(newSettings);
@Override
protected void doClose() {
if (reference != null) {
assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name();
reference.decRef();
}
super.doClose();
}

}
@ -107,7 +107,6 @@ public class RepositoryCredentialsTests extends ESTestCase {
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
// repository settings for credentials override node secure settings
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
.put(S3Repository.CLIENT_NAME.getKey(), randomFrom(clientNames))
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key")
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);

@ -163,11 +162,13 @@ public class RepositoryCredentialsTests extends ESTestCase {
secureSettings.setString("s3.client." + clientName + ".secret_key", "secure_aws_secret");
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
// repository settings
final Settings.Builder builder = Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName);
final Settings.Builder builder = Settings.builder();
final boolean repositorySettings = randomBoolean();
if (repositorySettings) {
builder.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key");
builder.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret");
} else {
builder.put(S3Repository.CLIENT_NAME.getKey(), clientName);
}
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build());
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);

@ -202,8 +203,13 @@ public class RepositoryCredentialsTests extends ESTestCase {
try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials
.getCredentials();
assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key"));
assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret"));
if (repositorySettings) {
assertThat(newCredentials.getAWSAccessKeyId(), is("insecure_aws_key"));
assertThat(newCredentials.getAWSSecretKey(), is("insecure_aws_secret"));
} else {
assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key"));
assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret"));
}
}
}
if (repositorySettings) {
@ -18,9 +18,9 @@
*/
package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.StorageClass;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;

@ -91,7 +91,6 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
.setVerify(verify)
.setSettings(Settings.builder()
.put(S3Repository.BUCKET_SETTING.getKey(), bucket)
.put(S3Repository.CLIENT_NAME.getKey(), client)
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize)
.put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption)
.put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL)

@ -121,14 +120,10 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
return Collections.singletonMap(S3Repository.TYPE,
(metadata) -> new S3Repository(metadata, env.settings(), registry, new S3Service(env.settings()) {
@Override
public synchronized AmazonS3Reference client(String clientName) {
return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass));
AmazonS3 buildClient(S3ClientSettings clientSettings) {
return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass);
}
}) {
@Override
void overrideCredentialsFromClusterState(S3Service awsService) {
}
});
}));
}
}
@ -340,26 +340,22 @@ public class EvilLoggerTests extends ESTestCase {
}

public void testProperties() throws IOException, UserException {
final Settings.Builder builder = Settings.builder().put("cluster.name", randomAlphaOfLength(16));
if (randomBoolean()) {
builder.put("node.name", randomAlphaOfLength(16));
}
final Settings settings = builder.build();
final Settings settings = Settings.builder()
.put("cluster.name", randomAlphaOfLength(16))
.put("node.name", randomAlphaOfLength(16))
.build();
setupLogging("minimal", settings);

assertNotNull(System.getProperty("es.logs.base_path"));

assertThat(System.getProperty("es.logs.cluster_name"), equalTo(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()));
if (Node.NODE_NAME_SETTING.exists(settings)) {
assertThat(System.getProperty("es.logs.node_name"), equalTo(Node.NODE_NAME_SETTING.get(settings)));
} else {
assertNull(System.getProperty("es.logs.node_name"));
}
assertThat(System.getProperty("es.logs.node_name"), equalTo(Node.NODE_NAME_SETTING.get(settings)));
}

public void testNoNodeNameInPatternWarning() throws IOException, UserException {
String nodeName = randomAlphaOfLength(16);
LogConfigurator.setNodeName(nodeName);
setupLogging("no_node_name");

final String path =
System.getProperty("es.logs.base_path") +
System.getProperty("file.separator") +

@ -368,10 +364,10 @@ public class EvilLoggerTests extends ESTestCase {
assertThat(events.size(), equalTo(2));
final String location = "org.elasticsearch.common.logging.LogConfigurator";
// the first message is a warning for unsupported configuration files
assertLogLine(events.get(0), Level.WARN, location, "\\[unknown\\] Some logging configurations have %marker but don't "
+ "have %node_name. We will automatically add %node_name to the pattern to ease the migration for users "
+ "who customize log4j2.properties but will stop this behavior in 7.0. You should manually replace "
+ "`%node_name` with `\\[%node_name\\]%marker ` in these locations:");
assertLogLine(events.get(0), Level.WARN, location, "\\[" + nodeName + "\\] Some logging configurations have "
+ "%marker but don't have %node_name. We will automatically add %node_name to the pattern to ease the "
+ "migration for users who customize log4j2.properties but will stop this behavior in 7.0. You should "
+ "manually replace `%node_name` with `\\[%node_name\\]%marker ` in these locations:");
if (Constants.WINDOWS) {
assertThat(events.get(1), endsWith("no_node_name\\log4j2.properties"));
} else {
@ -52,7 +52,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
IOException ioException = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
});
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith(path.toString()));
}

@ -72,7 +72,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
IOException ioException = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
});
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory"));
}

@ -97,7 +97,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
IOException ioException = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
});
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory"));
}
@ -19,8 +19,8 @@

package org.elasticsearch.unconfigured_node_name;

import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
import org.hamcrest.Matcher;

import java.io.IOException;
import java.io.BufferedReader;

@ -30,11 +30,16 @@ import java.nio.file.Path;
import java.security.AccessController;
import java.security.PrivilegedAction;

import static org.hamcrest.Matchers.not;

public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
@Override
protected BufferedReader openReader(Path logFile) throws IOException {
assumeTrue("We log a line without the node name if we can't install the seccomp filters",
BootstrapInfo.isSystemCallFilterInstalled());
protected Matcher<String> nodeNameMatcher() {
return not("");
}

@Override
protected BufferedReader openReader(Path logFile) {
return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
try {
return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);

@ -43,11 +48,4 @@ public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
}
});
}

public void testDummy() {
/* Dummy test case so that when we run this test on a platform that
 * does not support our syscall filters and we skip the test above
 * we don't fail the entire test run because we skipped all the tests.
 */
}
}
@ -48,11 +48,9 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.CustomAnalyzerProvider;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.ReferringFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.mapper.KeywordFieldMapper;

@ -66,6 +64,7 @@ import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;

@ -73,6 +72,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;

/**
 * Transport action used to execute analyze requests

@ -571,11 +571,48 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return charFilterFactoryList;
}

public static class DeferredTokenFilterRegistry implements Function<String, TokenFilterFactory> {

private final AnalysisRegistry analysisRegistry;
private final IndexSettings indexSettings;
Map<String, TokenFilterFactory> prebuiltFilters;

public DeferredTokenFilterRegistry(AnalysisRegistry analysisRegistry, IndexSettings indexSettings) {
this.analysisRegistry = analysisRegistry;
if (indexSettings == null) {
// Settings are null when _analyze is called with no index name, so
// we create dummy settings which will make prebuilt analysis components
// available
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.build();
IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();
indexSettings = new IndexSettings(metaData, Settings.EMPTY);
}
this.indexSettings = indexSettings;
}

@Override
public TokenFilterFactory apply(String s) {
if (prebuiltFilters == null) {
try {
prebuiltFilters = analysisRegistry.buildTokenFilterFactories(indexSettings);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
return prebuiltFilters.get(s);
}
}

private static List<TokenFilterFactory> parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, Tuple<String, TokenizerFactory> tokenizerFactory,
List<CharFilterFactory> charFilterFactoryList, boolean normalizer) throws IOException {
List<TokenFilterFactory> tokenFilterFactoryList = new ArrayList<>();
List<ReferringFilterFactory> referringFilters = new ArrayList<>();
DeferredTokenFilterRegistry deferredRegistry = new DeferredTokenFilterRegistry(analysisRegistry, indexSettings);
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
List<AnalyzeRequest.NameOrDefinition> tokenFilters = request.tokenFilters();
for (AnalyzeRequest.NameOrDefinition tokenFilter : tokenFilters) {

@ -594,11 +631,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
}
// Need to set anonymous "name" of tokenfilter
tokenFilterFactory = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter", settings);
tokenFilterFactory = CustomAnalyzerProvider.checkAndApplySynonymFilter(tokenFilterFactory, tokenizerFactory.v1(), tokenizerFactory.v2(), tokenFilterFactoryList,
charFilterFactoryList, environment);
if (tokenFilterFactory instanceof ReferringFilterFactory) {
referringFilters.add((ReferringFilterFactory)tokenFilterFactory);
}
tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), charFilterFactoryList,
tokenFilterFactoryList, deferredRegistry);

} else {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory;

@ -616,8 +650,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
Settings settings = AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." + tokenFilter.name);
tokenFilterFactory = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name, settings);
tokenFilterFactory = CustomAnalyzerProvider.checkAndApplySynonymFilter(tokenFilterFactory, tokenizerFactory.v1(), tokenizerFactory.v2(), tokenFilterFactoryList,
charFilterFactoryList, environment);
tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), charFilterFactoryList,
tokenFilterFactoryList, deferredRegistry);
}
}
if (tokenFilterFactory == null) {

@ -633,26 +667,6 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenFilterFactoryList.add(tokenFilterFactory);
}
}
if (referringFilters.isEmpty() == false) {
// The request included at least one custom referring tokenfilter that has not already been built by the
// analysis registry, so we need to set its references. Note that this will only apply pre-built
// tokenfilters
if (indexSettings == null) {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.build();
IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();
indexSettings = new IndexSettings(metaData, Settings.EMPTY);
}
Map<String, TokenFilterFactory> prebuiltFilters = analysisRegistry.buildTokenFilterFactories(indexSettings);
for (ReferringFilterFactory rff : referringFilters) {
rff.setReferences(prebuiltFilters);
}

}
return tokenFilterFactoryList;
}
@ -200,7 +200,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
} catch (QueryShardException|ParsingException e) {
valid = false;
error = e.getDetailedMessage();
} catch (AssertionError|IOException e) {
} catch (AssertionError e) {
valid = false;
error = e.getMessage();
} finally {

@ -210,7 +210,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
}

private String explain(SearchContext context, boolean rewritten) throws IOException {
private String explain(SearchContext context, boolean rewritten) {
Query query = context.query();
if (rewritten && query instanceof MatchNoDocsQuery) {
return context.parsedQuery().query().toString();
@ -152,4 +152,11 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference()
);
}

@Override
protected String getExecutor(ExplainRequest request, ShardId shardId) {
IndexService indexService = searchService.getIndicesService().indexServiceSafe(shardId.getIndex());
return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
shardId);
}
}
@ -111,4 +111,11 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
protected GetResponse newResponse() {
return new GetResponse();
}

@Override
protected String getExecutor(GetRequest request, ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
shardId);
}
}
@ -102,4 +102,11 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul

return response;
}

@Override
protected String getExecutor(MultiGetShardRequest request, ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
shardId);
}
}
@ -23,6 +23,7 @@ import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.transport.Transport;

@ -40,7 +41,7 @@ import java.util.stream.Stream;
 * which allows to fan out to more shards at the same time without running into rejections even if we are hitting a
 * large portion of the clusters indices.
 */
final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<SearchTransportService.CanMatchResponse> {
final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<SearchService.CanMatchResponse> {

private final Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory;
private final GroupShardsIterator<SearchShardIterator> shardsIts;

@ -67,13 +68,13 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc

@Override
protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard,
SearchActionListener<SearchTransportService.CanMatchResponse> listener) {
SearchActionListener<SearchService.CanMatchResponse> listener) {
getSearchTransport().sendCanMatch(getConnection(shardIt.getClusterAlias(), shard.currentNodeId()),
buildShardSearchRequest(shardIt), getTask(), listener);
}

@Override
protected SearchPhase getNextPhase(SearchPhaseResults<SearchTransportService.CanMatchResponse> results,
protected SearchPhase getNextPhase(SearchPhaseResults<SearchService.CanMatchResponse> results,
SearchPhaseContext context) {

return phaseFactory.apply(getIterator((BitSetSearchPhaseResults) results, shardsIts));

@ -100,7 +101,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
}

private static final class BitSetSearchPhaseResults extends InitialSearchPhase.
SearchPhaseResults<SearchTransportService.CanMatchResponse> {
SearchPhaseResults<SearchService.CanMatchResponse> {

private final FixedBitSet possibleMatches;
private int numPossibleMatches;

@ -111,7 +112,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
}

@Override
void consumeResult(SearchTransportService.CanMatchResponse result) {
void consumeResult(SearchService.CanMatchResponse result) {
if (result.canMatch()) {
consumeShardFailure(result.getShardIndex());
}

@ -139,7 +140,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
}

@Override
Stream<SearchTransportService.CanMatchResponse> getSuccessfulResults() {
Stream<SearchService.CanMatchResponse> getSuccessfulResults() {
return Stream.empty();
}
}
@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.HandledTransportAction.ChannelActionListener;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;

@ -112,9 +112,9 @@ public class SearchTransportService extends AbstractComponent {
}

public void sendCanMatch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final
ActionListener<CanMatchResponse> listener) {
ActionListener<SearchService.CanMatchResponse> listener) {
transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task,
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new));
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, SearchService.CanMatchResponse::new));
}

public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener<TransportResponse> listener) {

@ -349,83 +349,54 @@ public class SearchTransportService extends AbstractComponent {

transportService.registerRequestHandler(QUERY_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new,
(request, channel, task) -> {
searchService.executeQueryPhase(request, (SearchTask) task, new HandledTransportAction.ChannelActionListener<>(
searchService.executeQueryPhase(request, (SearchTask) task, new ChannelActionListener<>(
channel, QUERY_ACTION_NAME, request));
});
TransportActionProxy.registerProxyAction(transportService, QUERY_ACTION_NAME,
(request) -> ((ShardSearchRequest)request).numberOfShards() == 1 ? QueryFetchSearchResult::new : QuerySearchResult::new);

transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, ThreadPool.Names.SEARCH, QuerySearchRequest::new,
transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, ThreadPool.Names.SAME, QuerySearchRequest::new,
(request, channel, task) -> {
QuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task);
channel.sendResponse(result);
searchService.executeQueryPhase(request, (SearchTask)task, new ChannelActionListener<>(channel, QUERY_ID_ACTION_NAME,
request));
});
TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, QuerySearchResult::new);

transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, InternalScrollSearchRequest::new,
transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, InternalScrollSearchRequest::new,
(request, channel, task) -> {
ScrollQuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task);
channel.sendResponse(result);
searchService.executeQueryPhase(request, (SearchTask)task, new ChannelActionListener<>(channel, QUERY_SCROLL_ACTION_NAME,
request));
});
TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, ScrollQuerySearchResult::new);

transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, InternalScrollSearchRequest::new,
transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, InternalScrollSearchRequest::new,
(request, channel, task) -> {
ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task);
channel.sendResponse(result);
searchService.executeFetchPhase(request, (SearchTask)task, new ChannelActionListener<>(channel,
QUERY_FETCH_SCROLL_ACTION_NAME, request));
});
TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, ScrollQueryFetchSearchResult::new);

transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, ShardFetchRequest::new,
transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, ShardFetchRequest::new,
(request, channel, task) -> {
FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task);
channel.sendResponse(result);
searchService.executeFetchPhase(request, (SearchTask)task, new ChannelActionListener<>(channel,
FETCH_ID_SCROLL_ACTION_NAME, request));
});
TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, FetchSearchResult::new);

transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ThreadPool.Names.SEARCH, true, true, ShardFetchSearchRequest::new,
transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ThreadPool.Names.SEARCH, true, true, ShardFetchSearchRequest::new,
|
||||
transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ThreadPool.Names.SAME, true, true, ShardFetchSearchRequest::new,
|
||||
(request, channel, task) -> {
|
||||
FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task);
|
||||
channel.sendResponse(result);
|
||||
searchService.executeFetchPhase(request, (SearchTask)task, new ChannelActionListener<>(channel, FETCH_ID_ACTION_NAME,
|
||||
request));
|
||||
});
|
||||
TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, FetchSearchResult::new);
|
||||
|
||||
// this is cheap, it does not fetch during the rewrite phase, so we can let it quickly execute on a networking thread
|
||||
transportService.registerRequestHandler(QUERY_CAN_MATCH_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new,
|
||||
(request, channel, task) -> {
|
||||
boolean canMatch = searchService.canMatch(request);
|
||||
channel.sendResponse(new CanMatchResponse(canMatch));
|
||||
searchService.canMatch(request, new ChannelActionListener<>(channel, QUERY_CAN_MATCH_NAME, request));
|
||||
});
|
||||
TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME,
|
||||
(Supplier<TransportResponse>) CanMatchResponse::new);
|
||||
}
|
||||
|
||||
public static final class CanMatchResponse extends SearchPhaseResult {
|
||||
private boolean canMatch;
|
||||
|
||||
public CanMatchResponse() {
|
||||
}
|
||||
|
||||
public CanMatchResponse(boolean canMatch) {
|
||||
this.canMatch = canMatch;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
canMatch = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeBoolean(canMatch);
|
||||
}
|
||||
|
||||
public boolean canMatch() {
|
||||
return canMatch;
|
||||
}
|
||||
(Supplier<TransportResponse>) SearchService.CanMatchResponse::new);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
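The common thread in the hunk above is that the request handlers no longer compute a result and call channel.sendResponse(...) inline; the SearchService methods now take an ActionListener backed by the transport channel. The inline anonymous listeners removed further down in this diff do exactly the same forwarding. A minimal, simplified sketch of such a channel-backed listener (illustration only, not the patch's HandledTransportAction.ChannelActionListener):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportResponse;

// Illustration only: forward success or failure from whichever thread completed the work
// back over the transport channel that carried the request.
final class ChannelBackedListener<T extends TransportResponse> implements ActionListener<T> {
    private final TransportChannel channel;

    ChannelBackedListener(TransportChannel channel) {
        this.channel = channel;
    }

    @Override
    public void onResponse(T response) {
        try {
            channel.sendResponse(response);
        } catch (Exception e) {
            onFailure(e);
        }
    }

    @Override
    public void onFailure(Exception e) {
        try {
            channel.sendResponse(e);
        } catch (Exception inner) {
            // the channel is most likely broken; nothing more can be done here
        }
    }
}
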
@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
|
|||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
|
@ -57,6 +58,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
|
|||
protected final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
|
||||
final String transportShardAction;
|
||||
private final String shardExecutor;
|
||||
|
||||
protected TransportBroadcastAction(Settings settings, String actionName, ClusterService clusterService,
|
||||
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
|
@ -66,8 +68,9 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
|
|||
this.transportService = transportService;
|
||||
this.indexNameExpressionResolver = indexNameExpressionResolver;
|
||||
this.transportShardAction = actionName + "[s]";
|
||||
this.shardExecutor = shardExecutor;
|
||||
|
||||
transportService.registerRequestHandler(transportShardAction, shardRequest, shardExecutor, new ShardTransportHandler());
|
||||
transportService.registerRequestHandler(transportShardAction, shardRequest, ThreadPool.Names.SAME, new ShardTransportHandler());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -276,7 +279,45 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
|
|||
|
||||
@Override
|
||||
public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception {
|
||||
channel.sendResponse(shardOperation(request, task));
|
||||
asyncShardOperation(request, task, new ActionListener<ShardResponse>() {
|
||||
@Override
|
||||
public void onResponse(ShardResponse response) {
|
||||
try {
|
||||
channel.sendResponse(response);
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
try {
|
||||
channel.sendResponse(e);
|
||||
} catch (Exception e1) {
|
||||
logger.warn(() -> new ParameterizedMessage(
|
||||
"Failed to send error response for action [{}] and request [{}]", actionName, request), e1);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
protected void asyncShardOperation(ShardRequest request, Task task, ActionListener<ShardResponse> listener) {
|
||||
transportService.getThreadPool().executor(getExecutor(request)).execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
listener.onResponse(shardOperation(request, task));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
protected String getExecutor(ShardRequest request) {
|
||||
return shardExecutor;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.action.NoShardAvailableActionException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.action.support.TransportAction;
|
||||
import org.elasticsearch.action.support.TransportActions;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
|
@ -49,7 +50,6 @@ import org.elasticsearch.transport.TransportResponseHandler;
|
|||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.UncheckedIOException;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException;
|
||||
|
@ -66,8 +66,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
|
|||
protected final TransportService transportService;
|
||||
protected final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
|
||||
final String transportShardAction;
|
||||
final String executor;
|
||||
private final String transportShardAction;
|
||||
private final String executor;
|
||||
|
||||
protected TransportSingleShardAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
|
@ -104,7 +104,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
|
|||
protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException;
|
||||
|
||||
protected void asyncShardOperation(Request request, ShardId shardId, ActionListener<Response> listener) throws IOException {
|
||||
threadPool.executor(this.executor).execute(new AbstractRunnable() {
|
||||
threadPool.executor(getExecutor(request, shardId)).execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
|
@ -274,25 +274,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
|
|||
@Override
|
||||
public void messageReceived(Request request, final TransportChannel channel, Task task) throws Exception {
|
||||
// if we have a local operation, execute it on a thread since we don't spawn
|
||||
execute(request, new ActionListener<Response>() {
|
||||
@Override
|
||||
public void onResponse(Response result) {
|
||||
try {
|
||||
channel.sendResponse(result);
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
try {
|
||||
channel.sendResponse(e);
|
||||
} catch (Exception e1) {
|
||||
logger.warn("failed to send response for get", e1);
|
||||
}
|
||||
}
|
||||
});
|
||||
execute(request, new HandledTransportAction.ChannelActionListener<>(channel, actionName, request));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -303,25 +285,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
|
|||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("executing [{}] on shard [{}]", request, request.internalShardId);
|
||||
}
|
||||
asyncShardOperation(request, request.internalShardId, new ActionListener<Response>() {
|
||||
@Override
|
||||
public void onResponse(Response response) {
|
||||
try {
|
||||
channel.sendResponse(response);
|
||||
} catch (IOException e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
try {
|
||||
channel.sendResponse(e);
|
||||
} catch (IOException e1) {
|
||||
throw new UncheckedIOException(e1);
|
||||
}
|
||||
}
|
||||
});
|
||||
asyncShardOperation(request, request.internalShardId, new HandledTransportAction.ChannelActionListener<>(channel,
|
||||
transportShardAction, request));
|
||||
}
|
||||
}
|
||||
/**
|
||||
|
@ -344,4 +309,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
|
|||
return concreteIndex;
|
||||
}
|
||||
}
|
||||
|
||||
protected String getExecutor(Request request, ShardId shardId) {
|
||||
return executor;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -96,4 +96,11 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
 
         return response;
     }
+
+    @Override
+    protected String getExecutor(MultiTermVectorsShardRequest request, ShardId shardId) {
+        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
+            shardId);
+    }
 }

@@ -113,4 +113,11 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
     protected TermVectorsResponse newResponse() {
         return new TermVectorsResponse();
     }
+
+    @Override
+    protected String getExecutor(TermVectorsRequest request, ShardId shardId) {
+        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
+            shardId);
+    }
 }

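Both term-vectors hunks above override getExecutor in the same way. Condensed to its essence, as a sketch that assumes an IndexSettings instance with the new isSearchThrottled() getter and uses defaultExecutor as a stand-in for whatever super.getExecutor(...) would return:

// Sketch only: route shard-level work of a search-throttled index to the dedicated pool.
private static String pickExecutor(IndexSettings indexSettings, String defaultExecutor) {
    return indexSettings.isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : defaultExecutor;
}
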
@@ -216,11 +216,6 @@ final class Bootstrap {
                 final BoundTransportAddress boundTransportAddress, List<BootstrapCheck> checks) throws NodeValidationException {
                 BootstrapChecks.check(context, boundTransportAddress, checks);
             }
-
-            @Override
-            protected void registerDerivedNodeNameWithLogger(String nodeName) {
-                LogConfigurator.setNodeName(nodeName);
-            }
         };
     }
 
@@ -260,7 +255,9 @@ final class Bootstrap {
         if (secureSettings != null) {
            builder.setSecureSettings(secureSettings);
         }
-        return InternalSettingsPreparer.prepareEnvironment(builder.build(), Collections.emptyMap(), configPath);
+        return InternalSettingsPreparer.prepareEnvironment(builder.build(), Collections.emptyMap(), configPath,
+            // HOSTNAME is set by elasticsearch-env and elasticsearch-env.bat so it is always available
+            () -> System.getenv("HOSTNAME"));
     }
 
     private void start() throws NodeValidationException {
@@ -293,9 +290,7 @@ final class Bootstrap {
         final SecureSettings keystore = loadSecureSettings(initialEnv);
         final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile());
 
-        if (Node.NODE_NAME_SETTING.exists(environment.settings())) {
-            LogConfigurator.setNodeName(Node.NODE_NAME_SETTING.get(environment.settings()));
-        }
+        LogConfigurator.setNodeName(Node.NODE_NAME_SETTING.get(environment.settings()));
         try {
             LogConfigurator.configure(environment);
         } catch (IOException e) {

@@ -92,7 +92,10 @@ public abstract class EnvironmentAwareCommand extends Command {
         if (esPathConf == null) {
             throw new UserException(ExitCodes.CONFIG, "the system property [es.path.conf] must be set");
         }
-        return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, settings, getConfigPath(esPathConf));
+        return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, settings,
+            getConfigPath(esPathConf),
+            // HOSTNAME is set by elasticsearch-env and elasticsearch-env.bat so it is always available
+            () -> System.getenv("HOSTNAME"));
     }
 
     @SuppressForbidden(reason = "need path to construct environment")

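Both the Bootstrap and EnvironmentAwareCommand hunks above pass () -> System.getenv("HOSTNAME") as the new defaultNodeName supplier. As the InternalSettingsPreparer hunk later in this diff shows, the supplier is only consulted when node.name is absent from the settings; roughly (names taken from that later hunk):

// Sketch of the fallback in finalizeSettings(), shown further down in this diff.
if (output.get(Node.NODE_NAME_SETTING.getKey()) == null) {
    output.put(Node.NODE_NAME_SETTING.getKey(), defaultNodeName.get()); // e.g. the HOSTNAME environment variable
}
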
@@ -279,8 +279,7 @@ public class LogConfigurator {
      * {@code es.logs.cluster_name} the cluster name, used as the prefix of log filenames in the default configuration
      * </li>
      * <li>
-     * {@code es.logs.node_name} the node name, can be used as part of log filenames (only exposed if {@link Node#NODE_NAME_SETTING} is
-     * explicitly set)
+     * {@code es.logs.node_name} the node name, can be used as part of log filenames
      * </li>
      * </ul>
      *
@@ -291,9 +290,7 @@ public class LogConfigurator {
     private static void setLogConfigurationSystemProperty(final Path logsPath, final Settings settings) {
         System.setProperty("es.logs.base_path", logsPath.toString());
         System.setProperty("es.logs.cluster_name", ClusterName.CLUSTER_NAME_SETTING.get(settings).value());
-        if (Node.NODE_NAME_SETTING.exists(settings)) {
-            System.setProperty("es.logs.node_name", Node.NODE_NAME_SETTING.get(settings));
-        }
+        System.setProperty("es.logs.node_name", Node.NODE_NAME_SETTING.get(settings));
     }
 
 }

@@ -57,21 +57,22 @@ public final class NodeNamePatternConverter extends LogEventPatternConverter {
             throw new IllegalArgumentException("no options supported but options provided: "
                     + Arrays.toString(options));
         }
-        return new NodeNamePatternConverter();
+        String nodeName = NODE_NAME.get();
+        if (nodeName == null) {
+            throw new IllegalStateException("the node name hasn't been set");
+        }
+        return new NodeNamePatternConverter(nodeName);
     }
 
-    private NodeNamePatternConverter() {
+    private final String nodeName;
+
+    private NodeNamePatternConverter(String nodeName) {
         super("NodeName", "node_name");
+        this.nodeName = nodeName;
     }
 
     @Override
     public void format(LogEvent event, StringBuilder toAppendTo) {
-        /*
-         * We're not thrilled about this volatile read on every line logged but
-         * the alternatives are slightly terrifying and/or don't work with the
-         * security manager.
-         */
-        String nodeName = NODE_NAME.get();
-        toAppendTo.append(nodeName == null ? "unknown" : nodeName);
+        toAppendTo.append(nodeName);
     }
 }

@@ -142,6 +142,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING,
         IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING,
         IndexSettings.INDEX_SEARCH_IDLE_AFTER,
+        IndexSettings.INDEX_SEARCH_THROTTLED,
         IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
         FieldMapper.IGNORE_MALFORMED_SETTING,
         FieldMapper.COERCE_SETTING,

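The INDEX_SEARCH_THROTTLED constant registered above is declared in the IndexSettings hunks further down in this diff; condensed, the declaration and the dynamic-update wiring there are:

// Declaration (IndexSettings), as shown in the later hunk:
public static final Setting<Boolean> INDEX_SEARCH_THROTTLED = Setting.boolSetting("index.search.throttled", false,
    Property.IndexScope, Property.PrivateIndex, Property.Dynamic);

// Dynamic-update wiring (IndexSettings constructor), also shown later:
scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled);
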
@ -78,7 +78,6 @@ import java.util.concurrent.Semaphore;
|
|||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
import static java.util.Collections.unmodifiableSet;
|
||||
|
||||
|
@ -234,18 +233,14 @@ public final class NodeEnvironment implements Closeable {
|
|||
/**
|
||||
* Setup the environment.
|
||||
* @param settings settings from elasticsearch.yml
|
||||
* @param nodeIdConsumer called as soon as the node id is available to the
|
||||
* node name in log messages if it wasn't loaded from
|
||||
* elasticsearch.yml
|
||||
*/
|
||||
public NodeEnvironment(Settings settings, Environment environment, Consumer<String> nodeIdConsumer) throws IOException {
|
||||
public NodeEnvironment(Settings settings, Environment environment) throws IOException {
|
||||
if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
|
||||
nodePaths = null;
|
||||
sharedDataPath = null;
|
||||
locks = null;
|
||||
nodeLockId = -1;
|
||||
nodeMetaData = new NodeMetaData(generateNodeId(settings));
|
||||
nodeIdConsumer.accept(nodeMetaData.nodeId());
|
||||
return;
|
||||
}
|
||||
boolean success = false;
|
||||
|
@ -295,7 +290,6 @@ public final class NodeEnvironment implements Closeable {
|
|||
this.nodePaths = nodeLock.nodePaths;
|
||||
this.nodeLockId = nodeLock.nodeId;
|
||||
this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths);
|
||||
nodeIdConsumer.accept(nodeMetaData.nodeId());
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("using node location [{}], local_lock_id [{}]", nodePaths, nodeLockId);
|
||||
|
|
|
@ -277,6 +277,12 @@ public final class IndexSettings {
|
|||
return s;
|
||||
}, Property.Dynamic, Property.IndexScope);
|
||||
|
||||
/**
|
||||
* Marks an index to be searched throttled. This means that never more than one shard of such an index will be searched concurrently
|
||||
*/
|
||||
public static final Setting<Boolean> INDEX_SEARCH_THROTTLED = Setting.boolSetting("index.search.throttled", false,
|
||||
Property.IndexScope, Property.PrivateIndex, Property.Dynamic);
|
||||
|
||||
private final Index index;
|
||||
private final Version version;
|
||||
private final Logger logger;
|
||||
|
@ -319,6 +325,7 @@ public final class IndexSettings {
|
|||
private volatile int maxAnalyzedOffset;
|
||||
private volatile int maxTermsCount;
|
||||
private volatile String defaultPipeline;
|
||||
private volatile boolean searchThrottled;
|
||||
|
||||
/**
|
||||
* The maximum number of refresh listeners allows on this shard.
|
||||
|
@ -402,6 +409,7 @@ public final class IndexSettings {
|
|||
this.indexMetaData = indexMetaData;
|
||||
numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
|
||||
|
||||
this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings);
|
||||
this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings);
|
||||
this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings);
|
||||
this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings);
|
||||
|
@ -478,6 +486,7 @@ public final class IndexSettings {
|
|||
scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength);
|
||||
scopedSettings.addSettingsUpdateConsumer(DEFAULT_PIPELINE, this::setDefaultPipeline);
|
||||
scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations);
|
||||
scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled);
|
||||
}
|
||||
|
||||
private void setSearchIdleAfter(TimeValue searchIdleAfter) { this.searchIdleAfter = searchIdleAfter; }
|
||||
|
@ -879,4 +888,16 @@ public final class IndexSettings {
|
|||
public long getSoftDeleteRetentionOperations() {
|
||||
return this.softDeleteRetentionOperations;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the this index should be searched throttled ie. using the
|
||||
* {@link org.elasticsearch.threadpool.ThreadPool.Names#SEARCH_THROTTLED} thread-pool
|
||||
*/
|
||||
public boolean isSearchThrottled() {
|
||||
return searchThrottled;
|
||||
}
|
||||
|
||||
private void setSearchThrottled(boolean searchThrottled) {
|
||||
this.searchThrottled = searchThrottled;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -167,17 +167,7 @@ public final class AnalysisRegistry implements Closeable {
|
|||
tokenFilters.put("synonym", requiresAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
|
||||
tokenFilters.put("synonym_graph", requiresAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings)));
|
||||
|
||||
Map<String, TokenFilterFactory> mappings
|
||||
= buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters);
|
||||
|
||||
// ReferringTokenFilters require references to other tokenfilters, so we pass these in
|
||||
// after all factories have been registered
|
||||
for (TokenFilterFactory tff : mappings.values()) {
|
||||
if (tff instanceof ReferringFilterFactory) {
|
||||
((ReferringFilterFactory)tff).setReferences(mappings);
|
||||
}
|
||||
}
|
||||
return mappings;
|
||||
return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters);
|
||||
}
|
||||
|
||||
public Map<String, TokenizerFactory> buildTokenizerFactories(IndexSettings indexSettings) throws IOException {
|
||||
|
|
|
@ -81,9 +81,7 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
|
|||
if (tokenFilter == null) {
|
||||
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
|
||||
}
|
||||
// no need offsetGap for tokenize synonyms
|
||||
tokenFilter = checkAndApplySynonymFilter(tokenFilter, tokenizerName, tokenizer, tokenFilterList, charFiltersList,
|
||||
this.environment);
|
||||
tokenFilter = tokenFilter.getChainAwareTokenFilterFactory(tokenizer, charFiltersList, tokenFilterList, tokenFilters::get);
|
||||
tokenFilterList.add(tokenFilter);
|
||||
}
|
||||
|
||||
|
@ -95,33 +93,6 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
|
|||
);
|
||||
}
|
||||
|
||||
public static TokenFilterFactory checkAndApplySynonymFilter(TokenFilterFactory tokenFilter, String tokenizerName, TokenizerFactory tokenizer,
|
||||
List<TokenFilterFactory> tokenFilterList,
|
||||
List<CharFilterFactory> charFiltersList, Environment env) {
|
||||
if (tokenFilter instanceof SynonymGraphTokenFilterFactory) {
|
||||
List<TokenFilterFactory> tokenFiltersListForSynonym = new ArrayList<>(tokenFilterList);
|
||||
|
||||
try (CustomAnalyzer analyzer = new CustomAnalyzer(tokenizerName, tokenizer,
|
||||
charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]),
|
||||
tokenFiltersListForSynonym.toArray(new TokenFilterFactory[tokenFiltersListForSynonym.size()]),
|
||||
TextFieldMapper.Defaults.POSITION_INCREMENT_GAP,
|
||||
-1)){
|
||||
tokenFilter = ((SynonymGraphTokenFilterFactory) tokenFilter).createPerAnalyzerSynonymGraphFactory(analyzer, env);
|
||||
}
|
||||
|
||||
} else if (tokenFilter instanceof SynonymTokenFilterFactory) {
|
||||
List<TokenFilterFactory> tokenFiltersListForSynonym = new ArrayList<>(tokenFilterList);
|
||||
try (CustomAnalyzer analyzer = new CustomAnalyzer(tokenizerName, tokenizer,
|
||||
charFiltersList.toArray(new CharFilterFactory[charFiltersList.size()]),
|
||||
tokenFiltersListForSynonym.toArray(new TokenFilterFactory[tokenFiltersListForSynonym.size()]),
|
||||
TextFieldMapper.Defaults.POSITION_INCREMENT_GAP,
|
||||
-1)) {
|
||||
tokenFilter = ((SynonymTokenFilterFactory) tokenFilter).createPerAnalyzerSynonymFactory(analyzer, env);
|
||||
}
|
||||
}
|
||||
return tokenFilter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public CustomAnalyzer get() {
|
||||
return this.customAnalyzer;
|
||||
|
|
|
@ -1,37 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Marks a {@link TokenFilterFactory} that refers to other filter factories.
|
||||
*
|
||||
* The analysis registry will call {@link #setReferences(Map)} with a map of all
|
||||
* available TokenFilterFactories after all factories have been registered
|
||||
*/
|
||||
public interface ReferringFilterFactory {
|
||||
|
||||
/**
|
||||
* Called with a map of all registered filter factories
|
||||
*/
|
||||
void setReferences(Map<String, TokenFilterFactory> factories);
|
||||
|
||||
}
|
|
@ -28,9 +28,11 @@ import org.elasticsearch.env.Environment;
|
|||
import org.elasticsearch.index.IndexSettings;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Reader;
|
||||
import java.util.List;
|
||||
import java.util.function.Function;
|
||||
|
||||
public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory {
|
||||
|
||||
public SynonymGraphTokenFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry,
|
||||
String name, Settings settings) throws IOException {
|
||||
super(indexSettings, env, analysisRegistry, name, settings);
|
||||
|
@ -41,42 +43,24 @@ public class SynonymGraphTokenFilterFactory extends SynonymTokenFilterFactory {
|
|||
throw new IllegalStateException("Call createPerAnalyzerSynonymGraphFactory to specialize this factory for an analysis chain first");
|
||||
}
|
||||
|
||||
Factory createPerAnalyzerSynonymGraphFactory(Analyzer analyzerForParseSynonym, Environment env){
|
||||
return new Factory("synonymgraph", analyzerForParseSynonym, getRulesFromSettings(env));
|
||||
}
|
||||
|
||||
public class Factory implements TokenFilterFactory{
|
||||
|
||||
private final String name;
|
||||
private final SynonymMap synonymMap;
|
||||
|
||||
public Factory(String name, final Analyzer analyzerForParseSynonym, Reader rulesReader) {
|
||||
this.name = name;
|
||||
|
||||
try {
|
||||
SynonymMap.Builder parser;
|
||||
if ("wordnet".equalsIgnoreCase(format)) {
|
||||
parser = new ESWordnetSynonymParser(true, expand, lenient, analyzerForParseSynonym);
|
||||
((ESWordnetSynonymParser) parser).parse(rulesReader);
|
||||
} else {
|
||||
parser = new ESSolrSynonymParser(true, expand, lenient, analyzerForParseSynonym);
|
||||
((ESSolrSynonymParser) parser).parse(rulesReader);
|
||||
}
|
||||
synonymMap = parser.build();
|
||||
} catch (Exception e) {
|
||||
throw new IllegalArgumentException("failed to build synonyms", e);
|
||||
@Override
|
||||
public TokenFilterFactory getChainAwareTokenFilterFactory(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters,
|
||||
List<TokenFilterFactory> previousTokenFilters,
|
||||
Function<String, TokenFilterFactory> allFilters) {
|
||||
final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters);
|
||||
final SynonymMap synonyms = buildSynonyms(analyzer, getRulesFromSettings(environment));
|
||||
final String name = name();
|
||||
return new TokenFilterFactory() {
|
||||
@Override
|
||||
public String name() {
|
||||
return name;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
// fst is null means no synonyms
|
||||
return synonymMap.fst == null ? tokenStream : new SynonymGraphFilter(tokenStream, synonymMap, false);
|
||||
}
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
return synonyms.fst == null ? tokenStream : new SynonymGraphFilter(tokenStream, synonyms, false);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -31,6 +31,7 @@ import java.io.IOException;
|
|||
import java.io.Reader;
|
||||
import java.io.StringReader;
|
||||
import java.util.List;
|
||||
import java.util.function.Function;
|
||||
|
||||
public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
|
||||
|
||||
|
@ -38,6 +39,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
|
|||
protected final boolean expand;
|
||||
protected final boolean lenient;
|
||||
protected final Settings settings;
|
||||
protected final Environment environment;
|
||||
|
||||
public SynonymTokenFilterFactory(IndexSettings indexSettings, Environment env, AnalysisRegistry analysisRegistry,
|
||||
String name, Settings settings) throws IOException {
|
||||
|
@ -53,6 +55,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
|
|||
this.expand = settings.getAsBoolean("expand", true);
|
||||
this.lenient = settings.getAsBoolean("lenient", false);
|
||||
this.format = settings.get("format", "");
|
||||
this.environment = env;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -60,6 +63,50 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
|
|||
throw new IllegalStateException("Call createPerAnalyzerSynonymFactory to specialize this factory for an analysis chain first");
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenFilterFactory getChainAwareTokenFilterFactory(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters,
|
||||
List<TokenFilterFactory> previousTokenFilters,
|
||||
Function<String, TokenFilterFactory> allFilters) {
|
||||
final Analyzer analyzer = buildSynonymAnalyzer(tokenizer, charFilters, previousTokenFilters);
|
||||
final SynonymMap synonyms = buildSynonyms(analyzer, getRulesFromSettings(environment));
|
||||
final String name = name();
|
||||
return new TokenFilterFactory() {
|
||||
@Override
|
||||
public String name() {
|
||||
return name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
return synonyms.fst == null ? tokenStream : new SynonymFilter(tokenStream, synonyms, false);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
protected Analyzer buildSynonymAnalyzer(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters,
|
||||
List<TokenFilterFactory> tokenFilters) {
|
||||
return new CustomAnalyzer("synonyms", tokenizer, charFilters.toArray(new CharFilterFactory[0]),
|
||||
tokenFilters.stream()
|
||||
.map(TokenFilterFactory::getSynonymFilter)
|
||||
.toArray(TokenFilterFactory[]::new));
|
||||
}
|
||||
|
||||
protected SynonymMap buildSynonyms(Analyzer analyzer, Reader rules) {
|
||||
try {
|
||||
SynonymMap.Builder parser;
|
||||
if ("wordnet".equalsIgnoreCase(format)) {
|
||||
parser = new ESWordnetSynonymParser(true, expand, lenient, analyzer);
|
||||
((ESWordnetSynonymParser) parser).parse(rules);
|
||||
} else {
|
||||
parser = new ESSolrSynonymParser(true, expand, lenient, analyzer);
|
||||
((ESSolrSynonymParser) parser).parse(rules);
|
||||
}
|
||||
return parser.build();
|
||||
} catch (Exception e) {
|
||||
throw new IllegalArgumentException("failed to build synonyms", e);
|
||||
}
|
||||
}
|
||||
|
||||
protected Reader getRulesFromSettings(Environment env) {
|
||||
Reader rulesReader;
|
||||
if (settings.getAsList("synonyms", null) != null) {
|
||||
|
@ -77,44 +124,4 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
|
|||
return rulesReader;
|
||||
}
|
||||
|
||||
Factory createPerAnalyzerSynonymFactory(Analyzer analyzerForParseSynonym, Environment env){
|
||||
return new Factory("synonym", analyzerForParseSynonym, getRulesFromSettings(env));
|
||||
}
|
||||
|
||||
public class Factory implements TokenFilterFactory{
|
||||
|
||||
private final String name;
|
||||
private final SynonymMap synonymMap;
|
||||
|
||||
public Factory(String name, Analyzer analyzerForParseSynonym, Reader rulesReader) {
|
||||
|
||||
this.name = name;
|
||||
|
||||
try {
|
||||
SynonymMap.Builder parser;
|
||||
if ("wordnet".equalsIgnoreCase(format)) {
|
||||
parser = new ESWordnetSynonymParser(true, expand, lenient, analyzerForParseSynonym);
|
||||
((ESWordnetSynonymParser) parser).parse(rulesReader);
|
||||
} else {
|
||||
parser = new ESSolrSynonymParser(true, expand, lenient, analyzerForParseSynonym);
|
||||
((ESSolrSynonymParser) parser).parse(rulesReader);
|
||||
}
|
||||
synonymMap = parser.build();
|
||||
} catch (Exception e) {
|
||||
throw new IllegalArgumentException("failed to build synonyms", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
// fst is null means no synonyms
|
||||
return synonymMap.fst == null ? tokenStream : new SynonymFilter(tokenStream, synonymMap, false);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -23,6 +23,9 @@ import org.apache.lucene.analysis.TokenStream;
|
|||
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
||||
import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.function.Function;
|
||||
|
||||
public interface TokenFilterFactory {
|
||||
String name();
|
||||
|
||||
|
@ -36,4 +39,43 @@ public interface TokenFilterFactory {
|
|||
default boolean breaksFastVectorHighlighter() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Rewrite the TokenFilterFactory to take into account the preceding analysis chain, or refer
|
||||
* to other TokenFilterFactories
|
||||
* @param tokenizer the TokenizerFactory for the preceding chain
|
||||
* @param charFilters any CharFilterFactories for the preceding chain
|
||||
* @param previousTokenFilters a list of TokenFilterFactories in the preceding chain
|
||||
* @param allFilters access to previously defined TokenFilterFactories
|
||||
*/
|
||||
default TokenFilterFactory getChainAwareTokenFilterFactory(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters,
|
||||
List<TokenFilterFactory> previousTokenFilters,
|
||||
Function<String, TokenFilterFactory> allFilters) {
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a version of this TokenFilterFactory appropriate for synonym parsing
|
||||
*
|
||||
* Filters that should not be applied to synonyms (for example, those that produce
|
||||
* multiple tokens) can return {@link #IDENTITY_FILTER}
|
||||
*/
|
||||
default TokenFilterFactory getSynonymFilter() {
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* A TokenFilterFactory that does no filtering to its TokenStream
|
||||
*/
|
||||
TokenFilterFactory IDENTITY_FILTER = new TokenFilterFactory() {
|
||||
@Override
|
||||
public String name() {
|
||||
return "identity";
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream create(TokenStream tokenStream) {
|
||||
return tokenStream;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
@ -19,9 +19,11 @@
|
|||
|
||||
package org.elasticsearch.index.engine;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectLongHashMap;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.IndexFileNames;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
|
@ -32,8 +34,10 @@ import org.apache.lucene.index.SegmentCommitInfo;
|
|||
import org.apache.lucene.index.SegmentInfos;
|
||||
import org.apache.lucene.index.SegmentReader;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.ReferenceManager;
|
||||
import org.apache.lucene.search.suggest.document.CompletionTerms;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
|
@ -42,6 +46,7 @@ import org.apache.lucene.util.Accountables;
|
|||
import org.apache.lucene.util.SetOnce;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.CheckedRunnable;
|
||||
import org.elasticsearch.common.FieldMemoryStats;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
|
@ -56,6 +61,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
|
|||
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver;
|
||||
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
|
||||
import org.elasticsearch.common.metrics.CounterMetric;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ReleasableLock;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
|
@ -71,6 +77,7 @@ import org.elasticsearch.index.shard.ShardId;
|
|||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.index.translog.TranslogStats;
|
||||
import org.elasticsearch.search.suggest.completion.CompletionStats;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.FileNotFoundException;
|
||||
|
@ -176,6 +183,34 @@ public abstract class Engine implements Closeable {
|
|||
/** Returns how many bytes we are currently moving from heap to disk */
|
||||
public abstract long getWritingBytes();
|
||||
|
||||
/**
|
||||
* Returns the {@link CompletionStats} for this engine
|
||||
*/
|
||||
public CompletionStats completionStats(String... fieldNamePatterns) throws IOException {
|
||||
try (Engine.Searcher currentSearcher = acquireSearcher("completion_stats", SearcherScope.INTERNAL)) {
|
||||
long sizeInBytes = 0;
|
||||
ObjectLongHashMap<String> completionFields = null;
|
||||
if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
|
||||
completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
|
||||
}
|
||||
for (LeafReaderContext atomicReaderContext : currentSearcher.reader().leaves()) {
|
||||
LeafReader atomicReader = atomicReaderContext.reader();
|
||||
for (FieldInfo info : atomicReader.getFieldInfos()) {
|
||||
Terms terms = atomicReader.terms(info.name);
|
||||
if (terms instanceof CompletionTerms) {
|
||||
// TODO: currently we load up the suggester for reporting its size
|
||||
long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
|
||||
if (Regex.simpleMatch(fieldNamePatterns, info.name)) {
|
||||
completionFields.addTo(info.name, fstSize);
|
||||
}
|
||||
sizeInBytes += fstSize;
|
||||
}
|
||||
}
|
||||
}
|
||||
return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the {@link DocsStats} for this engine
|
||||
*/
|
||||
|
|
|
@@ -97,7 +97,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
         final IndexFieldDataCache cache = fieldDataCaches.remove(fieldName);
         if (cache != null) {
             try {
-                cache.clear();
+                cache.clear(fieldName);
             } catch (Exception e) {
                 exceptions.add(e);
             }

@@ -158,6 +158,25 @@ public class LocalCheckpointTracker {
         }
     }
 
+    /**
+     * Checks if the given sequence number was marked as completed in this tracker.
+     */
+    public boolean contains(final long seqNo) {
+        assert seqNo >= 0 : "invalid seq_no=" + seqNo;
+        if (seqNo >= nextSeqNo) {
+            return false;
+        }
+        if (seqNo <= checkpoint) {
+            return true;
+        }
+        final long bitSetKey = getBitSetKey(seqNo);
+        final CountedBitSet bitSet;
+        synchronized (this) {
+            bitSet = processedSeqNo.get(bitSetKey);
+        }
+        return bitSet != null && bitSet.get(seqNoToBitSetOffset(seqNo));
+    }
+
     /**
      * Moves the checkpoint to the last consecutively processed sequence number. This method assumes that the sequence number following the
      * current checkpoint is processed.
@@ -206,7 +225,6 @@ public class LocalCheckpointTracker {
      * @return the bit set corresponding to the provided sequence number
      */
     private long getBitSetKey(final long seqNo) {
-        assert Thread.holdsLock(this);
         return seqNo / BIT_SET_SIZE;
     }
 
@@ -232,7 +250,6 @@ public class LocalCheckpointTracker {
      * @return the position in the bit set corresponding to the provided sequence number
      */
     private int seqNoToBitSetOffset(final long seqNo) {
-        assert Thread.holdsLock(this);
         return Math.toIntExact(seqNo % BIT_SET_SIZE);
     }
 

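A hypothetical usage sketch of the new contains() method; the constructor arguments and markSeqNoAsCompleted are assumptions based on the rest of this class, not part of the patch:

// Sketch only: mark a few sequence numbers as completed, then query the tracker.
LocalCheckpointTracker tracker = new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
tracker.markSeqNoAsCompleted(0);          // checkpoint advances to 0
tracker.markSeqNoAsCompleted(2);          // above the checkpoint, tracked in a bit set
assert tracker.contains(0);               // at or below the checkpoint -> true
assert tracker.contains(2);               // found via the per-range bit set -> true
assert tracker.contains(1) == false;      // never marked -> false
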
@ -128,13 +128,13 @@ import org.elasticsearch.indices.recovery.RecoveryState;
|
|||
import org.elasticsearch.repositories.RepositoriesService;
|
||||
import org.elasticsearch.repositories.Repository;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.suggest.completion.CompletionFieldStats;
|
||||
import org.elasticsearch.search.suggest.completion.CompletionStats;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.io.UncheckedIOException;
|
||||
import java.nio.channels.ClosedByInterruptException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
|
@ -875,6 +875,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
}
|
||||
|
||||
public DocsStats docStats() {
|
||||
readAllowed();
|
||||
DocsStats docsStats = getEngine().docStats();
|
||||
markSearcherAccessed();
|
||||
return docsStats;
|
||||
|
@ -954,14 +955,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
}
|
||||
|
||||
public CompletionStats completionStats(String... fields) {
|
||||
CompletionStats completionStats = new CompletionStats();
|
||||
try (Engine.Searcher currentSearcher = acquireSearcher("completion_stats")) {
|
||||
readAllowed();
|
||||
try {
|
||||
CompletionStats stats = getEngine().completionStats(fields);
|
||||
// we don't wait for a pending refreshes here since it's a stats call instead we mark it as accessed only which will cause
|
||||
// the next scheduled refresh to go through and refresh the stats as well
|
||||
markSearcherAccessed();
|
||||
completionStats.add(CompletionFieldStats.completionStats(currentSearcher.reader(), fields));
|
||||
return stats;
|
||||
} catch (IOException e) {
|
||||
throw new UncheckedIOException(e);
|
||||
}
|
||||
return completionStats;
|
||||
}
|
||||
|
||||
public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
|
||||
|
|
|
@ -26,14 +26,15 @@ import java.util.ArrayList;
|
|||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsException;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.Node;
|
||||
|
||||
public class InternalSettingsPreparer {
|
||||
|
||||
|
@ -41,34 +42,27 @@ public class InternalSettingsPreparer {
|
|||
private static final String TEXT_PROMPT_VALUE = "${prompt.text}";
|
||||
|
||||
/**
|
||||
* Prepares the settings by gathering all elasticsearch system properties and setting defaults.
|
||||
* Prepares settings for the transport client by gathering all
|
||||
* elasticsearch system properties and setting defaults.
|
||||
*/
|
||||
public static Settings prepareSettings(Settings input) {
|
||||
Settings.Builder output = Settings.builder();
|
||||
initializeSettings(output, input, Collections.emptyMap());
|
||||
finalizeSettings(output);
|
||||
finalizeSettings(output, () -> null);
|
||||
return output.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings.
|
||||
*
|
||||
* @param input The custom settings to use. These are not overwritten by settings in the configuration file.
|
||||
* @return the {@link Settings} and {@link Environment} as a {@link Tuple}
|
||||
*/
|
||||
public static Environment prepareEnvironment(Settings input) {
|
||||
return prepareEnvironment(input, Collections.emptyMap(), null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings.
|
||||
*
|
||||
* @param input the custom settings to use; these are not overwritten by settings in the configuration file
|
||||
* @param properties map of properties key/value pairs (usually from the command-line)
|
||||
* @param configPath path to config directory; (use null to indicate the default)
|
||||
* @return the {@link Settings} and {@link Environment} as a {@link Tuple}
|
||||
* @param defaultNodeName supplier for the default node.name if the setting isn't defined
|
||||
* @return the {@link Environment}
|
||||
*/
|
||||
public static Environment prepareEnvironment(Settings input, Map<String, String> properties, Path configPath) {
|
||||
public static Environment prepareEnvironment(Settings input, Map<String, String> properties,
|
||||
Path configPath, Supplier<String> defaultNodeName) {
|
||||
// just create enough settings to build the environment, to get the config dir
|
||||
Settings.Builder output = Settings.builder();
|
||||
initializeSettings(output, input, properties);
|
||||
|
@ -95,7 +89,7 @@ public class InternalSettingsPreparer {
|
|||
// re-initialize settings now that the config file has been loaded
|
||||
initializeSettings(output, input, properties);
|
||||
checkSettingsForTerminalDeprecation(output);
|
||||
finalizeSettings(output);
|
||||
finalizeSettings(output, defaultNodeName);
|
||||
|
||||
environment = new Environment(output.build(), configPath);
|
||||
|
||||
|
@ -140,7 +134,7 @@ public class InternalSettingsPreparer {
|
|||
/**
|
||||
* Finish preparing settings by replacing forced settings and any defaults that need to be added.
|
||||
*/
|
||||
private static void finalizeSettings(Settings.Builder output) {
|
||||
private static void finalizeSettings(Settings.Builder output, Supplier<String> defaultNodeName) {
|
||||
// allow to force set properties based on configuration of the settings provided
|
||||
List<String> forcedSettings = new ArrayList<>();
|
||||
for (String setting : output.keys()) {
|
||||
|
@ -154,9 +148,12 @@ public class InternalSettingsPreparer {
|
|||
}
|
||||
output.replacePropertyPlaceholders();
|
||||
|
||||
// put the cluster name
|
||||
// put the cluster and node name if they aren't set
|
||||
if (output.get(ClusterName.CLUSTER_NAME_SETTING.getKey()) == null) {
|
||||
output.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).value());
|
||||
}
|
||||
if (output.get(Node.NODE_NAME_SETTING.getKey()) == null) {
|
||||
output.put(Node.NODE_NAME_SETTING.getKey(), defaultNodeName.get());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -185,9 +185,7 @@ import static java.util.stream.Collectors.toList;
|
|||
* A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used
|
||||
* in order to use a {@link Client} to perform actions/operations against the cluster.
|
||||
*/
|
||||
public abstract class Node implements Closeable {
|
||||
|
||||
|
||||
public class Node implements Closeable {
|
||||
public static final Setting<Boolean> WRITE_PORTS_FILE_SETTING =
|
||||
Setting.boolSetting("node.portsfile", false, Property.NodeScope);
|
||||
public static final Setting<Boolean> NODE_DATA_SETTING = Setting.boolSetting("node.data", true, Property.NodeScope);
|
||||
|
@ -251,15 +249,6 @@ public abstract class Node implements Closeable {
|
|||
private final LocalNodeFactory localNodeFactory;
|
||||
private final NodeService nodeService;
|
||||
|
||||
/**
|
||||
* Constructs a node with the given settings.
|
||||
*
|
||||
* @param preparedSettings Base settings to configure the node with
|
||||
*/
|
||||
public Node(Settings preparedSettings) {
|
||||
this(InternalSettingsPreparer.prepareEnvironment(preparedSettings));
|
||||
}
|
||||
|
||||
public Node(Environment environment) {
|
||||
this(environment, Collections.emptyList(), true);
|
||||
}
|
||||
|
@ -282,33 +271,10 @@ public abstract class Node implements Closeable {
|
|||
Settings tmpSettings = Settings.builder().put(environment.settings())
|
||||
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build();
|
||||
|
||||
/*
|
||||
* Create the node environment as soon as possible so we can
|
||||
* recover the node id which we might have to use to derive the
|
||||
* node name. And it is important to get *that* as soon as possible
|
||||
* so that log lines can contain it.
|
||||
*/
|
||||
boolean nodeNameExplicitlyDefined = NODE_NAME_SETTING.exists(tmpSettings);
|
||||
try {
|
||||
Consumer<String> nodeIdConsumer = nodeNameExplicitlyDefined ?
|
||||
nodeId -> {} : nodeId -> registerDerivedNodeNameWithLogger(nodeIdToNodeName(nodeId));
|
||||
nodeEnvironment = new NodeEnvironment(tmpSettings, environment, nodeIdConsumer);
|
||||
resourcesToClose.add(nodeEnvironment);
|
||||
} catch (IOException ex) {
|
||||
throw new IllegalStateException("Failed to create node environment", ex);
|
||||
}
|
||||
if (nodeNameExplicitlyDefined) {
|
||||
logger.info("node name [{}], node ID [{}]",
|
||||
NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId());
|
||||
} else {
|
||||
tmpSettings = Settings.builder()
|
||||
.put(tmpSettings)
|
||||
.put(NODE_NAME_SETTING.getKey(), nodeIdToNodeName(nodeEnvironment.nodeId()))
|
||||
.build();
|
||||
logger.info("node name derived from node ID [{}]; set [{}] to override",
|
||||
nodeEnvironment.nodeId(), NODE_NAME_SETTING.getKey());
|
||||
}
|
||||
|
||||
nodeEnvironment = new NodeEnvironment(tmpSettings, environment);
|
||||
resourcesToClose.add(nodeEnvironment);
|
||||
logger.info("node name [{}], node ID [{}]",
|
||||
NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId());
|
||||
|
||||
final JvmInfo jvmInfo = JvmInfo.jvmInfo();
|
||||
logger.info(
|
||||
|
@ -1018,18 +984,6 @@ public abstract class Node implements Closeable {
|
|||
return networkModule.getHttpServerTransportSupplier().get();
|
||||
}
|
||||
|
||||
/**
|
||||
* If the node name was derived from the node id this is called with the
|
||||
* node name as soon as it is available so that we can register the
|
||||
* node name with the logger. If the node name defined in elasticsearch.yml
|
||||
* this is never called.
|
||||
*/
|
||||
protected abstract void registerDerivedNodeNameWithLogger(String nodeName);
|
||||
|
||||
private String nodeIdToNodeName(String nodeId) {
|
||||
return nodeId.substring(0, 7);
|
||||
}
|
||||
|
||||
private static class LocalNodeFactory implements Function<BoundTransportAddress, DiscoveryNode> {
|
||||
private final SetOnce<DiscoveryNode> localNode = new SetOnce<>();
|
||||
private final String persistentNodeId;
|
||||
|
|
|
@ -32,16 +32,30 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.Table;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheStats;
|
||||
import org.elasticsearch.index.engine.CommitStats;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.SegmentsStats;
|
||||
import org.elasticsearch.index.fielddata.FieldDataStats;
|
||||
import org.elasticsearch.index.flush.FlushStats;
|
||||
import org.elasticsearch.index.get.GetStats;
|
||||
import org.elasticsearch.index.merge.MergeStats;
|
||||
import org.elasticsearch.index.refresh.RefreshStats;
|
||||
import org.elasticsearch.index.search.stats.SearchStats;
|
||||
import org.elasticsearch.index.seqno.SeqNoStats;
|
||||
import org.elasticsearch.index.shard.DocsStats;
|
||||
import org.elasticsearch.index.store.StoreStats;
|
||||
import org.elasticsearch.index.warmer.WarmerStats;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.RestResponse;
|
||||
import org.elasticsearch.rest.action.RestActionListener;
|
||||
import org.elasticsearch.rest.action.RestResponseListener;
|
||||
import org.elasticsearch.search.suggest.completion.CompletionStats;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Locale;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static org.elasticsearch.rest.RestRequest.Method.GET;
|
||||
|
||||
|
@ -177,6 +191,16 @@ public class RestShardsAction extends AbstractCatAction {
return table;
}

private static <S, T> Object getOrNull(S stats, Function<S, T> accessor, Function<T, Object> func) {
if (stats != null) {
T t = accessor.apply(stats);
if (t != null) {
return func.apply(t);
}
}
return null;
}
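A usage sketch for the helper above, mirroring the cells added later in this diff; commonStats may be null for unassigned shards, in which case the cell is simply left empty:

// Null-safe: yields null when either commonStats or its docs/store section is missing.
table.addCell(getOrNull(commonStats, CommonStats::getDocs, DocsStats::getCount));
table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::getSize));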
private Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsResponse stats) {
|
||||
Table table = getTableWithHeader(request);
|
||||
|
||||
|
@ -200,8 +224,8 @@ public class RestShardsAction extends AbstractCatAction {
|
|||
table.addCell("r");
|
||||
}
|
||||
table.addCell(shard.state());
|
||||
table.addCell(commonStats == null ? null : commonStats.getDocs().getCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getStore().getSize());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getDocs, DocsStats::getCount));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::getSize));
|
||||
if (shard.assignedToNode()) {
|
||||
String ip = state.getState().nodes().get(shard.currentNodeId()).getHostAddress();
|
||||
String nodeId = shard.currentNodeId();
|
||||
|
@ -248,69 +272,69 @@ public class RestShardsAction extends AbstractCatAction {
|
|||
table.addCell(null);
|
||||
}
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getCompletion().getSize());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getCompletion, CompletionStats::getSize));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getFieldData().getMemorySize());
|
||||
table.addCell(commonStats == null ? null : commonStats.getFieldData().getEvictions());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getMemorySize));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getEvictions));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getQueryCache().getMemorySize());
|
||||
table.addCell(commonStats == null ? null : commonStats.getQueryCache().getEvictions());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getMemorySize));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getEvictions));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getFlush().getTotal());
|
||||
table.addCell(commonStats == null ? null : commonStats.getFlush().getTotalTime());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotal));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotalTime));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getGet().current());
|
||||
table.addCell(commonStats == null ? null : commonStats.getGet().getTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getGet().getCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getGet().getExistsTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getGet().getExistsCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getGet().getMissingTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getGet().getMissingCount());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getGet, GetStats::current));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getGet, GetStats::getTime));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getGet, GetStats::getCount));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsTime));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsCount));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingTime));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingCount));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getIndexing().getTotal().getDeleteCurrent());
|
||||
table.addCell(commonStats == null ? null : commonStats.getIndexing().getTotal().getDeleteTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getIndexing().getTotal().getDeleteCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getIndexing().getTotal().getIndexCurrent());
|
||||
table.addCell(commonStats == null ? null : commonStats.getIndexing().getTotal().getIndexTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getIndexing().getTotal().getIndexCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getIndexing().getTotal().getIndexFailedCount());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getIndexing, i -> i.getTotal().getDeleteCurrent()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getIndexing, i -> i.getTotal().getDeleteTime()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getIndexing, i -> i.getTotal().getDeleteCount()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getIndexing, i -> i.getTotal().getIndexCurrent()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getIndexing, i -> i.getTotal().getIndexTime()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getIndexing, i -> i.getTotal().getIndexCount()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getIndexing, i -> i.getTotal().getIndexFailedCount()));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getMerge().getCurrent());
|
||||
table.addCell(commonStats == null ? null : commonStats.getMerge().getCurrentNumDocs());
|
||||
table.addCell(commonStats == null ? null : commonStats.getMerge().getCurrentSize());
|
||||
table.addCell(commonStats == null ? null : commonStats.getMerge().getTotal());
|
||||
table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalNumDocs());
|
||||
table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalSize());
|
||||
table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalTime());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrent));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentNumDocs));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentSize));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotal));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalNumDocs));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalSize));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalTime));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotal());
|
||||
table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotalTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getRefresh().getListeners());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotal));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotalTime));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getListeners));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getFetchCurrent());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getFetchTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getFetchCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getOpenContexts());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getQueryCurrent());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getQueryTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getQueryCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getScrollCurrent());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getScrollTime());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getScrollCount());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getFetchCurrent()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getFetchTime()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getFetchCount()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, SearchStats::getOpenContexts));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCurrent()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryTime()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCount()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime()));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount()));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getSegments().getCount());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSegments().getMemory());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSegments().getIndexWriterMemory());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSegments().getVersionMapMemory());
|
||||
table.addCell(commonStats == null ? null : commonStats.getSegments().getBitsetMemory());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getMemory));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getIndexWriterMemory));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getVersionMapMemory));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getBitsetMemory));
|
||||
|
||||
table.addCell(shardStats == null || shardStats.getSeqNoStats() == null ? null : shardStats.getSeqNoStats().getMaxSeqNo());
|
||||
table.addCell(shardStats == null || shardStats.getSeqNoStats() == null ? null : shardStats.getSeqNoStats().getLocalCheckpoint());
|
||||
table.addCell(commitStats == null || shardStats.getSeqNoStats() == null ? null : shardStats.getSeqNoStats().getGlobalCheckpoint());
|
||||
table.addCell(getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getMaxSeqNo));
|
||||
table.addCell(getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getLocalCheckpoint));
|
||||
table.addCell(getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getGlobalCheckpoint));
|
||||
|
||||
table.addCell(commonStats == null ? null : commonStats.getWarmer().current());
|
||||
table.addCell(commonStats == null ? null : commonStats.getWarmer().total());
|
||||
table.addCell(commonStats == null ? null : commonStats.getWarmer().totalTime());
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getWarmer, WarmerStats::current));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getWarmer, WarmerStats::total));
|
||||
table.addCell(getOrNull(commonStats, CommonStats::getWarmer, WarmerStats::totalTime));
|
||||
|
||||
table.endRow();
|
||||
}
|
||||
|
|
|
@ -30,6 +30,8 @@ import org.elasticsearch.action.search.SearchType;
|
|||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
@ -106,8 +108,10 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.function.LongSupplier;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueHours;
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
|
||||
|
@ -344,7 +348,21 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
});
|
||||
}
|
||||
|
||||
SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws IOException {
|
||||
private <T> void runAsync(long id, Supplier<T> executable, ActionListener<T> listener) {
getExecutor(id).execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}

@Override
protected void doRun() {
listener.onResponse(executable.get());
}
});
}
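A minimal sketch of the calling convention this introduces: the per-shard work becomes a Supplier and the result or failure flows back through an ActionListener on the shard's search executor. The doQueryPhase helper below is hypothetical and stands in for the private phase methods:

runAsync(request.id(), () -> doQueryPhase(request, task), new ActionListener<QuerySearchResult>() {
    @Override
    public void onResponse(QuerySearchResult result) {
        // hand the shard result back to the coordinating node
    }

    @Override
    public void onFailure(Exception e) {
        // propagate the shard failure
    }
});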
private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws IOException {
|
||||
final SearchContext context = createAndPutContext(request);
|
||||
final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
|
||||
context.incRef();
|
||||
|
@ -405,59 +423,63 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
|
||||
}
|
||||
|
||||
public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request, SearchTask task) {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
|
||||
context.incRef();
|
||||
try {
|
||||
context.setTask(task);
|
||||
operationListener.onPreQueryPhase(context);
|
||||
long time = System.nanoTime();
|
||||
contextProcessing(context);
|
||||
processScroll(request, context);
|
||||
queryPhase.execute(context);
|
||||
contextProcessedSuccessfully(context);
|
||||
operationListener.onQueryPhase(context, System.nanoTime() - time);
|
||||
return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedQueryPhase(context);
|
||||
logger.trace("Query phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
}
|
||||
public void executeQueryPhase(InternalScrollSearchRequest request, SearchTask task, ActionListener<ScrollQuerySearchResult> listener) {
|
||||
runAsync(request.id(), () -> {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
|
||||
context.incRef();
|
||||
try {
|
||||
context.setTask(task);
|
||||
operationListener.onPreQueryPhase(context);
|
||||
long time = System.nanoTime();
|
||||
contextProcessing(context);
|
||||
processScroll(request, context);
|
||||
queryPhase.execute(context);
|
||||
contextProcessedSuccessfully(context);
|
||||
operationListener.onQueryPhase(context, System.nanoTime() - time);
|
||||
return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedQueryPhase(context);
|
||||
logger.trace("Query phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
}
|
||||
}, listener);
|
||||
}
|
||||
|
||||
public QuerySearchResult executeQueryPhase(QuerySearchRequest request, SearchTask task) {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
context.setTask(task);
|
||||
IndexShard indexShard = context.indexShard();
|
||||
SearchOperationListener operationListener = indexShard.getSearchOperationListener();
|
||||
context.incRef();
|
||||
try {
|
||||
contextProcessing(context);
|
||||
context.searcher().setAggregatedDfs(request.dfs());
|
||||
public void executeQueryPhase(QuerySearchRequest request, SearchTask task, ActionListener<QuerySearchResult> listener) {
|
||||
runAsync(request.id(), () -> {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
context.setTask(task);
|
||||
IndexShard indexShard = context.indexShard();
|
||||
SearchOperationListener operationListener = indexShard.getSearchOperationListener();
|
||||
context.incRef();
|
||||
try {
|
||||
contextProcessing(context);
|
||||
context.searcher().setAggregatedDfs(request.dfs());
|
||||
|
||||
operationListener.onPreQueryPhase(context);
|
||||
long time = System.nanoTime();
|
||||
queryPhase.execute(context);
|
||||
if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) {
|
||||
// no hits, we can release the context since there will be no fetch phase
|
||||
freeContext(context.id());
|
||||
} else {
|
||||
contextProcessedSuccessfully(context);
|
||||
operationListener.onPreQueryPhase(context);
|
||||
long time = System.nanoTime();
|
||||
queryPhase.execute(context);
|
||||
if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) {
|
||||
// no hits, we can release the context since there will be no fetch phase
|
||||
freeContext(context.id());
|
||||
} else {
|
||||
contextProcessedSuccessfully(context);
|
||||
}
|
||||
operationListener.onQueryPhase(context, System.nanoTime() - time);
|
||||
return context.queryResult();
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedQueryPhase(context);
|
||||
logger.trace("Query phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
}
|
||||
operationListener.onQueryPhase(context, System.nanoTime() - time);
|
||||
return context.queryResult();
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedQueryPhase(context);
|
||||
logger.trace("Query phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
}
|
||||
}, listener);
|
||||
}
|
||||
|
||||
private boolean fetchPhaseShouldFreeContext(SearchContext context) {
|
||||
|
@ -470,66 +492,83 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
}
|
||||
}
|
||||
|
||||
public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request, SearchTask task) {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
context.incRef();
|
||||
try {
|
||||
context.setTask(task);
|
||||
contextProcessing(context);
|
||||
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
|
||||
processScroll(request, context);
|
||||
operationListener.onPreQueryPhase(context);
|
||||
final long time = System.nanoTime();
|
||||
try {
|
||||
queryPhase.execute(context);
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedQueryPhase(context);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
}
|
||||
long afterQueryTime = System.nanoTime();
|
||||
operationListener.onQueryPhase(context, afterQueryTime - time);
|
||||
QueryFetchSearchResult fetchSearchResult = executeFetchPhase(context, operationListener, afterQueryTime);
|
||||
|
||||
return new ScrollQueryFetchSearchResult(fetchSearchResult,
|
||||
context.shardTarget());
|
||||
} catch (Exception e) {
|
||||
logger.trace("Fetch phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
final Executor getExecutor(long id) {
|
||||
SearchContext context = activeContexts.get(id);
|
||||
if (context == null) {
|
||||
throw new SearchContextMissingException(id);
|
||||
}
|
||||
return getExecutor(context.indexShard());
|
||||
}
|
||||
|
||||
public FetchSearchResult executeFetchPhase(ShardFetchRequest request, SearchTask task) {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
|
||||
context.incRef();
|
||||
try {
|
||||
context.setTask(task);
|
||||
contextProcessing(context);
|
||||
if (request.lastEmittedDoc() != null) {
|
||||
context.scrollContext().lastEmittedDoc = request.lastEmittedDoc();
|
||||
private Executor getExecutor(IndexShard indexShard) {
assert indexShard != null;
return threadPool.executor(indexShard.indexSettings().isSearchThrottled() ? Names.SEARCH_THROTTLED : Names.SEARCH);
}
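In isolation, the pool selection above amounts to the following sketch (variable names are illustrative):

// Search-throttled indices get the small dedicated pool, everything else the regular search pool.
String pool = indexShard.indexSettings().isSearchThrottled() ? Names.SEARCH_THROTTLED : Names.SEARCH;
Executor executor = threadPool.executor(pool);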
public void executeFetchPhase(InternalScrollSearchRequest request, SearchTask task,
|
||||
ActionListener<ScrollQueryFetchSearchResult> listener) {
|
||||
runAsync(request.id(), () -> {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
context.incRef();
|
||||
try {
|
||||
context.setTask(task);
|
||||
contextProcessing(context);
|
||||
SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
|
||||
processScroll(request, context);
|
||||
operationListener.onPreQueryPhase(context);
|
||||
final long time = System.nanoTime();
|
||||
try {
|
||||
queryPhase.execute(context);
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedQueryPhase(context);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
}
|
||||
long afterQueryTime = System.nanoTime();
|
||||
operationListener.onQueryPhase(context, afterQueryTime - time);
|
||||
QueryFetchSearchResult fetchSearchResult = executeFetchPhase(context, operationListener, afterQueryTime);
|
||||
return new ScrollQueryFetchSearchResult(fetchSearchResult,
|
||||
context.shardTarget());
|
||||
} catch (Exception e) {
|
||||
logger.trace("Fetch phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
}
|
||||
context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
|
||||
operationListener.onPreFetchPhase(context);
|
||||
long time = System.nanoTime();
|
||||
fetchPhase.execute(context);
|
||||
if (fetchPhaseShouldFreeContext(context)) {
|
||||
freeContext(request.id());
|
||||
} else {
|
||||
contextProcessedSuccessfully(context);
|
||||
}, listener);
|
||||
}
|
||||
|
||||
public void executeFetchPhase(ShardFetchRequest request, SearchTask task, ActionListener<FetchSearchResult> listener) {
|
||||
runAsync(request.id(), () -> {
|
||||
final SearchContext context = findContext(request.id(), request);
|
||||
final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener();
|
||||
context.incRef();
|
||||
try {
|
||||
context.setTask(task);
|
||||
contextProcessing(context);
|
||||
if (request.lastEmittedDoc() != null) {
|
||||
context.scrollContext().lastEmittedDoc = request.lastEmittedDoc();
|
||||
}
|
||||
context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
|
||||
operationListener.onPreFetchPhase(context);
|
||||
long time = System.nanoTime();
|
||||
fetchPhase.execute(context);
|
||||
if (fetchPhaseShouldFreeContext(context)) {
|
||||
freeContext(request.id());
|
||||
} else {
|
||||
contextProcessedSuccessfully(context);
|
||||
}
|
||||
operationListener.onFetchPhase(context, System.nanoTime() - time);
|
||||
return context.fetchResult();
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedFetchPhase(context);
|
||||
logger.trace("Fetch phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
}
|
||||
operationListener.onFetchPhase(context, System.nanoTime() - time);
|
||||
return context.fetchResult();
|
||||
} catch (Exception e) {
|
||||
operationListener.onFailedFetchPhase(context);
|
||||
logger.trace("Fetch phase failed", e);
|
||||
processFailure(context, e);
|
||||
throw ExceptionsHelper.convertToRuntime(e);
|
||||
} finally {
|
||||
cleanContext(context);
|
||||
}
|
||||
}, listener);
|
||||
}
|
||||
|
||||
private SearchContext findContext(long id, TransportRequest request) throws SearchContextMissingException {
|
||||
|
@ -985,6 +1024,15 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
public void canMatch(ShardSearchRequest request, ActionListener<CanMatchResponse> listener) {
try {
listener.onResponse(new CanMatchResponse(canMatch(request)));
} catch (IOException e) {
listener.onFailure(e);
}
}
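A hedged caller sketch for the listener-based overload above; the logger calls stand in for whatever the transport handler actually does with the result:

searchService.canMatch(shardRequest, ActionListener.wrap(
    response -> logger.debug("shard can match: {}", response.canMatch()),
    e -> logger.warn("can_match phase failed", e)));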
/**
|
||||
* Returns true iff the given search source builder can be early terminated by rewriting to a match none query. Or in other words
* if the execution of the search request can be early terminated without executing it. This is for instance not possible if
|
||||
|
@ -1009,31 +1057,27 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
* The action listener is guaranteed to be executed on the search thread-pool
|
||||
*/
|
||||
private void rewriteShardRequest(ShardSearchRequest request, ActionListener<ShardSearchRequest> listener) {
|
||||
IndexShard shard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
|
||||
Executor executor = getExecutor(shard);
|
||||
ActionListener<Rewriteable> actionListener = ActionListener.wrap(r ->
|
||||
threadPool.executor(Names.SEARCH).execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
listener.onResponse(request);
|
||||
}
|
||||
}), listener::onFailure);
|
||||
IndexShard shardOrNull = indicesService.getShardOrNull(request.shardId());
|
||||
if (shardOrNull != null) {
|
||||
// now we need to check if there is a pending refresh and register
|
||||
ActionListener<Rewriteable> finalListener = actionListener;
|
||||
actionListener = ActionListener.wrap(r ->
|
||||
shardOrNull.awaitShardSearchActive(b -> finalListener.onResponse(r)), finalListener::onFailure);
|
||||
}
|
||||
shard.awaitShardSearchActive(b ->
|
||||
executor.execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doRun() {
|
||||
listener.onResponse(request);
|
||||
}
|
||||
})
|
||||
), listener::onFailure);
|
||||
// we also do the rewrite on the coordinating node (TransportSearchService) but we also need to do it here for BWC as well as
// AliasFilters that might need to be rewritten. These are edge cases but we are very efficient doing the rewrite here so it's not
// adding a lot of overhead
Rewriteable.rewriteAndFetch(request.getRewriteable(), indicesService.getRewriteContext(request::nowInMillis), actionListener);
|
||||
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1050,4 +1094,31 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
public InternalAggregation.ReduceContext createReduceContext(boolean finalReduce) {
|
||||
return new InternalAggregation.ReduceContext(bigArrays, scriptService, multiBucketConsumerService.create(), finalReduce);
|
||||
}
|
||||
|
||||
public static final class CanMatchResponse extends SearchPhaseResult {
private boolean canMatch;

public CanMatchResponse() {
}

public CanMatchResponse(boolean canMatch) {
this.canMatch = canMatch;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
canMatch = in.readBoolean();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(canMatch);
}

public boolean canMatch() {
return canMatch;
}
}
}
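The new response serializes a single boolean; a round-trip sketch in the style of the transport serialization tests (assumes BytesStreamOutput from the common io.stream package):

CanMatchResponse original = new CanMatchResponse(true);
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);                      // writes the canMatch flag after the superclass fields
CanMatchResponse copy = new CanMatchResponse();
copy.readFrom(out.bytes().streamInput());
assert copy.canMatch();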
@ -1,70 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.suggest.completion;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectLongHashMap;
|
||||
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.search.suggest.document.CompletionTerms;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.FieldMemoryStats;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class CompletionFieldStats {
|
||||
|
||||
/**
|
||||
* Returns total in-heap bytes used by all suggesters. This method has CPU cost <code>O(numIndexedFields)</code>.
|
||||
*
|
||||
* @param fieldNamePatterns if non-null, any completion field name matching any of these patterns will break out its in-heap bytes
|
||||
* separately in the returned {@link CompletionStats}
|
||||
*/
|
||||
public static CompletionStats completionStats(IndexReader indexReader, String ... fieldNamePatterns) {
|
||||
long sizeInBytes = 0;
|
||||
ObjectLongHashMap<String> completionFields = null;
|
||||
if (fieldNamePatterns != null && fieldNamePatterns.length > 0) {
|
||||
completionFields = new ObjectLongHashMap<>(fieldNamePatterns.length);
|
||||
}
|
||||
for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
|
||||
LeafReader atomicReader = atomicReaderContext.reader();
|
||||
try {
|
||||
for (FieldInfo info : atomicReader.getFieldInfos()) {
|
||||
Terms terms = atomicReader.terms(info.name);
|
||||
if (terms instanceof CompletionTerms) {
|
||||
// TODO: currently we load up the suggester for reporting its size
|
||||
long fstSize = ((CompletionTerms) terms).suggester().ramBytesUsed();
|
||||
if (fieldNamePatterns != null && fieldNamePatterns.length > 0 && Regex.simpleMatch(fieldNamePatterns, info.name)) {
|
||||
completionFields.addTo(info.name, fstSize);
|
||||
}
|
||||
sizeInBytes += fstSize;
|
||||
}
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
throw new ElasticsearchException(ioe);
|
||||
}
|
||||
}
|
||||
return new CompletionStats(sizeInBytes, completionFields == null ? null : new FieldMemoryStats(completionFields));
|
||||
}
|
||||
}
|
|
@ -71,6 +71,7 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
|
|||
public static final String ANALYZE = "analyze";
|
||||
public static final String WRITE = "write";
|
||||
public static final String SEARCH = "search";
|
||||
public static final String SEARCH_THROTTLED = "search_throttled";
|
||||
public static final String MANAGEMENT = "management";
|
||||
public static final String FLUSH = "flush";
|
||||
public static final String REFRESH = "refresh";
|
||||
|
@ -135,6 +136,7 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
|
|||
map.put(Names.FORCE_MERGE, ThreadPoolType.FIXED);
|
||||
map.put(Names.FETCH_SHARD_STARTED, ThreadPoolType.SCALING);
|
||||
map.put(Names.FETCH_SHARD_STORE, ThreadPoolType.SCALING);
|
||||
map.put(Names.SEARCH_THROTTLED, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE);
|
||||
THREAD_POOL_TYPES = Collections.unmodifiableMap(map);
|
||||
}
|
||||
|
||||
|
@ -175,6 +177,8 @@ public class ThreadPool extends AbstractComponent implements Scheduler, Closeabl
|
|||
builders.put(Names.ANALYZE, new FixedExecutorBuilder(settings, Names.ANALYZE, 1, 16));
|
||||
builders.put(Names.SEARCH, new AutoQueueAdjustingExecutorBuilder(settings,
Names.SEARCH, searchThreadPoolSize(availableProcessors), 1000, 1000, 1000, 2000));
builders.put(Names.SEARCH_THROTTLED, new AutoQueueAdjustingExecutorBuilder(settings,
Names.SEARCH_THROTTLED, 1, 100, 100, 100, 200));
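The new pool can be obtained by name like any other executor; a small sketch assuming an initialized ThreadPool:

ExecutorService searchThrottled = threadPool.executor(ThreadPool.Names.SEARCH_THROTTLED);
searchThrottled.execute(() -> {
    // run a query phase against a shard of a search-throttled index
});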
builders.put(Names.MANAGEMENT, new ScalingExecutorBuilder(Names.MANAGEMENT, 1, 5, TimeValue.timeValueMinutes(5)));
|
||||
// no queue as this means clients will need to handle rejections on listener queue even if the operation succeeded
|
||||
// the assumption here is that the listeners should be very lightweight on the listeners side
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.apache.lucene.search.Sort;
|
|||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.search.SortedSetSelector;
|
||||
import org.apache.lucene.search.SortedSetSortField;
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
|
||||
|
@ -81,6 +82,7 @@ public class ShrinkIndexIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
public void testCreateShrinkIndexToN() {
|
||||
assumeFalse("Broken on windows - https://github.com/elastic/elasticsearch/issues/33857", Constants.WINDOWS);
|
||||
int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
|
||||
int[] shardSplits = randomFrom(possibleShardSplits);
|
||||
assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]);
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
|
@ -64,8 +65,8 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
|
|||
|
||||
@Override
|
||||
public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRequest request, SearchTask task,
|
||||
ActionListener<CanMatchResponse> listener) {
|
||||
new Thread(() -> listener.onResponse(new CanMatchResponse(request.shardId().id() == 0 ? shard1 :
|
||||
ActionListener<SearchService.CanMatchResponse> listener) {
|
||||
new Thread(() -> listener.onResponse(new SearchService.CanMatchResponse(request.shardId().id() == 0 ? shard1 :
|
||||
shard2))).start();
|
||||
}
|
||||
};
|
||||
|
@ -123,14 +124,14 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
|
|||
|
||||
@Override
|
||||
public void sendCanMatch(Transport.Connection connection, ShardSearchTransportRequest request, SearchTask task,
|
||||
ActionListener<CanMatchResponse> listener) {
|
||||
ActionListener<SearchService.CanMatchResponse> listener) {
|
||||
boolean throwException = request.shardId().id() != 0;
|
||||
if (throwException && randomBoolean()) {
|
||||
throw new IllegalArgumentException("boom");
|
||||
} else {
|
||||
new Thread(() -> {
|
||||
if (throwException == false) {
|
||||
listener.onResponse(new CanMatchResponse(shard1));
|
||||
listener.onResponse(new SearchService.CanMatchResponse(shard1));
|
||||
} else {
|
||||
listener.onFailure(new NullPointerException());
|
||||
}
|
||||
|
@ -192,8 +193,8 @@ public class CanMatchPreFilterSearchPhaseTests extends ESTestCase {
|
|||
Transport.Connection connection,
|
||||
ShardSearchTransportRequest request,
|
||||
SearchTask task,
|
||||
ActionListener<CanMatchResponse> listener) {
|
||||
listener.onResponse(new CanMatchResponse(randomBoolean()));
|
||||
ActionListener<SearchService.CanMatchResponse> listener) {
|
||||
listener.onResponse(new SearchService.CanMatchResponse(randomBoolean()));
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
|
|||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
@ -794,4 +795,12 @@ public class MetaDataTests extends ESTestCase {
|
|||
" }\n" +
|
||||
" }\n" +
|
||||
"}";
|
||||
|
||||
public void testTransientSettingsOverridePersistentSettings() {
final Setting setting = Setting.simpleString("key");
final MetaData metaData = MetaData.builder()
.persistentSettings(Settings.builder().put(setting.getKey(), "persistent-value").build())
.transientSettings(Settings.builder().put(setting.getKey(), "transient-value").build()).build();
assertThat(setting.get(metaData.settings()), equalTo("transient-value"));
}
}
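The assertion above relies on transient settings taking precedence over persistent ones in the merged view; the behaviour is equivalent to layering the two settings objects, as in this sketch:

Settings merged = Settings.builder()
    .put(Settings.builder().put("key", "persistent-value").build())
    .put(Settings.builder().put("key", "transient-value").build()) // the later put wins
    .build();
assert "transient-value".equals(merged.get("key"));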
@ -22,7 +22,6 @@ package org.elasticsearch.common.network;
|
|||
import org.elasticsearch.client.node.NodeClient;
|
||||
import org.elasticsearch.common.Table;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.inject.ModuleTestCase;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.BoundTransportAddress;
|
||||
|
@ -39,6 +38,7 @@ import org.elasticsearch.plugins.NetworkPlugin;
|
|||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.action.cat.AbstractCatAction;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
|
@ -56,7 +56,7 @@ import java.util.concurrent.TimeUnit;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
public class NetworkModuleTests extends ModuleTestCase {
|
||||
public class NetworkModuleTests extends ESTestCase {
|
||||
private ThreadPool threadPool;
|
||||
|
||||
@Override
|
||||
|
|
|
@ -80,12 +80,12 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
|
||||
// Reuse the same location and attempt to lock again
|
||||
IllegalStateException ex = expectThrows(IllegalStateException.class, () ->
|
||||
new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}));
|
||||
new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)));
|
||||
assertThat(ex.getMessage(), containsString("failed to obtain node lock"));
|
||||
|
||||
// Close the environment that holds the lock and make sure we can get the lock after release
|
||||
env.close();
|
||||
env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
|
||||
env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
|
||||
assertThat(env.nodeDataPaths(), arrayWithSize(dataPaths.size()));
|
||||
|
||||
for (int i = 0; i < dataPaths.size(); i++) {
|
||||
|
@ -120,7 +120,7 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build());
|
||||
final NodeEnvironment first = newNodeEnvironment(settings);
|
||||
List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);
|
||||
NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
|
||||
NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
|
||||
assertEquals(first.nodeDataPaths().length, dataPaths.size());
|
||||
assertEquals(second.nodeDataPaths().length, dataPaths.size());
|
||||
for (int i = 0; i < dataPaths.size(); i++) {
|
||||
|
@ -477,7 +477,7 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
@Override
|
||||
public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
|
||||
Settings build = buildEnvSettings(settings);
|
||||
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
|
||||
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
|
||||
}
|
||||
|
||||
public Settings buildEnvSettings(Settings settings) {
|
||||
|
@ -492,7 +492,7 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
.put(settings)
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
|
||||
.putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build();
|
||||
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
|
||||
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
|
||||
}
|
||||
|
||||
public NodeEnvironment newNodeEnvironment(String[] dataPaths, String sharedDataPath, Settings settings) throws IOException {
|
||||
|
@ -501,6 +501,6 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
|
||||
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataPath)
|
||||
.putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build();
|
||||
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
|
||||
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
|
||||
}
|
||||
}
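Throughout these tests the NodeEnvironment is now built with just settings and an Environment; a self-contained sketch of the two-argument construction (the home path below is a placeholder):

Settings settings = Settings.builder()
    .put(Environment.PATH_HOME_SETTING.getKey(), "/tmp/es-home") // placeholder path
    .build();
try (NodeEnvironment env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))) {
    // env.nodeId() and env.nodeDataPaths() are available as before
}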
@ -135,7 +135,7 @@ public class IndexModuleTests extends ESTestCase {
|
|||
bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService);
|
||||
scriptService = new ScriptService(settings, Collections.emptyMap(), Collections.emptyMap());
|
||||
clusterService = ClusterServiceUtils.createClusterService(threadPool);
|
||||
nodeEnvironment = new NodeEnvironment(settings, environment, nodeId -> {});
|
||||
nodeEnvironment = new NodeEnvironment(settings, environment);
|
||||
mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
|
||||
}
|
||||
|
||||
|
|
|
@ -100,6 +100,66 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
|||
assertTrue(fd instanceof SortedNumericDVIndexFieldData);
|
||||
}
|
||||
|
||||
public void testClearField() throws Exception {
|
||||
final IndexService indexService = createIndex("test");
|
||||
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
// copy the ifdService since we can set the listener only once.
|
||||
final IndexFieldDataService ifdService = new IndexFieldDataService(indexService.getIndexSettings(),
|
||||
indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());
|
||||
|
||||
final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
final MappedFieldType mapper1 = new TextFieldMapper.Builder("field_1").fielddata(true).build(ctx).fieldType();
|
||||
final MappedFieldType mapper2 = new TextFieldMapper.Builder("field_2").fielddata(true).build(ctx).fieldType();
|
||||
final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
|
||||
Document doc = new Document();
|
||||
doc.add(new StringField("field_1", "thisisastring", Store.NO));
|
||||
doc.add(new StringField("field_2", "thisisanotherstring", Store.NO));
|
||||
writer.addDocument(doc);
|
||||
final IndexReader reader = DirectoryReader.open(writer);
|
||||
final AtomicInteger onCacheCalled = new AtomicInteger();
|
||||
final AtomicInteger onRemovalCalled = new AtomicInteger();
|
||||
ifdService.setListener(new IndexFieldDataCache.Listener() {
|
||||
@Override
|
||||
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
|
||||
onCacheCalled.incrementAndGet();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
|
||||
onRemovalCalled.incrementAndGet();
|
||||
}
|
||||
});
|
||||
IndexFieldData<?> ifd1 = ifdService.getForField(mapper1);
|
||||
IndexFieldData<?> ifd2 = ifdService.getForField(mapper2);
|
||||
LeafReaderContext leafReaderContext = reader.getContext().leaves().get(0);
|
||||
AtomicFieldData loadField1 = ifd1.load(leafReaderContext);
|
||||
AtomicFieldData loadField2 = ifd2.load(leafReaderContext);
|
||||
|
||||
assertEquals(2, onCacheCalled.get());
|
||||
assertEquals(0, onRemovalCalled.get());
|
||||
|
||||
ifdService.clearField("field_1");
|
||||
|
||||
assertEquals(2, onCacheCalled.get());
|
||||
assertEquals(1, onRemovalCalled.get());
|
||||
|
||||
ifdService.clearField("field_1");
|
||||
|
||||
assertEquals(2, onCacheCalled.get());
|
||||
assertEquals(1, onRemovalCalled.get());
|
||||
|
||||
ifdService.clearField("field_2");
|
||||
|
||||
assertEquals(2, onCacheCalled.get());
|
||||
assertEquals(2, onRemovalCalled.get());
|
||||
|
||||
reader.close();
|
||||
loadField1.close();
|
||||
loadField2.close();
|
||||
writer.close();
|
||||
ifdService.clear();
|
||||
}
|
||||
|
||||
public void testFieldDataCacheListener() throws Exception {
|
||||
final IndexService indexService = createIndex("test");
|
||||
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
|
|
|
@ -65,24 +65,36 @@ public class LocalCheckpointTrackerTests extends ESTestCase {
|
|||
assertThat(seqNo1, equalTo(0L));
|
||||
tracker.markSeqNoAsCompleted(seqNo1);
|
||||
assertThat(tracker.getCheckpoint(), equalTo(0L));
|
||||
assertThat(tracker.contains(0L), equalTo(true));
|
||||
assertThat(tracker.contains(atLeast(1)), equalTo(false));
|
||||
seqNo1 = tracker.generateSeqNo();
|
||||
seqNo2 = tracker.generateSeqNo();
|
||||
assertThat(seqNo1, equalTo(1L));
|
||||
assertThat(seqNo2, equalTo(2L));
|
||||
tracker.markSeqNoAsCompleted(seqNo2);
|
||||
assertThat(tracker.getCheckpoint(), equalTo(0L));
|
||||
assertThat(tracker.contains(seqNo1), equalTo(false));
|
||||
assertThat(tracker.contains(seqNo2), equalTo(true));
|
||||
tracker.markSeqNoAsCompleted(seqNo1);
|
||||
assertThat(tracker.getCheckpoint(), equalTo(2L));
|
||||
assertThat(tracker.contains(between(0, 2)), equalTo(true));
|
||||
assertThat(tracker.contains(atLeast(3)), equalTo(false));
|
||||
}
|
||||
|
||||
public void testSimpleReplica() {
|
||||
assertThat(tracker.getCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED));
|
||||
assertThat(tracker.contains(randomNonNegativeLong()), equalTo(false));
|
||||
tracker.markSeqNoAsCompleted(0L);
|
||||
assertThat(tracker.getCheckpoint(), equalTo(0L));
|
||||
assertThat(tracker.contains(0), equalTo(true));
|
||||
tracker.markSeqNoAsCompleted(2L);
|
||||
assertThat(tracker.getCheckpoint(), equalTo(0L));
|
||||
assertThat(tracker.contains(1L), equalTo(false));
|
||||
assertThat(tracker.contains(2L), equalTo(true));
|
||||
tracker.markSeqNoAsCompleted(1L);
|
||||
assertThat(tracker.getCheckpoint(), equalTo(2L));
|
||||
assertThat(tracker.contains(between(0, 2)), equalTo(true));
|
||||
assertThat(tracker.contains(atLeast(3)), equalTo(false));
|
||||
}
|
||||
|
||||
public void testLazyInitialization() {
|
||||
|
@ -90,20 +102,24 @@ public class LocalCheckpointTrackerTests extends ESTestCase {
|
|||
* Previously this would allocate the entire chain of bit sets to the one for the sequence number being marked; for very large
|
||||
* sequence numbers this could lead to excessive memory usage resulting in out of memory errors.
|
||||
*/
|
||||
tracker.markSeqNoAsCompleted(randomNonNegativeLong());
|
||||
long seqNo = randomNonNegativeLong();
|
||||
tracker.markSeqNoAsCompleted(seqNo);
|
||||
assertThat(tracker.processedSeqNo.size(), equalTo(1));
|
||||
assertThat(tracker.contains(seqNo), equalTo(true));
|
||||
assertThat(tracker.contains(randomValueOtherThan(seqNo, ESTestCase::randomNonNegativeLong)), equalTo(false));
|
||||
assertThat(tracker.processedSeqNo.size(), equalTo(1));
|
||||
}
|
||||
|
||||
public void testSimpleOverFlow() {
|
||||
List<Integer> seqNoList = new ArrayList<>();
|
||||
List<Long> seqNoList = new ArrayList<>();
|
||||
final boolean aligned = randomBoolean();
|
||||
final int maxOps = BIT_SET_SIZE * randomIntBetween(1, 5) + (aligned ? 0 : randomIntBetween(1, BIT_SET_SIZE - 1));
|
||||
|
||||
for (int i = 0; i < maxOps; i++) {
|
||||
for (long i = 0; i < maxOps; i++) {
|
||||
seqNoList.add(i);
|
||||
}
|
||||
Collections.shuffle(seqNoList, random());
|
||||
for (Integer seqNo : seqNoList) {
|
||||
for (Long seqNo : seqNoList) {
|
||||
tracker.markSeqNoAsCompleted(seqNo);
|
||||
}
|
||||
assertThat(tracker.checkpoint, equalTo(maxOps - 1L));
|
||||
|
@ -111,6 +127,9 @@ public class LocalCheckpointTrackerTests extends ESTestCase {
|
|||
if (aligned == false) {
|
||||
assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE));
|
||||
}
|
||||
assertThat(tracker.contains(randomFrom(seqNoList)), equalTo(true));
|
||||
final long notCompletedSeqNo = randomValueOtherThanMany(seqNoList::contains, ESTestCase::randomNonNegativeLong);
|
||||
assertThat(tracker.contains(notCompletedSeqNo), equalTo(false));
|
||||
}
|
||||
|
||||
public void testConcurrentPrimary() throws InterruptedException {
|
||||
|
@ -199,8 +218,12 @@ public class LocalCheckpointTrackerTests extends ESTestCase {
|
|||
}
|
||||
assertThat(tracker.getMaxSeqNo(), equalTo(maxOps - 1L));
|
||||
assertThat(tracker.getCheckpoint(), equalTo(unFinishedSeq - 1L));
|
||||
assertThat(tracker.contains(randomValueOtherThan(unFinishedSeq, () -> (long) randomFrom(seqNos))), equalTo(true));
|
||||
assertThat(tracker.contains(unFinishedSeq), equalTo(false));
|
||||
tracker.markSeqNoAsCompleted(unFinishedSeq);
|
||||
assertThat(tracker.getCheckpoint(), equalTo(maxOps - 1L));
|
||||
assertThat(tracker.contains(unFinishedSeq), equalTo(true));
|
||||
assertThat(tracker.contains(randomLongBetween(maxOps, Long.MAX_VALUE)), equalTo(false));
|
||||
assertThat(tracker.processedSeqNo.size(), isOneOf(0, 1));
|
||||
if (tracker.processedSeqNo.size() == 1) {
|
||||
assertThat(tracker.processedSeqNo.keys().iterator().next().value, equalTo(tracker.checkpoint / BIT_SET_SIZE));
|
||||
|
@ -272,4 +295,23 @@ public class LocalCheckpointTrackerTests extends ESTestCase {
|
|||
});
|
||||
assertThat(tracker.generateSeqNo(), equalTo((long) (maxSeqNo + 1)));
|
||||
}
|
||||
|
||||
public void testContains() {
final long maxSeqNo = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, 100);
final long localCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, maxSeqNo);
final LocalCheckpointTracker tracker = new LocalCheckpointTracker(maxSeqNo, localCheckpoint);
if (localCheckpoint >= 0) {
assertThat(tracker.contains(randomLongBetween(0, localCheckpoint)), equalTo(true));
}
assertThat(tracker.contains(randomLongBetween(localCheckpoint + 1, Long.MAX_VALUE)), equalTo(false));
final int numOps = between(1, 100);
final List<Long> seqNos = new ArrayList<>();
for (int i = 0; i < numOps; i++) {
long seqNo = randomLongBetween(0, 1000);
seqNos.add(seqNo);
tracker.markSeqNoAsCompleted(seqNo);
}
final long seqNo = randomNonNegativeLong();
assertThat(tracker.contains(seqNo), equalTo(seqNo <= localCheckpoint || seqNos.contains(seqNo)));
}
}
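The contract exercised above: contains(seqNo) is true once the sequence number is at or below the local checkpoint or has been explicitly marked as completed. A small sketch:

LocalCheckpointTracker tracker = new LocalCheckpointTracker(SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED);
tracker.markSeqNoAsCompleted(0L);
tracker.markSeqNoAsCompleted(2L);
assert tracker.contains(0L);          // completed, advances the checkpoint to 0
assert tracker.contains(2L);          // completed above the checkpoint
assert tracker.contains(1L) == false; // the gap at 1 has not been completed yet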
@ -2438,6 +2438,23 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
closeShards(sourceShard, targetShard);
|
||||
}
|
||||
|
||||
public void testCompletionStatsMarksSearcherAccessed() throws Exception {
|
||||
IndexShard indexShard = null;
|
||||
try {
|
||||
indexShard = newStartedShard();
|
||||
IndexShard shard = indexShard;
|
||||
assertBusy(() -> {
|
||||
ThreadPool threadPool = shard.getThreadPool();
|
||||
assertThat(threadPool.relativeTimeInMillis(), greaterThan(shard.getLastSearcherAccess()));
|
||||
});
|
||||
long prevAccessTime = shard.getLastSearcherAccess();
|
||||
indexShard.completionStats();
|
||||
assertThat("searcher was not marked as accessed", shard.getLastSearcherAccess(), greaterThan(prevAccessTime));
|
||||
} finally {
|
||||
closeShards(indexShard);
|
||||
}
|
||||
}
|
||||
|
||||
public void testDocStats() throws Exception {
|
||||
IndexShard indexShard = null;
|
||||
try {
|
||||
|
|
|
@@ -178,7 +178,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));

// Make sure all our mocking above actually worked:
NodePath[] nodePaths = nodeEnv.nodePaths();
@@ -233,7 +233,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));

// Make sure all our mocking above actually worked:
NodePath[] nodePaths = nodeEnv.nodePaths();
@@ -290,7 +290,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));

aFileStore.usableSpace = 100000;
bFileStore.usableSpace = 1000;
@@ -315,7 +315,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));

// Make sure all our mocking above actually worked:
NodePath[] nodePaths = nodeEnv.nodePaths();
@@ -399,7 +399,7 @@ public class RemoveCorruptedShardDataCommandTests extends IndexShardTestCase {

private void writeIndexState() throws IOException {
// create _state of IndexMetaData
try(NodeEnvironment nodeEnvironment = new NodeEnvironment(environment.settings(), environment, nId -> {})) {
try(NodeEnvironment nodeEnvironment = new NodeEnvironment(environment.settings(), environment)) {
final Path[] paths = nodeEnvironment.indexPaths(indexMetaData.getIndex());
IndexMetaData.FORMAT.write(indexMetaData, paths);
logger.info("--> index metadata persisted to {} ", Arrays.toString(paths));
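For reference, the constructor change in the two files above drops the node-id consumer argument, so callers now build the environment from settings alone. A minimal sketch of the new call shape, assuming a test method declared with throws IOException and the same classes these tests already import (the temp-dir home path is illustrative):

// assumes imports of Settings, Environment, TestEnvironment, NodeEnvironment, NodePath
Settings settings = Settings.builder()
    .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
    .build();
try (NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))) {
    NodePath[] nodePaths = nodeEnv.nodePaths(); // same accessor the tests above inspect
}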
@@ -64,14 +64,14 @@ public class InternalOrPrivateSettingsPlugin extends Plugin implements ActionPlu

public static class UpdateInternalOrPrivateAction extends Action<UpdateInternalOrPrivateAction.Response> {

static final UpdateInternalOrPrivateAction INSTANCE = new UpdateInternalOrPrivateAction();
public static final UpdateInternalOrPrivateAction INSTANCE = new UpdateInternalOrPrivateAction();
private static final String NAME = "indices:admin/settings/update-internal-or-private-index";

public UpdateInternalOrPrivateAction() {
super(NAME);
}

static class Request extends MasterNodeRequest<Request> {
public static class Request extends MasterNodeRequest<Request> {

private String index;
private String key;
@@ -81,7 +81,7 @@ public class InternalOrPrivateSettingsPlugin extends Plugin implements ActionPlu

}

Request(final String index, final String key, final String value) {
public Request(final String index, final String key, final String value) {
this.index = index;
this.key = key;
this.value = value;
@@ -37,9 +37,14 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import static java.util.Collections.emptyMap;

public class InternalSettingsPreparerTests extends ESTestCase {
private static final Supplier<String> DEFAULT_NODE_NAME_SHOULDNT_BE_CALLED = () -> {
throw new AssertionError("shouldn't be called");
};

Path homeDir;
Settings baseEnvSettings;
@@ -60,13 +65,14 @@ public class InternalSettingsPreparerTests extends ESTestCase {

public void testEmptySettings() {
Settings settings = InternalSettingsPreparer.prepareSettings(Settings.EMPTY);
assertNull(settings.get("node.name")); // a name was not set
assertNull(settings.get("node.name"));
assertNotNull(settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey())); // a cluster name was set
int size = settings.names().size();

Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings);
String defaultNodeName = randomAlphaOfLength(8);
Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, emptyMap(), null, () -> defaultNodeName);
settings = env.settings();
assertNull(settings.get("node.name")); // a name was not set
assertEquals(defaultNodeName, settings.get("node.name"));
assertNotNull(settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey())); // a cluster name was set
assertEquals(settings.toString(), size + 1 /* path.home is in the base settings */, settings.names().size());
String home = Environment.PATH_HOME_SETTING.get(baseEnvSettings);
@@ -88,9 +94,8 @@ public class InternalSettingsPreparerTests extends ESTestCase {
Path config = home.resolve("config");
Files.createDirectory(config);
Files.copy(garbage, config.resolve("elasticsearch.yml"));
InternalSettingsPreparer.prepareEnvironment(Settings.builder()
.put(baseEnvSettings)
.build());
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build(),
emptyMap(), null, () -> "default_node_name");
} catch (SettingsException e) {
assertEquals("Failed to load settings from [elasticsearch.yml]", e.getMessage());
}
@@ -101,8 +106,8 @@ public class InternalSettingsPreparerTests extends ESTestCase {
Path config = homeDir.resolve("config");
Files.createDirectory(config);
Files.copy(yaml, config.resolve("elasticsearch.yaml"));
SettingsException e = expectThrows(SettingsException.class, () ->
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build()));
SettingsException e = expectThrows(SettingsException.class, () -> InternalSettingsPreparer.prepareEnvironment(
Settings.builder().put(baseEnvSettings).build(), emptyMap(), null, DEFAULT_NODE_NAME_SHOULDNT_BE_CALLED));
assertEquals("elasticsearch.yaml was deprecated in 5.5.0 and must be renamed to elasticsearch.yml", e.getMessage());
}

@@ -111,8 +116,8 @@ public class InternalSettingsPreparerTests extends ESTestCase {
Path config = homeDir.resolve("config");
Files.createDirectory(config);
Files.copy(yaml, config.resolve("elasticsearch.json"));
SettingsException e = expectThrows(SettingsException.class, () ->
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build()));
SettingsException e = expectThrows(SettingsException.class, () -> InternalSettingsPreparer.prepareEnvironment(
Settings.builder().put(baseEnvSettings).build(), emptyMap(), null, DEFAULT_NODE_NAME_SHOULDNT_BE_CALLED));
assertEquals("elasticsearch.json was deprecated in 5.5.0 and must be converted to elasticsearch.yml", e.getMessage());
}

@@ -120,14 +125,16 @@ public class InternalSettingsPreparerTests extends ESTestCase {
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("foo", "secret");
Settings input = Settings.builder().put(baseEnvSettings).setSecureSettings(secureSettings).build();
Environment env = InternalSettingsPreparer.prepareEnvironment(input);
Environment env = InternalSettingsPreparer.prepareEnvironment(input, emptyMap(),
null, () -> "default_node_name");
Setting<SecureString> fakeSetting = SecureSetting.secureString("foo", null);
assertEquals("secret", fakeSetting.get(env.settings()).toString());
}

public void testDefaultPropertiesDoNothing() throws Exception {
Map<String, String> props = Collections.singletonMap("default.setting", "foo");
Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, props, null);
Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, props,
null, () -> "default_node_name");
assertEquals("foo", env.settings().get("default.setting"));
assertNull(env.settings().get("setting"));
}
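The hunks above all move callers to the four-argument InternalSettingsPreparer.prepareEnvironment(settings, properties, configPath, defaultNodeName) shape. A minimal sketch of that call under the same imports these tests use (homeDir is a placeholder for a test home path; the supplier provides the node name to fall back to when none is configured):

// emptyMap() = no extra properties, null = no explicit config path
Environment env = InternalSettingsPreparer.prepareEnvironment(
    Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), homeDir.toString()).build(),
    emptyMap(), null, () -> "fallback-node-name");
// per testEmptySettings above, the fallback ends up as node.name when the setting is absent
assertEquals("fallback-node-name", env.settings().get("node.name"));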
@@ -40,7 +40,6 @@ import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
@@ -49,22 +48,6 @@ import static org.mockito.Mockito.verifyNoMoreInteractions;
@LuceneTestCase.SuppressFileSystems(value = "ExtrasFS")
public class NodeTests extends ESTestCase {

public void testNodeName() throws IOException {
final String name = randomBoolean() ? randomAlphaOfLength(10) : null;
Settings.Builder settings = baseSettings();
if (name != null) {
settings.put(Node.NODE_NAME_SETTING.getKey(), name);
}
try (Node node = new MockNode(settings.build(), basePlugins())) {
final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings();
if (name == null) {
assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(node.getNodeEnvironment().nodeId().substring(0, 7)));
} else {
assertThat(Node.NODE_NAME_SETTING.get(nodeSettings), equalTo(name));
}
}
}

public static class CheckPlugin extends Plugin {
public static final BootstrapCheck CHECK = context -> BootstrapCheck.BootstrapCheckResult.success();
@@ -20,7 +20,6 @@ package org.elasticsearch.search;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
@@ -72,6 +71,7 @@ import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.search.suggest.term.TermSuggestion;
import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
@@ -89,7 +89,7 @@ import static java.util.stream.Collectors.toSet;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.hasSize;

public class SearchModuleTests extends ModuleTestCase {
public class SearchModuleTests extends ESTestCase {

public void testDoubleRegister() {
SearchPlugin registersDupeHighlighter = new SearchPlugin() {
@@ -28,6 +28,7 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -35,7 +36,10 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.MatchNoneQueryBuilder;
@@ -44,7 +48,10 @@ import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.SearchOperationListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.settings.InternalOrPrivateSettingsPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.script.MockScriptEngine;
@@ -55,6 +62,7 @@ import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuil
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.SearchContext;
@@ -77,9 +85,12 @@ import static java.util.Collections.singletonList;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.startsWith;

public class SearchServiceTests extends ESSingleNodeTestCase {

@@ -90,19 +101,51 @@ public class SearchServiceTests extends ESSingleNodeTestCase {

@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class);
return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class, InternalOrPrivateSettingsPlugin.class);
}

public static class CustomScriptPlugin extends MockScriptPlugin {

static final String DUMMY_SCRIPT = "dummyScript";

@Override
protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() {
return Collections.singletonMap(DUMMY_SCRIPT, vars -> {
return "dummy";
return Collections.singletonMap(DUMMY_SCRIPT, vars -> "dummy");
}

@Override
public void onIndexModule(IndexModule indexModule) {
indexModule.addSearchOperationListener(new SearchOperationListener() {
@Override
public void onNewContext(SearchContext context) {
if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) {
assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]"));
} else {
assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
}
}

@Override
public void onFetchPhase(SearchContext context, long tookInNanos) {
if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) {
assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]"));
} else {
assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
}
}

@Override
public void onQueryPhase(SearchContext context, long tookInNanos) {
if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) {
assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]"));
} else {
assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]"));
}
}
});
}

}

@Override
@@ -210,15 +253,24 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
final int rounds = scaledRandomIntBetween(100, 10000);
for (int i = 0; i < rounds; i++) {
try {
SearchPhaseResult searchPhaseResult = service.executeQueryPhase(
try {
PlainActionFuture<SearchPhaseResult> result = new PlainActionFuture<>();
service.executeQueryPhase(
new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f,
true, null, null),
new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
IntArrayList intCursors = new IntArrayList(1);
intCursors.add(0);
ShardFetchRequest req = new ShardFetchRequest(searchPhaseResult.getRequestId(), intCursors, null /* not a scroll */);
service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null, Collections.emptyMap()));
new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f,
true, null, null),
new SearchTask(123L, "", "", "", null, Collections.emptyMap()), result);
SearchPhaseResult searchPhaseResult = result.get();
IntArrayList intCursors = new IntArrayList(1);
intCursors.add(0);
ShardFetchRequest req = new ShardFetchRequest(searchPhaseResult.getRequestId(), intCursors, null/* not a scroll */);
PlainActionFuture<FetchSearchResult> listener = new PlainActionFuture<>();
service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null, Collections.emptyMap()), listener);
listener.get();
} catch (ExecutionException ex) {
assertThat(ex.getCause(), instanceOf(RuntimeException.class));
throw ((RuntimeException)ex.getCause());
}
} catch (AlreadyClosedException ex) {
throw ex;
} catch (IllegalStateException ex) {
@@ -467,4 +519,37 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
.suggest(new SuggestBuilder())));

}

public void testSetSearchThrottled() {
createIndex("throttled_threadpool_index");
client().execute(
InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request("throttled_threadpool_index",
IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), "true"))
.actionGet();
final SearchService service = getInstanceFromNode(SearchService.class);
Index index = resolveIndex("throttled_threadpool_index");
assertTrue(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled());
client().prepareIndex("throttled_threadpool_index", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
SearchResponse searchResponse = client().prepareSearch("throttled_threadpool_index").setSize(1).get();
assertSearchHits(searchResponse, "1");
// we add a search action listener in a plugin above to assert that this is actually used
client().execute(
InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.INSTANCE,
new InternalOrPrivateSettingsPlugin.UpdateInternalOrPrivateAction.Request("throttled_threadpool_index",
IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), "false"))
.actionGet();

IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
client().admin().indices().prepareUpdateSettings("throttled_threadpool_index").setSettings(Settings.builder().put(IndexSettings
.INDEX_SEARCH_THROTTLED.getKey(), false)).get());
assertEquals("can not update private setting [index.search.throttled]; this setting is managed by Elasticsearch",
iae.getMessage());
assertFalse(service.getIndicesService().indexServiceSafe(index).getIndexSettings().isSearchThrottled());
ShardSearchLocalRequest req = new ShardSearchLocalRequest(new ShardId(index, 0), 1, SearchType.QUERY_THEN_FETCH, null,
Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, false, null, null);
Thread currentThread = Thread.currentThread();
// we still make sure can match is executed on the network thread
service.canMatch(req, ActionListener.wrap(r -> assertSame(Thread.currentThread(), currentThread), e -> fail("unexpected")));
}
}
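The executeQueryPhase/executeFetchPhase hunk above switches the test from a blocking return value to a listener argument, with a PlainActionFuture used to wait for the result. A stripped-down sketch of that pattern, with the request and task construction elided (shardSearchRequest, fetchRequest and searchTask stand in for the arguments shown in the test above):

PlainActionFuture<SearchPhaseResult> queryListener = new PlainActionFuture<>();
service.executeQueryPhase(shardSearchRequest, searchTask, queryListener);
SearchPhaseResult queryResult = queryListener.get(); // the test thread blocks until the phase completes

PlainActionFuture<FetchSearchResult> fetchListener = new PlainActionFuture<>();
service.executeFetchPhase(fetchRequest, searchTask, fetchListener);
FetchSearchResult fetchResult = fetchListener.get();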
@@ -18,7 +18,6 @@
*/
package org.elasticsearch.transport;

import java.util.function.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
@@ -56,6 +55,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;

import static org.hamcrest.CoreMatchers.containsString;
@@ -578,7 +578,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), "index_not_found",
null, remoteIndicesByCluster,
new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch));
assertTrue(latch.await(1, TimeUnit.SECONDS));
assertTrue(latch.await(2, TimeUnit.SECONDS));
assertNull(response.get());
assertNotNull(failure.get());
assertThat(failure.get(), instanceOf(RemoteTransportException.class));
@@ -62,7 +62,8 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService {
usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes());
paths[0] = path;
FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), null, paths);
return new NodeStats(new DiscoveryNode(nodeName, ESTestCase.buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
return new NodeStats(
new DiscoveryNode(nodeName, ESTestCase.buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT),
System.currentTimeMillis(),
null, null, null, null, null,
fsInfo,
@@ -107,7 +108,8 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService {
@Override
public ClusterInfo getClusterInfo() {
ClusterInfo clusterInfo = super.getClusterInfo();
return new DevNullClusterInfo(clusterInfo.getNodeLeastAvailableDiskUsages(), clusterInfo.getNodeMostAvailableDiskUsages(), clusterInfo.shardSizes);
return new DevNullClusterInfo(clusterInfo.getNodeLeastAvailableDiskUsages(),
clusterInfo.getNodeMostAvailableDiskUsages(), clusterInfo.shardSizes);
}

/**
@@ -40,39 +40,52 @@ public class TestShardRouting {
}

public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state) {
return new ShardRouting(shardId, currentNodeId, null, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1);
return new ShardRouting(shardId, currentNodeId, null, primary, state, buildRecoveryTarget(primary, state),
buildUnassignedInfo(state), buildAllocationId(state), -1);
}

public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state, RecoverySource recoverySource) {
return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource, buildUnassignedInfo(state), buildAllocationId(state), -1);
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary,
ShardRoutingState state, RecoverySource recoverySource) {
return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource,
buildUnassignedInfo(state), buildAllocationId(state), -1);
}

public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state) {
return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId, primary, state);
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId,
boolean primary, ShardRoutingState state) {
return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId,
relocatingNodeId, primary, state);
}

public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1);
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId,
boolean primary, ShardRoutingState state) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state,
buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1);
}

public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId) {
return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId, primary, state, allocationId);
public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId) {
return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId,
relocatingNodeId, primary, state, allocationId);
}

public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), allocationId, -1);
public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, String relocatingNodeId, boolean primary,
ShardRoutingState state, AllocationId allocationId) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state,
buildRecoveryTarget(primary, state), buildUnassignedInfo(state), allocationId, -1);
}

public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId, primary, state, unassignedInfo);
return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, relocatingNodeId,
primary, state, unassignedInfo);
}

public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId,
String relocatingNodeId, boolean primary, ShardRoutingState state,
UnassignedInfo unassignedInfo) {
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state), unassignedInfo, buildAllocationId(state), -1);
return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, buildRecoveryTarget(primary, state),
unassignedInfo, buildAllocationId(state), -1);
}

public static ShardRouting relocate(ShardRouting shardRouting, String relocatingNodeId, long expectedShardSize) {
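The TestShardRouting changes above are pure line-length re-wraps; the helper signatures are unchanged. For orientation, a couple of typical calls matching those signatures (index and node names are illustrative):

// a started primary on node-1
ShardRouting started = TestShardRouting.newShardRouting(
    new ShardId("test-index", IndexMetaData.INDEX_UUID_NA_VALUE, 0), "node-1", true, ShardRoutingState.STARTED);
// a primary relocating from node-1 to node-2
ShardRouting relocating = TestShardRouting.newShardRouting(
    new ShardId("test-index", IndexMetaData.INDEX_UUID_NA_VALUE, 0), "node-1", "node-2", true, ShardRoutingState.RELOCATING);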
@@ -21,150 +21,17 @@ package org.elasticsearch.common.inject;
import org.elasticsearch.common.inject.spi.Element;
import org.elasticsearch.common.inject.spi.Elements;
import org.elasticsearch.common.inject.spi.InstanceBinding;
import org.elasticsearch.common.inject.spi.LinkedKeyBinding;
import org.elasticsearch.common.inject.spi.ProviderInstanceBinding;
import org.elasticsearch.common.inject.spi.ProviderLookup;
import org.elasticsearch.common.inject.spi.UntargettedBinding;
import org.elasticsearch.test.ESTestCase;

import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

/**
* Base testcase for testing {@link Module} implementations.
*/
public abstract class ModuleTestCase extends ESTestCase {

/** Configures the module and asserts "clazz" is bound to "to". */
public void assertBinding(Module module, Class<?> to, Class<?> clazz) {
List<Element> elements = Elements.getElements(module);
for (Element element : elements) {
if (element instanceof LinkedKeyBinding) {
LinkedKeyBinding<?> binding = (LinkedKeyBinding<?>) element;
if (to.equals(binding.getKey().getTypeLiteral().getType())) {
assertSame(clazz, binding.getLinkedKey().getTypeLiteral().getType());
return;
}
} else if (element instanceof UntargettedBinding) {
UntargettedBinding<?> binding = (UntargettedBinding<?>) element;
if (to.equals(binding.getKey().getTypeLiteral().getType())) {
assertSame(clazz, to);
return;
}
}
}
StringBuilder s = new StringBuilder();
for (Element element : elements) {
s.append(element).append("\n");
}
fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s);
}

/** Configures the module and asserts "clazz" is not bound to anything. */
public void assertNotBound(Module module, Class<?> clazz) {
List<Element> elements = Elements.getElements(module);
for (Element element : elements) {
if (element instanceof LinkedKeyBinding) {
LinkedKeyBinding<?> binding = (LinkedKeyBinding<?>) element;
if (clazz.equals(binding.getKey().getTypeLiteral().getType())) {
fail("Found binding for " + clazz.getName() + " to " + binding.getKey().getTypeLiteral().getType().getTypeName());
}
} else if (element instanceof UntargettedBinding) {
UntargettedBinding<?> binding = (UntargettedBinding<?>) element;
if (clazz.equals(binding.getKey().getTypeLiteral().getType())) {
fail("Found binding for " + clazz.getName());
}
}
}
}

/**
* Attempts to configure the module, and asserts an {@link IllegalArgumentException} is
* caught, containing the given messages
*/
public void assertBindingFailure(Module module, String... msgs) {
try {
List<Element> elements = Elements.getElements(module);
StringBuilder s = new StringBuilder();
for (Element element : elements) {
s.append(element).append("\n");
}
fail("Expected exception from configuring module. Found these bindings:\n" + s);
} catch (IllegalArgumentException e) {
for (String msg : msgs) {
assertTrue(e.getMessage() + " didn't contain: " + msg, e.getMessage().contains(msg));
}
}
}

/**
* Configures the module and checks a Map<String, Class> of the "to" class
* is bound to "theClass".
*/
public void assertMapMultiBinding(Module module, Class<?> to, Class<?> theClass) {
List<Element> elements = Elements.getElements(module);
Set<Type> bindings = new HashSet<>();
boolean providerFound = false;
for (Element element : elements) {
if (element instanceof LinkedKeyBinding) {
LinkedKeyBinding<?> binding = (LinkedKeyBinding<?>) element;
if (to.equals(binding.getKey().getTypeLiteral().getType())) {
bindings.add(binding.getLinkedKey().getTypeLiteral().getType());
}
} else if (element instanceof ProviderInstanceBinding) {
ProviderInstanceBinding<?> binding = (ProviderInstanceBinding<?>) element;
String setType = binding.getKey().getTypeLiteral().getType().toString();
if (setType.equals("java.util.Map<java.lang.String, " + to.getName() + ">")) {
providerFound = true;
}
}
}

if (bindings.contains(theClass) == false) {
fail("Expected to find " + theClass.getName() + " as binding to " + to.getName() + ", found these classes:\n" + bindings);
}
assertTrue("Did not find provider for map of " + to.getName(), providerFound);
}


/**
* Configures the module and checks a Set of the "to" class
* is bound to "classes". There may be more classes bound
* to "to" than just "classes".
*/
public void assertSetMultiBinding(Module module, Class<?> to, Class<?>... classes) {
List<Element> elements = Elements.getElements(module);
Set<Type> bindings = new HashSet<>();
boolean providerFound = false;
for (Element element : elements) {
if (element instanceof LinkedKeyBinding) {
LinkedKeyBinding<?> binding = (LinkedKeyBinding<?>) element;
if (to.equals(binding.getKey().getTypeLiteral().getType())) {
bindings.add(binding.getLinkedKey().getTypeLiteral().getType());
}
} else if (element instanceof ProviderInstanceBinding) {
ProviderInstanceBinding<?> binding = (ProviderInstanceBinding<?>) element;
String setType = binding.getKey().getTypeLiteral().getType().toString();
if (setType.equals("java.util.Set<" + to.getName() + ">")) {
providerFound = true;
}
}
}

for (Class<?> clazz : classes) {
if (bindings.contains(clazz) == false) {
fail("Expected to find " + clazz.getName() + " as set binding to " + to.getName() + ", found these classes:\n" + bindings);
}
}
assertTrue("Did not find provider for set of " + to.getName(), providerFound);
}

/**
* Configures the module, and ensures an instance is bound to the "to" class, and the
* provided tester returns true on the instance.
@@ -173,33 +40,12 @@ public abstract class ModuleTestCase extends ESTestCase {
assertInstanceBindingWithAnnotation(module, to, tester, null);
}

/**
* Configures the module, and returns an instance bound to the "to" class.
*/
public static <T> T bindAndGetInstance(Module module, Class<T> to) {
List<Element> elements = Elements.getElements(module);
for (Element element : elements) {
if (element instanceof InstanceBinding) {
InstanceBinding<?> binding = (InstanceBinding<?>) element;
if (to.equals(binding.getKey().getTypeLiteral().getType())) {
return to.cast(binding.getInstance());
}
} else if (element instanceof ProviderInstanceBinding) {
ProviderInstanceBinding<?> binding = (ProviderInstanceBinding<?>) element;
if (to.equals(binding.getKey().getTypeLiteral().getType())) {
return to.cast(binding.getProviderInstance().get());
}
}
}
fail("can't get instance for class " + to);
return null; // won't happen ;)
}

/**
* Like {@link #assertInstanceBinding(Module, Class, Predicate)}, but filters the
* classes checked by the given annotation.
*/
public <T> void assertInstanceBindingWithAnnotation(Module module, Class<T> to, Predicate<T> tester, Class<? extends Annotation> annotation) {
private <T> void assertInstanceBindingWithAnnotation(Module module, Class<T> to,
Predicate<T> tester, Class<? extends Annotation> annotation) {
List<Element> elements = Elements.getElements(module);
for (Element element : elements) {
if (element instanceof InstanceBinding) {
@@ -224,39 +70,4 @@ public abstract class ModuleTestCase extends ESTestCase {
}
fail("Did not find any instance binding to " + to.getName() + ". Found these bindings:\n" + s);
}

/**
* Configures the module, and ensures a map exists between the "keyType" and "valueType",
* and that all of the "expected" values are bound.
*/
@SuppressWarnings("unchecked")
public <K, V> void assertMapInstanceBinding(Module module, Class<K> keyType, Class<V> valueType, Map<K, V> expected) throws Exception {
// this method is insane because java type erasure makes it incredibly difficult...
Map<K, Key<?>> keys = new HashMap<>();
Map<Key<?>, V> values = new HashMap<>();
List<Element> elements = Elements.getElements(module);
for (Element element : elements) {
if (element instanceof InstanceBinding) {
InstanceBinding<?> binding = (InstanceBinding<?>) element;
if (binding.getKey().getRawType().equals(valueType)) {
values.put(binding.getKey(), (V) binding.getInstance());
} else if (binding.getInstance() instanceof Map.Entry) {
Map.Entry<?, ?> entry = (Map.Entry<?, ?>) binding.getInstance();
Object key = entry.getKey();
Object providerValue = entry.getValue();
if (key.getClass().equals(keyType) && providerValue instanceof ProviderLookup.ProviderImpl) {
ProviderLookup.ProviderImpl<?> provider = (ProviderLookup.ProviderImpl<?>) providerValue;
keys.put((K) key, provider.getKey());
}
}
}
}
for (Map.Entry<K, V> entry : expected.entrySet()) {
Key<?> valueKey = keys.get(entry.getKey());
assertNotNull("Could not find binding for key [" + entry.getKey() + "], found these keys:\n" + keys.keySet(), valueKey);
V value = values.get(valueKey);
assertNotNull("Could not find value for instance key [" + valueKey + "], found these bindings:\n" + elements);
assertEquals(entry.getValue(), value);
}
}
}
@@ -46,12 +46,17 @@ public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase {
*/
private static final int LINES_TO_CHECK = 10;

/**
* The node name to expect in the logs file.
*/
protected abstract org.hamcrest.Matcher<String> nodeNameMatcher();

/**
* Open the log file. This is delegated to subclasses because the test
* framework doesn't have permission to read from the log file but
* subclasses can grant themselves that permission.
*/
protected abstract BufferedReader openReader(Path logFile) throws IOException ;
protected abstract BufferedReader openReader(Path logFile);

public void testNodeNameIsOnAllLinesOfLog() throws IOException {
BufferedReader logReader = openReader(getLogFile());
@@ -64,7 +69,7 @@ public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase {
}
String nodeName = m.group(1);

assertNotEquals("unknown", nodeName);
assertThat(nodeName, nodeNameMatcher());

int lineNumber = 1;
while (true) {
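Since openReader no longer declares IOException, a subclass that grants itself read access has to wrap the checked exception itself. One possible shape for such an override (a sketch only; the privileged block and the UncheckedIOException wrapping are assumptions, not part of this change):

@Override
protected BufferedReader openReader(Path logFile) {
    // run with the subclass's own permissions so the security manager allows the read
    return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
        try {
            return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    });
}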
@@ -85,7 +85,7 @@ public class MockNode extends Node {
final Path configPath,
final boolean forbidPrivateIndexSettings) {
this(
InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), configPath),
InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), configPath, () -> "mock_ node"),
classpathPlugins,
forbidPrivateIndexSettings);
}
@@ -174,9 +174,4 @@ public class MockNode extends Node {
return new MockHttpTransport();
}
}

@Override
protected void registerDerivedNodeNameWithLogger(String nodeName) {
// Nothing to do because test uses the thread name
}
}
@@ -93,6 +93,7 @@ import java.util.function.Function;
import java.util.stream.Stream;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.stream.Collectors.toList;

public abstract class AbstractBuilderTestCase extends ESTestCase {
@@ -330,7 +331,10 @@ public abstract class AbstractBuilderTestCase extends ESTestCase {
AbstractBuilderTestCase testCase,
boolean registerType) throws IOException {
this.nowInMillis = nowInMillis;
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings);
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, emptyMap(),
null, () -> {
throw new AssertionError("node.name must be set");
});
PluginsService pluginsService;
pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins);
@@ -142,7 +142,8 @@ public class BackgroundIndexer implements AutoCloseable {
if (batch) {
int batchSize = threadRandom.nextInt(20) + 1;
if (hasBudget.get()) {
batchSize = Math.max(Math.min(batchSize, availableBudget.availablePermits()), 1);// always try to get at least one
// always try to get at least one
batchSize = Math.max(Math.min(batchSize, availableBudget.availablePermits()), 1);
if (!availableBudget.tryAcquire(batchSize, 250, TimeUnit.MILLISECONDS)) {
// time out -> check if we have to stop.
continue;
@@ -155,7 +156,8 @@ public class BackgroundIndexer implements AutoCloseable {
if (useAutoGeneratedIDs) {
bulkRequest.add(client.prepareIndex(index, type).setSource(generateSource(id, threadRandom)));
} else {
bulkRequest.add(client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)));
bulkRequest.add(client.prepareIndex(index, type, Long.toString(id))
.setSource(generateSource(id, threadRandom)));
}
}
BulkResponse bulkResponse = bulkRequest.get();
@@ -165,7 +167,8 @@ public class BackgroundIndexer implements AutoCloseable {
assert add : "ID: " + bulkItemResponse.getId() + " already used";
} else {
throw new ElasticsearchException("bulk request failure, id: ["
+ bulkItemResponse.getFailure().getId() + "] message: " + bulkItemResponse.getFailure().getMessage());
+ bulkItemResponse.getFailure().getId() + "] message: "
+ bulkItemResponse.getFailure().getMessage());
}
}

@@ -177,11 +180,13 @@ public class BackgroundIndexer implements AutoCloseable {
}
id = idGenerator.incrementAndGet();
if (useAutoGeneratedIDs) {
IndexResponse indexResponse = client.prepareIndex(index, type).setSource(generateSource(id, threadRandom)).get();
IndexResponse indexResponse = client.prepareIndex(index, type)
.setSource(generateSource(id, threadRandom)).get();
boolean add = ids.add(indexResponse.getId());
assert add : "ID: " + indexResponse.getId() + " already used";
} else {
IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get();
IndexResponse indexResponse = client.prepareIndex(index, type, Long.toString(id))
.setSource(generateSource(id, threadRandom)).get();
boolean add = ids.add(indexResponse.getId());
assert add : "ID: " + indexResponse.getId() + " already used";
}
@@ -93,7 +93,8 @@ public final class CorruptionUtils {
// rewrite
raf.position(filePointer);
raf.write(bb);
logger.info("Corrupting file -- flipping at position {} from {} to {} file: {}", filePointer, Integer.toHexString(oldValue), Integer.toHexString(newValue), fileToCorrupt.getFileName());
logger.info("Corrupting file -- flipping at position {} from {} to {} file: {}", filePointer,
Integer.toHexString(oldValue), Integer.toHexString(newValue), fileToCorrupt.getFileName());
}
long checksumAfterCorruption;
long actualChecksumAfterCorruption;
@@ -109,7 +110,8 @@ public final class CorruptionUtils {
msg.append("before: [").append(checksumBeforeCorruption).append("] ");
msg.append("after: [").append(checksumAfterCorruption).append("] ");
msg.append("checksum value after corruption: ").append(actualChecksumAfterCorruption).append("] ");
msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
msg.append("file: ").append(fileToCorrupt.getFileName()).append(" length: ");
msg.append(dir.fileLength(fileToCorrupt.getFileName().toString()));
logger.info("Checksum {}", msg);
assumeTrue("Checksum collision - " + msg.toString(),
checksumAfterCorruption != checksumBeforeCorruption // collision