Merge branch 'master' into feature/rank-eval

Christoph Büscher 2016-09-15 11:38:40 +02:00
commit f91ee9a897
274 changed files with 4388 additions and 3025 deletions

View File

@ -368,7 +368,8 @@ These are the linux flavors the Vagrantfile currently supports:
* debian-8 aka jessie, the current debian stable distribution
* centos-6
* centos-7
* fedora-22
* fedora-24
* oel-6 aka Oracle Enterprise Linux 6
* oel-7 aka Oracle Enterprise Linux 7
* sles-12
* opensuse-13
@ -377,7 +378,6 @@ We're missing the following from the support matrix because there aren't high
quality boxes available in vagrant atlas:
* sles-11
* oel-6
We're missing the following because our tests are very linux/bash centric:

View File

@ -148,6 +148,9 @@ class PrecommitTasks {
checkstyleTask.dependsOn(task)
task.dependsOn(copyCheckstyleConf)
task.inputs.file(checkstyleSuppressions)
task.reports {
html.enabled false
}
}
}
return checkstyleTask

View File

@ -72,7 +72,7 @@ public class RestNoopBulkAction extends BaseRestHandler {
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, true);
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, null, defaultPipeline, null, true);
// short circuit the call to the transport layer
BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);

View File

@ -216,7 +216,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
public void onFailure(Exception e) {
if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
// We haven't yet created the index for the task results so it can't be found.
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or stored its results", e,
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e,
request.getTaskId()));
} else {
listener.onFailure(e);

View File

@ -293,7 +293,7 @@ public class BulkProcessor implements Closeable {
}
public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true);
bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true);
executeIfNeeded();
return this;
}

View File

@ -35,12 +35,15 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.ArrayList;
@ -57,6 +60,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* @see org.elasticsearch.client.Client#bulk(BulkRequest)
*/
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(BulkRequest.class));
private static final int REQUEST_OVERHEAD = 50;
@ -257,17 +262,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
* Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
return add(data, defaultIndex, defaultType, null, null, null, null, true);
return add(data, defaultIndex, defaultType, null, null, null, null, null, true);
}
/**
* Adds framed data in binary format
*/
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception {
return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex);
return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex);
}
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
XContent xContent = XContentFactory.xContent(data);
int line = 0;
int from = 0;
@ -301,6 +306,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
String id = null;
String routing = defaultRouting;
String parent = null;
FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
String[] fields = defaultFields;
String timestamp = null;
TimeValue ttl = null;
@ -353,16 +359,21 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
pipeline = parser.text();
} else if ("fields".equals(currentFieldName)) {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
} else if ("_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
} else {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
List<Object> values = parser.list();
fields = values.toArray(new String[values.size()]);
} else {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
}
} else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
} else if (token != XContentParser.Token.VALUE_NULL) {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
}
@ -402,7 +413,10 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
.version(version).versionType(versionType)
.routing(routing)
.parent(parent)
.source(data.slice(from, nextMarker - from));
.fromXContent(data.slice(from, nextMarker - from));
if (fetchSourceContext != null) {
updateRequest.fetchSource(fetchSourceContext);
}
if (fields != null) {
updateRequest.fields(fields);
}
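Pulling this hunk together: the action/metadata line of a bulk update may now carry "_source" (parsed into a FetchSourceContext, whether given as a value or an object), "fields" survives only behind a deprecation warning, and the widened add(...) overload lets callers supply a default FetchSourceContext for the whole body. A minimal sketch of the new overload, assuming illustrative index/type/id names and a caller that handles the declared Exception:

    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

    // Bulk body whose update action asks for the updated _source back in-line.
    String body =
        "{\"update\": {\"_index\": \"test\", \"_type\": \"doc\", \"_id\": \"1\", \"_source\": true}}\n" +
        "{\"doc\": {\"counter\": 1}}\n";

    BulkRequest bulkRequest = new BulkRequest();
    // New 9-argument overload; the sixth argument is the default FetchSourceContext
    // applied to update actions whose metadata line does not set "_source" itself.
    bulkRequest.add(new BytesArray(body), "test", "doc", null, null,
            new FetchSourceContext(true), null, null, true);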

View File

@ -251,7 +251,8 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
// add the response
IndexResponse indexResponse = result.getResponse();
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult());
if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
(updateRequest.fields() != null && updateRequest.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
}

View File

@ -40,7 +40,7 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
private String routing;
private String preference;
private QueryBuilder query;
private String[] fields;
private String[] storedFields;
private FetchSourceContext fetchSourceContext;
private String[] filteringAlias = Strings.EMPTY_ARRAY;
@ -122,12 +122,12 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
}
public String[] fields() {
return fields;
public String[] storedFields() {
return storedFields;
}
public ExplainRequest fields(String[] fields) {
this.fields = fields;
public ExplainRequest storedFields(String[] fields) {
this.storedFields = fields;
return this;
}
@ -167,8 +167,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
preference = in.readOptionalString();
query = in.readNamedWriteable(QueryBuilder.class);
filteringAlias = in.readStringArray();
fields = in.readOptionalStringArray();
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
storedFields = in.readOptionalStringArray();
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
nowInMillis = in.readVLong();
}
@ -181,8 +181,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
out.writeOptionalString(preference);
out.writeNamedWriteable(query);
out.writeStringArray(filteringAlias);
out.writeOptionalStringArray(fields);
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalStringArray(storedFields);
out.writeOptionalWriteable(fetchSourceContext);
out.writeVLong(nowInMillis);
}
}

View File

@ -88,10 +88,10 @@ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder<Ex
}
/**
* Explicitly specify the fields that will be returned for the explained document. By default, nothing is returned.
* Explicitly specify the stored fields that will be returned for the explained document. By default, nothing is returned.
*/
public ExplainRequestBuilder setFields(String... fields) {
request.fields(fields);
public ExplainRequestBuilder setStoredFields(String... fields) {
request.storedFields(fields);
return this;
}

View File

@ -106,12 +106,11 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
Rescorer rescorer = ctx.rescorer();
explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
}
if (request.fields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
if (request.storedFields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
// Advantage is that we're not opening a second searcher to retrieve the _source. Also
// because we are working in the same searcher in engineGetResult we can be sure that a
// doc isn't deleted between the initial get and this call.
GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.fields(),
request.fetchSourceContext());
GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext());
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
} else {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);

View File

@ -51,7 +51,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
private String parent;
private String preference;
private String[] fields;
private String[] storedFields;
private FetchSourceContext fetchSourceContext;
@ -186,20 +186,20 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
}
/**
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
public GetRequest fields(String... fields) {
this.fields = fields;
public GetRequest storedFields(String... fields) {
this.storedFields = fields;
return this;
}
/**
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
public String[] fields() {
return this.fields;
public String[] storedFields() {
return this.storedFields;
}
/**
@ -260,18 +260,12 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
parent = in.readOptionalString();
preference = in.readOptionalString();
refresh = in.readBoolean();
int size = in.readInt();
if (size >= 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
storedFields = in.readOptionalStringArray();
realtime = in.readBoolean();
this.versionType = VersionType.fromValue(in.readByte());
this.version = in.readLong();
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
}
@Override
@ -284,18 +278,11 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
out.writeOptionalString(preference);
out.writeBoolean(refresh);
if (fields == null) {
out.writeInt(-1);
} else {
out.writeInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
out.writeOptionalStringArray(storedFields);
out.writeBoolean(realtime);
out.writeByte(versionType.getValue());
out.writeLong(version);
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalWriteable(fetchSourceContext);
}
@Override

View File

@ -88,8 +88,8 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
* Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
* field will be returned.
*/
public GetRequestBuilder setFields(String... fields) {
request.fields(fields);
public GetRequestBuilder setStoredFields(String... fields) {
request.storedFields(fields);
return this;
}
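With fields(...) renamed to storedFields(...) on both GetRequest and its builder, a migrated lookup reads roughly as follows (the index/type/id coordinates and the client handle are placeholders, not part of the commit):

    // Plain request object:
    GetRequest getRequest = new GetRequest("test", "doc", "1");
    getRequest.storedFields("counter", "tags");       // was: getRequest.fields(...)

    // Builder form over an existing client:
    client.prepareGet("test", "doc", "1")
            .setStoredFields("counter", "tags")       // was: setFields(...)
            .get();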

View File

@ -134,14 +134,26 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
return getResult.getSource();
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
public Map<String, GetField> getFields() {
return getResult.getFields();
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
public GetField getField(String name) {
return getResult.field(name);
}
/**
* @deprecated Use {@link GetResponse#getSource()} instead
*/
@Deprecated
@Override
public Iterator<GetField> iterator() {
return getResult.iterator();
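Because getFields(), getField(String) and the iterator are now deprecated in favour of getSource(), call sites migrate along these lines (a sketch; the "counter" field name is illustrative):

    import java.util.Map;

    static Object readCounter(GetResponse response) {
        // Deprecated: response.getField("counter").getValue()
        Map<String, Object> source = response.getSource();   // preferred accessor
        return source.get("counter");
    }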

View File

@ -28,6 +28,7 @@ import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@ -58,7 +59,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
private String id;
private String routing;
private String parent;
private String[] fields;
private String[] storedFields;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
private FetchSourceContext fetchSourceContext;
@ -136,13 +137,13 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
return parent;
}
public Item fields(String... fields) {
this.fields = fields;
public Item storedFields(String... fields) {
this.storedFields = fields;
return this;
}
public String[] fields() {
return this.fields;
public String[] storedFields() {
return this.storedFields;
}
public long version() {
@ -188,17 +189,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
id = in.readString();
routing = in.readOptionalString();
parent = in.readOptionalString();
int size = in.readVInt();
if (size > 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
storedFields = in.readOptionalStringArray();
version = in.readLong();
versionType = VersionType.fromValue(in.readByte());
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
}
@Override
@ -208,19 +203,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
out.writeString(id);
out.writeOptionalString(routing);
out.writeOptionalString(parent);
if (fields == null) {
out.writeVInt(0);
} else {
out.writeVInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
out.writeOptionalStringArray(storedFields);
out.writeLong(version);
out.writeByte(versionType.getValue());
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalWriteable(fetchSourceContext);
}
@Override
@ -233,7 +220,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
if (version != item.version) return false;
if (fetchSourceContext != null ? !fetchSourceContext.equals(item.fetchSourceContext) : item.fetchSourceContext != null)
return false;
if (!Arrays.equals(fields, item.fields)) return false;
if (!Arrays.equals(storedFields, item.storedFields)) return false;
if (!id.equals(item.id)) return false;
if (!index.equals(item.index)) return false;
if (routing != null ? !routing.equals(item.routing) : item.routing != null) return false;
@ -251,7 +238,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
result = 31 * result + id.hashCode();
result = 31 * result + (routing != null ? routing.hashCode() : 0);
result = 31 * result + (parent != null ? parent.hashCode() : 0);
result = 31 * result + (fields != null ? Arrays.hashCode(fields) : 0);
result = 31 * result + (storedFields != null ? Arrays.hashCode(storedFields) : 0);
result = 31 * result + Long.hashCode(version);
result = 31 * result + versionType.hashCode();
result = 31 * result + (fetchSourceContext != null ? fetchSourceContext.hashCode() : 0);
@ -379,7 +366,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
String id = null;
String routing = defaultRouting;
String parent = null;
List<String> fields = null;
List<String> storedFields = null;
long version = Versions.MATCH_ANY;
VersionType versionType = VersionType.INTERNAL;
@ -403,8 +390,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
} else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
parent = parser.text();
} else if ("fields".equals(currentFieldName)) {
fields = new ArrayList<>();
fields.add(parser.text());
throw new ParsingException(parser.getTokenLocation(),
"Unsupported field [fields] used, expected [stored_fields] instead");
} else if ("stored_fields".equals(currentFieldName)) {
storedFields = new ArrayList<>();
storedFields.add(parser.text());
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
@ -420,9 +410,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
fields = new ArrayList<>();
throw new ParsingException(parser.getTokenLocation(),
"Unsupported field [fields] used, expected [stored_fields] instead");
} else if ("stored_fields".equals(currentFieldName)) {
storedFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
fields.add(parser.text());
storedFields.add(parser.text());
}
} else if ("_source".equals(currentFieldName)) {
ArrayList<String> includes = new ArrayList<>();
@ -464,12 +457,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
}
}
String[] aFields;
if (fields != null) {
aFields = fields.toArray(new String[fields.size()]);
if (storedFields != null) {
aFields = storedFields.toArray(new String[storedFields.size()]);
} else {
aFields = defaultFields;
}
items.add(new Item(index, type, id).routing(routing).fields(aFields).parent(parent).version(version).versionType(versionType)
items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version).versionType(versionType)
.fetchSourceContext(fetchSourceContext == null ? defaultFetchSource : fetchSourceContext));
}
}
@ -484,7 +477,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
if (!token.isValue()) {
throw new IllegalArgumentException("ids array element should only contain ids");
}
items.add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
}
}
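The REST body change is a hard break: "fields" in an mget item now throws a ParsingException and clients must send "stored_fields" instead. The Java API follows with the same rename on Item; a sketch with placeholder coordinates:

    MultiGetRequest multiGet = new MultiGetRequest();
    multiGet.add(new MultiGetRequest.Item("test", "doc", "1")
            .storedFields("counter"));                // was: .fields("counter")
    multiGet.add(new MultiGetRequest.Item("test", "doc", "2"));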

View File

@ -92,7 +92,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
indexShard.refresh("refresh_flag_get");
}
GetResult result = indexShard.getService().get(request.type(), request.id(), request.fields(),
GetResult result = indexShard.getService().get(request.type(), request.id(), request.storedFields(),
request.realtime(), request.version(), request.versionType(), request.fetchSourceContext());
return new GetResponse(result);
}

View File

@ -88,7 +88,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
for (int i = 0; i < request.locations.size(); i++) {
MultiGetRequest.Item item = request.items.get(i);
try {
GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(),
GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.storedFields(), request.realtime(), item.version(),
item.versionType(), item.fetchSourceContext());
response.add(request.locations.get(i), new GetResponse(getResult));
} catch (Exception e) {

View File

@ -43,9 +43,7 @@ import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.query.ScrollQuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -71,36 +69,10 @@ public class SearchTransportService extends AbstractComponent {
public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]";
private final TransportService transportService;
private final SearchService searchService;
SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
SearchTransportService(Settings settings, TransportService transportService) {
super(settings);
this.transportService = transportService;
this.searchService = searchService;
transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
new FreeContextTransportHandler<>());
transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
new FreeContextTransportHandler<>());
transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME,
new ClearScrollContextsTransportHandler());
transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
new SearchDfsTransportHandler());
transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryTransportHandler());
transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryByIdTransportHandler());
transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryScrollTransportHandler());
transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryFetchTransportHandler());
transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryQueryFetchTransportHandler());
transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
new SearchQueryFetchScrollTransportHandler());
transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
new FetchByIdTransportHandler<>());
transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
new FetchByIdTransportHandler<>());
}
public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) {
@ -124,8 +96,8 @@ public class SearchTransportService extends AbstractComponent {
}
public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener<TransportResponse> listener) {
transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(),
new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE,
new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
}
public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request,
@ -278,87 +250,66 @@ public class SearchTransportService extends AbstractComponent {
}
}
class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest>
implements TransportRequestHandler<FreeContextRequest> {
@Override
public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception {
boolean freed = searchService.freeContext(request.id());
channel.sendResponse(new SearchFreeContextResponse(freed));
}
}
static class ClearScrollContextsRequest extends TransportRequest {
}
class ClearScrollContextsTransportHandler implements TransportRequestHandler<ClearScrollContextsRequest> {
@Override
public void messageReceived(ClearScrollContextsRequest request, TransportChannel channel) throws Exception {
searchService.freeAllScrollContexts();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
}
class SearchDfsTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
DfsSearchResult result = searchService.executeDfsPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
QuerySearchResultProvider result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryByIdTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
@Override
public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
QuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
@Override
public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryFetchTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryQueryFetchTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
@Override
public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
}
class FetchByIdTransportHandler<Request extends ShardFetchRequest> implements TransportRequestHandler<Request> {
@Override
public void messageReceived(Request request, TransportChannel channel) throws Exception {
FetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
}
class SearchQueryFetchScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
@Override
public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
}
public static void registerRequestHandler(TransportService transportService, SearchService searchService) {
transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
((request, channel) -> {
boolean freed = searchService.freeContext(request.id());
channel.sendResponse(new SearchFreeContextResponse(freed));
}));
transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
(request, channel) -> {
boolean freed = searchService.freeContext(request.id());
channel.sendResponse(new SearchFreeContextResponse(freed));
});
transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE,
ThreadPool.Names.SAME, (request, channel) -> {
searchService.freeAllScrollContexts();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
});
transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
DfsSearchResult result = searchService.executeDfsPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QuerySearchResultProvider result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
QueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
FetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
(request, channel) -> {
FetchSearchResult result = searchService.executeFetchPhase(request);
channel.sendResponse(result);
});
}
}

View File

@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -44,8 +43,6 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;
/**
*/
public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {
private final ClusterService clusterService;
@ -53,11 +50,11 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
@Inject
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ClusterService clusterService, SearchService searchService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
this.searchTransportService = new SearchTransportService(settings, transportService);
}
@Override

View File

@ -60,7 +60,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
indexNameExpressionResolver) {
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
this.searchTransportService = new SearchTransportService(settings, transportService);
SearchTransportService.registerRequestHandler(transportService, searchService);
this.clusterService = clusterService;
}

View File

@ -28,7 +28,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -47,13 +46,12 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
@Inject
public TransportSearchScrollAction(Settings settings, BigArrays bigArrays, ThreadPool threadPool, ScriptService scriptService,
TransportService transportService,
ClusterService clusterService, SearchService searchService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
TransportService transportService, ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
SearchScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
this.searchTransportService = new SearchTransportService(settings, transportService);
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
}

View File

@ -180,7 +180,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
super(item.index());
this.id = item.id();
this.type = item.type();
this.selectedFields(item.fields());
this.selectedFields(item.storedFields());
this.routing(item.routing());
this.parent(item.parent());
}

View File

@ -186,7 +186,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
@Override
public void onResponse(IndexResponse response) {
UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
if (request.fields() != null && request.fields().length > 0) {
if ((request.fetchSource() != null && request.fetchSource().fetchSource()) ||
(request.fields() != null && request.fields().length > 0)) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
} else {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.update;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
@ -28,9 +29,11 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@ -51,6 +54,7 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.lookup.SourceLookup;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -267,17 +271,19 @@ public class UpdateHelper extends AbstractComponent {
}
/**
* Extracts the fields from the updated document to be returned in an update response
* Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in an update response.
* For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in an update response
*/
public GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, final Map<String, Object> source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) {
if (request.fields() == null || request.fields().length == 0) {
if ((request.fields() == null || request.fields().length == 0) &&
(request.fetchSource() == null || request.fetchSource().fetchSource() == false)) {
return null;
}
SourceLookup sourceLookup = new SourceLookup();
sourceLookup.setSource(source);
boolean sourceRequested = false;
Map<String, GetField> fields = null;
if (request.fields() != null && request.fields().length > 0) {
SourceLookup sourceLookup = new SourceLookup();
sourceLookup.setSource(source);
for (String field : request.fields()) {
if (field.equals("_source")) {
sourceRequested = true;
@ -298,8 +304,26 @@ public class UpdateHelper extends AbstractComponent {
}
}
BytesReference sourceFilteredAsBytes = sourceAsBytes;
if (request.fetchSource() != null && request.fetchSource().fetchSource()) {
sourceRequested = true;
if (request.fetchSource().includes().length > 0 || request.fetchSource().excludes().length > 0) {
Object value = sourceLookup.filter(request.fetchSource().includes(), request.fetchSource().excludes());
try {
final int initialCapacity = Math.min(1024, sourceAsBytes.length());
BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
try (XContentBuilder builder = new XContentBuilder(sourceContentType.xContent(), streamOutput)) {
builder.value(value);
sourceFilteredAsBytes = builder.bytes();
}
} catch (IOException e) {
throw new ElasticsearchException("Error filtering source", e);
}
}
}
// TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType)
return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceAsBytes : null, fields);
return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceFilteredAsBytes : null, fields);
}
public static class Result {

View File

@ -32,6 +32,8 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@ -42,6 +44,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import java.io.IOException;
import java.util.Collections;
@ -55,6 +58,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
*/
public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
implements DocumentRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(UpdateRequest.class));
private String type;
private String id;
@ -68,6 +73,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
Script script;
private String[] fields;
private FetchSourceContext fetchSourceContext;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
@ -373,17 +379,80 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
/**
* Explicitly specify the fields that will be returned. By default, nothing is returned.
* @deprecated Use {@link UpdateRequest#fetchSource(String[], String[])} instead
*/
@Deprecated
public UpdateRequest fields(String... fields) {
this.fields = fields;
return this;
}
/**
* Get the fields to be returned.
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include
* An optional include (optionally wildcarded) pattern to filter
* the returned _source
* @param exclude
* An optional exclude (optionally wildcarded) pattern to filter
* the returned _source
*/
public UpdateRequest fetchSource(@Nullable String include, @Nullable String exclude) {
this.fetchSourceContext = new FetchSourceContext(include, exclude);
return this;
}
/**
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes
* An optional list of include (optionally wildcarded) pattern to
* filter the returned _source
* @param excludes
* An optional list of exclude (optionally wildcarded) pattern to
* filter the returned _source
*/
public UpdateRequest fetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
this.fetchSourceContext = new FetchSourceContext(includes, excludes);
return this;
}
/**
* Indicates whether the response should contain the updated _source.
*/
public UpdateRequest fetchSource(boolean fetchSource) {
this.fetchSourceContext = new FetchSourceContext(fetchSource);
return this;
}
/**
* Explicitly set the fetch source context for this request
*/
public UpdateRequest fetchSource(FetchSourceContext context) {
this.fetchSourceContext = context;
return this;
}
/**
* Get the fields to be returned.
* @deprecated Use {@link UpdateRequest#fetchSource()} instead
*/
@Deprecated
public String[] fields() {
return this.fields;
return fields;
}
/**
* Gets the {@link FetchSourceContext} which defines how the _source should
* be fetched.
*/
public FetchSourceContext fetchSource() {
return fetchSourceContext;
}
/**
@ -618,16 +687,16 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return upsertRequest;
}
public UpdateRequest source(XContentBuilder source) throws Exception {
return source(source.bytes());
public UpdateRequest fromXContent(XContentBuilder source) throws Exception {
return fromXContent(source.bytes());
}
public UpdateRequest source(byte[] source) throws Exception {
return source(source, 0, source.length);
public UpdateRequest fromXContent(byte[] source) throws Exception {
return fromXContent(source, 0, source.length);
}
public UpdateRequest source(byte[] source, int offset, int length) throws Exception {
return source(new BytesArray(source, offset, length));
public UpdateRequest fromXContent(byte[] source, int offset, int length) throws Exception {
return fromXContent(new BytesArray(source, offset, length));
}
/**
@ -646,7 +715,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return detectNoop;
}
public UpdateRequest source(BytesReference source) throws Exception {
public UpdateRequest fromXContent(BytesReference source) throws Exception {
Script script = null;
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
XContentParser.Token token = parser.nextToken();
@ -685,6 +754,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
if (fields != null) {
fields(fields.toArray(new String[fields.size()]));
}
} else if ("_source".equals(currentFieldName)) {
fetchSourceContext = FetchSourceContext.parse(parser);
}
}
if (script != null) {
@ -729,13 +800,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
doc = new IndexRequest();
doc.readFrom(in);
}
int size = in.readInt();
if (size >= 0) {
fields = new String[size];
for (int i = 0; i < size; i++) {
fields[i] = in.readString();
}
}
fields = in.readOptionalStringArray();
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
if (in.readBoolean()) {
upsertRequest = new IndexRequest();
upsertRequest.readFrom(in);
@ -772,14 +838,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
doc.id(id);
doc.writeTo(out);
}
if (fields == null) {
out.writeInt(-1);
} else {
out.writeInt(fields.length);
for (String field : fields) {
out.writeString(field);
}
}
out.writeOptionalStringArray(fields);
out.writeOptionalWriteable(fetchSourceContext);
if (upsertRequest == null) {
out.writeBoolean(false);
} else {
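For callers, the new fetchSource(...) family replaces the deprecated fields(...) accessor, and the old source(...) parsers are renamed to fromXContent(...). A minimal sketch of requesting a filtered _source back from an update (index/type/id are placeholders):

    UpdateRequest updateRequest = new UpdateRequest("test", "doc", "1");
    // Return the updated document filtered to "counter"; include/exclude
    // patterns may contain simple wildcards.
    updateRequest.fetchSource(new String[]{"counter"}, new String[0]);
    // Or ask for the whole updated _source:
    //   updateRequest.fetchSource(true);
    // Deprecated BWC equivalent:
    //   updateRequest.fields("counter");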

View File

@ -25,17 +25,22 @@ import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.action.document.RestUpdateAction;
import org.elasticsearch.script.Script;
import java.util.Map;
public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder>
implements WriteRequestBuilder<UpdateRequestBuilder> {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));
public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) {
super(client, action, new UpdateRequest());
@ -90,12 +95,57 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
/**
* Explicitly specify the fields that will be returned. By default, nothing is returned.
* @deprecated Use {@link UpdateRequestBuilder#setFetchSource(String[], String[])} instead
*/
@Deprecated
public UpdateRequestBuilder setFields(String... fields) {
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
request.fields(fields);
return this;
}
/**
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include
* An optional include (optionally wildcarded) pattern to filter
* the returned _source
* @param exclude
* An optional exclude (optionally wildcarded) pattern to filter
* the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
request.fetchSource(include, exclude);
return this;
}
/**
* Indicate that _source should be returned, with an
* "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes
* An optional list of include (optionally wildcarded) pattern to
* filter the returned _source
* @param excludes
* An optional list of exclude (optionally wildcarded) pattern to
* filter the returned _source
*/
public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
request.fetchSource(includes, excludes);
return this;
}
/**
* Indicates whether the response should contain the updated _source.
*/
public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
request.fetchSource(fetchSource);
return this;
}
/**
* Sets the number of retries when a version conflict occurs because the document was updated between
* getting it and updating it. Defaults to 0.
@ -279,26 +329,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
return this;
}
public UpdateRequestBuilder setSource(XContentBuilder source) throws Exception {
request.source(source);
return this;
}
public UpdateRequestBuilder setSource(byte[] source) throws Exception {
request.source(source);
return this;
}
public UpdateRequestBuilder setSource(byte[] source, int offset, int length) throws Exception {
request.source(source, offset, length);
return this;
}
public UpdateRequestBuilder setSource(BytesReference source) throws Exception {
request.source(source);
return this;
}
/**
* Sets whether the specified doc parameter should be used as upsert document.
*/
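The builder mirrors those request-level methods. A usage sketch, assuming client is a Client instance you already hold, that setDoc(...) accepts the JSON string shown, and that the coordinates are placeholders:

    UpdateResponse response = client.prepareUpdate("test", "doc", "1")
            .setDoc("{\"counter\": 2}")
            .setFetchSource("counter", null)   // include "counter", no excludes
            .get();
    // The filtered _source is carried on the response's GetResult.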

View File

@ -227,12 +227,12 @@ final class Bootstrap {
}
/**
* This method is invoked by {@link Elasticsearch#main(String[])}
* to startup elasticsearch.
* This method is invoked by {@link Elasticsearch#main(String[])} to startup elasticsearch.
*/
static void init(
final boolean foreground,
final Path pidFile,
final boolean quiet,
final Map<String, String> esSettings) throws BootstrapException, NodeValidationException {
// Set the system property before anything has a chance to trigger its use
initLoggerPrefix();
@ -259,8 +259,9 @@ final class Bootstrap {
}
}
final boolean closeStandardStreams = (foreground == false) || quiet;
try {
if (!foreground) {
if (closeStandardStreams) {
final Logger rootLogger = ESLoggerFactory.getRootLogger();
final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
if (maybeConsoleAppender != null) {
@ -285,7 +286,7 @@ final class Bootstrap {
INSTANCE.start();
if (!foreground) {
if (closeStandardStreams) {
closeSysError();
}
} catch (NodeValidationException | RuntimeException e) {

View File

@ -26,7 +26,7 @@ import java.util.Map;
* Wrapper exception for checked exceptions thrown during the bootstrap process. Methods invoked
* during bootstrap should explicitly declare the checked exceptions that they can throw, rather
* than declaring the top-level checked exception {@link Exception}. This exception exists to wrap
* these checked exceptions so that {@link Bootstrap#init(boolean, Path, Map)} does not have to
* these checked exceptions so that {@link Bootstrap#init(boolean, Path, boolean, Map)} does not have to
* declare all of these checked exceptions.
*/
class BootstrapException extends Exception {

View File

@ -44,6 +44,7 @@ class Elasticsearch extends SettingCommand {
private final OptionSpecBuilder versionOption;
private final OptionSpecBuilder daemonizeOption;
private final OptionSpec<Path> pidfileOption;
private final OptionSpecBuilder quietOption;
// visible for testing
Elasticsearch() {
@ -58,6 +59,10 @@ class Elasticsearch extends SettingCommand {
.availableUnless(versionOption)
.withRequiredArg()
.withValuesConvertedBy(new PathConverter());
quietOption = parser.acceptsAll(Arrays.asList("q", "quiet"),
"Turns off standard ouput/error streams logging in console")
.availableUnless(versionOption)
.availableUnless(daemonizeOption);
}
/**
@ -92,17 +97,19 @@ class Elasticsearch extends SettingCommand {
final boolean daemonize = options.has(daemonizeOption);
final Path pidFile = pidfileOption.value(options);
final boolean quiet = options.has(quietOption);
try {
init(daemonize, pidFile, settings);
init(daemonize, pidFile, quiet, settings);
} catch (NodeValidationException e) {
throw new UserException(ExitCodes.CONFIG, e.getMessage());
}
}
void init(final boolean daemonize, final Path pidFile, final Map<String, String> esSettings) throws NodeValidationException {
void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map<String, String> esSettings)
throws NodeValidationException {
try {
Bootstrap.init(!daemonize, pidFile, esSettings);
Bootstrap.init(!daemonize, pidFile, quiet, esSettings);
} catch (BootstrapException | RuntimeException e) {
// format exceptions to the console in a special way
// to avoid 2MB stacktraces from guice, etc.

View File

@ -136,7 +136,6 @@ public abstract class TransportClient extends AbstractClient {
}
modules.add(networkModule);
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
modules.add(searchModule);
ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class));
modules.add(actionModule);

View File

@ -500,15 +500,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
}
}
//norelease - this can be removed?
Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
if (number_of_primaries != null && number_of_primaries <= 0) {
validationErrors.add("index must have 1 or more primary shards");
}
if (number_of_replicas != null && number_of_replicas < 0) {
validationErrors.add("index must have 0 or more replica shards");
}
return validationErrors;
}

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
@ -63,15 +64,21 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
private final IndicesService indicesService;
private final MetaDataCreateIndexService metaDataCreateIndexService;
private final NodeServicesProvider nodeServicesProvider;
private final IndexScopedSettings indexScopedSettings;
@Inject
public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService,
MetaDataCreateIndexService metaDataCreateIndexService,
AliasValidator aliasValidator, IndicesService indicesService,
NodeServicesProvider nodeServicesProvider,
IndexScopedSettings indexScopedSettings) {
super(settings);
this.clusterService = clusterService;
this.aliasValidator = aliasValidator;
this.indicesService = indicesService;
this.metaDataCreateIndexService = metaDataCreateIndexService;
this.nodeServicesProvider = nodeServicesProvider;
this.indexScopedSettings = indexScopedSettings;
}
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
@ -260,6 +267,14 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
validationErrors.add("template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
try {
indexScopedSettings.validate(request.settings);
} catch (IllegalArgumentException iae) {
validationErrors.add(iae.getMessage());
for (Throwable t : iae.getSuppressed()) {
validationErrors.add(t.getMessage());
}
}
List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
validationErrors.addAll(indexSettingsValidation);
if (!validationErrors.isEmpty()) {
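Why the validation above also walks getSuppressed(): a composite settings validator can report every offending setting by attaching each individual failure to one top-level IllegalArgumentException. A self-contained sketch of that unwrapping, with hypothetical messages:
import java.util.ArrayList;
import java.util.List;
public class SuppressedValidationSketch {
    public static void main(String[] args) {
        IllegalArgumentException top = new IllegalArgumentException("validation errors");
        top.addSuppressed(new IllegalArgumentException("unknown setting [index.foo]"));
        top.addSuppressed(new IllegalArgumentException("unknown setting [index.bar]"));
        List<String> validationErrors = new ArrayList<>();
        validationErrors.add(top.getMessage());
        for (Throwable t : top.getSuppressed()) {
            validationErrors.add(t.getMessage());
        }
        // prints all three messages rather than only the first failure
        validationErrors.forEach(System.out::println);
    }
}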

View File

@ -96,7 +96,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
* @param version the version of the node
*/
public DiscoveryNode(final String id, TransportAddress address, Version version) {
this(id, address, Collections.emptyMap(), Collections.emptySet(), version);
this(id, address, Collections.emptyMap(), EnumSet.allOf(Role.class), version);
}
/**

View File

@ -19,8 +19,15 @@
package org.elasticsearch.common.geo;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
import java.util.Arrays;
import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
@ -88,6 +95,24 @@ public final class GeoPoint {
return this;
}
// todo this is a crutch because LatLonPoint doesn't have a helper for returning .stringValue()
// todo remove with next release of lucene
public GeoPoint resetFromIndexableField(IndexableField field) {
if (field instanceof LatLonPoint) {
BytesRef br = field.binaryValue();
byte[] bytes = Arrays.copyOfRange(br.bytes, br.offset, br.offset + br.length); // copyOfRange takes an exclusive end index, not a length
return this.reset(
GeoEncodingUtils.decodeLatitude(bytes, 0),
GeoEncodingUtils.decodeLongitude(bytes, Integer.BYTES));
} else if (field instanceof LatLonDocValuesField) {
long encoded = (long)(field.numericValue());
return this.reset(
GeoEncodingUtils.decodeLatitude((int)(encoded >>> 32)),
GeoEncodingUtils.decodeLongitude((int)encoded));
}
return resetFromIndexHash(Long.parseLong(field.stringValue()));
}
public GeoPoint resetFromGeoHash(String geohash) {
final long hash = mortonEncode(geohash);
return this.reset(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));
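The doc-values decode path above relies on how Lucene 6 packs a point into a single long: the quantized latitude in the upper 32 bits, the quantized longitude in the lower 32. A minimal round-trip sketch, assuming org.apache.lucene.geo.GeoEncodingUtils is available:
import org.apache.lucene.geo.GeoEncodingUtils;
public class LatLonPackingSketch {
    public static void main(String[] args) {
        double lat = 48.8584, lon = 2.2945;
        // pack: latitude in the upper 32 bits, longitude in the lower 32
        long encoded = (((long) GeoEncodingUtils.encodeLatitude(lat)) << 32)
                | (GeoEncodingUtils.encodeLongitude(lon) & 0xFFFFFFFFL);
        // unpack, exactly as resetFromIndexableField does for LatLonDocValuesField
        double decodedLat = GeoEncodingUtils.decodeLatitude((int) (encoded >>> 32));
        double decodedLon = GeoEncodingUtils.decodeLongitude((int) encoded);
        // matches the input to within the ~1e-7 degree encoding precision
        System.out.println(decodedLat + ", " + decodedLon);
    }
}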

View File

@ -22,17 +22,18 @@ package org.elasticsearch.common.logging;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.MessageFactory;
import org.apache.logging.log4j.spi.ExtendedLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import java.util.Locale;
import java.util.function.Function;
/**
* Factory to get {@link Logger}s
*/
public abstract class ESLoggerFactory {
public final class ESLoggerFactory {
private ESLoggerFactory() {
}
public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope);
@ -41,40 +42,27 @@ public abstract class ESLoggerFactory {
Property.Dynamic, Property.NodeScope);
public static Logger getLogger(String prefix, String name) {
name = name.intern();
final Logger logger = getLogger(new PrefixMessageFactory(), name);
final MessageFactory factory = logger.getMessageFactory();
// in some cases, we initialize the logger before we are ready to set the prefix
// we can not re-initialize the logger, so the above getLogger might return an existing
// instance without the prefix set; thus, we hack around this by resetting the prefix
if (prefix != null && factory instanceof PrefixMessageFactory) {
((PrefixMessageFactory) factory).setPrefix(prefix.intern());
}
return logger;
return getLogger(prefix, LogManager.getLogger(name));
}
public static Logger getLogger(MessageFactory messageFactory, String name) {
return LogManager.getLogger(name, messageFactory);
public static Logger getLogger(String prefix, Class<?> clazz) {
return getLogger(prefix, LogManager.getLogger(clazz));
}
public static Logger getLogger(String prefix, Logger logger) {
return new PrefixLogger((ExtendedLogger)logger, logger.getName(), prefix);
}
public static Logger getLogger(Class<?> clazz) {
return getLogger(null, clazz);
}
public static Logger getLogger(String name) {
return getLogger((String)null, name);
}
public static DeprecationLogger getDeprecationLogger(String name) {
return new DeprecationLogger(getLogger(name));
}
public static DeprecationLogger getDeprecationLogger(String prefix, String name) {
return new DeprecationLogger(getLogger(prefix, name));
return getLogger(null, name);
}
public static Logger getRootLogger() {
return LogManager.getRootLogger();
}
private ESLoggerFactory() {
// Utility class can't be built.
}
}

View File

@ -30,7 +30,6 @@ import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
@ -44,7 +43,6 @@ import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
@ -81,13 +79,14 @@ public class LogConfigurator {
}
if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings);
Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
}
final Map<String, String> levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap();
for (String key : levels.keySet()) {
final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings);
Loggers.setLevel(Loggers.getLogger(key.substring("logger.".length())), level);
Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level);
}
}

View File

@ -35,10 +35,12 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.Node;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import static java.util.Arrays.asList;
import static javax.security.auth.login.Configuration.getConfiguration;
import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
/**
@ -46,24 +48,8 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
*/
public class Loggers {
static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
public static final String SPACE = " ";
private static boolean consoleLoggingEnabled = true;
public static void disableConsoleLogging() {
consoleLoggingEnabled = false;
}
public static void enableConsoleLogging() {
consoleLoggingEnabled = true;
}
public static boolean consoleLoggingEnabled() {
return consoleLoggingEnabled;
}
public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
}
@ -82,10 +68,16 @@ public class Loggers {
}
public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), settings, prefixes);
final List<String> prefixesList = prefixesList(settings, prefixes);
return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
}
public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
final List<String> prefixesList = prefixesList(settings, prefixes);
return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
}
private static List<String> prefixesList(Settings settings, String... prefixes) {
List<String> prefixesList = new ArrayList<>();
if (Node.NODE_NAME_SETTING.exists(settings)) {
prefixesList.add(Node.NODE_NAME_SETTING.get(settings));
@ -93,26 +85,31 @@ public class Loggers {
if (prefixes != null && prefixes.length > 0) {
prefixesList.addAll(asList(prefixes));
}
return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()]));
return prefixesList;
}
public static Logger getLogger(Logger parentLogger, String s) {
return ESLoggerFactory.getLogger(parentLogger.<MessageFactory>getMessageFactory(), getLoggerName(parentLogger.getName() + s));
assert parentLogger instanceof PrefixLogger;
return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
}
public static Logger getLogger(String s) {
return ESLoggerFactory.getLogger(getLoggerName(s));
return ESLoggerFactory.getLogger(s);
}
public static Logger getLogger(Class<?> clazz) {
return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
return ESLoggerFactory.getLogger(clazz);
}
public static Logger getLogger(Class<?> clazz, String... prefixes) {
return getLogger(buildClassLoggerName(clazz), prefixes);
return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
}
public static Logger getLogger(String name, String... prefixes) {
return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
}
private static String formatPrefix(String... prefixes) {
String prefix = null;
if (prefixes != null && prefixes.length > 0) {
StringBuilder sb = new StringBuilder();
@ -130,7 +127,7 @@ public class Loggers {
prefix = sb.toString();
}
}
return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
return prefix;
}
/**
@ -148,30 +145,23 @@ public class Loggers {
}
public static void setLevel(Logger logger, Level level) {
if (!"".equals(logger.getName())) {
if (!LogManager.ROOT_LOGGER_NAME.equals(logger.getName())) {
Configurator.setLevel(logger.getName(), level);
} else {
LoggerContext ctx = LoggerContext.getContext(false);
Configuration config = ctx.getConfiguration();
LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
final LoggerContext ctx = LoggerContext.getContext(false);
final Configuration config = ctx.getConfiguration();
final LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
loggerConfig.setLevel(level);
ctx.updateLoggers();
}
}
private static String buildClassLoggerName(Class<?> clazz) {
String name = clazz.getName();
if (name.startsWith("org.elasticsearch.")) {
name = Classes.getPackageName(clazz);
// we have to descend the hierarchy
final LoggerContext ctx = LoggerContext.getContext(false);
for (final LoggerConfig loggerConfig : ctx.getConfiguration().getLoggers().values()) {
if (LogManager.ROOT_LOGGER_NAME.equals(logger.getName()) || loggerConfig.getName().startsWith(logger.getName() + ".")) {
Configurator.setLevel(loggerConfig.getName(), level);
}
}
return name;
}
private static String getLoggerName(String name) {
if (name.startsWith("org.elasticsearch.")) {
name = name.substring("org.elasticsearch.".length());
}
return commonPrefix + name;
}
public static void addAppender(final Logger logger, final Appender appender) {

View File

@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;
import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.spi.ExtendedLogger;
import org.apache.logging.log4j.spi.ExtendedLoggerWrapper;
import java.lang.ref.WeakReference;
import java.util.WeakHashMap;
class PrefixLogger extends ExtendedLoggerWrapper {
// we can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds
// a permanent reference to the marker; however, we have transient markers from index-level and
// shard-level components so this would effectively be a memory leak
private static final WeakHashMap<String, WeakReference<Marker>> markers = new WeakHashMap<>();
private final Marker marker;
public String prefix() {
return marker.getName();
}
PrefixLogger(final ExtendedLogger logger, final String name, final String prefix) {
super(logger, name, null);
final String actualPrefix = (prefix == null ? "" : prefix).intern();
final Marker actualMarker;
// markers is not thread-safe, so we synchronize access
synchronized (markers) {
final WeakReference<Marker> marker = markers.get(actualPrefix);
final Marker maybeMarker = marker == null ? null : marker.get();
if (maybeMarker == null) {
actualMarker = new MarkerManager.Log4jMarker(actualPrefix);
markers.put(actualPrefix, new WeakReference<>(actualMarker));
} else {
actualMarker = maybeMarker;
}
}
this.marker = actualMarker;
}
@Override
public void logMessage(final String fqcn, final Level level, final Marker marker, final Message message, final Throwable t) {
assert marker == null;
super.logMessage(fqcn, level, this.marker, message, t);
}
}
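The marker cache above is an instance of a general weak-key/weak-value idiom: WeakHashMap only weakens the keys, so the values are additionally wrapped in WeakReference; otherwise a strongly held value that (indirectly) references its own key would keep the entry alive forever. A JDK-only sketch of the same pattern, with a hypothetical class name:
import java.lang.ref.WeakReference;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.function.Function;
final class WeakValueCache<K, V> {
    private final Map<K, WeakReference<V>> cache = new WeakHashMap<>();
    // WeakHashMap is not thread-safe, so access is synchronized as in PrefixLogger
    synchronized V get(K key, Function<K, V> factory) {
        WeakReference<V> ref = cache.get(key);
        V value = ref == null ? null : ref.get();
        if (value == null) {
            value = factory.apply(key);
            cache.put(key, new WeakReference<>(value));
        }
        return value;
    }
}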

View File

@ -1,221 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.message.MessageFactory2;
import org.apache.logging.log4j.message.ObjectMessage;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.message.SimpleMessage;
public class PrefixMessageFactory implements MessageFactory2 {
private String prefix = "";
public String getPrefix() {
return prefix;
}
public void setPrefix(String prefix) {
this.prefix = prefix;
}
@Override
public Message newMessage(Object message) {
return new PrefixObjectMessage(prefix, message);
}
private static class PrefixObjectMessage extends ObjectMessage {
private final String prefix;
private final Object object;
private String prefixObjectString;
private PrefixObjectMessage(String prefix, Object object) {
super(object);
this.prefix = prefix;
this.object = object;
}
@Override
public String getFormattedMessage() {
if (prefixObjectString == null) {
prefixObjectString = prefix + super.getFormattedMessage();
}
return prefixObjectString;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
@Override
public Object[] getParameters() {
return new Object[]{prefix, object};
}
}
@Override
public Message newMessage(String message) {
return new PrefixSimpleMessage(prefix, message);
}
private static class PrefixSimpleMessage extends SimpleMessage {
private final String prefix;
private String prefixMessage;
PrefixSimpleMessage(String prefix, String message) {
super(message);
this.prefix = prefix;
}
PrefixSimpleMessage(String prefix, CharSequence charSequence) {
super(charSequence);
this.prefix = prefix;
}
@Override
public String getFormattedMessage() {
if (prefixMessage == null) {
prefixMessage = prefix + super.getFormattedMessage();
}
return prefixMessage;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
@Override
public int length() {
return prefixMessage.length();
}
@Override
public char charAt(int index) {
return prefixMessage.charAt(index);
}
@Override
public CharSequence subSequence(int start, int end) {
return prefixMessage.subSequence(start, end);
}
}
@Override
public Message newMessage(String message, Object... params) {
return new PrefixParameterizedMessage(prefix, message, params);
}
private static class PrefixParameterizedMessage extends ParameterizedMessage {
private static ThreadLocal<StringBuilder> threadLocalStringBuilder = ThreadLocal.withInitial(StringBuilder::new);
private final String prefix;
private String formattedMessage;
private PrefixParameterizedMessage(String prefix, String messagePattern, Object... arguments) {
super(messagePattern, arguments);
this.prefix = prefix;
}
@Override
public String getFormattedMessage() {
if (formattedMessage == null) {
final StringBuilder buffer = threadLocalStringBuilder.get();
buffer.setLength(0);
formatTo(buffer);
formattedMessage = buffer.toString();
}
return formattedMessage;
}
@Override
public void formatTo(StringBuilder buffer) {
buffer.append(prefix);
super.formatTo(buffer);
}
}
@Override
public Message newMessage(CharSequence charSequence) {
return new PrefixSimpleMessage(prefix, charSequence);
}
@Override
public Message newMessage(String message, Object p0) {
return new PrefixParameterizedMessage(prefix, message, p0);
}
@Override
public Message newMessage(String message, Object p0, Object p1) {
return new PrefixParameterizedMessage(prefix, message, p0, p1);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6);
}
@Override
public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7);
}
@Override
public Message newMessage(
String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8);
}
@Override
public Message newMessage(
String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8, Object p9) {
return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
}
}

View File

@ -25,19 +25,12 @@ import org.elasticsearch.common.logging.Loggers;
/** An InfoStream (for Lucene's IndexWriter) that redirects
* messages to "lucene.iw.ifd" and "lucene.iw" Logger.trace. */
public final class LoggerInfoStream extends InfoStream {
/** Used for component-specific logging: */
/** Logger for everything */
private final Logger logger;
private final Logger parentLogger;
/** Logger for IndexFileDeleter */
private final Logger ifdLogger;
public LoggerInfoStream(Logger parentLogger) {
logger = Loggers.getLogger(parentLogger, ".lucene.iw");
ifdLogger = Loggers.getLogger(parentLogger, ".lucene.iw.ifd");
public LoggerInfoStream(final Logger parentLogger) {
this.parentLogger = parentLogger;
}
@Override
@ -53,14 +46,11 @@ public final class LoggerInfoStream extends InfoStream {
}
private Logger getLogger(String component) {
if (component.equals("IFD")) {
return ifdLogger;
} else {
return logger;
}
return Loggers.getLogger(parentLogger, "." + component);
}
@Override
public void close() {
}
}

View File

@ -32,6 +32,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
/**
* Utilities for network interfaces / addresses binding and publishing.
@ -227,14 +228,15 @@ public abstract class NetworkUtils {
/** Returns addresses for the given interface (it must be marked up) */
static InetAddress[] getAddressesForInterface(String name) throws SocketException {
NetworkInterface intf = NetworkInterface.getByName(name);
if (intf == null) {
Optional<NetworkInterface> networkInterface = getInterfaces().stream().filter((netIf) -> name.equals(netIf.getName())).findFirst();
if (networkInterface.isPresent() == false) {
throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces());
}
if (!intf.isUp()) {
if (!networkInterface.get().isUp()) {
throw new IllegalArgumentException("Interface '" + name + "' is not up and running");
}
List<InetAddress> list = Collections.list(intf.getInetAddresses());
List<InetAddress> list = Collections.list(networkInterface.get().getInetAddresses());
if (list.isEmpty()) {
throw new IllegalArgumentException("Interface '" + name + "' has no internet addresses");
}

View File

@ -284,8 +284,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
protected void doClose() {
masterFD.close();
nodesFD.close();
publishClusterState.close();
membership.close();
pingService.close();
}

View File

@ -168,7 +168,6 @@ public class MasterFaultDetection extends FaultDetection {
super.close();
stop("closing");
this.listeners.clear();
transportService.removeHandler(MASTER_PING_ACTION_NAME);
}
@Override

View File

@ -139,7 +139,6 @@ public class NodesFaultDetection extends FaultDetection {
public void close() {
super.close();
stop();
transportService.removeHandler(PING_ACTION_NAME);
}
@Override

View File

@ -76,12 +76,6 @@ public class MembershipAction extends AbstractComponent {
transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler());
}
public void close() {
transportService.removeHandler(DISCOVERY_JOIN_ACTION_NAME);
transportService.removeHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME);
transportService.removeHandler(DISCOVERY_LEAVE_ACTION_NAME);
}
public void sendLeaveRequest(DiscoveryNode masterNode, DiscoveryNode node) {
transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME);
}

View File

@ -160,18 +160,10 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
}
logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
List<DiscoveryNode> configuredTargetNodes = new ArrayList<>();
for (String host : hosts) {
try {
TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
for (TransportAddress address : addresses) {
configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#",
address, emptyMap(), emptySet(), getVersion().minimumCompatibilityVersion()));
}
} catch (Exception e) {
throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
}
for (final String host : hosts) {
configuredTargetNodes.addAll(resolveDiscoveryNodes(host, limitPortCounts, transportService,
() -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#"));
}
this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]);
@ -183,6 +175,32 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
threadFactory, threadPool.getThreadContext());
}
/**
* Resolves a host to a list of discovery nodes. The host is resolved into a transport
* address (or a collection of addresses if the number of ports is greater than one) and
* the transport addresses are used to create discovery nodes.
*
* @param host the host to resolve
* @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)
* @param transportService the transport service
* @param idGenerator the generator to supply unique ids for each discovery node
* @return a list of discovery nodes with resolved transport addresses
*/
public static List<DiscoveryNode> resolveDiscoveryNodes(final String host, final int limitPortCounts,
final TransportService transportService, final Supplier<String> idGenerator) {
List<DiscoveryNode> discoveryNodes = new ArrayList<>();
try {
TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
for (TransportAddress address : addresses) {
discoveryNodes.add(new DiscoveryNode(idGenerator.get(), address, emptyMap(), emptySet(),
Version.CURRENT.minimumCompatibilityVersion()));
}
} catch (Exception e) {
throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
}
return discoveryNodes;
}
@Override
protected void doStart() {
}
@ -193,7 +211,6 @@ public class UnicastZenPing extends AbstractLifecycleComponent implements ZenPin
@Override
protected void doClose() {
transportService.removeHandler(ACTION_NAME);
ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS);
try {
IOUtils.close(receivedResponses.values());
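A hedged usage sketch of the newly extracted helper, assuming an initialized TransportService; the host, port count, and id prefix are illustrative rather than taken from the commit:
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
// DiscoveryNode, TransportService and UnicastZenPing come from the surrounding codebase
class SeedResolutionSketch {
    // resolves one configured seed host into discovery nodes with unique synthetic ids
    static List<DiscoveryNode> seedNodes(TransportService transportService) {
        final AtomicInteger ids = new AtomicInteger();
        return UnicastZenPing.resolveDiscoveryNodes("10.0.0.5:9300", 1, transportService,
                () -> "#zen_unicast_" + ids.incrementAndGet() + "#");
    }
}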

View File

@ -107,11 +107,6 @@ public class PublishClusterStateAction extends AbstractComponent {
transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, new CommitClusterStateRequestHandler());
}
public void close() {
transportService.removeHandler(SEND_ACTION_NAME);
transportService.removeHandler(COMMIT_ACTION_NAME);
}
public PendingClusterStatesQueue pendingStatesQueue() {
return pendingStatesQueue;
}

View File

@ -789,30 +789,30 @@ public class InternalEngine extends Engine {
} catch (Exception e) {
throw new FlushFailedEngineException(shardId, e);
}
}
/*
* we have to inc-ref the store here since if the engine is closed by a tragic event
* we don't acquire the write lock and wait until we have exclusive access. This might also
* dec the store reference which can essentially close the store and unless we can inc the reference
* we can't use it.
*/
store.incRef();
try {
// reread the last committed segment infos
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
} catch (Exception e) {
if (isClosed.get() == false) {
try {
logger.warn("failed to read latest segment infos on flush", e);
} catch (Exception inner) {
e.addSuppressed(inner);
}
if (Lucene.isCorruptionException(e)) {
throw new FlushFailedEngineException(shardId, e);
/*
* we have to inc-ref the store here since if the engine is closed by a tragic event
* we don't acquire the write lock and wait until we have exclusive access. This might also
* dec the store reference which can essentially close the store and unless we can inc the reference
* we can't use it.
*/
store.incRef();
try {
// reread the last committed segment infos
lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
} catch (Exception e) {
if (isClosed.get() == false) {
try {
logger.warn("failed to read latest segment infos on flush", e);
} catch (Exception inner) {
e.addSuppressed(inner);
}
if (Lucene.isCorruptionException(e)) {
throw new FlushFailedEngineException(shardId, e);
}
}
} finally {
store.decRef();
}
} finally {
store.decRef();
}
newCommitId = lastCommittedSegmentInfos.getId();
} catch (FlushFailedEngineException ex) {

View File

@ -0,0 +1,98 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.MultiValueMode;
import java.io.IOException;
public abstract class AbstractLatLonPointDVIndexFieldData extends DocValuesIndexFieldData
implements IndexGeoPointFieldData {
AbstractLatLonPointDVIndexFieldData(Index index, String fieldName) {
super(index, fieldName);
}
@Override
public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode,
XFieldComparatorSource.Nested nested) {
throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
}
public static class LatLonPointDVIndexFieldData extends AbstractLatLonPointDVIndexFieldData {
public LatLonPointDVIndexFieldData(Index index, String fieldName) {
super(index, fieldName);
}
@Override
public AtomicGeoPointFieldData load(LeafReaderContext context) {
try {
LeafReader reader = context.reader();
FieldInfo info = reader.getFieldInfos().fieldInfo(fieldName);
if (info != null) {
checkCompatible(info);
}
return new LatLonPointDVAtomicFieldData(DocValues.getSortedNumeric(reader, fieldName));
} catch (IOException e) {
throw new IllegalStateException("Cannot load doc values", e);
}
}
@Override
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
return load(context);
}
/** helper: checks a fieldinfo and throws exception if its definitely not a LatLonDocValuesField */
static void checkCompatible(FieldInfo fieldInfo) {
// dv properties could be "unset", if you e.g. used only StoredField with this same name in the segment.
if (fieldInfo.getDocValuesType() != DocValuesType.NONE
&& fieldInfo.getDocValuesType() != LatLonDocValuesField.TYPE.docValuesType()) {
throw new IllegalArgumentException("field=\"" + fieldInfo.name + "\" was indexed with docValuesType="
+ fieldInfo.getDocValuesType() + " but this type has docValuesType="
+ LatLonDocValuesField.TYPE.docValuesType() + ", is the field really a LatLonDocValuesField?");
}
}
}
public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
// ignore breaker
return new LatLonPointDVIndexFieldData(indexSettings.getIndex(), fieldType.name());
}
}
}

View File

@ -0,0 +1,91 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
final class LatLonPointDVAtomicFieldData extends AbstractAtomicGeoPointFieldData {
private final SortedNumericDocValues values;
LatLonPointDVAtomicFieldData(SortedNumericDocValues values) {
super();
this.values = values;
}
@Override
public long ramBytesUsed() {
return 0; // not exposed by lucene
}
@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
@Override
public void close() {
// noop
}
@Override
public MultiGeoPointValues getGeoPointValues() {
return new MultiGeoPointValues() {
GeoPoint[] points = new GeoPoint[0];
private int count = 0;
@Override
public void setDocument(int docId) {
values.setDocument(docId);
count = values.count();
if (count > points.length) {
final int previousLength = points.length;
points = Arrays.copyOf(points, ArrayUtil.oversize(count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
for (int i = previousLength; i < points.length; ++i) {
points[i] = new GeoPoint(Double.NaN, Double.NaN);
}
}
long encoded;
for (int i=0; i<count; ++i) {
encoded = values.valueAt(i);
points[i].reset(GeoEncodingUtils.decodeLatitude((int)(encoded >>> 32)), GeoEncodingUtils.decodeLongitude((int)encoded));
}
}
@Override
public int count() {
return count;
}
@Override
public GeoPoint valueAt(int index) {
return points[index];
}
};
}
}
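The setDocument implementation above reuses its GeoPoint[] across documents and only grows it via ArrayUtil.oversize, which over-allocates so that repeated small growths do not trigger an allocation per document. A minimal sketch of that grow-and-reuse pattern, assuming Lucene's ArrayUtil; the buffer class is hypothetical:
import java.util.Arrays;
import org.apache.lucene.util.ArrayUtil;
final class ReusableDoubleBuffer {
    private double[] values = new double[0];
    private int count;
    void resize(int newCount) {
        count = newCount;
        if (newCount > values.length) {
            // oversize() returns a capacity >= newCount with amortized exponential growth
            values = Arrays.copyOf(values, ArrayUtil.oversize(newCount, Double.BYTES));
        }
    }
    double get(int index) {
        assert index < count;
        return values[index];
    }
}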

View File

@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.search.lookup.SourceLookup;
import java.io.IOException;
@ -229,7 +230,7 @@ public class GetResult implements Streamable, Iterable<GetField>, ToXContent {
builder.field(Fields.FOUND, exists);
if (source != null) {
XContentHelper.writeRawField("_source", source, builder, params);
XContentHelper.writeRawField(SourceFieldMapper.NAME, source, builder, params);
}
if (!otherFields.isEmpty()) {

View File

@ -89,7 +89,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
protected Boolean ignoreMalformed;
public Builder(String name, GeoPointFieldType fieldType) {
public Builder(String name, MappedFieldType fieldType) {
super(name, fieldType, fieldType);
}
@ -143,7 +143,16 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
public Y build(Mapper.BuilderContext context) {
GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;
// version 5.0 cuts over to LatLonPoint and no longer indexes geohash, or lat/lon separately
if (context.indexCreatedVersion().before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
return buildLegacy(context);
}
return build(context, name, fieldType, defaultFieldType, context.indexSettings(),
null, null, null, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
}
private Y buildLegacy(Mapper.BuilderContext context) {
LegacyGeoPointFieldType geoPointFieldType = (LegacyGeoPointFieldType)fieldType;
FieldMapper latMapper = null;
FieldMapper lonMapper = null;
@ -161,9 +170,9 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
lonMapper = (LegacyDoubleFieldMapper) lonMapperBuilder.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
} else {
latMapper = new NumberFieldMapper.Builder(Names.LAT, NumberFieldMapper.NumberType.DOUBLE)
.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
lonMapper = new NumberFieldMapper.Builder(Names.LON, NumberFieldMapper.NumberType.DOUBLE)
.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
.includeInAll(false).store(fieldType.stored()).docValues(false).build(context);
}
geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
}
@ -183,7 +192,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
context.path().remove();
return build(context, name, fieldType, defaultFieldType, context.indexSettings(),
latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
}
}
@ -191,8 +200,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
Builder builder;
if (parserContext.indexVersionCreated().before(Version.V_2_2_0)) {
Version indexVersionCreated = parserContext.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) {
builder = new LegacyGeoPointFieldMapper.Builder(name);
} else if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
builder = new LatLonPointFieldMapper.Builder(name);
} else {
builder = new GeoPointFieldMapper.Builder(name);
}
@ -202,39 +214,43 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
Map.Entry<String, Object> entry = iterator.next();
String propName = entry.getKey();
Object propNode = entry.getValue();
if (propName.equals("lat_lon")) {
deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed "
+ "in the next major release");
builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode));
iterator.remove();
} else if (propName.equals("precision_step")) {
deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed "
+ "in the next major release");
builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("geohash")) {
deprecationLogger.deprecated(CONTENT_TYPE + " geohash parameter is deprecated and will be removed "
+ "in the next major release");
builder.enableGeoHash(XContentMapValues.lenientNodeBooleanValue(propNode));
iterator.remove();
} else if (propName.equals("geohash_prefix")) {
deprecationLogger.deprecated(CONTENT_TYPE + " geohash_prefix parameter is deprecated and will be removed "
+ "in the next major release");
builder.geoHashPrefix(XContentMapValues.lenientNodeBooleanValue(propNode));
if (XContentMapValues.lenientNodeBooleanValue(propNode)) {
builder.enableGeoHash(true);
if (indexVersionCreated.before(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
if (propName.equals("lat_lon")) {
deprecationLogger.deprecated(CONTENT_TYPE + " lat_lon parameter is deprecated and will be removed "
+ "in the next major release");
builder.enableLatLon(XContentMapValues.lenientNodeBooleanValue(propNode));
iterator.remove();
} else if (propName.equals("precision_step")) {
deprecationLogger.deprecated(CONTENT_TYPE + " precision_step parameter is deprecated and will be removed "
+ "in the next major release");
builder.precisionStep(XContentMapValues.nodeIntegerValue(propNode));
iterator.remove();
} else if (propName.equals("geohash")) {
deprecationLogger.deprecated(CONTENT_TYPE + " geohash parameter is deprecated and will be removed "
+ "in the next major release");
builder.enableGeoHash(XContentMapValues.lenientNodeBooleanValue(propNode));
iterator.remove();
} else if (propName.equals("geohash_prefix")) {
deprecationLogger.deprecated(CONTENT_TYPE + " geohash_prefix parameter is deprecated and will be removed "
+ "in the next major release");
builder.geoHashPrefix(XContentMapValues.lenientNodeBooleanValue(propNode));
if (XContentMapValues.lenientNodeBooleanValue(propNode)) {
builder.enableGeoHash(true);
}
iterator.remove();
} else if (propName.equals("geohash_precision")) {
deprecationLogger.deprecated(CONTENT_TYPE + " geohash_precision parameter is deprecated and will be removed "
+ "in the next major release");
if (propNode instanceof Integer) {
builder.geoHashPrecision(XContentMapValues.nodeIntegerValue(propNode));
} else {
builder.geoHashPrecision(GeoUtils.geoHashLevelsForPrecision(propNode.toString()));
}
iterator.remove();
}
iterator.remove();
} else if (propName.equals("geohash_precision")) {
deprecationLogger.deprecated(CONTENT_TYPE + " geohash_precision parameter is deprecated and will be removed "
+ "in the next major release");
if (propNode instanceof Integer) {
builder.geoHashPrecision(XContentMapValues.nodeIntegerValue(propNode));
} else {
builder.geoHashPrecision(GeoUtils.geoHashLevelsForPrecision(propNode.toString()));
}
iterator.remove();
} else if (propName.equals(Names.IGNORE_MALFORMED)) {
}
if (propName.equals(Names.IGNORE_MALFORMED)) {
builder.ignoreMalformed(XContentMapValues.lenientNodeBooleanValue(propNode));
iterator.remove();
}
@ -242,13 +258,29 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
if (builder instanceof LegacyGeoPointFieldMapper.Builder) {
return LegacyGeoPointFieldMapper.parse((LegacyGeoPointFieldMapper.Builder) builder, node, parserContext);
} else if (builder instanceof LatLonPointFieldMapper.Builder) {
return (LatLonPointFieldMapper.Builder) builder;
}
return (GeoPointFieldMapper.Builder) builder;
}
}
public static class GeoPointFieldType extends MappedFieldType {
public abstract static class GeoPointFieldType extends MappedFieldType {
GeoPointFieldType() {
}
GeoPointFieldType(GeoPointFieldType ref) {
super(ref);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
}
public static class LegacyGeoPointFieldType extends GeoPointFieldType {
protected MappedFieldType geoHashFieldType;
protected int geoHashPrecision;
protected boolean geoHashPrefixEnabled;
@ -256,9 +288,9 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
protected MappedFieldType latFieldType;
protected MappedFieldType lonFieldType;
GeoPointFieldType() {}
LegacyGeoPointFieldType() {}
GeoPointFieldType(GeoPointFieldType ref) {
LegacyGeoPointFieldType(LegacyGeoPointFieldType ref) {
super(ref);
this.geoHashFieldType = ref.geoHashFieldType; // copying ref is ok, this can never be modified
this.geoHashPrecision = ref.geoHashPrecision;
@ -269,13 +301,13 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public MappedFieldType clone() {
return new GeoPointFieldType(this);
return new LegacyGeoPointFieldType(this);
}
@Override
public boolean equals(Object o) {
if (!super.equals(o)) return false;
GeoPointFieldType that = (GeoPointFieldType) o;
LegacyGeoPointFieldType that = (LegacyGeoPointFieldType) o;
return geoHashPrecision == that.geoHashPrecision &&
geoHashPrefixEnabled == that.geoHashPrefixEnabled &&
java.util.Objects.equals(geoHashFieldType, that.geoHashFieldType) &&
@ -289,15 +321,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
lonFieldType);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
GeoPointFieldType other = (GeoPointFieldType)fieldType;
LegacyGeoPointFieldType other = (LegacyGeoPointFieldType)fieldType;
if (isLatLonEnabled() != other.isLatLonEnabled()) {
conflicts.add("mapper [" + name() + "] has different [lat_lon]");
}
@ -398,9 +425,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
this.ignoreMalformed = ignoreMalformed;
}
@Override
public GeoPointFieldType fieldType() {
return (GeoPointFieldType) super.fieldType();
public LegacyGeoPointFieldType legacyFieldType() {
return (LegacyGeoPointFieldType) super.fieldType();
}
@Override
@ -414,15 +442,22 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public Iterator<Mapper> iterator() {
if (this instanceof LatLonPointFieldMapper == false) {
return Iterators.concat(super.iterator(), legacyIterator());
}
return super.iterator();
}
public Iterator<Mapper> legacyIterator() {
List<Mapper> extras = new ArrayList<>();
if (fieldType().isGeoHashEnabled()) {
if (legacyFieldType().isGeoHashEnabled()) {
extras.add(geoHashMapper);
}
if (fieldType().isLatLonEnabled()) {
if (legacyFieldType().isLatLonEnabled()) {
extras.add(latMapper);
extras.add(lonMapper);
}
return Iterators.concat(super.iterator(), extras.iterator());
return extras.iterator();
}
@Override
@ -436,13 +471,13 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
protected void parse(ParseContext context, GeoPoint point, String geoHash) throws IOException {
if (fieldType().isGeoHashEnabled()) {
if (legacyFieldType().isGeoHashEnabled()) {
if (geoHash == null) {
geoHash = GeoHashUtils.stringEncode(point.lon(), point.lat());
}
addGeoHashField(context, geoHash);
}
if (fieldType().isLatLonEnabled()) {
if (legacyFieldType().isLatLonEnabled()) {
latMapper.parse(context.createExternalValueContext(point.lat()));
lonMapper.parse(context.createExternalValueContext(point.lon()));
}
@ -517,8 +552,9 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
private void addGeoHashField(ParseContext context, String geoHash) throws IOException {
int len = Math.min(fieldType().geoHashPrecision(), geoHash.length());
int min = fieldType().isGeoHashPrefixEnabled() ? 1 : len;
LegacyGeoPointFieldType ft = (LegacyGeoPointFieldType)fieldType;
int len = Math.min(ft.geoHashPrecision(), geoHash.length());
int min = ft.isGeoHashPrefixEnabled() ? 1 : len;
for (int i = len; i >= min; i--) {
// side effect of this call is adding the field
@ -537,26 +573,33 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
builder.field("lat_lon", fieldType().isLatLonEnabled());
}
if (fieldType().isLatLonEnabled() && (includeDefaults || fieldType().latFieldType().numericPrecisionStep() != LegacyNumericUtils.PRECISION_STEP_DEFAULT)) {
builder.field("precision_step", fieldType().latFieldType().numericPrecisionStep());
}
if (includeDefaults || fieldType().isGeoHashEnabled() != Defaults.ENABLE_GEOHASH) {
builder.field("geohash", fieldType().isGeoHashEnabled());
}
if (includeDefaults || fieldType().isGeoHashPrefixEnabled() != Defaults.ENABLE_GEOHASH_PREFIX) {
builder.field("geohash_prefix", fieldType().isGeoHashPrefixEnabled());
}
if (fieldType().isGeoHashEnabled() && (includeDefaults || fieldType().geoHashPrecision() != Defaults.GEO_HASH_PRECISION)) {
builder.field("geohash_precision", fieldType().geoHashPrecision());
if (this instanceof LatLonPointFieldMapper == false) {
legacyDoXContentBody(builder, includeDefaults, params);
}
if (includeDefaults || ignoreMalformed.explicit()) {
builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value());
}
}
protected void legacyDoXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
LegacyGeoPointFieldType ft = (LegacyGeoPointFieldType) fieldType;
if (includeDefaults || ft.isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
builder.field("lat_lon", ft.isLatLonEnabled());
}
if (ft.isLatLonEnabled() && (includeDefaults || ft.latFieldType().numericPrecisionStep() != LegacyNumericUtils.PRECISION_STEP_DEFAULT)) {
builder.field("precision_step", ft.latFieldType().numericPrecisionStep());
}
if (includeDefaults || ft.isGeoHashEnabled() != Defaults.ENABLE_GEOHASH) {
builder.field("geohash", ft.isGeoHashEnabled());
}
if (includeDefaults || ft.isGeoHashPrefixEnabled() != Defaults.ENABLE_GEOHASH_PREFIX) {
builder.field("geohash_prefix", ft.isGeoHashPrefixEnabled());
}
if (ft.isGeoHashEnabled() && (includeDefaults || ft.geoHashPrecision() != Defaults.GEO_HASH_PRECISION)) {
builder.field("geohash_precision", ft.geoHashPrecision());
}
}
@Override
public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
BaseGeoPointFieldMapper updated = (BaseGeoPointFieldMapper) super.updateFieldType(fullNameToFieldType);

View File

@ -48,7 +48,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
public static class Defaults extends BaseGeoPointFieldMapper.Defaults {
public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType();
public static final GeoPointFieldType FIELD_TYPE = new LegacyGeoPointFieldType();
static {
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
@ -130,4 +130,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
}
super.parse(context, point, geoHash);
}
@Override
public LegacyGeoPointFieldType fieldType() {
return (LegacyGeoPointFieldType) super.fieldType();
}
}

View File

@ -0,0 +1,156 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.plain.AbstractLatLonPointDVIndexFieldData;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
/**
* Field Mapper for geo_point types.
*
* Uses lucene 6 LatLonPoint encoding
*/
public class LatLonPointFieldMapper extends BaseGeoPointFieldMapper {
public static final String CONTENT_TYPE = "geo_point";
public static final Version LAT_LON_FIELD_VERSION = Version.V_5_0_0_alpha6;
public static class Defaults extends BaseGeoPointFieldMapper.Defaults {
public static final LatLonPointFieldType FIELD_TYPE = new LatLonPointFieldType();
static {
FIELD_TYPE.setTokenized(false);
FIELD_TYPE.setHasDocValues(true);
FIELD_TYPE.setDimensions(2, Integer.BYTES);
FIELD_TYPE.freeze();
}
}
public static class Builder extends BaseGeoPointFieldMapper.Builder<Builder, LatLonPointFieldMapper> {
public Builder(String name) {
super(name, Defaults.FIELD_TYPE);
}
@Override
public LatLonPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
MappedFieldType defaultFieldType, Settings indexSettings,
FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
setupFieldType(context);
return new LatLonPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, multiFields,
ignoreMalformed, copyTo);
}
@Override
public LatLonPointFieldMapper build(BuilderContext context) {
return super.build(context);
}
}
public static class TypeParser extends BaseGeoPointFieldMapper.TypeParser {
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext)
throws MapperParsingException {
return super.parse(name, node, parserContext);
}
}
public LatLonPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
Settings indexSettings, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, null, null, null, multiFields, ignoreMalformed, copyTo);
}
public static class LatLonPointFieldType extends GeoPointFieldType {
LatLonPointFieldType() {
}
LatLonPointFieldType(LatLonPointFieldType ref) {
super(ref);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public MappedFieldType clone() {
return new LatLonPointFieldType(this);
}
@Override
public IndexFieldData.Builder fielddataBuilder() {
failIfNoDocValues();
return new AbstractLatLonPointDVIndexFieldData.Builder();
}
@Override
public Query termQuery(Object value, QueryShardContext context) {
throw new QueryShardException(context, "Geo fields do not support exact searching, use dedicated geo queries instead: ["
+ name() + "]");
}
}
@Override
protected void parse(ParseContext originalContext, GeoPoint point, String geoHash) throws IOException {
// Geopoint fields, by default, will not be included in _all
final ParseContext context = originalContext.setIncludeInAllDefault(false);
if (ignoreMalformed.value() == false) {
if (point.lat() > 90.0 || point.lat() < -90.0) {
throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name());
}
if (point.lon() > 180.0 || point.lon() < -180) {
throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name());
}
} else {
GeoUtils.normalizePoint(point);
}
if (fieldType().indexOptions() != IndexOptions.NONE) {
context.doc().add(new LatLonPoint(fieldType().name(), point.lat(), point.lon()));
}
if (fieldType().stored()) {
context.doc().add(new StoredField(fieldType().name(), point.toString()));
}
if (fieldType.hasDocValues()) {
context.doc().add(new LatLonDocValuesField(fieldType().name(), point.lat(), point.lon()));
}
// if the mapping contains multifields then use the geohash string
if (multiFields.iterator().hasNext()) {
multiFields.parse(this, context.createExternalValueContext(point.geohash()));
}
}
}
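
The parse() override above is the heart of the new mapper: coordinates are either rejected or normalized depending on ignore_malformed, then indexed as a Lucene point, an optional stored field, and an optional doc-values field. A minimal standalone sketch of the bounds check, with plain doubles standing in for GeoPoint (hypothetical names, not the mapper's own API):

final class GeoPointBoundsSketch {
    // Mirrors the ignore_malformed branch in parse(): reject out-of-range
    // coordinates when the flag is off; the real mapper instead calls
    // GeoUtils.normalizePoint(point) when the flag is on.
    static void validate(double lat, double lon, boolean ignoreMalformed, String fieldName) {
        if (ignoreMalformed == false) {
            if (lat > 90.0 || lat < -90.0) {
                throw new IllegalArgumentException("illegal latitude value [" + lat + "] for " + fieldName);
            }
            if (lon > 180.0 || lon < -180.0) {
                throw new IllegalArgumentException("illegal longitude value [" + lon + "] for " + fieldName);
            }
        }
    }

    public static void main(String[] args) {
        validate(48.8584, 2.2945, false, "location"); // ok
        validate(123.0, 0.0, false, "location");      // throws IllegalArgumentException
    }
}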

View File

@ -60,7 +60,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
public static class Defaults extends BaseGeoPointFieldMapper.Defaults{
public static final Explicit<Boolean> COERCE = new Explicit<>(false, false);
public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType();
public static final GeoPointFieldType FIELD_TYPE = new LegacyGeoPointFieldType();
static {
FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
@ -331,6 +331,11 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
}
}
@Override
public LegacyGeoPointFieldType fieldType() {
return (LegacyGeoPointFieldType) super.fieldType();
}
public static class CustomGeoPointDocValuesField extends CustomDocValuesField {
private final ObjectHashSet<GeoPoint> points;

View File

@ -133,7 +133,7 @@ public abstract class MappedFieldType extends FieldType {
eagerGlobalOrdinals, similarity == null ? null : similarity.name(), nullValue, nullValueAsString);
}
// norelease: we need to override freeze() and add safety checks that all settings are actually set
// TODO: we need to override freeze() and add safety checks that all settings are actually set
/** Returns the name of this type, as would be specified in mapping properties */
public abstract String typeName();

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.query;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
@ -38,7 +39,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper.LegacyGeoPointFieldType;
import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.geo.LegacyInMemoryGeoBoundingBoxQuery;
import org.elasticsearch.index.search.geo.LegacyIndexedGeoBoundingBoxQuery;
@ -359,7 +361,10 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
}
}
if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
return LatLonPoint.newBoxQuery(fieldType.name(), luceneBottomRight.getLat(), luceneTopLeft.getLat(),
luceneTopLeft.getLon(), luceneBottomRight.getLon());
} else if (indexVersionCreated.onOrAfter(Version.V_2_2_0)) {
// indexes created in V_2_2 use the (soon to be legacy) numeric postings encoding
// indexes created in V_2_3 or later use the prefix-encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?
@ -371,7 +376,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
Query query;
switch(type) {
case INDEXED:
LegacyGeoPointFieldMapper.GeoPointFieldType geoFieldType = ((LegacyGeoPointFieldMapper.GeoPointFieldType) fieldType);
LegacyGeoPointFieldType geoFieldType = ((LegacyGeoPointFieldType) fieldType);
query = LegacyIndexedGeoBoundingBoxQuery.create(luceneTopLeft, luceneBottomRight, geoFieldType);
break;
case MEMORY:

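The hunk above introduces a three-way dispatch on the index-creation version. A rough sketch of the shape of that dispatch, with a plain int standing in for org.elasticsearch.Version and the two legacy branches elided (assumed names, not the builder's actual API):

import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.search.Query;

final class BoundingBoxDispatchSketch {
    // hypothetical ordered version constants standing in for Version.V_2_2_0 etc.
    static final int V_2_2_0 = 220, V_2_3_0 = 230, LAT_LON_FIELD_VERSION = 500;

    static Query newBoxQuery(int indexVersionCreated, String field,
                             double bottomLat, double topLat, double leftLon, double rightLon) {
        if (indexVersionCreated >= LAT_LON_FIELD_VERSION) {
            // 5.0+ indexes: geo_point is a Lucene 6 point, one call does it all
            return LatLonPoint.newBoxQuery(field, bottomLat, topLat, leftLon, rightLon);
        } else if (indexVersionCreated >= V_2_2_0) {
            // 2.2 indexes use numeric-encoded postings, 2.3+ prefix-encoded postings
            throw new UnsupportedOperationException("legacy GeoPointField branch elided");
        }
        // pre-2.2 indexes fall back to the indexed/in-memory bounding box queries
        throw new UnsupportedOperationException("pre-2.2 branch elided");
    }
}
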
View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.query;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
@ -38,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
@ -297,8 +299,10 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
double normDistance = geoDistance.normalize(this.distance, DistanceUnit.DEFAULT);
if (indexVersionCreated.before(Version.V_2_2_0)) {
LegacyGeoPointFieldMapper.GeoPointFieldType geoFieldType = ((LegacyGeoPointFieldMapper.GeoPointFieldType) fieldType);
if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
return LatLonPoint.newDistanceQuery(fieldType.name(), center.lat(), center.lon(), normDistance);
} else if (indexVersionCreated.before(Version.V_2_2_0)) {
LegacyGeoPointFieldMapper.LegacyGeoPointFieldType geoFieldType = (LegacyGeoPointFieldMapper.LegacyGeoPointFieldType) fieldType;
IndexGeoPointFieldData indexFieldData = shardContext.getForField(fieldType);
String bboxOptimization = Strings.isEmpty(optimizeBbox) ? DEFAULT_OPTIMIZE_BBOX : optimizeBbox;
return new GeoDistanceRangeQuery(center, null, normDistance, true, false, geoDistance,

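For indexes on or after LAT_LON_FIELD_VERSION, the distance query now collapses to a single Lucene call. A small sketch of that fast path (field name and coordinates are illustrative; the radius is in meters, as produced by geoDistance.normalize above):

import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.search.Query;

final class DistanceQuerySketch {
    public static void main(String[] args) {
        // matches documents whose "location" point lies within 1000m of the center
        Query query = LatLonPoint.newDistanceQuery("location", 48.8584, 2.2945, 1000.0);
        System.out.println(query);
    }
}
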
View File

@ -37,8 +37,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper.LegacyGeoPointFieldType;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
@ -347,8 +348,11 @@ public class GeoDistanceRangeQueryBuilder extends AbstractQueryBuilder<GeoDistan
}
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.before(Version.V_2_2_0)) {
LegacyGeoPointFieldMapper.GeoPointFieldType geoFieldType = ((LegacyGeoPointFieldMapper.GeoPointFieldType) fieldType);
if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
throw new QueryShardException(context, "[{}] queries are no longer supported for geo_point field types. "
+ "Use geo_distance sort or aggregations", NAME);
} else if (indexVersionCreated.before(Version.V_2_2_0)) {
LegacyGeoPointFieldType geoFieldType = (LegacyGeoPointFieldType) fieldType;
IndexGeoPointFieldData indexFieldData = context.getForField(fieldType);
String bboxOptimization = Strings.isEmpty(optimizeBbox) ? DEFAULT_OPTIMIZE_BBOX : optimizeBbox;
return new GeoDistanceRangeQuery(point, fromValue, toValue, includeLower, includeUpper, geoDistance, geoFieldType,

View File

@ -19,6 +19,8 @@
package org.elasticsearch.index.query;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
@ -36,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.search.geo.GeoPolygonQuery;
@ -210,10 +213,14 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
double[] lons = new double[shellSize];
GeoPoint p;
for (int i=0; i<shellSize; ++i) {
p = new GeoPoint(shell.get(i));
p = shell.get(i);
lats[i] = p.lat();
lons[i] = p.lon();
}
if (indexVersionCreated.onOrAfter(LatLonPointFieldMapper.LAT_LON_FIELD_VERSION)) {
return LatLonPoint.newPolygonQuery(fieldType.name(), new Polygon(lats, lons));
}
// indexes created in V_2_2 use the (soon to be legacy) numeric postings encoding
// indexes created in V_2_3 or later use the prefix-encoded postings format
final GeoPointField.TermEncoding encoding = (indexVersionCreated.before(Version.V_2_3_0)) ?

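The loop above flattens the shell into parallel lat/lon arrays before handing them to Lucene. A sketch of what the new 5.0 branch produces (illustrative coordinates; Lucene's Polygon requires a closed ring, i.e. the first point equals the last):

import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.search.Query;

final class PolygonQuerySketch {
    public static void main(String[] args) {
        double[] lats = {48.0, 49.0, 49.0, 48.0, 48.0}; // closed ring: first == last
        double[] lons = { 2.0,  2.0,  3.0,  3.0,  2.0};
        Query query = LatLonPoint.newPolygonQuery("location", new Polygon(lats, lons));
        System.out.println(query);
    }
}
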
View File

@ -36,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
@ -83,7 +84,7 @@ public class GeohashCellQuery {
* @param geohashes optional array of additional geohashes
* @return a new geo bounding box filter
*/
public static Query create(QueryShardContext context, BaseGeoPointFieldMapper.GeoPointFieldType fieldType,
public static Query create(QueryShardContext context, BaseGeoPointFieldMapper.LegacyGeoPointFieldType fieldType,
String geohash, @Nullable List<CharSequence> geohashes) {
MappedFieldType geoHashMapper = fieldType.geoHashFieldType();
if (geoHashMapper == null) {
@ -241,11 +242,14 @@ public class GeohashCellQuery {
}
}
if (!(fieldType instanceof BaseGeoPointFieldMapper.GeoPointFieldType)) {
if (fieldType instanceof LatLonPointFieldMapper.LatLonPointFieldType) {
throw new QueryShardException(context, "failed to parse [{}] query. "
+ "geo_point field no longer supports geohash_cell queries", NAME);
} else if (!(fieldType instanceof BaseGeoPointFieldMapper.LegacyGeoPointFieldType)) {
throw new QueryShardException(context, "failed to parse [{}] query. field [{}] is not a geo_point field", NAME, fieldName);
}
BaseGeoPointFieldMapper.GeoPointFieldType geoFieldType = ((BaseGeoPointFieldMapper.GeoPointFieldType) fieldType);
BaseGeoPointFieldMapper.LegacyGeoPointFieldType geoFieldType = ((BaseGeoPointFieldMapper.LegacyGeoPointFieldType) fieldType);
if (!geoFieldType.isGeoHashPrefixEnabled()) {
throw new QueryShardException(context, "failed to parse [{}] query. [geohash_prefix] is not enabled for field [{}]", NAME,
fieldName);

View File

@ -94,7 +94,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
ObjectParser.ValueType.OBJECT_ARRAY);
PARSER.declareField((p, i, c) -> {
try {
i.setFetchSourceContext(FetchSourceContext.parse(c));
i.setFetchSourceContext(FetchSourceContext.parse(c.parser()));
} catch (IOException e) {
throw new ParsingException(p.getTokenLocation(), "Could not parse inner _source definition", e);
}
@ -219,7 +219,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
scriptFields.add(new ScriptField(in));
}
}
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
if (in.readBoolean()) {
int size = in.readVInt();
sorts = new ArrayList<>(size);
@ -258,7 +258,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
scriptField.writeTo(out);
}
}
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalWriteable(fetchSourceContext);
boolean hasSorts = sorts != null;
out.writeBoolean(hasSorts);
if (hasSorts) {

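The readOptionalStreamable/writeOptionalStreamable pair gives way to readOptionalWriteable/writeOptionalWriteable here and in several classes below. The wire shape is the same in both cases, a presence flag followed by the value; a self-contained sketch with java.io streams standing in for StreamInput/StreamOutput:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class OptionalWriteableSketch {
    interface Reader<T> { T read(DataInputStream in) throws IOException; }
    interface Writer<T> { void write(DataOutputStream out, T value) throws IOException; }

    // a boolean presence flag, then the value itself (if present)
    static <T> T readOptional(DataInputStream in, Reader<T> reader) throws IOException {
        return in.readBoolean() ? reader.read(in) : null;
    }

    static <T> void writeOptional(DataOutputStream out, T value, Writer<T> writer) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            writer.write(out, value);
        }
    }
}
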
View File

@ -35,6 +35,7 @@ import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;
import org.elasticsearch.index.mapper.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
import java.io.IOException;
@ -60,7 +61,8 @@ public class GeoDistanceRangeQuery extends Query {
private final IndexGeoPointFieldData indexFieldData;
public GeoDistanceRangeQuery(GeoPoint point, Double lowerVal, Double upperVal, boolean includeLower,
boolean includeUpper, GeoDistance geoDistance, LegacyGeoPointFieldMapper.GeoPointFieldType fieldType,
boolean includeUpper, GeoDistance geoDistance,
LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType,
IndexGeoPointFieldData indexFieldData, String optimizeBbox) {
this.lat = point.lat();
this.lon = point.lon();

View File

@ -33,7 +33,8 @@ import org.elasticsearch.index.mapper.LegacyGeoPointFieldMapper;
@Deprecated
public class LegacyIndexedGeoBoundingBoxQuery {
public static Query create(GeoPoint topLeft, GeoPoint bottomRight, LegacyGeoPointFieldMapper.GeoPointFieldType fieldType) {
public static Query create(GeoPoint topLeft, GeoPoint bottomRight,
LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) {
if (!fieldType.isLatLonEnabled()) {
throw new IllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldType.name()
+ "], can't use indexed filter on it");
@ -47,7 +48,7 @@ public class LegacyIndexedGeoBoundingBoxQuery {
}
private static Query westGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight,
LegacyGeoPointFieldMapper.GeoPointFieldType fieldType) {
LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) {
BooleanQuery.Builder filter = new BooleanQuery.Builder();
filter.setMinimumNumberShouldMatch(1);
filter.add(fieldType.lonFieldType().rangeQuery(null, bottomRight.lon(), true, true), Occur.SHOULD);
@ -57,7 +58,7 @@ public class LegacyIndexedGeoBoundingBoxQuery {
}
private static Query eastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight,
LegacyGeoPointFieldMapper.GeoPointFieldType fieldType) {
LegacyGeoPointFieldMapper.LegacyGeoPointFieldType fieldType) {
BooleanQuery.Builder filter = new BooleanQuery.Builder();
filter.add(fieldType.lonFieldType().rangeQuery(topLeft.lon(), bottomRight.lon(), true, true), Occur.MUST);
filter.add(fieldType.latFieldType().rangeQuery(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST);

View File

@ -48,7 +48,6 @@ import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
@ -116,9 +115,9 @@ import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.search.suggest.completion.CompletionFieldStats;
@ -135,7 +134,6 @@ import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
@ -368,60 +366,46 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* @throws IOException if shard state could not be persisted
*/
public void updateRoutingEntry(final ShardRouting newRouting) throws IOException {
final ShardRouting currentRouting = this.shardRouting;
if (!newRouting.shardId().equals(shardId())) {
throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId() + "");
}
if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
}
if (currentRouting != null) {
if (!newRouting.primary() && currentRouting.primary()) {
logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
}
// if its the same routing, return
if (currentRouting.equals(newRouting)) {
return;
}
}
final ShardRouting currentRouting;
synchronized (mutex) {
currentRouting = this.shardRouting;
if (state == IndexShardState.POST_RECOVERY) {
// if the state is started or relocating (cause it might move right away from started to relocating)
// then move to STARTED
if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
if (!newRouting.shardId().equals(shardId())) {
throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId() + "");
}
if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
}
if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) {
throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. Current "
+ currentRouting + ", new " + newRouting);
}
if (state == IndexShardState.POST_RECOVERY && newRouting.active()) {
assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting;
// we want to refresh *before* we move to internal STARTED state
try {
getEngine().refresh("cluster_state_started");
} catch (Exception e) {
logger.debug("failed to refresh due to move to cluster wide started", e);
}
boolean movedToStarted = false;
synchronized (mutex) {
// do the check under a mutex, so we make sure to only change to STARTED if in POST_RECOVERY
if (state == IndexShardState.POST_RECOVERY) {
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
movedToStarted = true;
} else {
logger.debug("state [{}] not changed, not in POST_RECOVERY, global state is [{}]", state, newRouting.state());
}
}
if (movedToStarted) {
indexEventListener.afterIndexShardStarted(this);
}
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
} else if (state == IndexShardState.RELOCATED &&
(newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
// if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
// failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
// active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
}
this.shardRouting = newRouting;
persistMetadata(newRouting, currentRouting);
}
if (state == IndexShardState.RELOCATED &&
(newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
// if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
// failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
// active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
if (currentRouting != null && currentRouting.active() == false && newRouting.active()) {
indexEventListener.afterIndexShardStarted(this);
}
if (newRouting.equals(currentRouting) == false) {
indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
}
this.shardRouting = newRouting;
indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
persistMetadata(newRouting, currentRouting);
}
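
The rewritten updateRoutingEntry performs all of its checks and the state change under the mutex, and the old warn-only check for a primary being demoted to a replica is now a hard failure. A condensed sketch of the invariants, with plain booleans and ids standing in for ShardRouting (not the method's real signature):

final class RoutingUpdateGuardSketch {
    // invariants enforced above, in order: same shard id, same allocation,
    // and no primary -> replica demotion
    static void check(String currentShardId, String newShardId,
                      boolean sameAllocation, boolean currentPrimary, boolean newPrimary) {
        if (currentShardId.equals(newShardId) == false) {
            throw new IllegalArgumentException("Trying to set a routing entry with a different shardId");
        }
        if (sameAllocation == false) {
            throw new IllegalArgumentException("Trying to set a routing entry with a different allocation");
        }
        if (currentPrimary && newPrimary == false) {
            throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode");
        }
    }
}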
/**
@ -451,6 +435,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
public void relocated(String reason) throws IllegalIndexShardStateException, InterruptedException {
assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting;
try {
indexShardOperationsLock.blockOperations(30, TimeUnit.MINUTES, () -> {
// no shard operation locks are being held here, move state from started to relocated
@ -460,6 +445,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
// if the master cancelled the recovery, the target will be removed
// and the recovery will be stopped.
// However, it is still possible that we concurrently end up here
// and therefore have to make sure we don't mark the shard as relocated when
// its shard routing says otherwise.
if (shardRouting.relocating() == false) {
throw new IllegalIndexShardStateException(shardId, IndexShardState.STARTED,
": shard is no longer relocating " + shardRouting);
}
changeState(IndexShardState.RELOCATED, reason);
}
});

View File

@ -258,8 +258,12 @@ public class TermVectorsService {
for (Map.Entry<String, Collection<Object>> entry : values.entrySet()) {
String field = entry.getKey();
Analyzer analyzer = getAnalyzerAtField(indexShard, field, perFieldAnalyzer);
for (Object text : entry.getValue()) {
index.addField(field, text.toString(), analyzer);
if (entry.getValue() instanceof List) {
for (Object text : entry.getValue()) {
index.addField(field, text.toString(), analyzer);
}
} else {
index.addField(field, entry.getValue().toString(), analyzer);
}
}
/* and read vectors from it */

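The fix above accounts for _source values that arrive either as a single object or as a List of objects. A standalone sketch of that flattening (Doc is a hypothetical stand-in for the in-memory index used by the term vectors service):

import java.util.Arrays;
import java.util.List;

final class FieldValueFlatteningSketch {
    interface Doc { void addField(String field, String text); }

    static void add(Doc index, String field, Object value) {
        if (value instanceof List) {
            for (Object text : (List<?>) value) {
                index.addField(field, text.toString());
            }
        } else {
            index.addField(field, value.toString());
        }
    }

    public static void main(String[] args) {
        Doc doc = (field, text) -> System.out.println(field + " -> " + text);
        add(doc, "tags", Arrays.asList("a", "b")); // two additions
        add(doc, "title", "hello");                // one addition
    }
}
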
View File

@ -36,23 +36,24 @@ import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.mapper.IpFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.LatLonPointFieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
import org.elasticsearch.index.mapper.StringFieldMapper;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.mapper.ParentFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.ScaledFloatFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.StringFieldMapper;
import org.elasticsearch.index.mapper.TTLFieldMapper;
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.TimestampFieldMapper;
import org.elasticsearch.index.mapper.TokenCountFieldMapper;
import org.elasticsearch.index.mapper.TTLFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
@ -119,6 +120,7 @@ public class IndicesModule extends AbstractModule {
mappers.put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser());
mappers.put(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser());
mappers.put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
mappers.put(LatLonPointFieldMapper.CONTENT_TYPE, new LatLonPointFieldMapper.TypeParser());
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
mappers.put(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());
}

View File

@ -88,6 +88,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.ingest.IngestService;
@ -111,7 +112,9 @@ import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchExtRegistry;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchRequestParsers;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.snapshots.SnapshotShardsService;
@ -327,7 +330,6 @@ public class Node implements Closeable {
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
modules.add(indicesModule);
SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class));
modules.add(searchModule);
modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false, settings,
clusterModule.getIndexNameExpressionResolver(), settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class)));
@ -363,7 +365,11 @@ public class Node implements Closeable {
.map(Plugin::getCustomMetaDataUpgrader)
.collect(Collectors.toList());
final MetaDataUpgrader metaDataUpgrader = new MetaDataUpgrader(customMetaDataUpgraders);
modules.add(b -> {
b.bind(IndicesQueriesRegistry.class).toInstance(searchModule.getQueryParserRegistry());
b.bind(SearchRequestParsers.class).toInstance(searchModule.getSearchRequestParsers());
b.bind(SearchExtRegistry.class).toInstance(searchModule.getSearchExtRegistry());
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(Client.class).toInstance(client);
b.bind(NodeClient.class).toInstance(client);

View File

@ -64,10 +64,6 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SAME, new VerifyNodeRepositoryRequestHandler());
}
public void close() {
transportService.removeHandler(ACTION_NAME);
}
public void verify(String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
final DiscoveryNodes discoNodes = clusterService.state().nodes();
final DiscoveryNode localNode = discoNodes.getLocalNode();

View File

@ -27,9 +27,10 @@ import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -314,16 +315,32 @@ public class RestIndicesAction extends AbstractCatAction {
}
// package private for testing
Table buildTable(RestRequest request, Index[] indices, ClusterHealthResponse health, IndicesStatsResponse stats, MetaData indexMetaDatas) {
Table buildTable(RestRequest request, Index[] indices, ClusterHealthResponse response, IndicesStatsResponse stats, MetaData indexMetaDatas) {
final String healthParam = request.param("health");
final ClusterHealthStatus status;
if (healthParam != null) {
status = ClusterHealthStatus.fromString(healthParam);
} else {
status = null;
}
Table table = getTableWithHeader(request);
for (final Index index : indices) {
final String indexName = index.getName();
ClusterIndexHealth indexHealth = health.getIndices().get(indexName);
ClusterIndexHealth indexHealth = response.getIndices().get(indexName);
IndexStats indexStats = stats.getIndices().get(indexName);
IndexMetaData indexMetaData = indexMetaDatas.getIndices().get(indexName);
IndexMetaData.State state = indexMetaData.getState();
if (status != null) {
if (state == IndexMetaData.State.CLOSE ||
(indexHealth == null && ClusterHealthStatus.RED.equals(status) == false) ||
(indexHealth != null && indexHealth.getStatus().equals(status) == false)) {
continue;
}
}
table.startRow();
table.addCell(state == IndexMetaData.State.OPEN ? (indexHealth == null ? "red*" : indexHealth.getStatus().toString().toLowerCase(Locale.ROOT)) : null);
table.addCell(state.toString().toLowerCase(Locale.ROOT));

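A compact restatement of the ?health= filter above: a row is kept only when the index is open and its effective health matches the requested status, with a missing health entry treated as red (the "red*" rows). Sketch with local enums standing in for the cluster classes:

final class HealthFilterSketch {
    enum Status { GREEN, YELLOW, RED }
    enum State { OPEN, CLOSE }

    static boolean keepRow(State state, Status indexHealth, Status requested) {
        if (requested == null) {
            return true; // no ?health= parameter, keep everything
        }
        if (state == State.CLOSE) {
            return false;
        }
        Status effective = indexHealth == null ? Status.RED : indexHealth; // "red*" rows
        return effective == requested;
    }
}
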
View File

@ -24,10 +24,12 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
@ -37,6 +39,7 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
@ -52,6 +55,8 @@ import static org.elasticsearch.rest.RestStatus.OK;
* </pre>
*/
public class RestBulkAction extends BaseRestHandler {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(RestBulkAction.class));
private final boolean allowExplicitIndex;
@ -75,18 +80,21 @@ public class RestBulkAction extends BaseRestHandler {
String defaultIndex = request.param("index");
String defaultType = request.param("type");
String defaultRouting = request.param("routing");
FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
String fieldsParam = request.param("fields");
String defaultPipeline = request.param("pipeline");
if (fieldsParam != null) {
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
}
String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;
String defaultPipeline = request.param("pipeline");
String waitForActiveShards = request.param("wait_for_active_shards");
if (waitForActiveShards != null) {
bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline,
null, allowExplicitIndex);
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields,
defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex);
client.bulk(bulkRequest, new RestBuilderListener<BulkResponse>(channel) {
@Override

View File

@ -58,12 +58,15 @@ public class RestGetAction extends BaseRestHandler {
getRequest.parent(request.param("parent"));
getRequest.preference(request.param("preference"));
getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
String sField = request.param("fields");
if (request.param("fields") != null) {
throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
"please use [stored_fields] to retrieve stored fields or [_source] to load the field from _source");
}
String sField = request.param("stored_fields");
if (sField != null) {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
getRequest.fields(sFields);
getRequest.storedFields(sFields);
}
}

View File

@ -91,7 +91,7 @@ public abstract class RestHeadAction extends BaseRestHandler {
getRequest.preference(request.param("preference"));
getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
// don't get any fields back...
getRequest.fields(Strings.EMPTY_ARRAY);
getRequest.storedFields(Strings.EMPTY_ARRAY);
// TODO we can also just return the document size as Content-Length
client.get(getRequest, new RestResponseListener<GetResponse>(channel) {

View File

@ -59,9 +59,12 @@ public class RestMultiGetAction extends BaseRestHandler {
multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh()));
multiGetRequest.preference(request.param("preference"));
multiGetRequest.realtime(request.paramAsBoolean("realtime", multiGetRequest.realtime()));
if (request.param("fields") != null) {
throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
"please use [stored_fields] to retrieve stored fields or _source filtering if the field is not stored");
}
String[] sFields = null;
String sField = request.param("fields");
String sField = request.param("stored_fields");
if (sField != null) {
sFields = Strings.splitStringByCommaToArray(sField);
}

View File

@ -25,6 +25,8 @@ import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.BaseRestHandler;
@ -33,12 +35,15 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.rest.action.RestStatusToXContentListener;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import static org.elasticsearch.rest.RestRequest.Method.POST;
/**
*/
public class RestUpdateAction extends BaseRestHandler {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));
@Inject
public RestUpdateAction(Settings settings, RestController controller) {
@ -58,13 +63,19 @@ public class RestUpdateAction extends BaseRestHandler {
updateRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
updateRequest.docAsUpsert(request.paramAsBoolean("doc_as_upsert", updateRequest.docAsUpsert()));
FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
String sField = request.param("fields");
if (sField != null) {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
updateRequest.fields(sFields);
}
if (sField != null && fetchSourceContext != null) {
throw new IllegalArgumentException("[fields] and [_source] cannot be used in the same request");
}
if (sField != null) {
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
String[] sFields = Strings.splitStringByCommaToArray(sField);
updateRequest.fields(sFields);
} else if (fetchSourceContext != null) {
updateRequest.fetchSource(fetchSourceContext);
}
updateRequest.retryOnConflict(request.paramAsInt("retry_on_conflict", updateRequest.retryOnConflict()));
updateRequest.version(RestActions.parseVersion(request));
updateRequest.versionType(VersionType.fromString(request.param("version_type"), updateRequest.versionType()));
@ -72,7 +83,7 @@ public class RestUpdateAction extends BaseRestHandler {
// see if we have it in the body
if (request.hasContent()) {
updateRequest.source(request.content());
updateRequest.fromXContent(request.content());
IndexRequest upsertRequest = updateRequest.upsertRequest();
if (upsertRequest != null) {
upsertRequest.routing(request.param("routing"));

View File

@ -78,11 +78,15 @@ public class RestExplainAction extends BaseRestHandler {
explainRequest.query(query);
}
String sField = request.param("fields");
if (request.param("fields") != null) {
throw new IllegalArgumentException("The parameter [fields] is no longer supported, " +
"please use [stored_fields] to retrieve stored fields");
}
String sField = request.param("stored_fields");
if (sField != null) {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
explainRequest.fields(sFields);
explainRequest.storedFields(sFields);
}
}

View File

@ -23,7 +23,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.elasticsearch.common.NamedRegistry;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.geo.builders.ShapeBuilders;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
@ -286,7 +285,7 @@ import static java.util.Objects.requireNonNull;
/**
* Sets up things that can be done at search time like queries, aggregations, and suggesters.
*/
public class SearchModule extends AbstractModule {
public class SearchModule {
public static final Setting<Integer> INDICES_MAX_CLAUSE_COUNT_SETTING = Setting.intSetting("indices.query.bool.max_clause_count",
1024, 1, Integer.MAX_VALUE, Setting.Property.NodeScope);
@ -375,16 +374,6 @@ public class SearchModule extends AbstractModule {
return aggregatorParsers;
}
@Override
protected void configure() {
if (false == transportClient) {
bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry);
bind(SearchRequestParsers.class).toInstance(searchRequestParsers);
bind(SearchExtRegistry.class).toInstance(searchExtParserRegistry);
}
}
private void registerAggregations(List<SearchPlugin> plugins) {
registerAggregation(new AggregationSpec(AvgAggregationBuilder.NAME, AvgAggregationBuilder::new, new AvgParser())
.addResultReader(InternalAvg::new));
@ -811,4 +800,8 @@ public class SearchModule extends AbstractModule {
public FetchPhase getFetchPhase() {
return new FetchPhase(fetchSubPhases);
}
public SearchExtRegistry getSearchExtRegistry() {
return searchExtParserRegistry;
}
}

View File

@ -79,7 +79,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
public TopHitsAggregationBuilder(StreamInput in) throws IOException {
super(in, TYPE);
explain = in.readBoolean();
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
if (in.readBoolean()) {
int size = in.readVInt();
fieldDataFields = new ArrayList<>(size);
@ -112,7 +112,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeBoolean(explain);
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalWriteable(fetchSourceContext);
boolean hasFieldDataFields = fieldDataFields != null;
out.writeBoolean(hasFieldDataFields);
if (hasFieldDataFields) {
@ -596,7 +596,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.TRACK_SCORES_FIELD)) {
factory.trackScores(parser.booleanValue());
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) {
factory.fetchSource(FetchSourceContext.parse(context));
factory.fetchSource(FetchSourceContext.parse(context.parser()));
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.STORED_FIELDS_FIELD)) {
factory.storedFieldsContext =
StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context);
@ -608,7 +608,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) {
factory.fetchSource(FetchSourceContext.parse(context));
factory.fetchSource(FetchSourceContext.parse(context.parser()));
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder.SCRIPT_FIELDS_FIELD)) {
List<ScriptField> scriptFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@ -680,7 +680,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder<TopHit
List<SortBuilder<?>> sorts = SortBuilder.fromXContent(context);
factory.sorts(sorts);
} else if (context.getParseFieldMatcher().match(currentFieldName, SearchSourceBuilder._SOURCE_FIELD)) {
factory.fetchSource(FetchSourceContext.parse(context));
factory.fetchSource(FetchSourceContext.parse(context.parser()));
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());

View File

@ -187,7 +187,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
public SearchSourceBuilder(StreamInput in) throws IOException {
aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new);
explain = in.readOptionalBoolean();
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
docValueFields = (List<String>) in.readGenericValue();
storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new);
from = in.readVInt();
@ -234,7 +234,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalWriteable(aggregations);
out.writeOptionalBoolean(explain);
out.writeOptionalStreamable(fetchSourceContext);
out.writeOptionalWriteable(fetchSourceContext);
out.writeGenericValue(docValueFields);
out.writeOptionalWriteable(storedFieldsContext);
out.writeVInt(from);
@ -961,7 +961,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (context.getParseFieldMatcher().match(currentFieldName, TRACK_SCORES_FIELD)) {
trackScores = parser.booleanValue();
} else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
fetchSourceContext = FetchSourceContext.parse(context);
fetchSourceContext = FetchSourceContext.parse(context.parser());
} else if (context.getParseFieldMatcher().match(currentFieldName, STORED_FIELDS_FIELD)) {
storedFieldsContext =
StoredFieldsContext.fromXContent(SearchSourceBuilder.STORED_FIELDS_FIELD.getPreferredName(), context);
@ -983,7 +983,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (context.getParseFieldMatcher().match(currentFieldName, POST_FILTER_FIELD)) {
postQueryBuilder = context.parseInnerQueryBuilder().orElse(null);
} else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
fetchSourceContext = FetchSourceContext.parse(context);
fetchSourceContext = FetchSourceContext.parse(context.parser());
} else if (context.getParseFieldMatcher().match(currentFieldName, SCRIPT_FIELDS_FIELD)) {
scriptFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@ -1068,7 +1068,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
}
} else if (context.getParseFieldMatcher().match(currentFieldName, _SOURCE_FIELD)) {
fetchSourceContext = FetchSourceContext.parse(context);
fetchSourceContext = FetchSourceContext.parse(context.parser());
} else if (context.getParseFieldMatcher().match(currentFieldName, SEARCH_AFTER)) {
searchAfterBuilder = SearchAfterBuilder.fromXContent(parser, context.getParseFieldMatcher());
} else if (context.getParseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {

View File

@ -21,14 +21,13 @@ package org.elasticsearch.search.fetch;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.transport.TransportResponse;
import java.io.IOException;
import static org.elasticsearch.search.internal.InternalSearchHits.StreamContext;
/**
*
*/
@ -70,9 +69,17 @@ public class FetchSearchResult extends TransportResponse implements FetchSearchR
}
public void hits(InternalSearchHits hits) {
assert assertNoSearchTarget(hits);
this.hits = hits;
}
private boolean assertNoSearchTarget(InternalSearchHits hits) {
for (SearchHit hit : hits.hits()) {
assert hit.getShard() == null : "expected null but got: " + hit.getShard();
}
return true;
}
public InternalSearchHits hits() {
return hits;
}
@ -96,13 +103,13 @@ public class FetchSearchResult extends TransportResponse implements FetchSearchR
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
id = in.readLong();
hits = InternalSearchHits.readSearchHits(in, InternalSearchHits.streamContext().streamShardTarget(StreamContext.ShardTargetType.NO_STREAM));
hits = InternalSearchHits.readSearchHits(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(id);
hits.writeTo(out, InternalSearchHits.streamContext().streamShardTarget(StreamContext.ShardTargetType.NO_STREAM));
hits.writeTo(out);
}
}

View File

@ -21,15 +21,15 @@ package org.elasticsearch.search.fetch.subphase;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.rest.RestRequest;
import java.io.IOException;
@ -40,7 +40,7 @@ import java.util.List;
/**
* Context used to fetch the {@code _source}.
*/
public class FetchSourceContext implements Streamable, ToXContent {
public class FetchSourceContext implements Writeable, ToXContent {
public static final ParseField INCLUDES_FIELD = new ParseField("includes", "include");
public static final ParseField EXCLUDES_FIELD = new ParseField("excludes", "exclude");
@ -51,9 +51,9 @@ public class FetchSourceContext implements Streamable, ToXContent {
private String[] includes;
private String[] excludes;
public static FetchSourceContext parse(QueryParseContext context) throws IOException {
public static FetchSourceContext parse(XContentParser parser) throws IOException {
FetchSourceContext fetchSourceContext = new FetchSourceContext();
fetchSourceContext.fromXContent(context);
fetchSourceContext.fromXContent(parser, ParseFieldMatcher.STRICT);
return fetchSourceContext;
}
@ -88,6 +88,19 @@ public class FetchSourceContext implements Streamable, ToXContent {
this.excludes = excludes == null ? Strings.EMPTY_ARRAY : excludes;
}
public FetchSourceContext(StreamInput in) throws IOException {
fetchSource = in.readBoolean();
includes = in.readStringArray();
excludes = in.readStringArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(fetchSource);
out.writeStringArray(includes);
out.writeStringArray(excludes);
}
public boolean fetchSource() {
return this.fetchSource;
}
@ -148,8 +161,7 @@ public class FetchSourceContext implements Streamable, ToXContent {
return null;
}
public void fromXContent(QueryParseContext context) throws IOException {
XContentParser parser = context.parser();
public void fromXContent(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
XContentParser.Token token = parser.currentToken();
boolean fetchSource = true;
String[] includes = Strings.EMPTY_ARRAY;
@ -170,7 +182,7 @@ public class FetchSourceContext implements Streamable, ToXContent {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if (context.getParseFieldMatcher().match(currentFieldName, INCLUDES_FIELD)) {
if (parseFieldMatcher.match(currentFieldName, INCLUDES_FIELD)) {
List<String> includesList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
@ -181,7 +193,7 @@ public class FetchSourceContext implements Streamable, ToXContent {
}
}
includes = includesList.toArray(new String[includesList.size()]);
} else if (context.getParseFieldMatcher().match(currentFieldName, EXCLUDES_FIELD)) {
} else if (parseFieldMatcher.match(currentFieldName, EXCLUDES_FIELD)) {
List<String> excludesList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
@ -197,10 +209,13 @@ public class FetchSourceContext implements Streamable, ToXContent {
+ " in [" + currentFieldName + "].", parser.getTokenLocation());
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if (context.getParseFieldMatcher().match(currentFieldName, INCLUDES_FIELD)) {
if (parseFieldMatcher.match(currentFieldName, INCLUDES_FIELD)) {
includes = new String[] {parser.text()};
} else if (context.getParseFieldMatcher().match(currentFieldName, EXCLUDES_FIELD)) {
} else if (parseFieldMatcher.match(currentFieldName, EXCLUDES_FIELD)) {
excludes = new String[] {parser.text()};
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token
+ " in [" + currentFieldName + "].", parser.getTokenLocation());
}
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
@ -229,22 +244,6 @@ public class FetchSourceContext implements Streamable, ToXContent {
return builder;
}
@Override
public void readFrom(StreamInput in) throws IOException {
fetchSource = in.readBoolean();
includes = in.readStringArray();
excludes = in.readStringArray();
in.readBoolean(); // Used to be transformSource but that was dropped in 2.1
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(fetchSource);
out.writeStringArray(includes);
out.writeStringArray(excludes);
out.writeBoolean(false); // Used to be transformSource but that was dropped in 2.1
}
@Override
public boolean equals(Object o) {
if (this == o) return true;

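FetchSourceContext moves from Streamable (a mutable readFrom) to Writeable (a StreamInput constructor plus writeTo), and the obsolete transformSource boolean is dropped from the wire format in the process. The resulting pattern, reduced to its skeleton (java.io streams standing in for StreamInput/StreamOutput):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class WriteableSkeletonSketch {
    private final boolean fetchSource;

    // Writeable style: deserialization happens in a constructor,
    // so the object never exists in a half-read state
    WriteableSkeletonSketch(DataInputStream in) throws IOException {
        fetchSource = in.readBoolean();
        // the old readFrom also consumed a trailing transformSource boolean;
        // that flag is gone from the new format
    }

    void writeTo(DataOutputStream out) throws IOException {
        out.writeBoolean(fetchSource);
    }
}
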
View File

@ -87,11 +87,7 @@ public final class CustomQueryScorer extends QueryScorer {
}
protected void extract(Query query, float boost, Map<String, WeightedSpanTerm> terms) throws IOException {
if (query instanceof GeoPointInBBoxQuery) {
// skip all geo queries, see https://issues.apache.org/jira/browse/LUCENE-7293 and
// https://github.com/elastic/elasticsearch/issues/17537
return;
} else if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
// skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999
return;
} else if (query instanceof FunctionScoreQuery) {

View File

@ -39,7 +39,6 @@ import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.search.internal.InternalSearchHits.StreamContext.ShardTargetType;
import org.elasticsearch.search.lookup.SourceLookup;
import java.io.IOException;
@ -554,18 +553,14 @@ public class InternalSearchHit implements SearchHit {
return builder;
}
public static InternalSearchHit readSearchHit(StreamInput in, InternalSearchHits.StreamContext context) throws IOException {
public static InternalSearchHit readSearchHit(StreamInput in) throws IOException {
InternalSearchHit hit = new InternalSearchHit();
hit.readFrom(in, context);
hit.readFrom(in);
return hit;
}
@Override
public void readFrom(StreamInput in) throws IOException {
readFrom(in, InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
}
public void readFrom(StreamInput in, InternalSearchHits.StreamContext context) throws IOException {
score = in.readFloat();
id = in.readOptionalText();
type = in.readOptionalText();
@ -644,26 +639,13 @@ public class InternalSearchHit implements SearchHit {
matchedQueries[i] = in.readString();
}
}
if (context.streamShardTarget() == ShardTargetType.STREAM) {
if (in.readBoolean()) {
shard = new SearchShardTarget(in);
}
} else if (context.streamShardTarget() == ShardTargetType.LOOKUP) {
int lookupId = in.readVInt();
if (lookupId > 0) {
shard = context.handleShardLookup().get(lookupId);
}
}
shard = in.readOptionalWriteable(SearchShardTarget::new);
size = in.readVInt();
if (size > 0) {
innerHits = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String key = in.readString();
ShardTargetType shardTarget = context.streamShardTarget();
InternalSearchHits value = InternalSearchHits.readSearchHits(in, context.streamShardTarget(ShardTargetType.NO_STREAM));
context.streamShardTarget(shardTarget);
InternalSearchHits value = InternalSearchHits.readSearchHits(in);
innerHits.put(key, value);
}
}
@ -671,10 +653,6 @@ public class InternalSearchHit implements SearchHit {
@Override
public void writeTo(StreamOutput out) throws IOException {
writeTo(out, InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
}
public void writeTo(StreamOutput out, InternalSearchHits.StreamContext context) throws IOException {
out.writeFloat(score);
out.writeOptionalText(id);
out.writeOptionalText(type);
@ -752,31 +730,14 @@ public class InternalSearchHit implements SearchHit {
out.writeString(matchedFilter);
}
}
if (context.streamShardTarget() == ShardTargetType.STREAM) {
if (shard == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
shard.writeTo(out);
}
} else if (context.streamShardTarget() == ShardTargetType.LOOKUP) {
if (shard == null) {
out.writeVInt(0);
} else {
out.writeVInt(context.shardHandleLookup().get(shard));
}
}
out.writeOptionalWriteable(shard);
if (innerHits == null) {
out.writeVInt(0);
} else {
out.writeVInt(innerHits.size());
for (Map.Entry<String, InternalSearchHits> entry : innerHits.entrySet()) {
out.writeString(entry.getKey());
ShardTargetType shardTarget = context.streamShardTarget();
entry.getValue().writeTo(out, context.streamShardTarget(ShardTargetType.NO_STREAM));
context.streamShardTarget(shardTarget);
entry.getValue().writeTo(out);
}
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.search.internal;
import com.carrotsearch.hppc.IntObjectHashMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -29,65 +28,12 @@ import org.elasticsearch.search.SearchShardTarget;
import java.io.IOException;
import java.util.Arrays;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.Map;
import static org.elasticsearch.search.internal.InternalSearchHit.readSearchHit;
/**
*
*/
public class InternalSearchHits implements SearchHits {
public static class StreamContext {
public static enum ShardTargetType {
STREAM,
LOOKUP,
NO_STREAM
}
private IdentityHashMap<SearchShardTarget, Integer> shardHandleLookup = new IdentityHashMap<>();
private IntObjectHashMap<SearchShardTarget> handleShardLookup = new IntObjectHashMap<>();
private ShardTargetType streamShardTarget = ShardTargetType.STREAM;
public StreamContext reset() {
shardHandleLookup.clear();
handleShardLookup.clear();
streamShardTarget = ShardTargetType.STREAM;
return this;
}
public IdentityHashMap<SearchShardTarget, Integer> shardHandleLookup() {
return shardHandleLookup;
}
public IntObjectHashMap<SearchShardTarget> handleShardLookup() {
return handleShardLookup;
}
public ShardTargetType streamShardTarget() {
return streamShardTarget;
}
public StreamContext streamShardTarget(ShardTargetType streamShardTarget) {
this.streamShardTarget = streamShardTarget;
return this;
}
}
private static final ThreadLocal<StreamContext> cache = new ThreadLocal<StreamContext>() {
@Override
protected StreamContext initialValue() {
return new StreamContext();
}
};
public static StreamContext streamContext() {
return cache.get().reset();
}
public static InternalSearchHits empty() {
// We shouldn't use a static final instance, since that could directly be returned by native transport clients
return new InternalSearchHits(EMPTY, 0, 0);
@ -186,11 +132,6 @@ public class InternalSearchHits implements SearchHits {
return builder;
}
public static InternalSearchHits readSearchHits(StreamInput in, StreamContext context) throws IOException {
InternalSearchHits hits = new InternalSearchHits();
hits.readFrom(in, context);
return hits;
}
public static InternalSearchHits readSearchHits(StreamInput in) throws IOException {
InternalSearchHits hits = new InternalSearchHits();
@ -200,63 +141,27 @@ public class InternalSearchHits implements SearchHits {
@Override
public void readFrom(StreamInput in) throws IOException {
readFrom(in, streamContext().streamShardTarget(StreamContext.ShardTargetType.LOOKUP));
}
public void readFrom(StreamInput in, StreamContext context) throws IOException {
totalHits = in.readVLong();
maxScore = in.readFloat();
int size = in.readVInt();
if (size == 0) {
hits = EMPTY;
} else {
if (context.streamShardTarget() == StreamContext.ShardTargetType.LOOKUP) {
// read the lookup table first
int lookupSize = in.readVInt();
for (int i = 0; i < lookupSize; i++) {
context.handleShardLookup().put(in.readVInt(), new SearchShardTarget(in));
}
}
hits = new InternalSearchHit[size];
for (int i = 0; i < hits.length; i++) {
hits[i] = readSearchHit(in, context);
hits[i] = readSearchHit(in);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
writeTo(out, streamContext().streamShardTarget(StreamContext.ShardTargetType.LOOKUP));
}
public void writeTo(StreamOutput out, StreamContext context) throws IOException {
out.writeVLong(totalHits);
out.writeFloat(maxScore);
out.writeVInt(hits.length);
if (hits.length > 0) {
if (context.streamShardTarget() == StreamContext.ShardTargetType.LOOKUP) {
// start from 1, 0 is for null!
int counter = 1;
for (InternalSearchHit hit : hits) {
if (hit.shard() != null) {
Integer handle = context.shardHandleLookup().get(hit.shard());
if (handle == null) {
context.shardHandleLookup().put(hit.shard(), counter++);
}
}
}
out.writeVInt(context.shardHandleLookup().size());
if (!context.shardHandleLookup().isEmpty()) {
for (Map.Entry<SearchShardTarget, Integer> entry : context.shardHandleLookup().entrySet()) {
out.writeVInt(entry.getValue());
entry.getKey().writeTo(out);
}
}
}
for (InternalSearchHit hit : hits) {
hit.writeTo(out, context);
hit.writeTo(out);
}
}
}
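For context, the deleted LOOKUP mode deduplicated shard targets by writing a handle table ahead of the hits; each hit then referenced its target by id. A condensed sketch reconstructed from the removed code above (ids start at 1 because 0 meant null):
// Condensed reconstruction of the removed LOOKUP encoding (sketch, not live code):
IdentityHashMap<SearchShardTarget, Integer> table = new IdentityHashMap<>();
int counter = 1;                                     // 0 is reserved for "no shard target"
for (InternalSearchHit hit : hits) {
    if (hit.shard() != null) {
        Integer handle = table.get(hit.shard());
        if (handle == null) {
            table.put(hit.shard(), counter++);       // one handle per distinct target
        }
    }
}
out.writeVInt(table.size());
for (Map.Entry<SearchShardTarget, Integer> entry : table.entrySet()) {
    out.writeVInt(entry.getValue());                 // the handle
    entry.getKey().writeTo(out);                     // the shared target, written once
}
// Each hit then wrote only its handle. After this change every hit writes its target
// via writeOptionalWriteable, trading a little wire size for context-free serialization.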
View File
@ -21,7 +21,6 @@ package org.elasticsearch.search.query;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -42,9 +41,6 @@ import static java.util.Collections.emptyList;
import static org.elasticsearch.common.lucene.Lucene.readTopDocs;
import static org.elasticsearch.common.lucene.Lucene.writeTopDocs;
/**
*
*/
public class QuerySearchResult extends QuerySearchResultProvider {
private long id;
@ -209,7 +205,6 @@ public class QuerySearchResult extends QuerySearchResultProvider {
public void readFromWithId(long id, StreamInput in) throws IOException {
this.id = id;
// shardTarget = readSearchShardTarget(in);
from = in.readVInt();
size = in.readVInt();
int numSortFieldsPlus1 = in.readVInt();
@ -232,10 +227,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
searchTimedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
profileShardResults = new ProfileShardResult(in);
}
profileShardResults = in.readOptionalWriteable(ProfileShardResult::new);
}
@Override
@ -246,7 +238,6 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
public void writeToNoId(StreamOutput out) throws IOException {
// shardTarget.writeTo(out);
out.writeVInt(from);
out.writeVInt(size);
if (sortValueFormats == null) {
@ -273,14 +264,6 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
out.writeBoolean(searchTimedOut);
out.writeOptionalBoolean(terminatedEarly);
if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
if (profileShardResults == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
profileShardResults.writeTo(out);
}
}
out.writeOptionalWriteable(profileShardResults);
}
}
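The deleted lines show the usual wire-compatibility guard; once every supported wire version understands a field, the guard collapses into a plain optional write. The before/after shape, taken from the hunk above:
// Before: only send the field to nodes that understand it, with a manual null marker.
if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
    if (profileShardResults == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        profileShardResults.writeTo(out);
    }
}
// After: all supported versions understand it, so one optional write suffices.
out.writeOptionalWriteable(profileShardResults);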
View File
@ -27,7 +27,6 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchHits.StreamContext.ShardTargetType;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
@ -261,8 +260,7 @@ public final class CompletionSuggestion extends Suggest.Suggestion<CompletionSug
super.readFrom(in);
this.doc = Lucene.readScoreDoc(in);
if (in.readBoolean()) {
this.hit = InternalSearchHit.readSearchHit(in,
InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
this.hit = InternalSearchHit.readSearchHit(in);
}
int contextSize = in.readInt();
this.contexts = new LinkedHashMap<>(contextSize);
@ -283,7 +281,7 @@ public final class CompletionSuggestion extends Suggest.Suggestion<CompletionSug
Lucene.writeScoreDoc(out, doc);
if (hit != null) {
out.writeBoolean(true);
hit.writeTo(out, InternalSearchHits.streamContext().streamShardTarget(ShardTargetType.STREAM));
hit.writeTo(out);
} else {
out.writeBoolean(false);
}
View File
@ -207,7 +207,8 @@ public class GeoContextMapping extends ContextMapping<GeoQueryContext> {
if (field instanceof StringField) {
spare.resetFromString(field.stringValue());
} else {
spare.resetFromIndexHash(Long.parseLong(field.stringValue()));
// todo return this to .stringValue() once LatLonPoint implements it
spare.resetFromIndexableField(field);
}
geohashes.add(spare.geohash());
}
View File
@ -620,19 +620,12 @@ public class TransportService extends AbstractLifecycleComponent {
registerRequestHandler(reg);
}
protected <Request extends TransportRequest> void registerRequestHandler(RequestHandlerRegistry<Request> reg) {
private <Request extends TransportRequest> void registerRequestHandler(RequestHandlerRegistry<Request> reg) {
synchronized (requestHandlerMutex) {
RequestHandlerRegistry replaced = requestHandlers.get(reg.getAction());
requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap();
if (replaced != null) {
logger.warn("registered two transport handlers for action {}, handlers: {}, {}", reg.getAction(), reg, replaced);
if (requestHandlers.containsKey(reg.getAction())) {
throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered");
}
}
}
public void removeHandler(String action) {
synchronized (requestHandlerMutex) {
requestHandlers = MapBuilder.newMapBuilder(requestHandlers).remove(action).immutableMap();
requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap();
}
}
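Handler registration now fails fast on duplicates instead of replacing the old handler with a warning, and removeHandler is gone, so the registry only ever grows. A minimal self-contained sketch of the idiom, using a hypothetical Registry class rather than the real TransportService:
import java.util.HashMap;
import java.util.Map;
// Hypothetical Registry class illustrating the fail-fast registration idiom.
final class Registry<T> {
    private final Map<String, T> handlers = new HashMap<>();
    synchronized void register(String action, T handler) {
        if (handlers.containsKey(action)) {
            // a duplicate registration is a programming error: fail at startup
            // rather than silently replacing the handler at runtime
            throw new IllegalArgumentException("transport handlers for action " + action + " is already registered");
        }
        handlers.put(action, handler);
    }
}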
View File
@ -37,7 +37,10 @@ import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.fieldstats.FieldStatsAction;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.support.replication.TransportReplicationActionTests;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
@ -46,10 +49,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.tasks.TaskResult;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.tasks.TaskResult;
import org.elasticsearch.tasks.TaskResultsService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.tasks.MockTaskManager;
@ -71,7 +74,6 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Function;
@ -94,7 +96,7 @@ import static org.hamcrest.Matchers.not;
* <p>
* We need at least 2 nodes so we have a master node and a non-master node
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2)
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2, transportClientRatio = 0.0)
public class TasksIT extends ESIntegTestCase {
private Map<Tuple<String, String>, RecordingTaskManagerListener> listeners = new HashMap<>();
@ -326,48 +328,35 @@ public class TasksIT extends ESIntegTestCase {
}
/**
* Very basic "is it plugged in" style test that indexes a document and
* makes sure that you can fetch the status of the process. The goal here is
* to verify that the large moving parts that make fetching task status work
* fit together rather than to verify any particular status results from
* indexing. For that, look at
* {@link org.elasticsearch.action.support.replication.TransportReplicationActionTests}
* . We intentionally don't use the task recording mechanism used in other
* places in this test so we can make sure that the status fetching works
* properly over the wire.
* Very basic "is it plugged in" style test that indexes a document and makes sure that you can fetch the status of the process. The
* goal here is to verify that the large moving parts that make fetching task status work fit together rather than to verify any
* particular status results from indexing. For that, look at {@link TransportReplicationActionTests}. We intentionally don't use the
* task recording mechanism used in other places in this test so we can make sure that the status fetching works properly over the wire.
*/
public void testCanFetchIndexStatus() throws InterruptedException, ExecutionException, IOException {
/*
* We prevent any tasks from unregistering until the test is done so we
* can fetch them. This will gum up the server if we leave it enabled
* but we'll be quick so it'll be OK (TM).
*/
ReentrantLock taskFinishLock = new ReentrantLock();
taskFinishLock.lock();
ListenableActionFuture<?> indexFuture = null;
/* We make sure all indexing tasks wait to start until the letTaskFinish latch is counted down, so we can fetch their status with
* both the get and list APIs. */
CountDownLatch taskRegistered = new CountDownLatch(1);
CountDownLatch letTaskFinish = new CountDownLatch(1);
ListenableActionFuture<IndexResponse> indexFuture = null;
try {
CountDownLatch taskRegistered = new CountDownLatch(1);
for (TransportService transportService : internalCluster().getInstances(TransportService.class)) {
((MockTaskManager) transportService.getTaskManager()).addListener(new MockTaskManagerListener() {
@Override
public void onTaskRegistered(Task task) {
if (task.getAction().startsWith(IndexAction.NAME)) {
taskRegistered.countDown();
logger.debug("Blocking [{}] starting", task);
try {
letTaskFinish.await(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
@Override
public void onTaskUnregistered(Task task) {
/*
* We can't block all tasks here or the task listing task
* would never return.
*/
if (false == task.getAction().startsWith(IndexAction.NAME)) {
return;
}
logger.debug("Blocking {} from being unregistered", task);
taskFinishLock.lock();
taskFinishLock.unlock();
}
@Override
@ -390,16 +379,17 @@ public class TasksIT extends ESIntegTestCase {
assertEquals(task.getType(), fetchedWithGet.getType());
assertEquals(task.getAction(), fetchedWithGet.getAction());
assertEquals(task.getDescription(), fetchedWithGet.getDescription());
// The status won't always be equal - it might change between the list and the get.
assertEquals(task.getStatus(), fetchedWithGet.getStatus());
assertEquals(task.getStartTime(), fetchedWithGet.getStartTime());
assertThat(fetchedWithGet.getRunningTimeNanos(), greaterThanOrEqualTo(task.getRunningTimeNanos()));
assertEquals(task.isCancellable(), fetchedWithGet.isCancellable());
assertEquals(task.getParentTaskId(), fetchedWithGet.getParentTaskId());
}
} finally {
taskFinishLock.unlock();
letTaskFinish.countDown();
if (indexFuture != null) {
indexFuture.get();
IndexResponse indexResponse = indexFuture.get();
assertArrayEquals(ReplicationResponse.EMPTY, indexResponse.getShardInfo().getFailures());
}
}
}
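The ReentrantLock trick is replaced by a two-latch handshake: the listener announces that the indexing task has started, then blocks it until the assertions are done. A self-contained sketch of the pattern (the class and worker thread are illustrative; the timeouts mirror the test):
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
// Illustrative two-latch handshake, mirroring the test above.
public class TwoLatchHandshake {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch taskRegistered = new CountDownLatch(1);
        CountDownLatch letTaskFinish = new CountDownLatch(1);
        new Thread(() -> {                                   // stands in for the indexing task
            taskRegistered.countDown();                      // announce: the task is now running
            try {
                letTaskFinish.await(10, TimeUnit.SECONDS);   // block until the test releases it
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        }).start();
        taskRegistered.await(10, TimeUnit.SECONDS);          // wait until the task is running
        // ... fetch the running task with the get and list APIs and assert on it ...
        letTaskFinish.countDown();                           // release the task to complete
    }
}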
View File
@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
@TestLogging("_root:DEBUG,action.admin.indices.shards:TRACE,cluster.service:TRACE")
@TestLogging("_root:DEBUG,org.elasticsearch.action.admin.indices.shards:TRACE,org.elasticsearch.cluster.service:TRACE")
public class IndicesShardStoreRequestIT extends ESIntegTestCase {
@Override
View File
@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService.PutRequest;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.NodeServicesProvider;
@ -54,12 +55,17 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
Map<String, Object> map = new HashMap<>();
map.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "0");
map.put("index.shard.check_on_startup", "blargh");
request.settings(Settings.builder().put(map).build());
List<Throwable> throwables = putTemplate(request);
assertEquals(throwables.size(), 1);
assertThat(throwables.get(0), instanceOf(InvalidIndexTemplateException.class));
assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
assertThat(throwables.get(0).getMessage(),
containsString("Failed to parse value [0] for setting [index.number_of_shards] must be >= 1"));
assertThat(throwables.get(0).getMessage(),
containsString("unknown value for [index.shard.check_on_startup] " +
"must be one of [true, false, fix, checksum] but was: blargh"));
}
public void testIndexTemplateValidationAccumulatesValidationErrors() {
@ -75,7 +81,8 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
assertThat(throwables.get(0), instanceOf(InvalidIndexTemplateException.class));
assertThat(throwables.get(0).getMessage(), containsString("name must not contain a space"));
assertThat(throwables.get(0).getMessage(), containsString("template must not start with '_'"));
assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
assertThat(throwables.get(0).getMessage(),
containsString("Failed to parse value [0] for setting [index.number_of_shards] must be >= 1"));
}
public void testIndexTemplateWithAliasNameEqualToTemplatePattern() {
@ -160,7 +167,9 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
null,
null,
null, null, null);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, new AliasValidator(Settings.EMPTY), null, null);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService,
new AliasValidator(Settings.EMPTY), null, null,
new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS));
final List<Throwable> throwables = new ArrayList<>();
service.putTemplate(request, new MetaDataIndexTemplateService.PutListener() {
@ -192,7 +201,8 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
null,
null);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(
Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService, nodeServicesProvider);
Settings.EMPTY, clusterService, createIndexService, new AliasValidator(Settings.EMPTY), indicesService, nodeServicesProvider,
new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS));
final List<Throwable> throwables = new ArrayList<>();
final CountDownLatch latch = new CountDownLatch(1);
View File
@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.script.Script;
@ -39,6 +40,7 @@ import java.util.Map;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
@ -125,49 +127,34 @@ public class BulkRequestTests extends ESTestCase {
public void testSimpleBulk6() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk6.json");
BulkRequest bulkRequest = new BulkRequest();
try {
bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
fail("should have thrown an exception about the wrong format of line 1");
} catch (IllegalArgumentException e) {
assertThat("message contains error about the wrong format of line 1: " + e.getMessage(),
e.getMessage().contains("Malformed action/metadata line [1], expected a simple value for field [_source] but found [START_OBJECT]"), equalTo(true));
}
ParsingException exc = expectThrows(ParsingException.class,
() -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
assertThat(exc.getMessage(), containsString("Unknown key for a VALUE_STRING in [hello]"));
}
public void testSimpleBulk7() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk7.json");
BulkRequest bulkRequest = new BulkRequest();
try {
bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
fail("should have thrown an exception about the wrong format of line 5");
} catch (IllegalArgumentException e) {
assertThat("message contains error about the wrong format of line 5: " + e.getMessage(),
e.getMessage().contains("Malformed action/metadata line [5], expected a simple value for field [_unkown] but found [START_ARRAY]"), equalTo(true));
}
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
() -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
assertThat(exc.getMessage(),
containsString("Malformed action/metadata line [5], expected a simple value for field [_unkown] but found [START_ARRAY]"));
}
public void testSimpleBulk8() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk8.json");
BulkRequest bulkRequest = new BulkRequest();
try {
bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
fail("should have thrown an exception about the unknown parameter _foo");
} catch (IllegalArgumentException e) {
assertThat("message contains error about the unknown parameter _foo: " + e.getMessage(),
e.getMessage().contains("Action/metadata line [3] contains an unknown parameter [_foo]"), equalTo(true));
}
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
() -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
assertThat(exc.getMessage(), containsString("Action/metadata line [3] contains an unknown parameter [_foo]"));
}
public void testSimpleBulk9() throws Exception {
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk9.json");
BulkRequest bulkRequest = new BulkRequest();
try {
bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
fail("should have thrown an exception about the wrong format of line 3");
} catch (IllegalArgumentException e) {
assertThat("message contains error about the wrong format of line 3: " + e.getMessage(),
e.getMessage().contains("Malformed action/metadata line [3], expected START_OBJECT or END_OBJECT but found [START_ARRAY]"), equalTo(true));
}
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class,
() -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null));
assertThat(exc.getMessage(), containsString("Malformed action/metadata line [3], expected START_OBJECT or END_OBJECT but found [START_ARRAY]"));
}
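The try/fail/catch boilerplate is replaced by expectThrows, which hands the caught exception back for further assertions. A simplified sketch of what the helper does; the real one lives in LuceneTestCase and this is not its exact code:
// Simplified sketch of expectThrows (the real helper is LuceneTestCase's):
interface ThrowingRunnable {
    void run() throws Throwable;
}
static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
    try {
        runnable.run();
    } catch (Throwable e) {
        if (expectedType.isInstance(e)) {
            return expectedType.cast(e);                 // hand the exception back for assertions
        }
        throw new AssertionError("unexpected exception type: " + e.getClass(), e);
    }
    throw new AssertionError("expected " + expectedType.getSimpleName() + " but nothing was thrown");
}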
public void testSimpleBulk10() throws Exception {
View File
@ -295,7 +295,8 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
for (int i = 0; i < numDocs; i++) {
builder.add(
client().prepareUpdate()
.setIndex("test").setType("type1").setId(Integer.toString(i)).setFields("counter")
.setIndex("test").setType("type1").setId(Integer.toString(i))
.setFields("counter")
.setScript(script)
.setUpsert(jsonBuilder().startObject().field("counter", 1).endObject()));
}
@ -405,8 +406,7 @@ public class BulkWithUpdatesIT extends ESIntegTestCase {
assertThat(response.getItems()[i].getType(), equalTo("type1"));
assertThat(response.getItems()[i].getOpType(), equalTo("update"));
for (int j = 0; j < 5; j++) {
GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute()
.actionGet();
GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).get();
assertThat(getResponse.isExists(), equalTo(false));
}
}
View File
@ -52,7 +52,7 @@ public class MultiGetShardRequestTests extends ESTestCase {
for (int j = 0; j < fields.length; j++) {
fields[j] = randomAsciiOfLength(randomIntBetween(1, 10));
}
item.fields(fields);
item.storedFields(fields);
}
if (randomBoolean()) {
item.version(randomIntBetween(1, Integer.MAX_VALUE));
@ -84,7 +84,7 @@ public class MultiGetShardRequestTests extends ESTestCase {
assertThat(item2.index(), equalTo(item.index()));
assertThat(item2.type(), equalTo(item.type()));
assertThat(item2.id(), equalTo(item.id()));
assertThat(item2.fields(), equalTo(item.fields()));
assertThat(item2.storedFields(), equalTo(item.storedFields()));
assertThat(item2.version(), equalTo(item.version()));
assertThat(item2.versionType(), equalTo(item.versionType()));
assertThat(item2.fetchSourceContext(), equalTo(item.fetchSourceContext()));
View File
@ -131,6 +131,8 @@ public class TransportReplicationActionTests extends ESTestCase {
private TransportService transportService;
private CapturingTransport transport;
private Action action;
private ShardStateAction shardStateAction;
/**
* TransportReplicationAction needs an instance of IndexShard to count operations.
* indexShards is reset to null before each test and will be initialized upon request in the tests.
@ -150,7 +152,8 @@ public class TransportReplicationActionTests extends ESTestCase {
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool);
shardStateAction = new ShardStateAction(Settings.EMPTY, clusterService, transportService, null, null, threadPool);
action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, shardStateAction, threadPool);
}
@After
@ -707,7 +710,8 @@ public class TransportReplicationActionTests extends ESTestCase {
final ShardRouting replicaRouting = state.getRoutingTable().shardRoutingTable(shardId).replicaShards().get(0);
boolean throwException = randomBoolean();
final ReplicationTask task = maybeTask();
Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) {
Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction,
threadPool) {
@Override
protected ReplicaResult shardOperationOnReplica(Request request) {
assertIndexShardCounter(1);
@ -826,7 +830,8 @@ public class TransportReplicationActionTests extends ESTestCase {
setState(clusterService, state);
AtomicBoolean throwException = new AtomicBoolean(true);
final ReplicationTask task = maybeTask();
Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool) {
Action action = new Action(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, shardStateAction,
threadPool) {
@Override
protected ReplicaResult shardOperationOnReplica(Request request) {
assertPhase(task, "replica");
@ -940,9 +945,10 @@ public class TransportReplicationActionTests extends ESTestCase {
Action(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService,
ShardStateAction shardStateAction,
ThreadPool threadPool) {
super(settings, actionName, transportService, clusterService, mockIndicesService(clusterService), threadPool,
new ShardStateAction(settings, clusterService, transportService, null, null, threadPool),
shardStateAction,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
Request::new, Request::new, ThreadPool.Names.SAME);
}
View File
@ -48,7 +48,7 @@ public class UpdateRequestTests extends ESTestCase {
public void testUpdateRequest() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type", "1");
// simple script
request.source(XContentFactory.jsonBuilder().startObject()
request.fromXContent(XContentFactory.jsonBuilder().startObject()
.field("script", "script1")
.endObject());
Script script = request.script();
@ -60,7 +60,7 @@ public class UpdateRequestTests extends ESTestCase {
assertThat(params, nullValue());
// simple verbose script
request.source(XContentFactory.jsonBuilder().startObject()
request.fromXContent(XContentFactory.jsonBuilder().startObject()
.startObject("script").field("inline", "script1").endObject()
.endObject());
script = request.script();
@ -73,8 +73,13 @@ public class UpdateRequestTests extends ESTestCase {
// script with params
request = new UpdateRequest("test", "type", "1");
request.source(XContentFactory.jsonBuilder().startObject().startObject("script").field("inline", "script1").startObject("params")
.field("param1", "value1").endObject().endObject().endObject());
request.fromXContent(XContentFactory.jsonBuilder().startObject()
.startObject("script")
.field("inline", "script1")
.startObject("params")
.field("param1", "value1")
.endObject()
.endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
@ -86,8 +91,9 @@ public class UpdateRequestTests extends ESTestCase {
assertThat(params.get("param1").toString(), equalTo("value1"));
request = new UpdateRequest("test", "type", "1");
request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
.endObject().field("inline", "script1").endObject().endObject());
request.fromXContent(XContentFactory.jsonBuilder().startObject().startObject("script")
.startObject("params").field("param1", "value1").endObject()
.field("inline", "script1").endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
@ -100,9 +106,19 @@ public class UpdateRequestTests extends ESTestCase {
// script with params and upsert
request = new UpdateRequest("test", "type", "1");
request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
.endObject().field("inline", "script1").endObject().startObject("upsert").field("field1", "value1").startObject("compound")
.field("field2", "value2").endObject().endObject().endObject());
request.fromXContent(XContentFactory.jsonBuilder().startObject()
.startObject("script")
.startObject("params")
.field("param1", "value1")
.endObject()
.field("inline", "script1")
.endObject()
.startObject("upsert")
.field("field1", "value1")
.startObject("compound")
.field("field2", "value2")
.endObject()
.endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
@ -117,9 +133,19 @@ public class UpdateRequestTests extends ESTestCase {
assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
request = new UpdateRequest("test", "type", "1");
request.source(XContentFactory.jsonBuilder().startObject().startObject("upsert").field("field1", "value1").startObject("compound")
.field("field2", "value2").endObject().endObject().startObject("script").startObject("params").field("param1", "value1")
.endObject().field("inline", "script1").endObject().endObject());
request.fromXContent(XContentFactory.jsonBuilder().startObject()
.startObject("upsert")
.field("field1", "value1")
.startObject("compound")
.field("field2", "value2")
.endObject()
.endObject()
.startObject("script")
.startObject("params")
.field("param1", "value1")
.endObject()
.field("inline", "script1")
.endObject().endObject());
script = request.script();
assertThat(script, notNullValue());
assertThat(script.getScript(), equalTo("script1"));
@ -135,8 +161,9 @@ public class UpdateRequestTests extends ESTestCase {
// script with doc
request = new UpdateRequest("test", "type", "1");
request.source(XContentFactory.jsonBuilder().startObject().startObject("doc").field("field1", "value1").startObject("compound")
.field("field2", "value2").endObject().endObject().endObject());
request.fromXContent(XContentFactory.jsonBuilder().startObject()
.startObject("doc").field("field1", "value1").startObject("compound")
.field("field2", "value2").endObject().endObject().endObject());
Map<String, Object> doc = request.doc().sourceAsMap();
assertThat(doc.get("field1").toString(), equalTo("value1"));
assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2"));
@ -187,7 +214,7 @@ public class UpdateRequestTests extends ESTestCase {
public void testInvalidBodyThrowsParseException() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type", "1");
try {
request.source(new byte[] { (byte) '"' });
request.fromXContent(new byte[] { (byte) '"' });
fail("Should have thrown a ElasticsearchParseException");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("Failed to derive xcontent"));
@ -197,13 +224,56 @@ public class UpdateRequestTests extends ESTestCase {
// Related to issue 15338
public void testFieldsParsing() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type1", "1")
.source(new BytesArray("{\"doc\": {\"field1\": \"value1\"}, \"fields\": \"_source\"}"));
.fromXContent(new BytesArray("{\"doc\": {\"field1\": \"value1\"}, \"fields\": \"_source\"}"));
assertThat(request.doc().sourceAsMap().get("field1").toString(), equalTo("value1"));
assertThat(request.fields(), arrayContaining("_source"));
request = new UpdateRequest("test", "type2", "2")
.source(new BytesArray("{\"doc\": {\"field2\": \"value2\"}, \"fields\": [\"field1\", \"field2\"]}"));
.fromXContent(new BytesArray("{\"doc\": {\"field2\": \"value2\"}, \"fields\": [\"field1\", \"field2\"]}"));
assertThat(request.doc().sourceAsMap().get("field2").toString(), equalTo("value2"));
assertThat(request.fields(), arrayContaining("field1", "field2"));
}
public void testFetchSourceParsing() throws Exception {
UpdateRequest request = new UpdateRequest("test", "type1", "1");
request.fromXContent(
XContentFactory.jsonBuilder().startObject().field("_source", true).endObject()
);
assertThat(request.fetchSource(), notNullValue());
assertThat(request.fetchSource().includes().length, equalTo(0));
assertThat(request.fetchSource().excludes().length, equalTo(0));
assertThat(request.fetchSource().fetchSource(), equalTo(true));
request.fromXContent(
XContentFactory.jsonBuilder().startObject().field("_source", false).endObject()
);
assertThat(request.fetchSource(), notNullValue());
assertThat(request.fetchSource().includes().length, equalTo(0));
assertThat(request.fetchSource().excludes().length, equalTo(0));
assertThat(request.fetchSource().fetchSource(), equalTo(false));
request.fromXContent(
XContentFactory.jsonBuilder().startObject().field("_source", "path.inner.*").endObject()
);
assertThat(request.fetchSource(), notNullValue());
assertThat(request.fetchSource().fetchSource(), equalTo(true));
assertThat(request.fetchSource().includes().length, equalTo(1));
assertThat(request.fetchSource().excludes().length, equalTo(0));
assertThat(request.fetchSource().includes()[0], equalTo("path.inner.*"));
request.fromXContent(
XContentFactory.jsonBuilder().startObject()
.startObject("_source")
.field("includes", "path.inner.*")
.field("excludes", "another.inner.*")
.endObject()
.endObject()
);
assertThat(request.fetchSource(), notNullValue());
assertThat(request.fetchSource().fetchSource(), equalTo(true));
assertThat(request.fetchSource().includes().length, equalTo(1));
assertThat(request.fetchSource().excludes().length, equalTo(1));
assertThat(request.fetchSource().includes()[0], equalTo("path.inner.*"));
assertThat(request.fetchSource().excludes()[0], equalTo("another.inner.*"));
}
}
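For quick reference, the "_source" shapes accepted by the new fetchSource parsing, exactly as exercised by testFetchSourceParsing above:
// "_source" forms accepted in an update request body:
//   {"_source": true}                            -> fetch the whole source back
//   {"_source": false}                           -> fetch nothing back
//   {"_source": "path.inner.*"}                  -> a single include pattern
//   {"_source": {"includes": "path.inner.*",
//                "excludes": "another.inner.*"}} -> include and exclude patterns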
View File
@ -43,6 +43,9 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--daemonize");
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "-p", "/tmp/pid");
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--pidfile", "/tmp/pid");
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "-q");
runTestThatVersionIsMutuallyExclusiveToOtherOptions("--version", "--quiet");
runTestThatVersionIsReturned("-V");
runTestThatVersionIsReturned("--version");
}
@ -66,7 +69,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
}
private void runTestVersion(int expectedStatus, Consumer<String> outputConsumer, String... args) throws Exception {
runTest(expectedStatus, false, outputConsumer, (foreground, pidFile, esSettings) -> {}, args);
runTest(expectedStatus, false, outputConsumer, (foreground, pidFile, quiet, esSettings) -> {}, args);
}
public void testPositionalArgs() throws Exception {
@ -74,21 +77,21 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")),
(foreground, pidFile, esSettings) -> {},
(foreground, pidFile, quiet, esSettings) -> {},
"foo"
);
runTest(
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Positional arguments not allowed, found [foo, bar]")),
(foreground, pidFile, esSettings) -> {},
(foreground, pidFile, quiet, esSettings) -> {},
"foo", "bar"
);
runTest(
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")),
(foreground, pidFile, esSettings) -> {},
(foreground, pidFile, quiet, esSettings) -> {},
"-E", "foo=bar", "foo", "-E", "baz=qux"
);
}
@ -109,7 +112,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
expectedStatus,
expectedInit,
outputConsumer,
(foreground, pidFile, esSettings) -> assertThat(pidFile.toString(), equalTo(expectedPidFile.toString())),
(foreground, pidFile, quiet, esSettings) -> assertThat(pidFile.toString(), equalTo(expectedPidFile.toString())),
args);
}
@ -124,7 +127,22 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.OK,
true,
output -> {},
(foreground, pidFile, esSettings) -> assertThat(foreground, equalTo(!expectedDaemonize)),
(foreground, pidFile, quiet, esSettings) -> assertThat(foreground, equalTo(!expectedDaemonize)),
args);
}
public void testThatParsingQuietOptionWorks() throws Exception {
runQuietTest(true, "-q");
runQuietTest(true, "--quiet");
runQuietTest(false);
}
private void runQuietTest(final boolean expectedQuiet, final String... args) throws Exception {
runTest(
ExitCodes.OK,
true,
output -> {},
(foreground, pidFile, quiet, esSettings) -> assertThat(quiet, equalTo(expectedQuiet)),
args);
}
@ -133,7 +151,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.OK,
true,
output -> {},
(foreground, pidFile, esSettings) -> {
(foreground, pidFile, quiet, esSettings) -> {
assertThat(esSettings.size(), equalTo(2));
assertThat(esSettings, hasEntry("foo", "bar"));
assertThat(esSettings, hasEntry("baz", "qux"));
@ -147,7 +165,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("Setting [foo] must not be empty")),
(foreground, pidFile, esSettings) -> {},
(foreground, pidFile, quiet, esSettings) -> {},
"-E", "foo="
);
}
@ -157,7 +175,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.USAGE,
false,
output -> assertThat(output, containsString("network.host is not a recognized option")),
(foreground, pidFile, esSettings) -> {},
(foreground, pidFile, quiet, esSettings) -> {},
"--network.host");
}
View File
@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
// this test sometimes fails in recovery when the recovery is reset, increasing the logging level to help debug
@TestLogging("indices.recovery:DEBUG")
@TestLogging("org.elasticsearch.indices.recovery:DEBUG")
public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase {
/**
View File
@ -31,7 +31,6 @@ import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@ -65,7 +64,7 @@ import static org.hamcrest.Matchers.nullValue;
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
@ESIntegTestCase.SuppressLocalMode
@TestLogging("_root:DEBUG,cluster.service:TRACE,discovery.zen:TRACE")
@TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE")
public class MinimumMasterNodesIT extends ESIntegTestCase {
@Override
@ -364,7 +363,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
public void testCanNotPublishWithoutMinMastNodes() throws Exception {
Settings settings = Settings.builder()
.put("discovery.type", "zen")
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1h") // disable it
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up
@ -379,7 +377,6 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
new TwoPartitions(Collections.singleton(master), otherNodes),
new NetworkDelay(TimeValue.timeValueMinutes(1)));
internalCluster().setDisruptionScheme(partition);
partition.startDisrupting();
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Exception> failure = new AtomicReference<>();
@ -393,6 +390,8 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
logger.debug("--> starting the disruption, preventing cluster state publishing");
partition.startDisrupting();
MetaData.Builder metaData = MetaData.builder(currentState.metaData()).persistentSettings(
Settings.builder().put(currentState.metaData().persistentSettings()).put("_SHOULD_NOT_BE_THERE_", true).build()
);
View File
@ -363,7 +363,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
}
@TestLogging("_root:debug,action.admin.cluster.tasks:trace")
@TestLogging("_root:debug,org.elasticsearch.action.admin.cluster.tasks:trace")
public void testPendingUpdateTask() throws Exception {
Settings settings = Settings.builder()
.put("discovery.type", "local")
View File
@ -677,18 +677,30 @@ public class ClusterServiceTests extends ESTestCase {
latch.await();
}
@TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
@TestLogging("org.elasticsearch.cluster.service:TRACE") // To ensure that we log cluster state events on TRACE level
public void testClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG,
"*processing [test1]: took [1s] no change in cluster_state"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE,
"*failed to execute cluster state update in [2s]*"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG,
"*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test1",
"org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
Level.DEBUG,
"*processing [test1]: took [1s] no change in cluster_state"));
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
"org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
Level.TRACE,
"*failed to execute cluster state update in [2s]*"));
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3",
"org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
Level.DEBUG,
"*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
Logger rootLogger = LogManager.getRootLogger();
Loggers.addAppender(rootLogger, mockAppender);
Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
Loggers.addAppender(clusterLogger, mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(4);
clusterService.currentTimeOverride = System.nanoTime();
@ -743,7 +755,7 @@ public class ClusterServiceTests extends ESTestCase {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
@Override
@ -763,25 +775,41 @@ public class ClusterServiceTests extends ESTestCase {
});
latch.await();
} finally {
Loggers.removeAppender(rootLogger, mockAppender);
Loggers.removeAppender(clusterLogger, mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
@TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
@TestLogging("org.elasticsearch.cluster.service:WARN") // To ensure that we log cluster state events on WARN level
public void testLongClusterStateUpdateLogging() throws Exception {
MockLogAppender mockAppender = new MockLogAppender();
mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low",
"cluster.service", Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN,
"*cluster state update task [test2] took [32s] above the warn threshold of *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN,
"*cluster state update task [test3] took [33s] above the warn threshold of *"));
mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN,
"*cluster state update task [test4] took [34s] above the warn threshold of *"));
mockAppender.addExpectation(
new MockLogAppender.UnseenEventExpectation(
"test1 shouldn't see because setting is too low",
"org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
Level.WARN,
"*cluster state update task [test1] took [*] above the warn threshold of *"));
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test2",
"org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
Level.WARN,
"*cluster state update task [test2] took [32s] above the warn threshold of *"));
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test3",
"org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
Level.WARN,
"*cluster state update task [test3] took [33s] above the warn threshold of *"));
mockAppender.addExpectation(
new MockLogAppender.SeenEventExpectation(
"test4",
"org.elasticsearch.cluster.service.ClusterServiceTests$TimedClusterService",
Level.WARN,
"*cluster state update task [test4] took [34s] above the warn threshold of *"));
Logger rootLogger = LogManager.getRootLogger();
Loggers.addAppender(rootLogger, mockAppender);
Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
Loggers.addAppender(clusterLogger, mockAppender);
try {
final CountDownLatch latch = new CountDownLatch(5);
final CountDownLatch processedFirstTask = new CountDownLatch(1);
@ -857,7 +885,7 @@ public class ClusterServiceTests extends ESTestCase {
fail();
}
});
// Additional update task to make sure all previous logging made it to the logger
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
@Override
@ -877,7 +905,7 @@ public class ClusterServiceTests extends ESTestCase {
});
latch.await();
} finally {
Loggers.removeAppender(rootLogger, mockAppender);
Loggers.removeAppender(clusterLogger, mockAppender);
}
mockAppender.assertAllExpectationsMatched();
}
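Note that the mock appender is now attached to the concrete org.elasticsearch.cluster.service logger rather than the root logger, so only cluster-service events can match or pollute the expectations. The try/finally scoping, as used in both tests above:
Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
Loggers.addAppender(clusterLogger, mockAppender);
try {
    // submit the cluster state update tasks under test
} finally {
    Loggers.removeAppender(clusterLogger, mockAppender);  // always detach, even when assertions fail
}
mockAppender.assertAllExpectationsMatched();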