diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b8f50846c0e..09f0062352d 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -45,18 +45,6 @@ - - - - - - - - - - - - @@ -151,13 +139,6 @@ - - - - - - - diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index f105b15d665..0caae77d7de 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -170,7 +170,8 @@ public class TransportClusterHealthAction } final ClusterState state = clusterService.state(); - final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext()); + final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, + null, logger, threadPool.getThreadContext()); if (request.timeout().millis() == 0) { listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0)); return; @@ -209,8 +210,8 @@ public class TransportClusterHealthAction return readyCounter == waitFor; } - private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor, - boolean timedOut) { + private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, + final int waitFor, boolean timedOut) { ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(), gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime()); int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver); @@ -325,7 +326,7 @@ public class TransportClusterHealthAction // one of the specified indices is not there - treat it as RED. 
ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), - pendingTaskTimeInQueue); + pendingTaskTimeInQueue); response.setStatus(ClusterHealthStatus.RED); return response; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index fbee68ab3fc..cdef2a03b53 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -41,8 +41,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { ClusterStatsNodeResponse() { } - public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, NodeInfo nodeInfo, - NodeStats nodeStats, ShardStats[] shardsStats) { + public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, + NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) { super(node); this.nodeInfo = nodeInfo; this.nodeStats = nodeStats; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 2c24d285221..01ef94c428a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -171,7 +171,8 @@ public final class SearchPhaseController { final TopDocsAndMaxScore td = queryResult.consumeTopDocs(); assert td != null; topDocsStats.add(td); - if (td.topDocs.scoreDocs.length > 0) { // make sure we set the shard index before we add it - the consumer didn't do that yet + // make sure we set the shard index before we add it - the consumer didn't do that yet + if (td.topDocs.scoreDocs.length > 0) { setShardIndex(td.topDocs, queryResult.getShardIndex()); topDocs.add(td.topDocs); } @@ -308,7 +309,8 @@ public final class SearchPhaseController { * completion suggestion ordered by suggestion name */ public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, - Collection fetchResults, IntFunction resultsLookup) { + Collection fetchResults, + IntFunction resultsLookup) { if (reducedQueryPhase.isEmptyResult) { return InternalSearchResponse.empty(); } @@ -416,7 +418,8 @@ public final class SearchPhaseController { * Reduces the given query results and consumes all aggregations and profile results. 
* @param queryResults a list of non-null query shard results */ - public ReducedQueryPhase reducedQueryPhase(Collection queryResults, boolean isScrollRequest, boolean trackTotalHits) { + public ReducedQueryPhase reducedQueryPhase(Collection queryResults, + boolean isScrollRequest, boolean trackTotalHits) { return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(trackTotalHits), 0, isScrollRequest); } @@ -441,7 +444,8 @@ public final class SearchPhaseController { Boolean terminatedEarly = null; if (queryResults.isEmpty()) { // early terminate we have nothing to reduce return new ReducedQueryPhase(topDocsStats.totalHits, topDocsStats.fetchHits, topDocsStats.maxScore, - timedOut, terminatedEarly, null, null, null, EMPTY_DOCS, null, null, numReducePhases, false, 0, 0, true); + timedOut, terminatedEarly, null, null, null, EMPTY_DOCS, null, + null, numReducePhases, false, 0, 0, true); } final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult(); final boolean hasSuggest = firstResult.suggest() != null; @@ -671,7 +675,8 @@ public final class SearchPhaseController { } if (hasTopDocs) { TopDocs reducedTopDocs = controller.mergeTopDocs(Arrays.asList(topDocsBuffer), - querySearchResult.from() + querySearchResult.size() // we have to merge here in the same way we collect on a shard + // we have to merge here in the same way we collect on a shard + querySearchResult.from() + querySearchResult.size() , 0); Arrays.fill(topDocsBuffer, null); topDocsBuffer[0] = reducedTopDocs; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index fda393e375c..938489d6cbe 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -38,7 +38,8 @@ import java.util.Iterator; import java.util.List; import java.util.Set; -public class MultiTermVectorsRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { +public class MultiTermVectorsRequest extends ActionRequest + implements Iterable, CompositeIndicesRequest, RealtimeRequest { String preference; List requests = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 4cd02caf91c..dc849ca3d13 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -616,18 +616,21 @@ public class TermVectorsRequest extends SingleShardRequest i termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map())); } else if (FILTER.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.filterSettings(readFilterSettings(parser)); - } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { // the following is important for multi request parsing. + } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + // the following is important for multi request parsing. 
termVectorsRequest.index = parser.text(); } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.type = parser.text(); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.doc != null) { - throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!"); + throw new ElasticsearchParseException("failed to parse term vectors request. " + + "either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.id = parser.text(); } else if (DOC.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.id != null) { - throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!"); + throw new ElasticsearchParseException("failed to parse term vectors request. " + + "either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser)); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { @@ -653,7 +656,8 @@ public class TermVectorsRequest extends SingleShardRequest i if (e.getValue() instanceof String) { mapStrStr.put(e.getKey(), (String) e.getValue()); } else { - throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass()); + throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", + e.getKey(), e.getValue().getClass()); } } return mapStrStr; @@ -682,7 +686,8 @@ public class TermVectorsRequest extends SingleShardRequest i } else if (currentFieldName.equals("max_word_length")) { settings.maxWordLength = parser.intValue(); } else { - throw new ElasticsearchParseException("failed to parse term vectors request. the field [{}] is not valid for filter parameter for term vector request", currentFieldName); + throw new ElasticsearchParseException("failed to parse term vectors request. 
" + + "the field [{}] is not valid for filter parameter for term vector request", currentFieldName); } } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 01a9812516b..9159a07e83c 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -197,7 +197,8 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj return builder; } - private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fields theFields, Iterator fieldIter) throws IOException { + private void buildField(XContentBuilder builder, final CharsRefBuilder spare, + Fields theFields, Iterator fieldIter) throws IOException { String fieldName = fieldIter.next(); builder.startObject(fieldName); Terms curTerms = theFields.terms(fieldName); @@ -213,7 +214,8 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj builder.endObject(); } - private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms, TermsEnum termIter, BoostAttribute boostAtt) throws IOException { + private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms, + TermsEnum termIter, BoostAttribute boostAtt) throws IOException { // start term, optimized writing BytesRef term = termIter.next(); spare.copyUTF8Bytes(term); @@ -235,7 +237,8 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj // boolean that says if these values actually were requested. // However, we can assume that they were not if the statistic values are // <= 0. 
- assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1))); + assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || + ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1))); int docFreq = termIter.docFreq(); if (docFreq > 0) { builder.field(FieldStrings.DOC_FREQ, docFreq); @@ -349,12 +352,13 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj this.exists = exists; } - public void setFields(Fields termVectorsByField, Set selectedFields, EnumSet flags, Fields topLevelFields) throws IOException { + public void setFields(Fields termVectorsByField, Set selectedFields, + EnumSet flags, Fields topLevelFields) throws IOException { setFields(termVectorsByField, selectedFields, flags, topLevelFields, null, null); } - public void setFields(Fields termVectorsByField, Set selectedFields, EnumSet flags, Fields topLevelFields, @Nullable AggregatedDfs dfs, - TermVectorsFilter termVectorsFilter) throws IOException { + public void setFields(Fields termVectorsByField, Set selectedFields, EnumSet flags, + Fields topLevelFields, @Nullable AggregatedDfs dfs, TermVectorsFilter termVectorsFilter) throws IOException { TermVectorsWriter tvw = new TermVectorsWriter(this); if (termVectorsByField != null) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java index 9aca80b533f..d38a980c589 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java @@ -141,10 +141,12 @@ final class TermVectorsWriter { numFieldsWritten++; } response.setTermVectorsField(output); - response.setHeader(writeHeader(numFieldsWritten, flags.contains(Flag.TermStatistics), flags.contains(Flag.FieldStatistics), hasScores)); + response.setHeader(writeHeader(numFieldsWritten, flags.contains(Flag.TermStatistics), + flags.contains(Flag.FieldStatistics), hasScores)); } - private BytesReference writeHeader(int numFieldsWritten, boolean getTermStatistics, boolean getFieldStatistics, boolean scores) throws IOException { + private BytesReference writeHeader(int numFieldsWritten, boolean getTermStatistics, + boolean getFieldStatistics, boolean scores) throws IOException { // now, write the information about offset of the terms in the // termVectors field BytesStreamOutput header = new BytesStreamOutput(); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java index 029ff76a14f..0b86ce32f3f 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -64,16 +64,20 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction shardRequests = new HashMap<>(); for (int i = 0; i < request.requests.size(); i++) { TermVectorsRequest termVectorsRequest = request.requests.get(i); - termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.routing(), termVectorsRequest.index())); + termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.routing(), + termVectorsRequest.index())); if 
(!clusterState.metaData().hasConcreteIndex(termVectorsRequest.index())) { responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(termVectorsRequest.index(), termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index())))); continue; } String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName(); - if (termVectorsRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) { - responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(), - new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]")))); + if (termVectorsRequest.routing() == null && + clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) { + responses.set(i, new MultiTermVectorsItemResponse(null, + new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(), + new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]")))); continue; } ShardId shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex, diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index 6796d23eaad..e8d6c1bcb4f 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -36,7 +36,8 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -public class TransportShardMultiTermsVectorAction extends TransportSingleShardAction { +public class TransportShardMultiTermsVectorAction extends + TransportSingleShardAction { private final IndicesService indicesService; @@ -86,7 +87,8 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc if (TransportActions.isShardNotAvailableException(e)) { throw e; } else { - logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), e); + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", + shardId, termVectorsRequest.type(), termVectorsRequest.id()), e); response.add(request.locations.get(i), new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), e)); } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index 49a78275669..dcd0fa1b911 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -85,7 +85,8 @@ public class TransportTermVectorsAction extends TransportSingleShardAction listener) throws 
IOException { + protected void asyncShardOperation(TermVectorsRequest request, ShardId shardId, + ActionListener listener) throws IOException { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.getShard(shardId.id()); if (request.realtime()) { // it's a realtime request which is not subject to refresh cycles diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 9cf85c1c773..8561d106bdf 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -75,7 +75,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio UpdateHelper updateHelper, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, AutoCreateIndex autoCreateIndex, NodeClient client) { - super(UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpdateRequest::new); + super(UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, UpdateRequest::new); this.updateHelper = updateHelper; this.indicesService = indicesService; this.autoCreateIndex = autoCreateIndex; @@ -114,7 +115,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio protected void doExecute(Task task, final UpdateRequest request, final ActionListener listener) { // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { - client.admin().indices().create(new CreateIndexRequest().index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener() { + client.admin().indices().create(new CreateIndexRequest().index(request.index()).cause("auto(update api)") + .masterNodeTimeout(request.timeout()), new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { innerExecute(task, request, listener); @@ -177,11 +179,14 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio final BytesReference upsertSourceBytes = upsertRequest.source(); client.bulk(toSingleItemBulkRequest(upsertRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); if (request.fetchSource() != null && request.fetchSource().fetchSource()) { Tuple> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), + sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); } 
else { update.setGetResult(null); } @@ -197,8 +202,11 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio final BytesReference indexSourceBytes = indexRequest.source(); client.bulk(toSingleItemBulkRequest(indexRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), + result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) @@ -208,8 +216,11 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio DeleteRequest deleteRequest = result.action(); client.bulk(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), + result.updatedSourceAsMap(), result.updateSourceContentType(), null)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 6fb0580bfe3..a4fdce17d09 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -149,11 +149,13 @@ public class UpdateRequest extends InstanceShardOperationRequest } else { if (version != Versions.MATCH_ANY && retryOnConflict > 0) { - validationException = addValidationError("can't provide both retry_on_conflict and a specific version", validationException); + validationException = addValidationError("can't provide both retry_on_conflict and a specific version", + validationException); } if (!versionType.validateVersionForWrites(version)) { - validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); + validationException = 
addValidationError("illegal version value [" + version + "] for version type [" + + versionType.name() + "]", validationException); } } @@ -618,8 +620,8 @@ public class UpdateRequest extends InstanceShardOperationRequest } /** - * Sets the index request to be used if the document does not exists. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException} - * is thrown. + * Sets the index request to be used if the document does not exists. Otherwise, a + * {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown. */ public UpdateRequest upsert(IndexRequest upsertRequest) { this.upsertRequest = upsertRequest; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 9d1fd4a677f..181dba6a107 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -243,8 +243,8 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder creating a snapshot is allowed when the cluster is read only"); try { setClusterReadOnly(true); - assertThat( - client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setWaitForCompletion(true).get().status(), - equalTo(RestStatus.OK) - ); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { setClusterReadOnly(false); } @@ -107,11 +105,8 @@ public class SnapshotBlocksIT extends ESIntegTestCase { logger.info("--> creating a snapshot is not blocked when an index is read only"); try { enableIndexBlock(INDEX_NAME, SETTING_READ_ONLY); - assertThat( - client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") - .setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), - equalTo(RestStatus.OK) - ); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { disableIndexBlock(INDEX_NAME, SETTING_READ_ONLY); } @@ -119,16 +114,11 @@ public class SnapshotBlocksIT extends ESIntegTestCase { logger.info("--> creating a snapshot is blocked when an index is blocked for reads"); try { enableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ); - assertBlocked( - client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(COMMON_INDEX_NAME_MASK), - IndexMetaData.INDEX_READ_BLOCK - ); + assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + .setIndices(COMMON_INDEX_NAME_MASK), IndexMetaData.INDEX_READ_BLOCK); logger.info("--> creating a snapshot is not blocked when an read-blocked index is not part of the snapshot"); - assertThat( - client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2"). 
- setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), - equalTo(RestStatus.OK) - ); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + .setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { disableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ); } @@ -151,10 +141,8 @@ public class SnapshotBlocksIT extends ESIntegTestCase { logger.info("--> restoring a snapshot is blocked when the cluster is read only"); try { setClusterReadOnly(true); - assertBlocked( - client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), - MetaData.CLUSTER_READ_ONLY_BLOCK - ); + assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), + MetaData.CLUSTER_READ_ONLY_BLOCK); } finally { setClusterReadOnly(false); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java index f7114a52f3d..e2a07063d48 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -40,8 +40,8 @@ public class ClusterStateRequestTests extends ESTestCase { ClusterStateRequest clusterStateRequest = new ClusterStateRequest().routingTable(randomBoolean()).metaData(randomBoolean()) .nodes(randomBoolean()).blocks(randomBoolean()).indices("testindex", "testindex2").indicesOptions(indicesOptions); - Version testVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), - Version.CURRENT); + Version testVersion = VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(testVersion); clusterStateRequest.writeTo(output); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 4d6dc3bf43b..bf77cdeebd0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -54,8 +54,8 @@ public class ClusterStatsIT extends ESIntegTestCase { private void waitForNodes(int numNodes) { ClusterHealthResponse actionGet = client().admin().cluster() - .health(Requests.clusterHealthRequest().waitForEvents(Priority.LANGUID).waitForNodes(Integer.toString(numNodes))) - .actionGet(); + .health(Requests.clusterHealthRequest().waitForEvents(Priority.LANGUID) + .waitForNodes(Integer.toString(numNodes))).actionGet(); assertThat(actionGet.isTimedOut(), is(false)); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java index eb2f4b6904d..38f872dea2f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java @@ -64,13 +64,16 @@ public class SearchRequestBuilderTests extends ESTestCase { public void testSearchSourceBuilderToString() { SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); 
searchRequestBuilder.setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); - assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); + assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder() + .query(QueryBuilders.termQuery("field", "value")).toString())); } public void testThatToStringDoesntWipeRequestSource() { - SearchRequestBuilder searchRequestBuilder = client.prepareSearch().setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch() + .setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); String preToString = searchRequestBuilder.request().toString(); - assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); + assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder() + .query(QueryBuilders.termQuery("field", "value")).toString())); String postToString = searchRequestBuilder.request().toString(); assertThat(preToString, equalTo(postToString)); } diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java index 4be46c4fc9e..0393f5929ce 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -272,8 +272,10 @@ public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase { configs.add(config); } // always adds a test that fails - configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}).index("doesn't_exist").alias("doesn't_exist"), - new String[]{"doesnt_exist"}, true, true, true).expectedException(org.elasticsearch.index.IndexNotFoundException.class)); + configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}) + .index("doesn't_exist").alias("doesn't_exist"), + new String[]{"doesnt_exist"}, true, true, true) + .expectedException(org.elasticsearch.index.IndexNotFoundException.class)); refresh(); @@ -401,9 +403,10 @@ public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase { } protected TermVectorsRequestBuilder getRequestForConfig(TestConfig config) { - return client().prepareTermVectors(randomBoolean() ? config.doc.index : config.doc.alias, config.doc.type, config.doc.id).setPayloads(config.requestPayloads) - .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true) - .setSelectedFields(config.selectedFields).setRealtime(false); + return client().prepareTermVectors(randomBoolean() ? 
config.doc.index : config.doc.alias, config.doc.type, config.doc.id) + .setPayloads(config.requestPayloads) + .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true) + .setSelectedFields(config.selectedFields).setRealtime(false); } protected Fields getTermVectorsFromLucene(DirectoryReader directoryReader, TestDoc doc) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index b5a596401cb..a45012dc4b3 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -765,7 +765,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { // check overridden by keyword analyzer ... if (perFieldAnalyzer.containsKey(fieldName)) { TermsEnum iterator = terms.iterator(); - assertThat("Analyzer for " + fieldName + " should have been overridden!", iterator.next().utf8ToString(), equalTo("some text here")); + assertThat("Analyzer for " + fieldName + " should have been overridden!", + iterator.next().utf8ToString(), equalTo("some text here")); assertThat(iterator.next(), nullValue()); } validFields.add(fieldName); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java index 2f75f6df1a8..08751ffe058 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java @@ -118,7 +118,8 @@ public class MultiTermVectorsIT extends AbstractTermVectorsTestCase { //Version from Lucene index refresh(); response = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY).realtime(false)) + .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field") + .version(Versions.MATCH_ANY).realtime(false)) .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1).realtime(false)) .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2).realtime(false)) .get(); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index 216c1802956..9a8bb38d8cd 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -269,13 +269,14 @@ public class TermVectorsUnitTests extends ESTestCase { String ftOpts = FieldMapper.termVectorOptionsToString(ft); assertThat("with_positions_payloads", equalTo(ftOpts)); TextFieldMapper.Builder builder = new TextFieldMapper.Builder(null); - boolean exceptiontrown = false; + boolean exceptionThrown = false; try { TypeParsers.parseTermVector("", ftOpts, builder); } catch (MapperParsingException e) { - exceptiontrown = true; + exceptionThrown = true; } - assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", exceptiontrown, equalTo(false)); + assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", + exceptionThrown, 
equalTo(false));
     }
 
     public void testTermVectorStringGenerationWithoutPositions() throws Exception {