Remove some unused code
parent 8c40b2b54e
commit 0a293fad29
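Two patterns account for most of the hunks below: modifiers that are redundant inside Java interfaces are dropped, and parameters, fields, and imports that nothing references are deleted. As a minimal illustration of the first pattern (not taken from the commit): interface methods are implicitly public and abstract, interface fields are implicitly public static final, and member types of an interface are implicitly public and static, so spelling those modifiers out changes nothing in the compiled class.

    // Before: every modifier shown here is redundant inside an interface.
    interface Before {
        public static final int LIMIT = 10;   // implicitly public static final
        public abstract void run();           // implicitly public abstract
        public static interface Callback {}   // implicitly public static
    }

    // After: identical semantics, which is why the hunks below are pure cleanup.
    interface After {
        int LIMIT = 10;
        void run();
        interface Callback {}
    }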
@@ -26,7 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
  */
 public class StoreRateLimiting {

-    public static interface Provider {
+    public interface Provider {

        StoreRateLimiting rateLimiting();
    }
@@ -38,7 +38,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
     public BulkShardRequest() {
     }

-    BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
+    BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
         super(shardId);
         this.items = items;
         setRefreshPolicy(refreshPolicy);
@@ -344,7 +344,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
         for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
             final ShardId shardId = entry.getKey();
             final List<BulkItemRequest> requests = entry.getValue();
-            BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.getRefreshPolicy(),
+            BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, bulkRequest.getRefreshPolicy(),
                     requests.toArray(new BulkItemRequest[requests.size()]));
             bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
             bulkShardRequest.timeout(bulkRequest.timeout());
@@ -131,7 +131,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
                 // Advantage is that we're not opening a second searcher to retrieve the _source. Also
                 // because we are working in the same searcher in engineGetResult we can be sure that a
                 // doc isn't deleted between the initial get and this call.
-                GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext(), false);
+                GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
                 return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
             } else {
                 return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
@@ -36,5 +36,5 @@ public interface WriteResponse {
     * {@link RefreshPolicy#IMMEDIATE} should always mark this as true. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will only
     * set this to true if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
     */
-    public abstract void setForcedRefresh(boolean forcedRefresh);
+    void setForcedRefresh(boolean forcedRefresh);
 }
@@ -298,7 +298,6 @@ public class ReplicationResponse extends ActionResponse {
         private static final String _SHARDS = "_shards";
         private static final String TOTAL = "total";
         private static final String SUCCESSFUL = "successful";
-        private static final String PENDING = "pending";
         private static final String FAILED = "failed";
         private static final String FAILURES = "failures";

@@ -595,7 +595,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
             } else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) {
                 termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map()));
             } else if (currentFieldName.equals("filter")) {
-                termVectorsRequest.filterSettings(readFilterSettings(parser, termVectorsRequest));
+                termVectorsRequest.filterSettings(readFilterSettings(parser));
             } else if ("_index".equals(currentFieldName)) { // the following is important for multi request parsing.
                 termVectorsRequest.index = parser.text();
             } else if ("_type".equals(currentFieldName)) {
@@ -641,7 +641,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
         return mapStrStr;
     }

-    private static FilterSettings readFilterSettings(XContentParser parser, TermVectorsRequest termVectorsRequest) throws IOException {
+    private static FilterSettings readFilterSettings(XContentParser parser) throws IOException {
         FilterSettings settings = new FilterSettings();
         XContentParser.Token token;
         String currentFieldName = null;
@@ -22,8 +22,6 @@ package org.elasticsearch.action.update;

 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.client.Requests;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
@@ -63,13 +61,11 @@ import java.util.Map;
 public class UpdateHelper extends AbstractComponent {

     private final ScriptService scriptService;
-    private final ClusterService clusterService;

     @Inject
-    public UpdateHelper(Settings settings, ScriptService scriptService, ClusterService clusterService) {
+    public UpdateHelper(Settings settings, ScriptService scriptService) {
         super(settings);
         this.scriptService = scriptService;
-        this.clusterService = clusterService;
     }

     /**
@@ -26,16 +26,16 @@ package org.elasticsearch.cluster;
 public interface ClusterInfoService {

     /** The latest cluster information */
-    public ClusterInfo getClusterInfo();
+    ClusterInfo getClusterInfo();

     /** Add a listener that will be called every time new information is gathered */
-    public void addListener(Listener listener);
+    void addListener(Listener listener);

     /**
      * Interface for listeners to implement in order to perform actions when
      * new information about the cluster has been gathered
      */
-    public interface Listener {
-        public void onNewInfo(ClusterInfo info);
+    interface Listener {
+        void onNewInfo(ClusterInfo info);
     }
 }
@@ -32,7 +32,7 @@ public interface IndexTemplateFilter {
     */
    boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template);

-    static class Compound implements IndexTemplateFilter {
+    class Compound implements IndexTemplateFilter {

        private IndexTemplateFilter[] filters;

@@ -19,11 +19,8 @@

 package org.elasticsearch.cluster.metadata;

-import org.elasticsearch.Version;
 import org.elasticsearch.action.TimestampParsingException;
 import org.elasticsearch.cluster.AbstractDiffable;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -34,11 +31,9 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;

 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Map;

 import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
@@ -82,8 +77,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {

     public static class Timestamp {

-        private static final FormatDateTimeFormatter EPOCH_MILLIS_PARSER = Joda.forPattern("epoch_millis");
-
         public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException {
             try {
                 return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString));
@@ -35,8 +35,8 @@ import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
@@ -160,7 +160,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
                 throw new IndexTemplateAlreadyExistsException(request.name);
             }

-            validateAndAddTemplate(request, templateBuilder, indicesService, nodeServicesProvider, metaDataCreateIndexService);
+            validateAndAddTemplate(request, templateBuilder, indicesService, nodeServicesProvider);

             for (Alias alias : request.aliases) {
                 AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter())
@@ -185,7 +185,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
     }

     private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder, IndicesService indicesService,
-                                               NodeServicesProvider nodeServicesProvider, MetaDataCreateIndexService metaDataCreateIndexService) throws Exception {
+                                               NodeServicesProvider nodeServicesProvider) throws Exception {
         Index createdIndex = null;
         final String temporaryIndexName = UUIDs.randomBase64UUID();
         try {
@@ -276,7 +276,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
         }
     }

-    public static interface PutListener {
+    public interface PutListener {

         void onResponse(PutResponse response);

@@ -391,7 +391,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
         }
     }

-    public static interface RemoveListener {
+    public interface RemoveListener {

         void onResponse(RemoveResponse response);

@@ -229,11 +229,4 @@ public class OperationRouting extends AbstractComponent {
         // of original index to hash documents
         return Math.floorMod(hash, indexMetaData.getRoutingNumShards()) / indexMetaData.getRoutingFactor();
     }
-
-    private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) {
-        if (!nodes.getDataNodes().keys().contains(nodeId)) {
-            throw new IllegalArgumentException("No data node with id[" + nodeId + "] found");
-        }
-    }
-
 }
@@ -505,7 +505,6 @@ public class DiskThresholdDecider extends AllocationDecider {
     }

     private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
-        ClusterInfo clusterInfo = allocation.clusterInfo();
         DiskUsage usage = usages.get(node.nodeId());
         if (usage == null) {
             // If there is no usage, and we have other nodes in the cluster,
@@ -23,5 +23,5 @@ package org.elasticsearch.common;
 * Generates opaque unique strings.
 */
interface UUIDGenerator {
-    public String getBase64UUID();
+    String getBase64UUID();
}
@@ -80,7 +80,7 @@ public enum GeoDistance implements Writeable {

        @Override
        public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
-            return new FactorFixedSourceDistance(sourceLatitude, sourceLongitude, unit);
+            return new FactorFixedSourceDistance(sourceLatitude, sourceLongitude);
        }
    },
    /**
@@ -217,12 +217,12 @@ public enum GeoDistance implements Writeable {
        throw new IllegalArgumentException("No geo distance for [" + name + "]");
    }

-    public static interface FixedSourceDistance {
+    public interface FixedSourceDistance {

        double calculate(double targetLatitude, double targetLongitude);
    }

-    public static interface DistanceBoundingCheck {
+    public interface DistanceBoundingCheck {

        boolean isWithin(double targetLatitude, double targetLongitude);

@@ -331,7 +331,7 @@ public enum GeoDistance implements Writeable {
        private final double sinA;
        private final double cosA;

-        public FactorFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
+        public FactorFixedSourceDistance(double sourceLatitude, double sourceLongitude) {
            this.sourceLongitude = sourceLongitude;
            this.a = Math.toRadians(90D - sourceLatitude);
            this.sinA = Math.sin(a);
@@ -19,20 +19,19 @@

 package org.elasticsearch.common.geo.builders;

-import org.locationtech.spatial4j.exception.InvalidShapeException;
-import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.GeometryFactory;
 import com.vividsolutions.jts.geom.LinearRing;
 import com.vividsolutions.jts.geom.MultiPolygon;
 import com.vividsolutions.jts.geom.Polygon;
-
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.locationtech.spatial4j.exception.InvalidShapeException;
+import org.locationtech.spatial4j.shape.Shape;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -579,7 +578,7 @@ public class PolygonBuilder extends ShapeBuilder {
        boolean direction = (component == 0 ^ orientation == Orientation.RIGHT);
        // set the points array accordingly (shell or hole)
        Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false);
-        ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1, translated);
+        ring(component, direction, orientation == Orientation.LEFT, points, 0, edges, offset, points.length-1, translated);
        return points.length-1;
    }

@@ -594,7 +593,7 @@ public class PolygonBuilder extends ShapeBuilder {
     * number of points
     * @return Array of edges
     */
-    private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
+    private static Edge[] ring(int component, boolean direction, boolean handedness,
                               Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
        // calculate the direction of the points:
        // find the point a the top of the set and check its
@@ -47,7 +47,7 @@ public interface Scope {
     * when an instance of the requested object doesn't already exist in this
     * scope
     */
-    public <T> Provider<T> scope(Key<T> key, Provider<T> unscoped);
+    <T> Provider<T> scope(Key<T> key, Provider<T> unscoped);

    /**
     * A short but useful description of this scope. For comparison, the standard
@@ -36,7 +36,7 @@ import static java.util.Collections.emptySet;
 */
interface State {

-    static final State NONE = new State() {
+    State NONE = new State() {
        @Override
        public State parent() {
            throw new UnsupportedOperationException();
@@ -29,7 +29,7 @@ public interface InternalFactory<T> {
     * ES:
     * An factory that returns a pre created instance.
     */
-    public static class Instance<T> implements InternalFactory<T> {
+    class Instance<T> implements InternalFactory<T> {

        private final T object;

@@ -22,7 +22,6 @@ package org.elasticsearch.common.lucene.all;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SmallFloat;
@@ -38,12 +37,10 @@ public final class AllTokenStream extends TokenFilter {
     }

     private final BytesRef payloadSpare = new BytesRef(new byte[1]);
-    private final OffsetAttribute offsetAttribute;
     private final PayloadAttribute payloadAttribute;

     AllTokenStream(TokenStream input, float boost) {
         super(input);
-        offsetAttribute = addAttribute(OffsetAttribute.class);
         payloadAttribute = addAttribute(PayloadAttribute.class);
         payloadSpare.bytes[0] = SmallFloat.floatToByte315(boost);
     }
@@ -49,7 +49,6 @@ public final class SettingsFilter extends AbstractComponent {

     public SettingsFilter(Settings settings, Collection<String> patterns) {
         super(settings);
-        HashSet<String> set = new HashSet<>();
         for (String pattern : patterns) {
             if (isValidPattern(pattern) == false) {
                 throw new IllegalArgumentException("invalid pattern: " + pattern);
@@ -33,7 +33,7 @@ import java.util.Map;
 */
public interface SettingsLoader {

-    static class Helper {
+    class Helper {

        public static Map<String, String> loadNestedFromMap(@Nullable Map map) {
            Map<String, String> settings = new HashMap<>();
@@ -80,7 +80,7 @@ public class PortsRange {
        return success;
    }

-    public static interface PortCallback {
+    public interface PortCallback {
        boolean onPortNumber(int portNumber);
    }
}
@@ -48,5 +48,5 @@ public interface TransportAddress extends Writeable {

    boolean isLoopbackOrLinkLocalAddress();

-    public String toString();
+    String toString();
}
@@ -26,6 +26,6 @@ import org.elasticsearch.common.lease.Releasable;
public interface BigArray extends Releasable, Accountable {

    /** Return the length of this array. */
-    public long size();
+    long size();

}
@@ -27,21 +27,21 @@ public interface DoubleArray extends BigArray {
    /**
     * Get an element given its index.
     */
-    public abstract double get(long index);
+    double get(long index);

    /**
     * Set a value at the given index and return the previous value.
     */
-    public abstract double set(long index, double value);
+    double set(long index, double value);

    /**
     * Increment value at the given index by <code>inc</code> and return the value.
     */
-    public abstract double increment(long index, double inc);
+    double increment(long index, double inc);

    /**
     * Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
     */
-    public abstract void fill(long fromIndex, long toIndex, double value);
+    void fill(long fromIndex, long toIndex, double value);

}
@@ -27,21 +27,21 @@ public interface FloatArray extends BigArray {
    /**
     * Get an element given its index.
     */
-    public abstract float get(long index);
+    float get(long index);

    /**
     * Set a value at the given index and return the previous value.
     */
-    public abstract float set(long index, float value);
+    float set(long index, float value);

    /**
     * Increment value at the given index by <code>inc</code> and return the value.
     */
-    public abstract float increment(long index, float inc);
+    float increment(long index, float inc);

    /**
     * Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
     */
-    public abstract void fill(long fromIndex, long toIndex, float value);
+    void fill(long fromIndex, long toIndex, float value);

}
@@ -27,21 +27,21 @@ public interface IntArray extends BigArray {
    /**
     * Get an element given its index.
     */
-    public abstract int get(long index);
+    int get(long index);

    /**
     * Set a value at the given index and return the previous value.
     */
-    public abstract int set(long index, int value);
+    int set(long index, int value);

    /**
     * Increment value at the given index by <code>inc</code> and return the value.
     */
-    public abstract int increment(long index, int inc);
+    int increment(long index, int inc);

    /**
     * Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
     */
-    public abstract void fill(long fromIndex, long toIndex, int value);
+    void fill(long fromIndex, long toIndex, int value);

}
@@ -27,21 +27,21 @@ public interface LongArray extends BigArray {
    /**
     * Get an element given its index.
     */
-    public abstract long get(long index);
+    long get(long index);

    /**
     * Set a value at the given index and return the previous value.
     */
-    public abstract long set(long index, long value);
+    long set(long index, long value);

    /**
     * Increment value at the given index by <code>inc</code> and return the value.
     */
-    public abstract long increment(long index, long inc);
+    long increment(long index, long inc);

    /**
     * Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
     */
-    public abstract void fill(long fromIndex, long toIndex, long value);
+    void fill(long fromIndex, long toIndex, long value);

}
@@ -263,8 +263,7 @@ public final class ThreadContext implements Closeable, Writeable {
     }

     private void putSingleHeader(String key, String value, Map<String, String> newHeaders) {
-        final String existingValue;
-        if ((existingValue = newHeaders.putIfAbsent(key, value)) != null) {
+        if (newHeaders.putIfAbsent(key, value) != null) {
             throw new IllegalArgumentException("value for key [" + key + "] already present");
         }
     }
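The ThreadContext change above works because java.util.Map#putIfAbsent already returns the previous value (or null when the key was absent), so the temporary local carried no extra information. A self-contained sketch of the same idiom, with hypothetical names:

    import java.util.HashMap;
    import java.util.Map;

    class SingleValueHeaders {
        private final Map<String, String> headers = new HashMap<>();

        void putSingleHeader(String key, String value) {
            // putIfAbsent returns the existing mapping when the key is already
            // present, so a non-null result signals a duplicate header.
            if (headers.putIfAbsent(key, value) != null) {
                throw new IllegalArgumentException("value for key [" + key + "] already present");
            }
        }
    }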
@@ -29,7 +29,7 @@ import java.util.Map;
 */
public interface ToXContent {

-    public static interface Params {
+    interface Params {
        String param(String key);

        String param(String key, String defaultValue);
@@ -39,7 +39,7 @@ public interface ToXContent {
        Boolean paramAsBoolean(String key, Boolean defaultValue);
    }

-    public static final Params EMPTY_PARAMS = new Params() {
+    Params EMPTY_PARAMS = new Params() {
        @Override
        public String param(String key) {
            return null;
@@ -62,7 +62,7 @@ public interface ToXContent {

    };

-    public static class MapParams implements Params {
+    class MapParams implements Params {

        private final Map<String, String> params;

@@ -95,7 +95,7 @@ public interface ToXContent {
        }
    }

-    public static class DelegatingMapParams extends MapParams {
+    class DelegatingMapParams extends MapParams {

        private final Params delegate;

@@ -228,23 +228,13 @@ public abstract class AbstractXContentParser implements XContentParser {
         return readListOrderedMap(this);
     }

-    static interface MapFactory {
+    interface MapFactory {
         Map<String, Object> newMap();
     }

-    static final MapFactory SIMPLE_MAP_FACTORY = new MapFactory() {
-        @Override
-        public Map<String, Object> newMap() {
-            return new HashMap<>();
-        }
-    };
+    static final MapFactory SIMPLE_MAP_FACTORY = HashMap::new;

-    static final MapFactory ORDERED_MAP_FACTORY = new MapFactory() {
-        @Override
-        public Map<String, Object> newMap() {
-            return new LinkedHashMap<>();
-        }
-    };
+    static final MapFactory ORDERED_MAP_FACTORY = LinkedHashMap::new;

     static Map<String, Object> readMap(XContentParser parser) throws IOException {
         return readMap(parser, SIMPLE_MAP_FACTORY);
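The MapFactory hunk above is the standard anonymous-class-to-method-reference conversion: MapFactory has a single abstract method, so a constructor reference such as HashMap::new implements it directly. A standalone sketch mirroring the shape of that code (illustrative, not the actual Elasticsearch source):

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    interface MapFactory {
        Map<String, Object> newMap();
    }

    class MapFactories {
        // Each constructor reference supplies the single newMap() implementation.
        static final MapFactory SIMPLE_MAP_FACTORY = HashMap::new;
        static final MapFactory ORDERED_MAP_FACTORY = LinkedHashMap::new;

        public static void main(String[] args) {
            Map<String, Object> map = ORDERED_MAP_FACTORY.newMap(); // same behavior as the old anonymous class
            map.put("key", "value");
            System.out.println(map);
        }
    }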
@@ -50,7 +50,6 @@ import java.util.HashSet;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicBoolean;

 import static org.elasticsearch.cluster.ClusterState.Builder;

@@ -69,8 +68,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem

     private volatile boolean master = false;

-    private final AtomicBoolean initialStateSent = new AtomicBoolean();
-
     private static final ConcurrentMap<ClusterName, ClusterGroup> clusterGroups = ConcurrentCollections.newConcurrentMap();

     private volatile ClusterState lastProcessedClusterState;
@@ -197,7 +197,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
                 discoverySettings,
                 clusterService.getClusterName());
         this.pingService.setPingContextProvider(this);
-        this.membership = new MembershipAction(settings, clusterService, transportService, this, new MembershipListener());
+        this.membership = new MembershipAction(settings, transportService, this, new MembershipListener());

         this.joinThreadControl = new JoinThreadControl(threadPool);

@@ -21,7 +21,6 @@ package org.elasticsearch.discovery.zen.membership;

 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -48,13 +47,13 @@ public class MembershipAction extends AbstractComponent {
     public static final String DISCOVERY_JOIN_VALIDATE_ACTION_NAME = "internal:discovery/zen/join/validate";
     public static final String DISCOVERY_LEAVE_ACTION_NAME = "internal:discovery/zen/leave";

-    public static interface JoinCallback {
+    public interface JoinCallback {
         void onSuccess();

         void onFailure(Throwable t);
     }

-    public static interface MembershipListener {
+    public interface MembershipListener {
         void onJoin(DiscoveryNode node, JoinCallback callback);

         void onLeave(DiscoveryNode node);
@@ -66,14 +65,11 @@ public class MembershipAction extends AbstractComponent {

     private final MembershipListener listener;

-    private final ClusterService clusterService;
-
-    public MembershipAction(Settings settings, ClusterService clusterService, TransportService transportService, DiscoveryNodesProvider nodesProvider, MembershipListener listener) {
+    public MembershipAction(Settings settings, TransportService transportService, DiscoveryNodesProvider nodesProvider, MembershipListener listener) {
         super(settings);
         this.transportService = transportService;
         this.nodesProvider = nodesProvider;
         this.listener = listener;
-        this.clusterService = clusterService;

         transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest::new, ThreadPool.Names.GENERIC, new JoinRequestRequestHandler());
         transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, ValidateJoinRequest::new, ThreadPool.Names.GENERIC, new ValidateJoinRequestRequestHandler());
@@ -32,7 +32,6 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.indices.IndicesService;
@@ -47,8 +46,6 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {

     private final ClusterService clusterService;

-    private final NodeEnvironment nodeEnv;
-
     private final GatewayMetaState metaState;

     private final TransportNodesListGatewayMetaState listGatewayMetaState;
@@ -57,14 +54,13 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
     private final IndicesService indicesService;
     private final NodeServicesProvider nodeServicesProvider;

-    public Gateway(Settings settings, ClusterService clusterService, NodeEnvironment nodeEnv, GatewayMetaState metaState,
+    public Gateway(Settings settings, ClusterService clusterService, GatewayMetaState metaState,
                    TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery,
                    NodeServicesProvider nodeServicesProvider, IndicesService indicesService) {
         super(settings);
         this.nodeServicesProvider = nodeServicesProvider;
         this.indicesService = indicesService;
         this.clusterService = clusterService;
-        this.nodeEnv = nodeEnv;
         this.metaState = metaState;
         this.listGatewayMetaState = listGatewayMetaState;
         this.minimumMasterNodesProvider = discovery::getMinimumMasterNodes;
@@ -42,7 +42,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.rest.RestStatus;
@@ -96,11 +95,11 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i

     @Inject
     public GatewayService(Settings settings, AllocationService allocationService, ClusterService clusterService,
-                          ThreadPool threadPool, NodeEnvironment nodeEnvironment, GatewayMetaState metaState,
+                          ThreadPool threadPool, GatewayMetaState metaState,
                           TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery,
                           NodeServicesProvider nodeServicesProvider, IndicesService indicesService) {
         super(settings);
-        this.gateway = new Gateway(settings, clusterService, nodeEnvironment, metaState, listGatewayMetaState, discovery,
+        this.gateway = new Gateway(settings, clusterService, metaState, listGatewayMetaState, discovery,
                 nodeServicesProvider, indicesService);
         this.allocationService = allocationService;
         this.clusterService = clusterService;
@@ -157,7 +157,6 @@ public final class MergePolicyConfig {

     MergePolicyConfig(ESLogger logger, IndexSettings indexSettings) {
         this.logger = logger;
-        IndexScopedSettings scopedSettings = indexSettings.getScopedSettings();
         double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage
         ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING);
         int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING);
@@ -41,7 +41,6 @@ import java.util.List;
public class MultiOrdinals extends Ordinals {

    private static final int OFFSETS_PAGE_SIZE = 1024;
-    private static final int OFFSET_INIT_PAGE_COUNT = 16;

    /**
     * Return true if this impl is going to be smaller than {@link SinglePackedOrdinals} by at least 20%.
@@ -110,7 +110,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
     * <p>
     * Note: Call <b>must</b> release engine searcher associated with engineGetResult!
     */
-    public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
+    public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) {
        if (!engineGetResult.exists()) {
            return new GetResult(shardId.getIndexName(), type, id, -1, false, null, null);
        }
@@ -119,7 +119,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
         try {
             long now = System.nanoTime();
             fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, fields);
-            GetResult getResult = innerGetLoadFromStoredFields(type, id, fields, fetchSourceContext, engineGetResult, mapperService, ignoreErrorsOnGeneratedFields);
+            GetResult getResult = innerGetLoadFromStoredFields(type, id, fields, fetchSourceContext, engineGetResult, mapperService);
             if (getResult.isExists()) {
                 existsMetric.inc(System.nanoTime() - now);
             } else {
@@ -183,7 +183,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
         try {
             // break between having loaded it from translog (so we only have _source), and having a document to load
             if (get.docIdAndVersion() != null) {
-                return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, mapperService, ignoreErrorsOnGeneratedFields);
+                return innerGetLoadFromStoredFields(type, id, gFields, fetchSourceContext, get, mapperService);
             } else {
                 Translog.Source source = get.source();

@@ -316,7 +316,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
         }
     }

-    private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService, boolean ignoreErrorsOnGeneratedFields) {
+    private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
         Map<String, GetField> fields = null;
         BytesReference source = null;
         Versions.DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
@@ -76,7 +76,7 @@ final class DocumentParser {
                     docMapperParser, docMapper, source, parser);
             validateStart(parser);
             internalParseDocument(mapping, context, parser);
-            validateEnd(source, parser);
+            validateEnd(parser);
         } catch (Throwable t) {
             throw wrapInMapperParsingException(source, t);
         }
@@ -128,7 +128,7 @@ final class DocumentParser {
         }
     }

-    private static void validateEnd(SourceToParse source, XContentParser parser) throws IOException {
+    private static void validateEnd(XContentParser parser) throws IOException {
         XContentParser.Token token;// only check for end of tokens if we created the parser here
         // try to parse the next token, this should be null if the object is ended properly
         // but will throw a JSON exception if the extra tokens is not valid JSON (this will be handled by the catch)
@@ -368,7 +368,6 @@ final class DocumentParser {
             token = parser.nextToken();
         }

-        ObjectMapper update = null;
         innerParseObject(context, mapper, parser, currentFieldName, token);
         // restore the enable path flag
         if (nested.isNested()) {
@@ -20,7 +20,6 @@
 package org.elasticsearch.index.mapper;

 import com.carrotsearch.hppc.ObjectHashSet;
-
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
@@ -35,7 +34,6 @@ import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.AbstractIndexComponent;
-import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.mapper.Mapper.BuilderContext;
@@ -316,7 +314,7 @@ public class MapperService extends AbstractIndexComponent {
         Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
         MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
         checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers);
-        checkObjectsCompatibility(newMapper.type(), objectMappers, fieldMappers, updateAllTypes);
+        checkObjectsCompatibility(objectMappers, updateAllTypes);

         // 3. update lookup data-structures
         // this will in particular make sure that the merged fields are compatible with other types
@@ -381,7 +379,7 @@ public class MapperService extends AbstractIndexComponent {
         for (DocumentMapper mapper : docMappers(false)) {
             List<FieldMapper> fieldMappers = new ArrayList<>();
             Collections.addAll(fieldMappers, mapper.mapping().metadataMappers);
-            MapperUtils.collect(mapper.root(), new ArrayList<ObjectMapper>(), fieldMappers);
+            MapperUtils.collect(mapper.root(), new ArrayList<>(), fieldMappers);
             for (FieldMapper fieldMapper : fieldMappers) {
                 assert fieldMapper.fieldType() == fieldTypes.get(fieldMapper.name()) : fieldMapper.name();
             }
@@ -449,7 +447,7 @@ public class MapperService extends AbstractIndexComponent {
         }
     }

-    private void checkObjectsCompatibility(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
+    private void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers, boolean updateAllTypes) {
         assert Thread.holdsLock(this);

         for (ObjectMapper newObjectMapper : objectMappers) {
@@ -310,8 +310,6 @@ public abstract class ParseContext {

         private Field version;

-        private StringBuilder stringBuilder = new StringBuilder();
-
         private final AllEntries allEntries;

         private final List<Mapper> dynamicMappers;
@@ -24,7 +24,6 @@ import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.TimestampParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;
 import org.elasticsearch.common.lucene.Lucene;
@@ -82,7 +81,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
         private String defaultTimestamp = Defaults.DEFAULT_TIMESTAMP;
         private Boolean ignoreMissing = null;

-        public Builder(MappedFieldType existing, Settings settings) {
+        public Builder(MappedFieldType existing) {
             super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
         }

@@ -130,7 +129,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
             if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha4)) {
                 throw new IllegalArgumentException("[_timestamp] is removed in 5.0. As a replacement, you can use an ingest pipeline to add a field with the current timestamp to your documents.");
             }
-            Builder builder = new Builder(parserContext.mapperService().fullName(NAME), parserContext.mapperService().getIndexSettings().getSettings());
+            Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
             boolean defaultSet = false;
             Boolean ignoreMissing = null;
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
@@ -405,7 +405,6 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
     private static Query parseQueryString(ExtendedCommonTermsQuery query, Object queryString, String field, Analyzer analyzer,
                                           String lowFreqMinimumShouldMatch, String highFreqMinimumShouldMatch) throws IOException {
         // Logic similar to QueryParser#getFieldQuery
-        int count = 0;
         try (TokenStream source = analyzer.tokenStream(field, queryString.toString())) {
             source.reset();
             CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
@@ -414,7 +413,6 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
                 // UTF-8
                 builder.copyChars(termAtt);
                 query.add(new Term(field, builder.toBytesRef()));
-                count++;
             }
         }

@@ -43,8 +43,6 @@ public class SnapshotStatus {

     private Translog translog = new Translog();

-    private Throwable failure;
-
     public Stage stage() {
         return this.stage;
     }
@@ -70,10 +68,6 @@ public class SnapshotStatus {
         this.time = time;
     }

-    public void failed(Throwable failure) {
-        this.failure = failure;
-    }
-
     public Index index() {
         return index;
     }
@@ -202,7 +202,7 @@ public class TermVectorsService {

         /* generate term vectors from fetched document fields */
         GetResult getResult = indexShard.getService().get(
-                get, request.id(), request.type(), validFields.toArray(Strings.EMPTY_ARRAY), null, false);
+                get, request.id(), request.type(), validFields.toArray(Strings.EMPTY_ARRAY), null);
         Fields generatedTermVectors = generateTermVectors(indexShard, getResult.getFields().values(), request.offsets(), request.perFieldAnalyzer(), validFields);

         /* merge with existing Fields */
@@ -572,10 +572,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         }
     }

-    private boolean isReferencedGeneration(long generation) { // used to make decisions if a file can be deleted
-        return generation >= lastCommittedTranslogFileGeneration;
-    }
-
     public TranslogConfig getConfig() {
         return config;
     }
@@ -20,8 +20,8 @@
 package org.elasticsearch.indices;

 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
@@ -34,8 +34,6 @@ import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.IndexShardState;
 import org.elasticsearch.index.shard.IndexingOperationListener;
-import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.io.Closeable;
@@ -94,10 +92,6 @@ public class IndexingMemoryController extends AbstractComponent implements Index
     private final ShardsIndicesStatusChecker statusChecker;

-    IndexingMemoryController(Settings settings, ThreadPool threadPool, Iterable<IndexShard> indexServices) {
-        this(settings, threadPool, indexServices, JvmInfo.jvmInfo().getMem().getHeapMax().bytes());
-    }
-
     IndexingMemoryController(Settings settings, ThreadPool threadPool, Iterable<IndexShard> indexServices, long jvmMemoryInBytes) {
         super(settings);
         this.indexShards = indexServices;

@@ -20,13 +20,9 @@
 package org.elasticsearch.monitor.jvm;

 import org.apache.lucene.util.CollectionUtil;
-import org.apache.lucene.util.Constants;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.TimeValue;

 import java.lang.management.ManagementFactory;
@@ -45,7 +41,6 @@ import java.util.concurrent.TimeUnit;
 public class HotThreads {

     private static final Object mutex = new Object();
-    private static final ESLogger logger = Loggers.getLogger(HotThreads.class);

     private static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");

@@ -409,7 +409,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
             Repository repository = repositoryInjector.getInstance(Repository.class);
             IndexShardRepository indexShardRepository = repositoryInjector.getInstance(IndexShardRepository.class);
             repository.start();
-            return new RepositoryHolder(repositoryMetaData.type(), repositoryMetaData.settings(), repositoryInjector, repository, indexShardRepository);
+            return new RepositoryHolder(repositoryMetaData.type(), repositoryMetaData.settings(), repository, indexShardRepository);
         } catch (Throwable t) {
             logger.warn("failed to create repository [{}][{}]", t, repositoryMetaData.type(), repositoryMetaData.name());
             throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", t);
@@ -473,7 +473,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
         private final Repository repository;
         private final IndexShardRepository indexShardRepository;

-        public RepositoryHolder(String type, Settings settings, Injector injector, Repository repository, IndexShardRepository indexShardRepository) {
+        public RepositoryHolder(String type, Settings settings, Repository repository, IndexShardRepository indexShardRepository) {
             this.type = type;
             this.settings = settings;
             this.repository = repository;
@@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestChannel;
-import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.RestStatus;

@@ -38,7 +37,7 @@ import static org.elasticsearch.rest.action.support.RestTable.pad;
 */
public abstract class AbstractCatAction extends BaseRestHandler {

-    public AbstractCatAction(Settings settings, RestController controller) {
+    public AbstractCatAction(Settings settings) {
        super(settings);
    }

@@ -45,7 +45,7 @@ public class RestAliasAction extends AbstractCatAction {

     @Inject
     public RestAliasAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/aliases", this);
         controller.registerHandler(GET, "/_cat/aliases/{alias}", this);
     }
@@ -49,7 +49,7 @@ public class RestAllocationAction extends AbstractCatAction {

     @Inject
     public RestAllocationAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/allocation", this);
         controller.registerHandler(GET, "/_cat/allocation/{nodes}", this);
     }
@@ -46,7 +46,7 @@ public class RestCountAction extends AbstractCatAction {

     @Inject
     public RestCountAction(Settings settings, RestController restController, RestController controller, IndicesQueriesRegistry indicesQueriesRegistry) {
-        super(settings, controller);
+        super(settings);
         restController.registerHandler(GET, "/_cat/count", this);
         restController.registerHandler(GET, "/_cat/count/{index}", this);
         this.indicesQueriesRegistry = indicesQueriesRegistry;
@@ -44,7 +44,7 @@ public class RestFielddataAction extends AbstractCatAction {

     @Inject
     public RestFielddataAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/fielddata", this);
         controller.registerHandler(GET, "/_cat/fielddata/{fields}", this);
     }
@@ -40,7 +40,7 @@ public class RestHealthAction extends AbstractCatAction {

     @Inject
     public RestHealthAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/health", this);
     }

@@ -58,7 +58,7 @@ public class RestIndicesAction extends AbstractCatAction {

     @Inject
     public RestIndicesAction(Settings settings, RestController controller, IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, controller);
+        super(settings);
         this.indexNameExpressionResolver = indexNameExpressionResolver;
         controller.registerHandler(GET, "/_cat/indices", this);
         controller.registerHandler(GET, "/_cat/indices/{index}", this);
@@ -40,7 +40,7 @@ public class RestMasterAction extends AbstractCatAction {

     @Inject
     public RestMasterAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/master", this);
     }

@@ -48,7 +48,7 @@ public class RestNodeAttrsAction extends AbstractCatAction {

     @Inject
     public RestNodeAttrsAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/nodeattrs", this);
     }

@@ -73,7 +73,7 @@ public class RestNodesAction extends AbstractCatAction {

     @Inject
     public RestNodesAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/nodes", this);
     }

@@ -38,7 +38,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
 public class RestPendingClusterTasksAction extends AbstractCatAction {
     @Inject
     public RestPendingClusterTasksAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/pending_tasks", this);
     }

@@ -45,7 +45,7 @@ public class RestPluginsAction extends AbstractCatAction {

     @Inject
     public RestPluginsAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/plugins", this);
     }

@@ -52,7 +52,7 @@ public class RestRecoveryAction extends AbstractCatAction {

     @Inject
     public RestRecoveryAction(Settings settings, RestController restController, RestController controller) {
-        super(settings, controller);
+        super(settings);
         restController.registerHandler(GET, "/_cat/recovery", this);
         restController.registerHandler(GET, "/_cat/recovery/{index}", this);
     }
@@ -41,7 +41,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
 public class RestRepositoriesAction extends AbstractCatAction {
     @Inject
     public RestRepositoriesAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/repositories", this);
     }

@@ -50,7 +50,7 @@ public class RestSegmentsAction extends AbstractCatAction {

     @Inject
     public RestSegmentsAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/segments", this);
         controller.registerHandler(GET, "/_cat/segments/{index}", this);
     }
@@ -50,7 +50,7 @@ public class RestShardsAction extends AbstractCatAction {

     @Inject
     public RestShardsAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/shards", this);
         controller.registerHandler(GET, "/_cat/shards/{index}", this);
     }
@@ -48,7 +48,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
 public class RestSnapshotAction extends AbstractCatAction {
     @Inject
     public RestSnapshotAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/snapshots/{repository}", this);
     }

@@ -52,7 +52,7 @@ public class RestTasksAction extends AbstractCatAction {

     @Inject
     public RestTasksAction(Settings settings, RestController controller, ClusterService clusterService) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/tasks", this);
         this.clusterService = clusterService;
     }
@@ -109,7 +109,7 @@ public class RestThreadPoolAction extends AbstractCatAction {

     @Inject
     public RestThreadPoolAction(Settings settings, RestController controller) {
-        super(settings, controller);
+        super(settings);
         controller.registerHandler(GET, "/_cat/thread_pool", this);
     }

@@ -51,13 +51,13 @@ public class RestIndexAction extends BaseRestHandler {
         controller.registerHandler(POST, "/{index}/{type}", this); // auto id creation
         controller.registerHandler(PUT, "/{index}/{type}/{id}", this);
         controller.registerHandler(POST, "/{index}/{type}/{id}", this);
-        CreateHandler createHandler = new CreateHandler(settings, controller);
+        CreateHandler createHandler = new CreateHandler(settings);
         controller.registerHandler(PUT, "/{index}/{type}/{id}/_create", createHandler);
         controller.registerHandler(POST, "/{index}/{type}/{id}/_create", createHandler);
     }

     final class CreateHandler extends BaseRestHandler {
-        protected CreateHandler(Settings settings, RestController controller) {
+        protected CreateHandler(Settings settings) {
             super(settings);
         }

@@ -236,7 +236,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
         }

         ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang);
-        if (canExecuteScript(lang, scriptEngineService, script.getType(), scriptContext) == false) {
+        if (canExecuteScript(lang, script.getType(), scriptContext) == false) {
             throw new IllegalStateException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled");
         }

@ -357,7 +357,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
|
|||
ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(scriptLang);
|
||||
//we don't know yet what the script will be used for, but if all of the operations for this lang with
|
||||
//indexed scripts are disabled, it makes no sense to even compile it.
|
||||
if (isAnyScriptContextEnabled(scriptLang, scriptEngineService, ScriptType.STORED)) {
|
||||
if (isAnyScriptContextEnabled(scriptLang, ScriptType.STORED)) {
|
||||
Object compiled = scriptEngineService.compile(id, template.getScript(), Collections.emptyMap());
|
||||
if (compiled == null) {
|
||||
throw new IllegalArgumentException("Unable to parse [" + template.getScript() +
|
||||
|
@ -466,16 +466,16 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
|
|||
return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams());
|
||||
}
|
||||
|
||||
private boolean isAnyScriptContextEnabled(String lang, ScriptEngineService scriptEngineService, ScriptType scriptType) {
|
||||
private boolean isAnyScriptContextEnabled(String lang, ScriptType scriptType) {
|
||||
for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) {
|
||||
if (canExecuteScript(lang, scriptEngineService, scriptType, scriptContext)) {
|
||||
if (canExecuteScript(lang, scriptType, scriptContext)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private boolean canExecuteScript(String lang, ScriptEngineService scriptEngineService, ScriptType scriptType, ScriptContext scriptContext) {
|
||||
private boolean canExecuteScript(String lang, ScriptType scriptType, ScriptContext scriptContext) {
|
||||
assert lang != null;
|
||||
if (scriptContextRegistry.isSupportedContext(scriptContext) == false) {
|
||||
throw new IllegalArgumentException("script context [" + scriptContext.getKey() + "] not supported");
|
||||
|
@ -556,7 +556,7 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
|
|||
try {
|
||||
//we don't know yet what the script will be used for, but if all of the operations for this lang
|
||||
// with file scripts are disabled, it makes no sense to even compile it and cache it.
|
||||
if (isAnyScriptContextEnabled(engineService.getType(), engineService, ScriptType.FILE)) {
|
||||
if (isAnyScriptContextEnabled(engineService.getType(), ScriptType.FILE)) {
|
||||
logger.info("compiling script file [{}]", file.toAbsolutePath());
|
||||
try (InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) {
|
||||
String script = Streams.copyToString(reader);
|
||||
|
|
|
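The three `ScriptService` hunks apply one refactor three times: `isAnyScriptContextEnabled` and `canExecuteScript` never read the `ScriptEngineService` they were handed (callers already derive it from the language via `getScriptEngineServiceForLang` when they actually need it), so the parameter goes away and every call site shrinks with it. A self-contained sketch of the shape of that change, with hypothetical stand-ins for the real registry and settings lookups:

    import java.util.Arrays;
    import java.util.List;

    public class UnusedParameterSketch {
        // Before: isAnyContextEnabled(String lang, Object engineService, String type, ...)
        // After: the engine handle is gone; the decision needs only lang, type and contexts.
        static boolean isAnyContextEnabled(String lang, String type, List<String> contexts) {
            for (String context : contexts) {
                if (canExecute(lang, type, context)) {
                    return true;
                }
            }
            return false;
        }

        static boolean canExecute(String lang, String type, String context) {
            // Placeholder policy standing in for the scriptContextRegistry/settings checks.
            return !context.startsWith("disabled_");
        }

        public static void main(String[] args) {
            System.out.println(isAnyContextEnabled("painless", "stored", Arrays.asList("search", "aggs")));
        }
    }
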
@@ -19,19 +19,7 @@

package org.elasticsearch.search;

import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicLong;

import com.carrotsearch.hppc.ObjectFloatHashMap;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ExceptionsHelper;

@@ -57,8 +45,8 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.SearchOperationListener;

@@ -101,7 +89,18 @@ import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.threadpool.ThreadPool;

import com.carrotsearch.hppc.ObjectFloatHashMap;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicLong;

import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;

/**
 *

@@ -844,20 +843,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
}

private void shortcutDocIdsToLoadForScanning(SearchContext context) {
TopDocs topDocs = context.queryResult().topDocs();
if (topDocs.scoreDocs.length == 0) {
// no more docs...
context.docIdsToLoad(EMPTY_DOC_IDS, 0, 0);
return;
}
int[] docIdsToLoad = new int[topDocs.scoreDocs.length];
for (int i = 0; i < docIdsToLoad.length; i++) {
docIdsToLoad[i] = topDocs.scoreDocs[i].doc;
}
context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
}

private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
// process scroll
context.from(context.from() + context.size());

@@ -82,6 +82,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector {
}

/** Set the deferred collectors. */
@Override
public void setDeferredCollector(Iterable<BucketCollector> deferredCollectors) {
this.collector = BucketCollector.wrap(deferredCollectors);
}

@@ -79,6 +79,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
}

/** Set the deferred collectors. */
@Override
public void setDeferredCollector(Iterable<BucketCollector> deferredCollectors) {
this.deferred = BucketCollector.wrap(deferredCollectors);
}

@@ -34,18 +34,13 @@ import java.io.IOException;
 */
public abstract class DeferringBucketCollector extends BucketCollector {

private BucketCollector collector;
/** Sole constructor. */
public DeferringBucketCollector() {}

/** Set the deferred collectors. */
public void setDeferredCollector(Iterable<BucketCollector> deferredCollectors) {
this.collector = BucketCollector.wrap(deferredCollectors);
}
public abstract void setDeferredCollector(Iterable<BucketCollector> deferredCollectors);

public final void replay(long... selectedBuckets) throws IOException
{
public final void replay(long... selectedBuckets) throws IOException {
prepareSelectedBuckets(selectedBuckets);
}

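With `setDeferredCollector` now abstract, `DeferringBucketCollector` carries no collector field at all; each concrete deferring collector (the two hunks above) keeps its own. A generic, self-contained illustration of pushing state from an abstract base into the subclasses — these are not the Elasticsearch types:

    abstract class Deferring {
        // The base class states the contract only; it no longer owns the field.
        abstract void setDownstream(Runnable downstream);
    }

    class BufferingDeferring extends Deferring {
        private Runnable downstream; // state now lives where it is actually used

        @Override
        void setDownstream(Runnable downstream) {
            this.downstream = downstream; // mirrors this.collector = BucketCollector.wrap(...)
        }

        void replay() {
            downstream.run();
        }
    }
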
@@ -260,7 +260,6 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
}

private List<B> ranges;
private Map<String, B> rangeMap;
protected DocValueFormat format;
protected boolean keyed;

@@ -333,7 +332,6 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
ranges.add(getFactory().createBucket(key, in.readDouble(), in.readDouble(), in.readVLong(), InternalAggregations.readAggregations(in), keyed, format));
}
this.ranges = ranges;
this.rangeMap = null;
}

@Override

@@ -20,7 +20,6 @@
package org.elasticsearch.search.aggregations.metrics.scripted;

import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.Script;

@@ -34,6 +33,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;

import java.io.IOException;
import java.util.Collections;
import java.util.List;

@@ -52,7 +52,6 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
super(name, context, parent, pipelineAggregators, metaData);
this.params = params;
ScriptService scriptService = context.searchContext().scriptService();
ClusterState state = context.searchContext().getQueryShardContext().getClusterState();
if (initScript != null) {
scriptService.executable(initScript, ScriptContext.Standard.AGGS, Collections.emptyMap()).run();
}

@@ -32,7 +32,6 @@ public class ValuesSourceConfig<VS extends ValuesSource> {
private SearchScript script;
private ValueType scriptValueType;
private boolean unmapped = false;
private String formatPattern;
private DocValueFormat format = DocValueFormat.RAW;
private Object missing;
private DateTimeZone timeZone;

@@ -110,7 +110,7 @@ public class FastVectorHighlighter implements Highlighter {
if (!forceSource && mapper.fieldType().stored()) {
fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, hitContext, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
} else {
fragListBuilder = field.fieldOptions().fragmentOffset() == -1 ? new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());

@@ -118,13 +118,13 @@ public class FastVectorHighlighter implements Highlighter {
if (!forceSource && mapper.fieldType().stored()) {
fragmentsBuilder = new ScoreOrderFragmentsBuilder(field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceScoreOrderFragmentsBuilder(mapper, context, hitContext, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
fragmentsBuilder = new SourceScoreOrderFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
} else {
if (!forceSource && mapper.fieldType().stored()) {
fragmentsBuilder = new SimpleFragmentsBuilder(mapper, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, hitContext, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
fragmentsBuilder = new SourceSimpleFragmentsBuilder(mapper, context, field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
}
}

@@ -27,7 +27,6 @@ import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
import org.apache.lucene.search.vectorhighlight.ScoreOrderFragmentsBuilder;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;

@@ -43,14 +42,11 @@ public class SourceScoreOrderFragmentsBuilder extends ScoreOrderFragmentsBuilder

private final SearchContext searchContext;

private final FetchSubPhase.HitContext hitContext;

public SourceScoreOrderFragmentsBuilder(FieldMapper mapper, SearchContext searchContext,
FetchSubPhase.HitContext hitContext, String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
public SourceScoreOrderFragmentsBuilder(FieldMapper mapper, SearchContext searchContext, String[] preTags, String[] postTags,
BoundaryScanner boundaryScanner) {
super(preTags, postTags, boundaryScanner);
this.mapper = mapper;
this.searchContext = searchContext;
this.hitContext = hitContext;
}

@Override

@@ -24,7 +24,6 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;

@@ -38,13 +37,10 @@ public class SourceSimpleFragmentsBuilder extends SimpleFragmentsBuilder {

private final SearchContext searchContext;

private final FetchSubPhase.HitContext hitContext;

public SourceSimpleFragmentsBuilder(FieldMapper mapper, SearchContext searchContext,
FetchSubPhase.HitContext hitContext, String[] preTags, String[] postTags, BoundaryScanner boundaryScanner) {
public SourceSimpleFragmentsBuilder(FieldMapper mapper, SearchContext searchContext, String[] preTags, String[] postTags,
BoundaryScanner boundaryScanner) {
super(mapper, preTags, postTags, boundaryScanner);
this.searchContext = searchContext;
this.hitContext = hitContext;
}

public static final Field[] EMPTY_FIELDS = new Field[0];

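Both source fragments builders stored a `FetchSubPhase.HitContext` that was assigned in the constructor and read nowhere, so the field and the parameter are deleted together, and the `FastVectorHighlighter` call sites a few hunks earlier pass one argument fewer. The same edit in miniature, with invented names:

    class FragmentsSketch {
        private final String mapper;         // still used
        // private final Object hitContext; // removed: written once, never read

        FragmentsSketch(String mapper) {     // parameter list shrinks with the field
            this.mapper = mapper;
        }

        String describe() {
            return "builder for " + mapper;
        }
    }
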
@@ -157,11 +157,4 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public void rewrite(QueryShardContext context) throws IOException {
shardSearchLocalRequest.rewrite(context);
}

private ShardSearchTransportRequest shallowCopy(ShardSearchLocalRequest rewritten) {
ShardSearchTransportRequest newRequest = new ShardSearchTransportRequest();
newRequest.originalIndices = originalIndices;
newRequest.shardSearchLocalRequest = rewritten;
return newRequest;
}
}

@@ -98,7 +98,6 @@ public final class CompletionSuggestion extends Suggest.Suggestion<CompletionSug
// combine suggestion entries from participating shards on the coordinating node
// the global top <code>size</code> entries are collected from the shard results
// using a priority queue
Comparator<Suggest.Suggestion.Entry.Option> optionComparator = sortComparator();
OptionPriorityQueue priorityQueue = new OptionPriorityQueue(size, sortComparator());
for (Suggest.Suggestion<Entry> entries : toReduce) {
assert entries.getEntries().size() == 1 : "CompletionSuggestion must have only one entry";

@@ -177,7 +177,7 @@ public class StrictISODateTimeFormat {
if (workingFields.contains(DateTimeFieldType.monthOfYear())) {
reducedPrec = dateByMonth(bld, workingFields, extended, strictISO);
} else if (workingFields.contains(DateTimeFieldType.dayOfYear())) {
reducedPrec = dateByOrdinal(bld, workingFields, extended, strictISO);
reducedPrec = dateByOrdinal(bld, workingFields, extended);
} else if (workingFields.contains(DateTimeFieldType.weekOfWeekyear())) {
reducedPrec = dateByWeek(bld, workingFields, extended, strictISO);
} else if (workingFields.contains(DateTimeFieldType.dayOfMonth())) {

@@ -288,14 +288,12 @@ public class StrictISODateTimeFormat {
 * @param bld the builder
 * @param fields the fields
 * @param extended true to use extended format
 * @param strictISO true to only allow ISO formats
 * @since 1.1
 */
private static boolean dateByOrdinal(
DateTimeFormatterBuilder bld,
Collection<DateTimeFieldType> fields,
boolean extended,
boolean strictISO) {
boolean extended) {

boolean reducedPrec = false;
if (fields.remove(DateTimeFieldType.year())) {

@@ -48,12 +48,9 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {

// Wait for all 3 nodes to be up
logger.info("--> waiting for 3 nodes to be up");
assertBusy(new Runnable() {
@Override
public void run() {
assertBusy(() -> {
NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get();
assertThat(resp.getNodes().size(), equalTo(3));
}
});

logger.info("--> creating 'test' index");

@@ -126,7 +123,6 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {

Map<DiscoveryNode, NodeExplanation> explanations = cae.getNodeExplanations();

Float noAttrWeight = -1f;
Float barAttrWeight = -1f;
Float fooBarAttrWeight = -1f;
for (Map.Entry<DiscoveryNode, NodeExplanation> entry : explanations.entrySet()) {

@@ -134,7 +130,6 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
String nodeName = node.getName();
NodeExplanation explanation = entry.getValue();
ClusterAllocationExplanation.FinalDecision finalDecision = explanation.getFinalDecision();
String finalExplanation = explanation.getFinalExplanation();
ClusterAllocationExplanation.StoreCopy storeCopy = explanation.getStoreCopy();
Decision d = explanation.getDecision();
float weight = explanation.getWeight();

@@ -143,7 +138,6 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
assertEquals(d.type(), Decision.Type.NO);
if (noAttrNode.equals(nodeName)) {
assertThat(d.toString(), containsString("node does not match index include filters [foo:\"bar\"]"));
noAttrWeight = weight;
assertNull(storeStatus);
assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
explanation.getFinalExplanation());

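The one rewrite (rather than deletion) in `ClusterAllocationExplainIT` is the `assertBusy` cleanup: a single-method anonymous `Runnable` collapses into a Java 8 lambda with identical behaviour. The conversion in isolation:

    Runnable before = new Runnable() {
        @Override
        public void run() {
            System.out.println("checking node count");
        }
    };
    Runnable after = () -> System.out.println("checking node count");
    // before.run() and after.run() are interchangeable; the diff's
    // assertBusy(() -> { ... }) is the same conversion.
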
@@ -47,7 +47,6 @@ public final class ClusterAllocationExplainTests extends ESSingleNodeTestCase {
NodeExplanation explanation = cae.getNodeExplanations().values().iterator().next();
ClusterAllocationExplanation.FinalDecision fd = explanation.getFinalDecision();
ClusterAllocationExplanation.StoreCopy storeCopy = explanation.getStoreCopy();
String finalExplanation = explanation.getFinalExplanation();
Decision d = explanation.getDecision();
assertNotNull("should have a decision", d);
assertEquals(Decision.Type.NO, d.type());

@@ -76,7 +75,6 @@ public final class ClusterAllocationExplainTests extends ESSingleNodeTestCase {
d = explanation.getDecision();
fd = explanation.getFinalDecision();
storeCopy = explanation.getStoreCopy();
finalExplanation = explanation.getFinalExplanation();
assertNotNull("should have a decision", d);
assertEquals(Decision.Type.NO, d.type());
assertEquals(ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED, fd);

@@ -41,7 +41,6 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

@@ -215,9 +214,7 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
assertEquals(allocationDelay, cae2.getAllocationDelayMillis());
assertEquals(remainingDelay, cae2.getRemainingDelayMillis());
for (Map.Entry<DiscoveryNode, NodeExplanation> entry : cae2.getNodeExplanations().entrySet()) {
DiscoveryNode node = entry.getKey();
NodeExplanation explanation = entry.getValue();
IndicesShardStoresResponse.StoreStatus status = explanation.getStoreStatus();
assertNotNull(explanation.getStoreStatus());
assertNotNull(explanation.getDecision());
assertEquals(nodeWeight, explanation.getWeight());

@@ -29,11 +29,11 @@ public class BulkShardRequestTests extends ESTestCase {
public void testToString() {
String index = randomSimpleString(random(), 10);
int count = between(1, 100);
BulkShardRequest r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.NONE, new BulkItemRequest[count]);
BulkShardRequest r = new BulkShardRequest(new ShardId(index, "ignored", 0), RefreshPolicy.NONE, new BulkItemRequest[count]);
assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests", r.toString());
r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.IMMEDIATE, new BulkItemRequest[count]);
r = new BulkShardRequest(new ShardId(index, "ignored", 0), RefreshPolicy.IMMEDIATE, new BulkItemRequest[count]);
assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests and a refresh", r.toString());
r = new BulkShardRequest(null, new ShardId(index, "ignored", 0), RefreshPolicy.WAIT_UNTIL, new BulkItemRequest[count]);
r = new BulkShardRequest(new ShardId(index, "ignored", 0), RefreshPolicy.WAIT_UNTIL, new BulkItemRequest[count]);
assertEquals("BulkShardRequest to [" + index + "] containing [" + count + "] requests blocking until refresh", r.toString());
}
}

@@ -220,7 +220,6 @@ public class ClusterStateCreationUtils {
 * Creates a cluster state with no index
 */
public static ClusterState stateWithNoShard() {
int numberOfNodes = 2;
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId());

@@ -39,7 +39,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.mapper.FieldMapper;
import org.hamcrest.Matcher;

import java.io.IOException;
import java.util.ArrayList;

@@ -55,7 +54,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

@@ -963,21 +961,6 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
return randomBoolean() ? "test" : "alias";
}

private Map<String, Integer> getFieldStatistics(Map<String, Object> stats, String fieldName) throws IOException {
return (Map<String, Integer>) ((Map<String, Object>) stats.get(fieldName)).get("field_statistics");
}

private Map<String, Integer> getTermStatistics(Map<String, Object> stats, String fieldName, String term) {
return (Map<String, Integer>) ((Map<String, Object>) ((Map<String, Object>) stats.get(fieldName)).get("terms")).get(term);
}

private Matcher<Integer> equalOrLessThanTo(Integer value, boolean isEqual) {
if (isEqual) {
return equalTo(value);
}
return lessThan(value);
}

public void testTermVectorsWithVersion() {
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
.setSettings(Settings.builder().put("index.refresh_interval", -1)));

@@ -135,7 +135,7 @@ public class UpdateRequestTests extends ESTestCase {
TimeValue providedTTLValue = TimeValue.parseTimeValue(randomTimeValue(), null, "ttl");
Settings settings = settings(Version.CURRENT).build();

UpdateHelper updateHelper = new UpdateHelper(settings, null, null);
UpdateHelper updateHelper = new UpdateHelper(settings, null);

// We just upsert one document with ttl
IndexRequest indexRequest = new IndexRequest("test", "type1", "1")

@@ -141,15 +141,6 @@ public class SimpleBlocksIT extends ESIntegTestCase {
}
}

private void canNotIndexExists(String index) {
try {
IndicesExistsResponse r = client().admin().indices().prepareExists(index).execute().actionGet();
fail();
} catch (ClusterBlockException e) {
// all is well
}
}

private void setIndexReadOnly(String index, Object value) {
HashMap<String, Object> newSettings = new HashMap<>();
newSettings.put(IndexMetaData.SETTING_READ_ONLY, value);

@@ -22,7 +22,6 @@ package org.elasticsearch.bwcompat;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESBackcompatTestCase;

@@ -46,7 +45,7 @@ public class NodesStatsBasicBackwardsCompatIT extends ESBackcompatTestCase {
for (NodeInfo n : nodesInfo.getNodes()) {
TransportClient tc = TransportClient.builder().settings(settings).build().addTransportAddress(n.getNode().getAddress());
// Just verify that the NS can be sent and serialized/deserialized between nodes with basic indices
NodesStatsResponse ns = tc.admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
tc.admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
tc.close();
}
}

@@ -78,7 +77,7 @@ public class NodesStatsBasicBackwardsCompatIT extends ESBackcompatTestCase {
method.invoke(nsBuilder);
}
}
NodesStatsResponse ns = nsBuilder.execute().actionGet();
nsBuilder.execute().actionGet();
tc.close();
}

@@ -95,7 +95,7 @@ public class NetworkModuleTests extends ModuleTestCase {

static class FakeCatRestHandler extends AbstractCatAction {
public FakeCatRestHandler() {
super(null, null);
super(null);
}
@Override
protected void doRequest(RestRequest request, RestChannel channel, NodeClient client) {}

@@ -19,7 +19,6 @@

package org.elasticsearch.gateway;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

@@ -40,7 +39,7 @@ public class GatewayServiceTests extends ESTestCase {
.put("http.enabled", "false")
.put("discovery.type", "local")
.put(settings.build()).build(),
null, clusterService, null, null, null, null, new NoopDiscovery(), null, null);
null, clusterService, null, null, null, new NoopDiscovery(), null, null);
}

public void testDefaultRecoverAfterTime() throws IOException {

@@ -193,7 +193,6 @@ public class IndexModuleTests extends ESTestCase {
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, settings);
IndexModule module = new IndexModule(indexSettings, null,
new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap()));
Consumer<Settings> listener = (s) -> {};
module.addIndexEventListener(eventListener);
IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, indicesQueryCache, mapperRegistry,
new IndicesFieldDataCache(settings, this.listener));

@@ -77,7 +77,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase {

public void testFilteringAliases() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
IndexShard shard = indexService.getShard(0);
add(indexService, "cats", filter(termQuery("animal", "cat")));
add(indexService, "dogs", filter(termQuery("animal", "dog")));
add(indexService, "all", null);

@@ -101,7 +100,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase {

public void testAliasFilters() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
IndexShard shard = indexService.getShard(0);

add(indexService, "cats", filter(termQuery("animal", "cat")));
add(indexService, "dogs", filter(termQuery("animal", "dog")));

@@ -118,7 +116,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase {

public void testRemovedAliasFilter() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
IndexShard shard = indexService.getShard(0);

add(indexService, "cats", filter(termQuery("animal", "cat")));
remove(indexService, "cats");

@@ -132,7 +129,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase {

public void testUnknownAliasFilter() throws Exception {
IndexService indexService = createIndex("test", Settings.EMPTY);
IndexShard shard = indexService.getShard(0);

add(indexService, "cats", filter(termQuery("animal", "cat")));
add(indexService, "dogs", filter(termQuery("animal", "dog")));

@@ -45,8 +45,6 @@ public class SettingsListenerIT extends ESIntegTestCase {

public static class SettingsListenerPlugin extends Plugin {
private final SettingsTestingService service = new SettingsTestingService();
private static final Setting<Integer> SETTING = Setting.intSetting("index.test.new.setting", 0,
Property.Dynamic, Property.IndexScope);

@Override
public List<Setting<?>> getSettings() {

Some files were not shown because too many files have changed in this diff.