mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-02-26 06:46:10 +00:00

commit ebcbe5d4c5 (parent a5406e1ffa)
@@ -257,7 +257,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }

     /**
-     * Retruns <code>true</code> iff the given class is a registered for an exception to be read.
+     * Returns <code>true</code> iff the given class is a registered for an exception to be read.
      */
     public static boolean isRegistered(Class<? extends Throwable> exception) {
         return CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.containsKey(exception);

@@ -372,7 +372,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }

     /**
-     * Returns the root cause of this exception or mupltiple if different shards caused different exceptions
+     * Returns the root cause of this exception or multiple if different shards caused different exceptions
      */
     public ElasticsearchException[] guessRootCauses() {
         final Throwable cause = getCause();

@@ -383,7 +383,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
     }

     /**
-     * Returns the root cause of this exception or mupltiple if different shards caused different exceptions.
+     * Returns the root cause of this exception or multiple if different shards caused different exceptions.
      * If the given exception is not an instance of {@link org.elasticsearch.ElasticsearchException} an empty array
      * is returned.
      */

@@ -40,7 +40,7 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
     }

     /**
-     * Sets to reutrn all the data.
+     * Sets to return all the data.
      */
     public NodesInfoRequestBuilder all() {
         request.all();

@@ -73,7 +73,7 @@ public class IndexConstraint {
     }

     /**
-     * @return On what property of a field the contraint is going to be applied on (min or max value)
+     * @return On what property of a field the constraint is going to be applied on (min or max value)
      */
     public Property getProperty() {
         return property;

@@ -372,7 +372,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
     }

     /**
-     * The source of the document to index, recopied to a new array if it is unsage.
+     * The source of the document to index, recopied to a new array if it is unsafe.
      */
     public BytesReference source() {
         return source;

@@ -164,7 +164,7 @@ public class PutIndexedScriptRequest extends ActionRequest<PutIndexedScriptReque
     }

     /**
-     * The source of the document to index, recopied to a new array if it is unsage.
+     * The source of the document to index, recopied to a new array if it is unsafe.
      */
     public BytesReference source() {
         return source;

@@ -55,7 +55,7 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
     }

     /**
-     * @return The number of seach contexts that were freed. If this is <code>0</code> the assumption can be made,
+     * @return The number of search contexts that were freed. If this is <code>0</code> the assumption can be made,
      * that the scroll id specified in the request did not exist. (never existed, was expired, or completely consumed)
      */
     public int getNumFreed() {

@@ -223,7 +223,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
     }

     /**
-     * Sets the boost a specific index will receive when the query is executeed against it.
+     * Sets the boost a specific index will receive when the query is executed against it.
      *
      * @param index The index to apply the boost against
      * @param indexBoost The boost to apply to the index

@@ -486,7 +486,7 @@ public final class TermVectorsFields extends Fields {

     // read a vInt. this is used if the integer might be negative. In this case,
     // the writer writes a 0 for -1 or value +1 and accordingly we have to
-    // substract 1 again
+    // subtract 1 again
     // adds one to mock not existing term freq
     int readPotentiallyNegativeVInt(StreamInput stream) throws IOException {
         return stream.readVInt() - 1;

@@ -494,7 +494,7 @@ public final class TermVectorsFields extends Fields {

     // read a vLong. this is used if the integer might be negative. In this
     // case, the writer writes a 0 for -1 or value +1 and accordingly we have to
-    // substract 1 again
+    // subtract 1 again
     // adds one to mock not existing term freq
     long readPotentiallyNegativeVLong(StreamInput stream) throws IOException {
         return stream.readVLong() - 1;
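
The shift-by-one encoding these comments describe is easy to see end to end. A minimal sketch, assuming the writer side mirrors the reader shown in the hunks (the write method here is illustrative, not code from this commit):

```java
// vInts can only encode non-negative values, so the writer adds 1
// (turning the sentinel -1 into 0) and the reader subtracts 1 again.
void writePotentiallyNegativeVInt(StreamOutput stream, int value) throws IOException {
    stream.writeVInt(value + 1); // -1 is written as 0
}

int readPotentiallyNegativeVInt(StreamInput stream) throws IOException {
    return stream.readVInt() - 1; // undo the +1 shift
}
```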
@@ -108,7 +108,7 @@ class JNANatives {
         if (value == JNACLibrary.RLIM_INFINITY) {
             return "unlimited";
         } else {
-            // TODO, on java 8 use Long.toUnsignedString, since thats what it is.
+            // TODO, on java 8 use Long.toUnsignedString, since that's what it is.
             return Long.toString(value);
         }
     }

@@ -104,7 +104,7 @@ final class Seccomp {
         int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5);
         /**
          * used to call seccomp(2), its too new...
-         * this is the only way, DONT use it on some other architecture unless you know wtf you are doing
+         * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing
          */
         NativeLong syscall(NativeLong number, Object... args);
     };

@@ -93,7 +93,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
         // because they would make obtain/release too costly: we really need constant-time
         // operations.
         // Ultimately a better solution would be to only store one kind of data and have the
-        // ability to intepret it either as a source of bytes, doubles, longs, etc. eg. thanks
+        // ability to interpret it either as a source of bytes, doubles, longs, etc. eg. thanks
         // to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
         // that would need to be addressed such as garbage collection of native memory or safety
         // of Unsafe writes.

@@ -107,7 +107,7 @@ public class Requests {
     }

     /**
-     * Creats a new bulk request.
+     * Creates a new bulk request.
      */
     public static BulkRequest bulkRequest() {
         return new BulkRequest();

@@ -38,9 +38,9 @@ public interface LocalNodeMasterListener {
      * The name of the executor that the implementation of the callbacks of this lister should be executed on. The thread
      * that is responsible for managing instances of this lister is the same thread handling the cluster state events. If
      * the work done is the callbacks above is inexpensive, this value may be {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME}
-     * (indicating that the callbaks will run on the same thread as the cluster state events are fired with). On the other hand,
+     * (indicating that the callbacks will run on the same thread as the cluster state events are fired with). On the other hand,
      * if the logic in the callbacks are heavier and take longer to process (or perhaps involve blocking due to IO operations),
-     * prefer to execute them on a separte more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC}
+     * prefer to execute them on a separate more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC}
      * or {@link org.elasticsearch.threadpool.ThreadPool.Names#MANAGEMENT MANAGEMENT}).
      *
      * @return The name of the executor that will run the callbacks of this listener.
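
A minimal sketch of the executor choice this javadoc describes, with an invented listener whose callbacks block on IO (the class and its body are hypothetical):

```java
class SnapshotCleanupListener implements LocalNodeMasterListener {
    @Override
    public void onMaster() { /* blocking IO: not safe on the cluster state thread */ }

    @Override
    public void offMaster() { /* blocking IO */ }

    @Override
    public String executorName() {
        // SAME would run these callbacks on the cluster state thread;
        // GENERIC keeps slow work off that thread.
        return ThreadPool.Names.GENERIC;
    }
}
```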
@@ -959,7 +959,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr

         public MetaData build() {
             // TODO: We should move these datastructures to IndexNameExpressionResolver, this will give the following benefits:
-            // 1) The datastructures will only be rebuilded when needed. Now during serailizing we rebuild these datastructures
+            // 1) The datastructures will only be rebuilded when needed. Now during serializing we rebuild these datastructures
             //    while these datastructures aren't even used.
             // 2) The aliasAndIndexLookup can be updated instead of rebuilding it all the time.

@@ -611,7 +611,7 @@ public class AllocationService extends AbstractComponent {
         return routingNodes;
     }

-    /** ovrride this to control time based decisions during allocation */
+    /** override this to control time based decisions during allocation */
     protected long currentNanoTime() {
         return System.nanoTime();
     }

@@ -30,7 +30,7 @@ import java.util.List;

 /**
  * This {@link RoutingAllocation} keeps a shard which routing
- * allocation has faild
+ * allocation has failed.
  */
 public class FailedRerouteAllocation extends RoutingAllocation {

@@ -571,7 +571,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
         }

         /**
-         * Allocates all given shards on the minimal eligable node for the shards index
+         * Allocates all given shards on the minimal eligible node for the shards index
          * with respect to the weight function. All given shards must be unassigned.
          */
         private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned) {

@@ -611,7 +611,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with 2 replica and 1 shard would look like:
             * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
             * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
-             * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ingoreUnassigned.
+             * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned.
             */
            ShardRouting[] primary = unassigned.drain();
            ShardRouting[] secondary = new ShardRouting[primary.length];

@@ -733,7 +733,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 secondary = tmp;
                 secondaryLength = 0;
             } while (primaryLength > 0);
-            // clear everything we have either added it or moved to ingoreUnassigned
+            // clear everything we have either added it or moved to ignoreUnassigned
             return changed;
         }

@@ -54,7 +54,7 @@ import java.util.Map;
  * <p>
  * Awareness can also be used to prevent over-allocation in the case of node or
  * even "zone" failure. For example in cloud-computing infrastructures like
- * Amazone AWS a cluster might span over multiple "zones". Awareness can be used
+ * Amazon AWS a cluster might span over multiple "zones". Awareness can be used
  * to distribute replicas to individual zones by setting:
  * <pre>
  * cluster.routing.allocation.awareness.attributes: zone

@@ -30,7 +30,7 @@ import org.elasticsearch.common.settings.Settings;
  * Similar to the {@link ClusterRebalanceAllocationDecider} this
  * {@link AllocationDecider} controls the number of currently in-progress
  * re-balance (relocation) operations and restricts node allocations if the
- * configured threashold is reached. The default number of concurrent rebalance
+ * configured threshold is reached. The default number of concurrent rebalance
  * operations is set to <tt>2</tt>
  * <p>
  * Re-balance operations can be controlled in real-time via the cluster update API using

@@ -282,7 +282,7 @@ public class DiskThresholdDecider extends AllocationDecider {

     /**
      * Returns the size of all shards that are currently being relocated to
-     * the node, but may not be finished transfering yet.
+     * the node, but may not be finished transferring yet.
      *
      * If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
      * of all shards

@@ -41,7 +41,7 @@ public class DiscoverySettings extends AbstractComponent {
     public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
     /**
      * sets the timeout for a complete publishing cycle, including both sending and committing. the master
-     * will continute to process the next cluster state update after this time has elapsed
+     * will continue to process the next cluster state update after this time has elapsed
      **/
     public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

@@ -328,7 +328,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
             ClusterState newNodeSpecificClusterState = null;
             synchronized (this) {
                 // we do the marshaling intentionally, to check it works well...
-                // check if we publsihed cluster state at least once and node was in the cluster when we published cluster state the last time
+                // check if we published cluster state at least once and node was in the cluster when we published cluster state the last time
                 if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().id())) {
                     // both conditions are true - which means we can try sending cluster state as diffs
                     if (clusterStateDiffBytes == null) {

@@ -79,7 +79,7 @@ public class ElectMasterService extends AbstractComponent {
     }

     /**
-     * Returns the given nodes sorted by likelyhood of being elected as master, most likely first.
+     * Returns the given nodes sorted by likelihood of being elected as master, most likely first.
      * Non-master nodes are not removed but are rather put in the end
      */
     public List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {

@@ -342,7 +342,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
         // sort the nodes by likelihood of being an active master
         List<DiscoveryNode> sortedNodesToPing = electMasterService.sortByMasterLikelihood(nodesToPingSet);

-        // new add the the unicast targets first
+        // new add the unicast targets first
         List<DiscoveryNode> nodesToPing = CollectionUtils.arrayAsArrayList(configuredTargetNodes);
         nodesToPing.addAll(sortedNodesToPing);

@@ -166,7 +166,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
     /**
      * Called by the response handler of the async action to fetch data. Verifies that its still working
      * on the same cache generation, otherwise the results are discarded. It then goes and fills the relevant data for
-     * the shard (response + failures), issueing a reroute at the end of it to make sure there will be another round
+     * the shard (response + failures), issuing a reroute at the end of it to make sure there will be another round
      * of allocations taking this new data into account.
      */
     protected synchronized void processAsyncFetch(ShardId shardId, T[] responses, FailedNodeException[] failures) {

@@ -255,7 +255,7 @@ public abstract class MetaDataStateFormat<T> {
         List<PathAndStateId> files = new ArrayList<>();
         long maxStateId = -1;
         boolean maxStateIdIsLegacy = true;
-        if (dataLocations != null) { // select all eligable files first
+        if (dataLocations != null) { // select all eligible files first
             for (Path dataLocation : dataLocations) {
                 final Path stateDir = dataLocation.resolve(STATE_DIR_NAME);
                 // now, iterate over the current versions, and find latest one

@@ -39,7 +39,7 @@ public class ESHttpContentDecompressor extends HttpContentDecompressor {
             // compression is enabled so handle the request according to the headers (compressed and uncompressed)
             return super.newContentDecoder(contentEncoding);
         } else {
-            // if compression is disabled only allow "indentity" (uncompressed) requests
+            // if compression is disabled only allow "identity" (uncompressed) requests
             if (HttpHeaders.Values.IDENTITY.equals(contentEncoding)) {
                 // nothing to handle here
                 return null;

@@ -132,7 +132,7 @@ public final class CorsConfig {
      * xhr.withCredentials = true;
      * </pre>
      * The default value for 'withCredentials' is false in which case no cookies are sent.
-     * Settning this to true will included cookies in cross origin requests.
+     * Setting this to true will included cookies in cross origin requests.
      *
      * @return {@code true} if cookies are supported.
      */

@@ -205,7 +205,7 @@ public final class CorsConfig {
      * and this setting will check that the Origin is valid and if it is not valid no
      * further processing will take place, and a error will be returned to the calling client.
      *
-     * @return {@code true} if a CORS request should short-curcuit upon receiving an invalid Origin header.
+     * @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header.
      */
     public boolean isShortCircuit() {
         return shortCircuit;

@@ -133,7 +133,7 @@ public final class CorsConfigBuilder {
     /**
      * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded
      * from the local file system. Calling this method will enable a successful CORS response
-     * with a wildcard for the the CORS response header 'Access-Control-Allow-Origin'.
+     * with a wildcard for the CORS response header 'Access-Control-Allow-Origin'.
      *
      * @return {@link CorsConfigBuilder} to support method chaining.
      */

@@ -143,7 +143,7 @@ public final class IndexModule {
     }

     /**
-     * Adds an {@link IndexStore} type to this index module. Typically stores are registered with a refrence to
+     * Adds an {@link IndexStore} type to this index module. Typically stores are registered with a reference to
      * it's constructor:
      * <pre>
      *     indexModule.addIndexStore("my_store_type", MyStore::new);

@@ -691,7 +691,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         }

         boolean mustReschedule() {
-            // don't re-schedule if its closed or if we dont' have a single shard here..., we are done
+            // don't re-schedule if its closed or if we don't have a single shard here..., we are done
             return indexService.closed.get() == false
                 && closed.get() == false && interval.millis() > 0;
         }

@@ -329,7 +329,7 @@ public final class IndexSettings {
     public boolean isShadowReplicaIndex() { return isShadowReplicaIndex; }

     /**
-     * Returns the node settings. The settings retured from {@link #getSettings()} are a merged version of the
+     * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the
      * index settings and the node settings where node settings are overwritten by index settings.
      */
     public Settings getNodeSettings() {

@@ -22,7 +22,7 @@ package org.elasticsearch.index.engine;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.index.translog.Translog;

-/** Holds a deleted version, which just adds a timestmap to {@link VersionValue} so we know when we can expire the deletion. */
+/** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. */

 class DeleteVersionValue extends VersionValue {
     private final long time;

@@ -29,7 +29,7 @@ import java.io.IOException;
  * An engine is already closed.
  * <p>
  * Note, the relationship between shard and engine indicates that engine closed is shard closed, and
- * we might get something slipping through the the shard and into the engine while the shard is closing.
+ * we might get something slipping through the shard and into the engine while the shard is closing.
  *
  *
  */

@@ -321,7 +321,7 @@ public final class OrdinalsBuilder implements Closeable {
     }

     /**
-     * Retruns the current ordinal or <tt>0</tt> if this build has not been advanced via
+     * Returns the current ordinal or <tt>0</tt> if this build has not been advanced via
      * {@link #nextOrdinal()}.
      */
     public long currentOrdinal() {

@@ -457,7 +457,7 @@ public final class OrdinalsBuilder implements Closeable {
      * This method iterates all terms in the given {@link TermsEnum} and
      * associates each terms ordinal with the terms documents. The caller must
      * exhaust the returned {@link BytesRefIterator} which returns all values
-     * where the first returned value is associted with the ordinal <tt>1</tt>
+     * where the first returned value is associated with the ordinal <tt>1</tt>
      * etc.
      * <p>
      * If the {@link TermsEnum} contains prefix coded numerical values the terms

@@ -85,7 +85,7 @@ public final class ExtractQueryTermsService {
     /**
      * Extracts all query terms from the provided query and adds it to specified list.
      *
-     * From boolean query with no should clauses or phrase queries only the the longest term are selected,
+     * From boolean query with no should clauses or phrase queries only the longest term are selected,
      * since that those terms are likely to be the rarest. Boolean query's must_not clauses are always ignored.
      *
      * If from part of the query, no query terms can be extracted then term extraction is stopped and

@@ -41,7 +41,7 @@ public class PercolateStats implements Streamable, ToXContent {
     private long numQueries;

     /**
-     * Noop constructor for serialazation purposes.
+     * Noop constructor for serialization purposes.
      */
     public PercolateStats() {
     }

@@ -165,7 +165,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue

     /**
      * Sets the minimum number of high frequent query terms that need to match in order to
-     * produce a hit when there are no low frequen terms.
+     * produce a hit when there are no low frequent terms.
      */
     public CommonTermsQueryBuilder highFreqMinimumShouldMatch(String highFreqMinimumShouldMatch) {
         this.highFreqMinimumShouldMatch = highFreqMinimumShouldMatch;

@@ -154,7 +154,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
     }

     private FuzzyQueryBuilder() {
-        // for protoype
+        // for prototype
         this.fieldName = null;
         this.value = null;
     }

@@ -251,7 +251,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
         GeoPoint luceneBottomRight = new GeoPoint(bottomRight);
         if (GeoValidationMethod.isCoerce(validationMethod)) {
             // Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for
-            // the complete longitude range so need to set longitude to the complete longditude range
+            // the complete longitude range so need to set longitude to the complete longitude range
             double right = luceneBottomRight.getLon();
             double left = luceneTopLeft.getLon();

@@ -197,7 +197,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
         return this.optimizeBbox;
     }

-    /** Set validaton method for geo coordinates. */
+    /** Set validation method for geo coordinates. */
     public void setValidationMethod(GeoValidationMethod method) {
         this.validationMethod = method;
     }

@@ -634,7 +634,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
     }

     /**
-     * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the fied.
+     * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the field.
      */
     public MoreLikeThisQueryBuilder analyzer(String analyzer) {
         this.analyzer = analyzer;

@@ -703,7 +703,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
      * Converts an array of String ids to and Item[].
      * @param ids the ids to convert
      * @return the new items array
-     * @deprecated construct the items array externaly and use it in the constructor / setter
+     * @deprecated construct the items array externally and use it in the constructor / setter
      */
     @Deprecated
     public static Item[] ids(String... ids) {

@@ -29,7 +29,7 @@ import java.util.ArrayList;
 import java.util.List;

 /**
- * Parser for the The More Like This Query (MLT Query) which finds documents that are "like" a given set of documents.
+ * Parser for the More Like This Query (MLT Query) which finds documents that are "like" a given set of documents.
  *
  * The documents are provided as a set of strings and/or a list of {@link Item}.
  */

@@ -284,7 +284,7 @@ public class MultiMatchQuery extends MatchQuery {
         } catch (RuntimeException ex) {
             // we can't parse it just use the incoming value -- it will
             // just have a DF of 0 at the end of the day and will be ignored
-            // Note that this is like lenient = true allways
+            // Note that this is like lenient = true always
         }
         return new Term(field, value);
     }

@@ -176,7 +176,7 @@ public final class ShardPath {
             totFreeSpace += nodePath.fileStore.getUsableSpace();
         }

-        // Very rough heurisic of how much disk space we expect the shard will use over its lifetime, the max of current average
+        // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
         // shard size across the cluster and 5% of the total available free space on this node:
         long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0));

@@ -215,7 +215,7 @@ public final class ShardPath {
         // TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know
         // how large they will be once they're done copying, instead of a silly guess for such cases:

-        // Very rough heurisic of how much disk space we expect the shard will use over its lifetime, the max of current average
+        // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
         // shard size across the cluster and 5% of the total available free space on this node:
         long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0));
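
Worked through with invented numbers, the heuristic in both ShardPath hunks behaves like this:

```java
// With a 2 GB cluster-average shard size and 200 GB of usable space on
// this node, the 5% free-space term wins: max(2 GB, 200 GB / 20) = 10 GB.
long avgShardSizeInBytes = 2L << 30;   // 2 GB
long totFreeSpace = 200L << 30;        // 200 GB
long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace / 20.0)); // 10 GB
```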
@@ -64,7 +64,7 @@ final class StoreRecovery {
      * @param indexShard the index shard instance to recovery the shard into
      * @param indexShouldExists <code>true</code> iff the index should exist on disk ie. has the shard been allocated previously on the shards store.
      * @param localNode the reference to the local node
-     * @return <code>true</code> if the the shard has been recovered successfully, <code>false</code> if the recovery
+     * @return <code>true</code> if the shard has been recovered successfully, <code>false</code> if the recovery
      * has been ignored due to a concurrent modification of if the clusters state has changed due to async updates.
      * @see Store
      */

@@ -86,7 +86,7 @@ final class StoreRecovery {
      * previously created index snapshot into an existing initializing shard.
      * @param indexShard the index shard instance to recovery the snapshot from
      * @param repository the repository holding the physical files the shard should be recovered from
-     * @return <code>true</code> if the the shard has been recovered successfully, <code>false</code> if the recovery
+     * @return <code>true</code> if the shard has been recovered successfully, <code>false</code> if the recovery
      * has been ignored due to a concurrent modification of if the clusters state has changed due to async updates.
      */
     boolean recoverFromRepository(final IndexShard indexShard, IndexShardRepository repository, DiscoveryNode localNode) {

@@ -728,7 +728,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
         }

         /**
-         * This is a BWC layer to ensure we update the snapshots metdata with the corresponding hashes before we compare them.
+         * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
          * The new logic for StoreFileMetaData reads the entire <tt>.si</tt> and <tt>segments.n</tt> files to strengthen the
          * comparison of the files on a per-segment / per-commit level.
          */

@@ -42,7 +42,7 @@ import static java.util.Collections.unmodifiableMap;
 /**
  * Contains information about all snapshot for the given shard in repository
  * <p>
- * This class is used to find files that were already snapshoted and clear out files that no longer referenced by any
+ * This class is used to find files that were already snapshotted and clear out files that no longer referenced by any
  * snapshots
  */
 public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, ToXContent, FromXContentBuilder<BlobStoreIndexShardSnapshots> {

@@ -97,7 +97,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     /*
      * TODO
      *  - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this) but we can refactor as we go
-     *  - use a simple BufferedOuputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we need to be able to do random access reads even from the buffer
+     *  - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we need to be able to do random access reads even from the buffer
      *  - we need random exception on the FileSystem API tests for all this.
      *  - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already fsynced far enough
      */

@@ -515,7 +515,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
             if (success == false) {
                 addPendingDelete(index, indexSettings);
             }
-            // this is a pure protection to make sure this index doesn't get re-imported as a dangeling index.
+            // this is a pure protection to make sure this index doesn't get re-imported as a dangling index.
             // we should in the future rather write a tombstone rather than wiping the metadata.
             MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index.getName()));
         }

@@ -403,7 +403,7 @@ public class RecoverySourceHandler {
             } catch (IllegalIndexShardStateException e) {
                 // we can ignore this exception since, on the other node, when it moved to phase3
                 // it will also send shard started, which might cause the index shard we work against
-                // to move be closed by the time we get to the the relocated method
+                // to move be closed by the time we get to the relocated method
             }
         }
         stopWatch.stop();

@@ -68,7 +68,7 @@ import java.util.concurrent.atomic.AtomicInteger;
  */
 public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable {

-    // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService there is no need for a seperate public service
+    // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService there is no need for a separate public service
     public static final Setting<TimeValue> INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
     public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists";
     private static final EnumSet<IndexShardState> ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED);

@@ -254,7 +254,7 @@ public final class IngestDocument {
      * If the path identifies a list, the value will be appended to the existing list.
      * If the path identifies a scalar, the scalar will be converted to a list and
      * the provided value will be added to the newly created list.
-     * Supports multiple values too provided in forms of list, in that case all the values will be appeneded to the
+     * Supports multiple values too provided in forms of list, in that case all the values will be appended to the
      * existing (or newly created) list.
      * @param path The path within the document in dot-notation
      * @param value The value or values to append to the existing ones

@@ -270,7 +270,7 @@ public final class IngestDocument {
      * If the path identifies a list, the value will be appended to the existing list.
      * If the path identifies a scalar, the scalar will be converted to a list and
      * the provided value will be added to the newly created list.
-     * Supports multiple values too provided in forms of list, in that case all the values will be appeneded to the
+     * Supports multiple values too provided in forms of list, in that case all the values will be appended to the
      * existing (or newly created) list.
      * @param fieldPathTemplate Resolves to the path with dot-notation within the document
      * @param valueSource The value source that will produce the value or values to append to the existing ones
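
The append semantics spelled out in these two javadocs, sketched with invented field values (the method name and document state here are assumptions, not code from this commit):

```java
// "tags" is currently the scalar "a": the scalar is first converted
// to a list, then the new value is appended.
doc.appendFieldValue("tags", "b");                     // -> ["a", "b"]
// Appending a list appends every element to the existing list.
doc.appendFieldValue("tags", Arrays.asList("c", "d")); // -> ["a", "b", "c", "d"]
```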
@@ -284,7 +284,7 @@ class InstallPluginCommand extends CliTool.Command {
         List<PluginsService.Bundle> bundles = PluginsService.getPluginBundles(pluginsDir);

         // if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins
-        // thats always the first bundle
+        // that's always the first bundle
         if (isolated == false) {
             jars.addAll(bundles.get(0).urls);
         }

@@ -159,7 +159,7 @@ public class PluginsService extends AbstractComponent {
             }
         }

-        // we don't log jars in lib/ we really shouldnt log modules,
+        // we don't log jars in lib/ we really shouldn't log modules,
         // but for now: just be transparent so we can debug any potential issues
         Set<String> moduleNames = new HashSet<>();
         Set<String> jvmPluginNames = new HashSet<>();

@@ -528,7 +528,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
     }

     /**
-     * In v2.0.0 we changed the matadata file format
+     * In v2.0.0 we changed the metadata file format
      * @return true if legacy version should be used false otherwise
      */
     public static boolean legacyMetaData(Version version) {

@@ -88,7 +88,7 @@ public class RestNodesInfoAction extends BaseRestHandler {

         final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds);
         nodesInfoRequest.timeout(request.param("timeout"));
-        // shortcut, dont do checks if only all is specified
+        // shortcut, don't do checks if only all is specified
         if (metrics.size() == 1 && metrics.contains("_all")) {
             nodesInfoRequest.all();
         } else {

@@ -66,7 +66,7 @@ public class ScriptModule extends AbstractModule {
     }

     /**
-     * This method is called after all modules have been processed but before we actually validate all settings. This allwos the
+     * This method is called after all modules have been processed but before we actually validate all settings. This allows the
      * script extensions to add all their settings.
      */
     public void prepareSettings(SettingsModule settingsModule) {

@@ -247,7 +247,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
             throw new ScriptException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled");
         }

-        // TODO: fix this through some API or something, thats wrong
+        // TODO: fix this through some API or something, that's wrong
         // special exception to prevent expressions from compiling as update or mapping scripts
         boolean expression = "expression".equals(script.getLang());
         boolean notSupported = scriptContext.getKey().equals(ScriptContext.Standard.UPDATE.getKey());

@@ -133,7 +133,7 @@ public abstract class AggregatorBase extends Aggregator {
     }

     /**
-     * Can be overriden by aggregator implementation to be called back when the collection phase starts.
+     * Can be overridden by aggregator implementation to be called back when the collection phase starts.
      */
     protected void doPreCollection() throws IOException {
     }

@@ -252,7 +252,7 @@ public abstract class AggregatorBase extends Aggregator {
     protected void doClose() {}

     /**
-     * Can be overriden by aggregator implementation to be called back when the collection phase ends.
+     * Can be overridden by aggregator implementation to be called back when the collection phase ends.
      */
     protected void doPostCollection() throws IOException {
     }

@@ -92,7 +92,7 @@ public class NestedAggregator extends SingleBucketAggregator {
         // So the trick is to set at the last moment just before needed and we can use its child filter as the
         // parent filter.

-        // Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the the assumption
+        // Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the assumption
         // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during
         // aggs execution
         Query parentFilterNotCached = findClosestNestedPath(parent());

@@ -66,7 +66,7 @@ final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory.L
         int precision = HyperLogLogPlusPlus.DEFAULT_PRECISION;
         while (parent != null) {
             if (parent instanceof SingleBucketAggregator == false) {
-                // if the parent creates buckets, we substract 5 to the precision,
+                // if the parent creates buckets, we subtract 5 to the precision,
                 // which will effectively divide the memory usage of each counter by 32
                 precision -= 5;
             }
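
The "divide by 32" figure in that comment follows from the register count being exponential in the precision; a back-of-the-envelope check, assuming the default precision is 14:

```java
// A HyperLogLog++ sketch keeps on the order of 2^precision registers,
// so dropping the precision by 5 shrinks each counter by 2^5 = 32x.
long registers = 1L << 14;       // precision 14 -> 16384 registers
long reduced   = 1L << (14 - 5); // precision  9 ->   512 registers
assert registers / reduced == 32;
```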
@@ -47,7 +47,7 @@ import java.nio.ByteOrder;
  * requires more space and makes hyperloglog (which is less accurate) used sooner,
  * this is also considerably faster.
  *
- * Trying to understand what this class does whithout having read the paper is
+ * Trying to understand what this class does without having read the paper is
  * considered adventurous.
  */
 public final class HyperLogLogPlusPlus implements Releasable {

@@ -169,7 +169,7 @@ public class HoltWintersModel extends MovAvgModel {
         this.pad = pad;

         // Only pad if we are multiplicative and padding is enabled
-        // The padding amount is not currently user-configurable...i dont see a reason to expose it?
+        // The padding amount is not currently user-configurable...i don't see a reason to expose it?
         this.padding = seasonalityType.equals(SeasonalityType.MULTIPLICATIVE) && pad ? 0.0000000001 : 0;
     }

@@ -442,7 +442,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
     }

     /**
-     * Gets the hightlighter builder for this request.
+     * Gets the highlighter builder for this request.
      */
     public HighlightBuilder highlighter() {
         return highlightBuilder;

@@ -679,7 +679,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
     }

     /**
-     * Sets the boost a specific index will receive when the query is executeed
+     * Sets the boost a specific index will receive when the query is executed
      * against it.
      *
      * @param index

@@ -697,7 +697,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ

     /**
      * Gets the boost a specific indices will receive when the query is
-     * executeed against them.
+     * executed against them.
      */
     public ObjectFloatHashMap<String> indexBoost() {
         return indexBoost;

@@ -41,7 +41,7 @@ import java.util.Comparator;
 import java.util.List;

 /**
- * Simple helper class for {@link FastVectorHighlighter} {@link FragmentsBuilder} implemenations.
+ * Simple helper class for {@link FastVectorHighlighter} {@link FragmentsBuilder} implementations.
  */
 public final class FragmentBuilderHelper {

@@ -27,7 +27,7 @@ import java.io.IOException;
 import java.util.Iterator;

 /*
- * Can iterate over the positions of a term an arbotrary number of times.
+ * Can iterate over the positions of a term an arbitrary number of times.
  * */
 public class CachedPositionIterator extends PositionIterator {

@@ -236,7 +236,7 @@ public class IndexFieldTerm implements Iterable<TermPosition> {
     /*
      * A user might decide inside a script to call get with _POSITIONS and then
      * a second time with _PAYLOADS. If the positions were recorded but the
-     * payloads were not, the user will not have access to them. Therfore, throw
+     * payloads were not, the user will not have access to them. Therefore, throw
      * exception here explaining how to call get().
      */
     public void validateFlags(int flags2) {

@@ -34,7 +34,7 @@ import java.util.List;
  * - doSetNextReader()
  * - needsScores()
  *
- * InternalProfiler facilitates the linking of the the Collector graph
+ * InternalProfiler facilitates the linking of the Collector graph
  */
 public class InternalProfileCollector implements Collector {

@@ -51,7 +51,7 @@ public final class ProfileBreakdown {
      */
     private final long[] timings;

-    /** Scrach to store the current timing type. */
+    /** Scratch to store the current timing type. */
     private TimingType currentTimingType;

     /**

@@ -31,7 +31,7 @@ public final class Profilers {
     private final ContextIndexSearcher searcher;
     private final List<Profiler> profilers;

-    /** Sole constructor. This {@link Profilers} instance will initiall wrap one {@link Profiler}. */
+    /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link Profiler}. */
     public Profilers(ContextIndexSearcher searcher) {
         this.searcher = searcher;
         this.profilers = new ArrayList<>();

@@ -45,7 +45,7 @@ public interface Rescorer {
      * Modifies the result of the previously executed search ({@link TopDocs})
      * in place based on the given {@link RescoreSearchContext}.
      *
-     * @param topDocs the result of the previously exectued search
+     * @param topDocs the result of the previously executed search
      * @param context the current {@link SearchContext}. This will never be <code>null</code>.
      * @param rescoreContext the {@link RescoreSearchContext}. This will never be <code>null</code>
      * @throws IOException if an {@link IOException} occurs during rescoring

@@ -66,7 +66,7 @@ public interface Rescorer {
                     Explanation sourceExplanation) throws IOException;

     /**
-     * Parses the {@link RescoreSearchContext} for this impelementation
+     * Parses the {@link RescoreSearchContext} for this implementation
      *
      * @param parser the parser to read the context from
      * @param context the current shard context

@@ -76,7 +76,7 @@ public interface Rescorer {
     public RescoreSearchContext parse(XContentParser parser, QueryShardContext context) throws IOException;

     /**
-     * Extracts all terms needed to exectue this {@link Rescorer}. This method
+     * Extracts all terms needed to execute this {@link Rescorer}. This method
      * is executed in a distributed frequency collection roundtrip for
      * {@link SearchType#DFS_QUERY_AND_FETCH} and
      * {@link SearchType#DFS_QUERY_THEN_FETCH}

@@ -84,8 +84,8 @@ public interface Rescorer {
     public void extractTerms(SearchContext context, RescoreSearchContext rescoreContext, Set<Term> termsSet);

     /*
-     * TODO: At this point we only have one implemenation which modifies the
-     * TopDocs given. Future implemenations might return actual resutls that
+     * TODO: At this point we only have one implementation which modifies the
+     * TopDocs given. Future implementations might return actual results that
      * contain information about the rescore context. For example a pair wise
      * reranker might return the feature vector for the top N window in order to
      * merge results on the callers side. For now we don't have a return type at

@@ -85,10 +85,10 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
      * Sets the maximum percentage of the terms that at most considered to be
      * misspellings in order to form a correction. This method accepts a float
      * value in the range [0..1) as a fraction of the actual query terms a
-     * number <tt>>=1</tt> as an absolut number of query terms.
+     * number <tt>>=1</tt> as an absolute number of query terms.
      *
      * The default is set to <tt>1.0</tt> which corresponds to that only
-     * corrections with at most 1 missspelled term are returned.
+     * corrections with at most 1 misspelled term are returned.
      */
     public PhraseSuggestionBuilder maxErrors(Float maxErrors) {
         this.maxErrors = maxErrors;
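
A short usage sketch of the two value ranges that javadoc distinguishes (the builder variable is assumed):

```java
suggestion.maxErrors(0.5f); // below 1: a fraction, up to half of the query terms may be misspelled
suggestion.maxErrors(2.0f); // >= 1: an absolute cap of two misspelled terms
```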
@@ -288,7 +288,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
     }

     /**
-     * A "stupid-backoff" smoothing model simialr to <a
+     * A "stupid-backoff" smoothing model similar to <a
      * href="http://en.wikipedia.org/wiki/Katz's_back-off_model"> Katz's
      * Backoff</a>. This model is used as the default if no model is configured.
      * <p>

@@ -704,7 +704,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem

     // this code is a take on guava's HostAndPort, like a HostAndPortRange

-    // pattern for validating ipv6 bracked addresses.
+    // pattern for validating ipv6 bracket addresses.
     // not perfect, but PortsRange should take care of any port range validation, not a regex
     private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$");
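
To make the intent of BRACKET_PATTERN concrete, an illustrative example of the inputs it splits; the real port validation is left to PortsRange, as the comment says:

```java
Pattern bracketPattern = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$");
Matcher m = bracketPattern.matcher("[::1]:9300-9400");
if (m.matches()) {
    String host  = m.group(1); // "::1"
    String ports = m.group(2); // "9300-9400" (null when no port suffix is given)
}
```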
@@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;

 /**
- * Checking simple filtering capabilites of the cluster state
+ * Checking simple filtering capabilities of the cluster state
  *
  */
 public class SimpleClusterStateIT extends ESIntegTestCase {

@@ -51,7 +51,7 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
 import static org.elasticsearch.common.settings.Settings.settingsBuilder;

 /**
- * A base testscase that allows to run tests based on the output of the CAT API
+ * A base testcase that allows to run tests based on the output of the CAT API
  * The input is a line based cat/shards output like:
  *   kibana-int 0 p STARTED 2 24.8kb 10.202.245.2 r5-9-35
  *

@@ -755,7 +755,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
                 .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_id", "node1,node2")).numberOfShards(2).numberOfReplicas(0))
                 .build();

-        // we use a second index here (test1) that never gets assigned otherwise allocateUnassinged is never called if we don't have unassigned shards.
+        // we use a second index here (test1) that never gets assigned otherwise allocateUnassigned is never called if we don't have unassigned shards.
         RoutingTable routingTable = RoutingTable.builder()
                 .addAsNew(metaData.index("test"))
                 .addAsNew(metaData.index("test1"))

@@ -163,7 +163,7 @@ public class ShapeBuilderTests extends ESTestCase {
                 )
                 .build();

-        // LineString that needs to be wrappped
+        // LineString that needs to be wrapped
         ShapeBuilders.newMultiLinestring()
                 .linestring(new LineStringBuilder(new CoordinatesBuilder()
                         .coordinate(150.0, 60.0)

@@ -332,7 +332,7 @@ public class SimpleJodaTests extends ESTestCase {
         Joda.EpochTimePrinter epochTimePrinter = new Joda.EpochTimePrinter(false);
         epochTimePrinter.printTo(buffer, now, Locale.ROOT);
         assertThat(buffer.length(), is(10));
-        // only check the last digit, as seconds go from 0-99 in the unix timestamp and dont stop at 60
+        // only check the last digit, as seconds go from 0-99 in the unix timestamp and don't stop at 60
         assertThat(buffer.toString(), endsWith(String.valueOf(now.getSecondOfMinute() % 10)));

         buffer = new StringBuffer();

@@ -754,7 +754,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     }

     /**
-     * A 4 node cluster with m_m_n set to 3 and each node has one unicast enpoint. One node partitions from the master node.
+     * A 4 node cluster with m_m_n set to 3 and each node has one unicast endpoint. One node partitions from the master node.
      * The temporal unicast responses is empty. When partition is solved the one ping response contains a master node.
      * The rejoining node should take this master node and connect.
      */

@@ -971,7 +971,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     }

     /**
-     * Adds an asymetric break between a master and one of the nodes and makes
+     * Adds an asymmetric break between a master and one of the nodes and makes
      * sure that the node is removed form the cluster, that the node start pinging and that
      * the cluster reforms when healed.
      */

@@ -56,7 +56,7 @@ public class MultiFieldCopyToMapperTests extends ESTestCase {
             assertThat(e.getMessage(), equalTo("copy_to in multi fields is not allowed. Found the copy_to in field [c] which is within a multi field."));
         }

-        // now test that with an older version the pasring just works
+        // now test that with an older version the parsing just works
         indexVersion = randomFrom(versionsWithAndWithoutExpectedExceptions.v2());
         mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build());
         DocumentMapper documentMapper = mapperService.parse("type", new CompressedXContent(mapping.string()), true);

@@ -86,8 +86,8 @@ public class MultiFieldCopyToMapperTests extends ESTestCase {
         return mapping;
     }

-    // returs a tuple where
-    // v1 is a list of versions for which we expect an excpetion when a copy_to in multi fields is found and
+    // returns a tuple where
+    // v1 is a list of versions for which we expect an exception when a copy_to in multi fields is found and
     // v2 is older versions where we throw no exception and we just log a warning
     private static Tuple<List<Version>, List<Version>> versionsWithAndWithoutExpectedExceptions() {
         List<Version> versionsWithException = new ArrayList<>();

@@ -212,7 +212,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase {
         }
     }

-    // The fielddata settings need to be the same after deserializing/re-serialsing, else unneccesary mapping sync's can be triggered
+    // The fielddata settings need to be the same after deserializing/re-serialsing, else unnecessary mapping sync's can be triggered
     public void testMultiFieldsFieldDataSettingsInConsistentOrder() throws Exception {
         final String MY_MULTI_FIELD = "multi_field";

@@ -143,7 +143,7 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase<GeoBo
                 tester.invalidateCoordinate(builder, true);
                 fail("expected exception for broken " + tester.getClass().getName() + " coordinate");
             } catch (IllegalArgumentException e) {
-                // exptected
+                // expected
             }
         }
     }

@@ -111,7 +111,7 @@ public class PreBuiltAnalyzerIntegrationIT extends ESIntegTestCase {
         // check that all above configured analyzers have been loaded
         assertThatAnalyzersHaveBeenLoaded(loadedAnalyzers);

-        // check that all of the prebuiltanalyzers are still open
+        // check that all of the prebuilt analyzers are still open
         assertLuceneAnalyzersAreNotClosed(loadedAnalyzers);
     }

@@ -65,7 +65,7 @@ public class FlushIT extends ESIntegTestCase {
                 @Override
                 public void onResponse(FlushResponse flushResponse) {
                     try {
-                        // dont' use assertAllSuccesssful it uses a randomized context that belongs to a different thread
+                        // don't use assertAllSuccessful it uses a randomized context that belongs to a different thread
                         assertThat("Unexpected ShardFailures: " + Arrays.toString(flushResponse.getShardFailures()), flushResponse.getFailedShards(), equalTo(0));
                         latch.countDown();
                     } catch (Throwable ex) {

@@ -192,7 +192,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
         assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(false));

         // add a transport delegate that will prevent the shard active request to succeed the first time after relocation has finished.
-        // node_1 will then wait for the next cluster state change before it tries a next attempt to delet the shard.
+        // node_1 will then wait for the next cluster state change before it tries a next attempt to delete the shard.
         MockTransportService transportServiceNode_1 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_1);
         TransportService transportServiceNode_2 = internalCluster().getInstance(TransportService.class, node_2);
         final CountDownLatch shardActiveRequestSent = new CountDownLatch(1);

@@ -63,7 +63,7 @@ public class ProcessProbeTests extends ESTestCase {
         // CPU percent can be negative if the system recent cpu usage is not available
         assertThat(cpu.getPercent(), anyOf(lessThan((short) 0), allOf(greaterThanOrEqualTo((short) 0), lessThanOrEqualTo((short) 100))));

-        // CPU time can return -1 if the the platform does not support this operation, let's see which platforms fail
+        // CPU time can return -1 if the platform does not support this operation, let's see which platforms fail
         assertThat(cpu.total, greaterThan(0L));

         ProcessStats.Mem mem = stats.getMem();

@@ -55,7 +55,7 @@ import static org.hamcrest.Matchers.nullValue;
  */
 public class ConcurrentPercolatorIT extends ESIntegTestCase {
     public void testSimpleConcurrentPercolator() throws Exception {
-        // We need to index a document / define mapping, otherwise field1 doesn't get reconized as number field.
+        // We need to index a document / define mapping, otherwise field1 doesn't get recognized as number field.
         // If we don't do this, then 'test2' percolate query gets parsed as a TermQuery and not a RangeQuery.
         // The percolate api doesn't parse the doc if no queries have registered, so it can't lazily create a mapping
         assertAcked(prepareCreate("index").addMapping("type", "field1", "type=long", "field2", "type=string")); // random # shards better has a mapping!

@@ -166,7 +166,7 @@ public class DateHistogramOffsetIT extends ESIntegTestCase {
     }

     /**
-     * @param bucket the bucket to check asssertions for
+     * @param bucket the bucket to check assertions for
      * @param key the expected key
      * @param expectedSize the expected size of the bucket
      */

@@ -273,7 +273,7 @@ public class MovAvgIT extends ESIntegTestCase {
     }

     /**
-     * Exponentionally weighted (EWMA, Single exponential) moving avg
+     * Exponentially weighted (EWMA, Single exponential) moving avg
      *
      * @param window Window of values to compute movavg for
      */

@@ -1727,7 +1727,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
         response = search.get();
         assertHighlight(response, 0, "text", 0, hlQueryMatcher);

-        // Make sure the the highlightQuery is taken into account when it is set on the highlight context instead of the field
+        // Make sure the highlightQuery is taken into account when it is set on the highlight context instead of the field
         highlightBuilder.highlightQuery(matchQuery("text", "query"));
         field.highlighterType("fvh").highlightQuery(null);
         response = search.get();

@@ -115,7 +115,7 @@ public class InnerHitsIT extends ESIntegTestCase {
         InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
         innerHitsBuilder.addNestedInnerHits("comment", "comments",
                 new InnerHitsBuilder.InnerHit().setQuery(matchQuery("comments.message", "fox")));
-        // Inner hits can be defined in two ways: 1) with the query 2) as seperate inner_hit definition
+        // Inner hits can be defined in two ways: 1) with the query 2) as separate inner_hit definition
         SearchRequest[] searchRequests = new SearchRequest[]{
                 client().prepareSearch("articles").setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHits("comment", null))).request(),
                 client().prepareSearch("articles").setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")))

@@ -142,7 +142,7 @@ public class InnerHitsIT extends ESIntegTestCase {
         innerHitsBuilder.addNestedInnerHits("comment", "comments",
                 new InnerHitsBuilder.InnerHit().setQuery(matchQuery("comments.message", "elephant")));
         // Inner hits can be defined in two ways: 1) with the query 2) as
-        // seperate inner_hit definition
+        // separate inner_hit definition
         searchRequests = new SearchRequest[] {
                 client().prepareSearch("articles")
                         .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant")))

@@ -30,7 +30,7 @@ import java.io.IOException;
 public class QueryRescoreModeTests extends ESTestCase {

     /**
-     * Test @link {@link QueryRescoreMode} enum ordinals and names, since serilaization relies on it
+     * Test @link {@link QueryRescoreMode} enum ordinals and names, since serialization relies on it
      */
     public void testQueryRescoreMode() throws IOException {
         float primary = randomFloat();