Merge remote-tracking branch 'upstream/feature-suggest-refactoring' into term-suggest-build

Ali Beyad 2016-02-11 18:40:29 -05:00
commit 5a22da1eba
157 changed files with 1918 additions and 672 deletions


@@ -257,7 +257,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 }
 /**
- * Retruns <code>true</code> iff the given class is a registered for an exception to be read.
+ * Returns <code>true</code> iff the given class is a registered for an exception to be read.
 */
 public static boolean isRegistered(Class<? extends Throwable> exception) {
 return CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.containsKey(exception);
@@ -372,7 +372,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 }
 /**
- * Returns the root cause of this exception or mupltiple if different shards caused different exceptions
+ * Returns the root cause of this exception or multiple if different shards caused different exceptions
 */
 public ElasticsearchException[] guessRootCauses() {
 final Throwable cause = getCause();
@@ -383,7 +383,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 }
 /**
- * Returns the root cause of this exception or mupltiple if different shards caused different exceptions.
+ * Returns the root cause of this exception or multiple if different shards caused different exceptions.
 * If the given exception is not an instance of {@link org.elasticsearch.ElasticsearchException} an empty array
 * is returned.
 */


@@ -40,7 +40,7 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
 }
 /**
- * Sets to reutrn all the data.
+ * Sets to return all the data.
 */
 public NodesInfoRequestBuilder all() {
 request.all();


@@ -73,7 +73,7 @@ public class IndexConstraint {
 }
 /**
- * @return On what property of a field the contraint is going to be applied on (min or max value)
+ * @return On what property of a field the constraint is going to be applied on (min or max value)
 */
 public Property getProperty() {
 return property;


@@ -372,7 +372,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
 }
 /**
- * The source of the document to index, recopied to a new array if it is unsage.
+ * The source of the document to index, recopied to a new array if it is unsafe.
 */
 public BytesReference source() {
 return source;


@@ -164,7 +164,7 @@ public class PutIndexedScriptRequest extends ActionRequest<PutIndexedScriptReque
 }
 /**
- * The source of the document to index, recopied to a new array if it is unsage.
+ * The source of the document to index, recopied to a new array if it is unsafe.
 */
 public BytesReference source() {
 return source;


@@ -88,7 +88,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
 void processIndexRequest(Task task, String action, ActionListener listener, ActionFilterChain chain, IndexRequest indexRequest) {
- executionService.execute(indexRequest, t -> {
+ executionService.executeIndexRequest(indexRequest, t -> {
 logger.error("failed to execute pipeline [{}]", t, indexRequest.getPipeline());
 listener.onFailure(t);
 }, success -> {
@@ -102,7 +102,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
 void processBulkIndexRequest(Task task, BulkRequest original, String action, ActionFilterChain chain, ActionListener<BulkResponse> listener) {
 BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original);
- executionService.execute(() -> bulkRequestModifier, (indexRequest, throwable) -> {
+ executionService.executeBulkRequest(() -> bulkRequestModifier, (indexRequest, throwable) -> {
 logger.debug("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id(), throwable);
 bulkRequestModifier.markCurrentItemAsFailed(throwable);
 }, (throwable) -> {


@@ -69,7 +69,9 @@ final class WriteableIngestDocument implements Writeable<WriteableIngestDocument
 builder.startObject("doc");
 Map<IngestDocument.MetaData, String> metadataMap = ingestDocument.extractMetadata();
 for (Map.Entry<IngestDocument.MetaData, String> metadata : metadataMap.entrySet()) {
- builder.field(metadata.getKey().getFieldName(), metadata.getValue());
+ if (metadata.getValue() != null) {
+ builder.field(metadata.getKey().getFieldName(), metadata.getValue());
+ }
 }
 builder.field("_source", ingestDocument.getSourceAndMetadata());
 builder.startObject("_ingest");


@@ -55,7 +55,7 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
 }
 /**
- * @return The number of seach contexts that were freed. If this is <code>0</code> the assumption can be made,
+ * @return The number of search contexts that were freed. If this is <code>0</code> the assumption can be made,
 * that the scroll id specified in the request did not exist. (never existed, was expired, or completely consumed)
 */
 public int getNumFreed() {


@@ -223,7 +223,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
 }
 /**
- * Sets the boost a specific index will receive when the query is executeed against it.
+ * Sets the boost a specific index will receive when the query is executed against it.
 *
 * @param index The index to apply the boost against
 * @param indexBoost The boost to apply to the index


@@ -415,7 +415,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 public static class RetryOnPrimaryException extends ElasticsearchException {
 public RetryOnPrimaryException(ShardId shardId, String msg) {
- super(msg);
+ this(shardId, msg, null);
+ }
+ public RetryOnPrimaryException(ShardId shardId, String msg, Throwable cause) {
+ super(msg, cause);
 setShard(shardId);
 }
@@ -801,6 +805,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 * relocating copies
 */
 final class ReplicationPhase extends AbstractRunnable {
 private final ReplicationTask task;
 private final ReplicaRequest replicaRequest;
 private final Response finalResponse;
@@ -982,9 +987,17 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 }
 @Override
- public void onFailure(Throwable t) {
- // TODO: handle catastrophic non-channel failures
- onReplicaFailure(nodeId, exp);
+ public void onFailure(Throwable shardFailedError) {
+ if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
+ ShardRouting primaryShard = indexShardReference.routingEntry();
+ String message = String.format(Locale.ROOT, "primary shard [%s] was demoted while failing replica shard [%s] for [%s]", primaryShard, shard, exp);
+ // we are no longer the primary, fail ourselves and start over
+ indexShardReference.failShard(message, shardFailedError);
+ forceFinishAsFailed(new RetryOnPrimaryException(shardId, message, shardFailedError));
+ } else {
+ assert false : shardFailedError;
+ onReplicaFailure(nodeId, exp);
+ }
 }
 }
 );
@@ -1070,7 +1083,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 interface IndexShardReference extends Releasable {
 boolean isRelocated();
+ void failShard(String reason, @Nullable Throwable e);
 ShardRouting routingEntry();
 }
@@ -1098,6 +1111,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
 return indexShard.state() == IndexShardState.RELOCATED;
 }
+ @Override
+ public void failShard(String reason, @Nullable Throwable e) {
+ indexShard.failShard(reason, e);
+ }
 @Override
 public ShardRouting routingEntry() {
 return indexShard.routingEntry();


@@ -486,7 +486,7 @@ public final class TermVectorsFields extends Fields {
 // read a vInt. this is used if the integer might be negative. In this case,
 // the writer writes a 0 for -1 or value +1 and accordingly we have to
- // substract 1 again
+ // subtract 1 again
 // adds one to mock not existing term freq
 int readPotentiallyNegativeVInt(StreamInput stream) throws IOException {
 return stream.readVInt() - 1;
@@ -494,7 +494,7 @@ public final class TermVectorsFields extends Fields {
 // read a vLong. this is used if the integer might be negative. In this
 // case, the writer writes a 0 for -1 or value +1 and accordingly we have to
- // substract 1 again
+ // subtract 1 again
 // adds one to mock not existing term freq
 long readPotentiallyNegativeVLong(StreamInput stream) throws IOException {
 return stream.readVLong() - 1;


@@ -254,10 +254,6 @@ final class Bootstrap {
 INSTANCE = new Bootstrap();
 boolean foreground = !"false".equals(System.getProperty("es.foreground", System.getProperty("es-foreground")));
- // handle the wrapper system property, if its a service, don't run as a service
- if (System.getProperty("wrapper.service", "XXX").equalsIgnoreCase("true")) {
- foreground = false;
- }
 Environment environment = initialSettings(foreground);
 Settings settings = environment.settings();


@@ -108,7 +108,7 @@ class JNANatives {
 if (value == JNACLibrary.RLIM_INFINITY) {
 return "unlimited";
 } else {
- // TODO, on java 8 use Long.toUnsignedString, since thats what it is.
+ // TODO, on java 8 use Long.toUnsignedString, since that's what it is.
 return Long.toString(value);
 }
 }


@@ -104,7 +104,7 @@ final class Seccomp {
 int prctl(int option, NativeLong arg2, NativeLong arg3, NativeLong arg4, NativeLong arg5);
 /**
 * used to call seccomp(2), its too new...
- * this is the only way, DONT use it on some other architecture unless you know wtf you are doing
+ * this is the only way, DON'T use it on some other architecture unless you know wtf you are doing
 */
 NativeLong syscall(NativeLong number, Object... args);
 };


@@ -93,7 +93,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
 // because they would make obtain/release too costly: we really need constant-time
 // operations.
 // Ultimately a better solution would be to only store one kind of data and have the
- // ability to intepret it either as a source of bytes, doubles, longs, etc. eg. thanks
+ // ability to interpret it either as a source of bytes, doubles, longs, etc. eg. thanks
 // to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
 // that would need to be addressed such as garbage collection of native memory or safety
 // of Unsafe writes.


@@ -107,7 +107,7 @@ public class Requests {
 }
 /**
- * Creats a new bulk request.
+ * Creates a new bulk request.
 */
 public static BulkRequest bulkRequest() {
 return new BulkRequest();


@@ -38,9 +38,9 @@ public interface LocalNodeMasterListener {
 * The name of the executor that the implementation of the callbacks of this lister should be executed on. The thread
 * that is responsible for managing instances of this lister is the same thread handling the cluster state events. If
 * the work done is the callbacks above is inexpensive, this value may be {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME}
- * (indicating that the callbaks will run on the same thread as the cluster state events are fired with). On the other hand,
+ * (indicating that the callbacks will run on the same thread as the cluster state events are fired with). On the other hand,
 * if the logic in the callbacks are heavier and take longer to process (or perhaps involve blocking due to IO operations),
- * prefer to execute them on a separte more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC}
+ * prefer to execute them on a separate more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC}
 * or {@link org.elasticsearch.threadpool.ThreadPool.Names#MANAGEMENT MANAGEMENT}).
 *
 * @return The name of the executor that will run the callbacks of this listener.


@@ -959,7 +959,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
 public MetaData build() {
 // TODO: We should move these datastructures to IndexNameExpressionResolver, this will give the following benefits:
- // 1) The datastructures will only be rebuilded when needed. Now during serailizing we rebuild these datastructures
+ // 1) The datastructures will only be rebuilded when needed. Now during serializing we rebuild these datastructures
 // while these datastructures aren't even used.
 // 2) The aliasAndIndexLookup can be updated instead of rebuilding it all the time.


@@ -611,7 +611,7 @@ public class AllocationService extends AbstractComponent {
 return routingNodes;
 }
- /** ovrride this to control time based decisions during allocation */
+ /** override this to control time based decisions during allocation */
 protected long currentNanoTime() {
 return System.nanoTime();
 }


@@ -30,7 +30,7 @@ import java.util.List;
 /**
 * This {@link RoutingAllocation} keeps a shard which routing
- * allocation has faild
+ * allocation has failed.
 */
 public class FailedRerouteAllocation extends RoutingAllocation {


@@ -571,7 +571,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
 }
 /**
- * Allocates all given shards on the minimal eligable node for the shards index
+ * Allocates all given shards on the minimal eligible node for the shards index
 * with respect to the weight function. All given shards must be unassigned.
 */
 private boolean allocateUnassigned(RoutingNodes.UnassignedShards unassigned) {
@@ -611,7 +611,7 @@
 * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with 2 replica and 1 shard would look like:
 * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
 * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
- * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ingoreUnassigned.
+ * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned.
 */
 ShardRouting[] primary = unassigned.drain();
 ShardRouting[] secondary = new ShardRouting[primary.length];
@@ -733,7 +733,7 @@
 secondary = tmp;
 secondaryLength = 0;
 } while (primaryLength > 0);
- // clear everything we have either added it or moved to ingoreUnassigned
+ // clear everything we have either added it or moved to ignoreUnassigned
 return changed;
 }


@@ -54,7 +54,7 @@ import java.util.Map;
 * <p>
 * Awareness can also be used to prevent over-allocation in the case of node or
 * even "zone" failure. For example in cloud-computing infrastructures like
- * Amazone AWS a cluster might span over multiple "zones". Awareness can be used
+ * Amazon AWS a cluster might span over multiple "zones". Awareness can be used
 * to distribute replicas to individual zones by setting:
 * <pre>
 * cluster.routing.allocation.awareness.attributes: zone


@@ -30,7 +30,7 @@ import org.elasticsearch.common.settings.Settings;
 * Similar to the {@link ClusterRebalanceAllocationDecider} this
 * {@link AllocationDecider} controls the number of currently in-progress
 * re-balance (relocation) operations and restricts node allocations if the
- * configured threashold is reached. The default number of concurrent rebalance
+ * configured threshold is reached. The default number of concurrent rebalance
 * operations is set to <tt>2</tt>
 * <p>
 * Re-balance operations can be controlled in real-time via the cluster update API using


@@ -282,7 +282,7 @@ public class DiskThresholdDecider extends AllocationDecider {
 /**
 * Returns the size of all shards that are currently being relocated to
- * the node, but may not be finished transfering yet.
+ * the node, but may not be finished transferring yet.
 *
 * If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
 * of all shards


@@ -93,7 +93,7 @@ import java.util.Locale;
 * <a href="http://www.faqs.org/rfcs/rfc3548.html">RFC3548</a>.</li>
 * <li><em>Throws exceptions instead of returning null values.</em> Because some operations
 * (especially those that may permit the GZIP option) use IO streams, there
- * is a possiblity of an java.io.IOException being thrown. After some discussion and
+ * is a possibility of an java.io.IOException being thrown. After some discussion and
 * thought, I've changed the behavior of the methods to throw java.io.IOExceptions
 * rather than return null if ever there's an error. I think this is more
 * appropriate, though it will require some changes to your code. Sorry,
@@ -1511,7 +1511,7 @@
 if (suspendEncoding) {
 this.out.write(theByte);
 return;
- } // end if: supsended
+ } // end if: suspended
 // Encode?
 if (encode) {
@@ -1565,7 +1565,7 @@
 if (suspendEncoding) {
 this.out.write(theBytes, off, len);
 return;
- } // end if: supsended
+ } // end if: suspended
 for (int i = 0; i < len; i++) {
 write(theBytes[off + i]);


@@ -147,7 +147,7 @@ public class PagedBytesReference implements BytesReference {
 bytearray.get(offset, length, ref);
 // undo the single-page optimization by ByteArray.get(), otherwise
- // a materialized stream will contain traling garbage/zeros
+ // a materialized stream will contain trailing garbage/zeros
 byte[] result = ref.bytes;
 if (result.length != length || ref.offset != 0) {
 result = Arrays.copyOfRange(result, ref.offset, ref.offset + length);
@@ -403,7 +403,7 @@
 return -1;
 }
- final int numBytesToCopy = Math.min(len, length - pos); // copy the full lenth or the remaining part
+ final int numBytesToCopy = Math.min(len, length - pos); // copy the full length or the remaining part
 // current offset into the underlying ByteArray
 long byteArrayOffset = offset + pos;


@@ -33,7 +33,7 @@ package org.elasticsearch.common.component;
 * following logic can be applied:
 * <pre>
 * public void stop() {
- * if (!lifeccycleState.moveToStopped()) {
+ * if (!lifecycleState.moveToStopped()) {
 * return;
 * }
 * // continue with stop logic
@@ -50,7 +50,7 @@
 * if (!lifecycleState.moveToClosed()) {
 * return;
 * }
- * // perofrm close logic here
+ * // perform close logic here
 * }
 * </pre>
 */


@@ -124,7 +124,7 @@ public abstract class CompressedIndexInput extends IndexInput {
 @Override
 public void readBytes(byte[] b, int offset, int len) throws IOException {
- int result = read(b, offset, len, true /* we want to have full reads, thats the contract... */);
+ int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
 if (result < len) {
 throw new EOFException();
 }


@@ -121,7 +121,7 @@ public abstract class CompressedStreamInput extends StreamInput {
 @Override
 public void readBytes(byte[] b, int offset, int len) throws IOException {
- int result = read(b, offset, len, true /* we want to have full reads, thats the contract... */);
+ int result = read(b, offset, len, true /* we want to have full reads, that's the contract... */);
 if (result < len) {
 throw new EOFException();
 }


@@ -52,7 +52,7 @@ public class DeflateCompressor implements Compressor {
 private static final byte[] HEADER = new byte[] { 'D', 'F', 'L', '\0' };
 // 3 is a good trade-off between speed and compression ratio
 private static final int LEVEL = 3;
- // We use buffering on the input and ouput of in/def-laters in order to
+ // We use buffering on the input and output of in/def-laters in order to
 // limit the number of JNI calls
 private static final int BUFFER_SIZE = 4096;


@@ -24,7 +24,7 @@ import java.lang.annotation.Target;
 import static java.lang.annotation.RetentionPolicy.RUNTIME;
 /**
- * Acccompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a
+ * Accompanies a {@literal @}{@link org.elasticsearch.common.inject.Provides Provides} method annotation in a
 * private module to indicate that the provided binding is exposed.
 *
 * @author jessewilson@google.com (Jesse Wilson)


@@ -489,7 +489,7 @@ class InjectorImpl implements Injector, Lookups {
 ParameterizedType parameterizedType = (ParameterizedType) typeLiteralType;
 Type innerType = parameterizedType.getActualTypeArguments()[0];
- // this is unforunate. We don't support building TypeLiterals for type variable like 'T'. If
+ // this is unfortunate. We don't support building TypeLiterals for type variable like 'T'. If
 // this proves problematic, we can probably fix TypeLiteral to support type variables
 if (!(innerType instanceof Class)
 && !(innerType instanceof GenericArrayType)


@@ -41,7 +41,7 @@ import java.util.Objects;
 * TypeLiteral}.
 * <p>
 * Keys do not differentiate between primitive types (int, char, etc.) and
- * their correpsonding wrapper types (Integer, Character, etc.). Primitive
+ * their corresponding wrapper types (Integer, Character, etc.). Primitive
 * types will be replaced with their wrapper types when keys are created.
 *
 * @author crazybob@google.com (Bob Lee)


@@ -30,7 +30,7 @@ import java.util.List;
 import java.util.Set;
 /**
- * Internal respresentation of a constructor annotated with
+ * Internal representation of a constructor annotated with
 * {@link AssistedInject}
 *
 * @author jmourits@google.com (Jerome Mourits)


@@ -24,7 +24,7 @@ import java.util.Arrays;
 import java.util.List;
 /**
- * A list of {@link TypeLiteral}s to match an injectable Constructor's assited
+ * A list of {@link TypeLiteral}s to match an injectable Constructor's assisted
 * parameter types to the corresponding factory method.
 *
 * @author jmourits@google.com (Jerome Mourits)


@@ -91,7 +91,7 @@ public final class Dependency<T> {
 /**
 * Returns the index of this dependency in the injection point's parameter list, or {@code -1} if
- * this dependency does not belong to a parameter list. Only method and constuctor dependencies
+ * this dependency does not belong to a parameter list. Only method and constructor dependencies
 * are elements in a parameter list.
 */
 public int getParameterIndex() {


@@ -125,7 +125,7 @@ public final class InjectionPoint {
 return Collections.unmodifiableList(dependencies);
 }
- // This metohd is necessary to create a Dependency<T> with proper generic type information
+ // This method is necessary to create a Dependency<T> with proper generic type information
 private <T> Dependency<T> newDependency(Key<T> key, boolean allowsNull, int parameterIndex) {
 return new Dependency<>(this, key, allowsNull, parameterIndex);
 }


@@ -34,7 +34,7 @@ import java.util.Objects;
 */
 public final class ProviderLookup<T> implements Element {
- // NOTE: this class is not part of guice and was added so the provder lookup's key can be acessible for tests
+ // NOTE: this class is not part of guice and was added so the provider lookup's key can be accessible for tests
 public static class ProviderImpl<T> implements Provider<T> {
 private ProviderLookup<T> lookup;


@@ -28,7 +28,7 @@ import java.io.IOException;
 /**
 * This exception can be used to wrap a given, not serializable exception
 * to serialize via {@link StreamOutput#writeThrowable(Throwable)}.
- * This class will perserve the stacktrace as well as the suppressed exceptions of
+ * This class will preserve the stacktrace as well as the suppressed exceptions of
 * the throwable it was created with instead of it's own. The stacktrace has no indication
 * of where this exception was created.
 */
*/ */


@@ -39,6 +39,7 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
 import org.elasticsearch.search.rescore.RescoreBuilder;
 import org.elasticsearch.search.suggest.SuggestionBuilder;
+ import org.elasticsearch.search.suggest.completion.context.QueryContext;
 import org.elasticsearch.search.suggest.phrase.SmoothingModel;
 import org.elasticsearch.tasks.Task;
 import org.joda.time.DateTime;
@@ -693,6 +694,13 @@ public abstract class StreamInput extends InputStream {
 return readNamedWriteable(SuggestionBuilder.class);
 }
+ /**
+ * Reads a completion {@link QueryContext} from the current stream
+ */
+ public QueryContext readCompletionSuggestionQueryContext() throws IOException {
+ return readNamedWriteable(QueryContext.class);
+ }
 /**
 * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream
 */
@@ -706,7 +714,7 @@
 public SmoothingModel readPhraseSuggestionSmoothingModel() throws IOException {
 return readNamedWriteable(SmoothingModel.class);
 }
 /**
 * Reads a {@link Task.Status} from the current stream.
 */


@@ -38,6 +38,7 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
 import org.elasticsearch.search.rescore.RescoreBuilder;
 import org.elasticsearch.search.suggest.SuggestionBuilder;
+ import org.elasticsearch.search.suggest.completion.context.QueryContext;
 import org.elasticsearch.search.suggest.phrase.SmoothingModel;
 import org.elasticsearch.tasks.Task;
 import org.joda.time.ReadableInstant;
@@ -678,7 +679,7 @@
 public void writePhraseSuggestionSmoothingModel(SmoothingModel smoothinModel) throws IOException {
 writeNamedWriteable(smoothinModel);
 }
 /**
 * Writes a {@link Task.Status} to the current stream.
 */
@@ -717,4 +718,11 @@
 public void writeSuggestion(SuggestionBuilder suggestion) throws IOException {
 writeNamedWriteable(suggestion);
 }
+ /**
+ * Writes a completion {@link QueryContext} to the current stream
+ */
+ public void writeCompletionSuggestionQueryContext(QueryContext queryContext) throws IOException {
+ writeNamedWriteable(queryContext);
+ }
 }


@@ -627,7 +627,7 @@ public class Lucene {
 }
 /**
- * Parses the version string lenient and returns the the default value if the given string is null or emtpy
+ * Parses the version string lenient and returns the default value if the given string is null or emtpy
 */
 public static Version parseVersionLenient(String toParse, Version defaultValue) {
 return LenientParser.parse(toParse, defaultValue);


@@ -21,7 +21,7 @@ package org.elasticsearch.common.recycler;
 abstract class FilterRecycler<T> implements Recycler<T> {
- /** Get the delegate instance to foward calls to. */
+ /** Get the delegate instance to forward calls to. */
 protected abstract Recycler<T> getDelegate();
 /** Wrap a recycled reference. */


@@ -47,7 +47,7 @@ import java.util.stream.Collectors;
 * A setting. Encapsulates typical stuff like default value, parsing, and scope.
 * Some (dynamic=true) can by modified at run time using the API.
 * All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
- * together with {@link AbstractScopedSettings}. This class contains several untility methods that makes it straight forward
+ * together with {@link AbstractScopedSettings}. This class contains several utility methods that makes it straight forward
 * to add settings for the majority of the cases. For instance a simple boolean settings can be defined like this:
 * <pre>{@code
 * public static final Setting<Boolean>; MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}


@@ -471,7 +471,7 @@ public final class Settings implements ToXContent {
 /**
 * Returns the setting value (as size) associated with the setting key. Provided values can either be
- * absolute values (intepreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
+ * absolute values (interpreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
 * (eg. 12%). If it does not exists, parses the default value provided.
 */
 public ByteSizeValue getAsMemory(String setting, String defaultValue) throws SettingsException {
@@ -480,7 +480,7 @@
 /**
 * Returns the setting value (as size) associated with the setting key. Provided values can either be
- * absolute values (intepreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
+ * absolute values (interpreted as a number of bytes), byte sizes (eg. 1mb) or percentage of the heap size
 * (eg. 12%). If it does not exists, parses the default value provided.
 */
 public ByteSizeValue getAsMemory(String[] settings, String defaultValue) throws SettingsException {


@@ -112,7 +112,7 @@ abstract class AbstractPagedHashMap implements Releasable {
 }
 }
 // The only entries which have not been put in their final position in the previous loop are those that were stored in a slot that
- // is < slot(key, mask). This only happens when slot(key, mask) returned a slot that was close to the end of the array and colision
+ // is < slot(key, mask). This only happens when slot(key, mask) returned a slot that was close to the end of the array and collision
 // resolution has put it back in the first slots. This time, collision resolution will have put them at the beginning of the newly
 // allocated slots. Let's re-add them to make sure they are in the right slot. This 2nd loop will typically exit very early.
 for (long i = buckets; i < newBuckets; ++i) {


@@ -78,7 +78,7 @@ public abstract class ExtensionPoint {
 /**
 * Creates a new {@link ClassMap}
 *
- * @param name the human readable underscore case name of the extension poing. This is used in error messages etc.
+ * @param name the human readable underscore case name of the extension point. This is used in error messages etc.
 * @param extensionClass the base class that should be extended
 * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
 * @param reservedKeys a set of reserved keys by internal implementations
@@ -120,7 +120,7 @@
 }
 /**
- * A Type extension point which basically allows to registerd keyed extensions like {@link ClassMap}
+ * A Type extension point which basically allows to registered keyed extensions like {@link ClassMap}
 * but doesn't instantiate and bind all the registered key value pairs but instead replace a singleton based on a given setting via {@link #bindType(Binder, Settings, String, String)}
 * Note: {@link #bind(Binder)} is not supported by this class
 */
@@ -169,7 +169,7 @@
 /**
 * Creates a new {@link ClassSet}
 *
- * @param name the human readable underscore case name of the extension poing. This is used in error messages etc.
+ * @param name the human readable underscore case name of the extension point. This is used in error messages etc.
 * @param extensionClass the base class that should be extended
 * @param singletons a list of singletons to bind with this extension point - these are bound in {@link #bind(Binder)}
 */


@@ -41,7 +41,7 @@ public class DiscoverySettings extends AbstractComponent {
 public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
 /**
 * sets the timeout for a complete publishing cycle, including both sending and committing. the master
- * will continute to process the next cluster state update after this time has elapsed
+ * will continue to process the next cluster state update after this time has elapsed
 **/
 public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);


@@ -328,7 +328,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
 ClusterState newNodeSpecificClusterState = null;
 synchronized (this) {
 // we do the marshaling intentionally, to check it works well...
- // check if we publsihed cluster state at least once and node was in the cluster when we published cluster state the last time
+ // check if we published cluster state at least once and node was in the cluster when we published cluster state the last time
 if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().id())) {
 // both conditions are true - which means we can try sending cluster state as diffs
 if (clusterStateDiffBytes == null) {


@@ -79,7 +79,7 @@ public class ElectMasterService extends AbstractComponent {
 }
 /**
- * Returns the given nodes sorted by likelyhood of being elected as master, most likely first.
+ * Returns the given nodes sorted by likelihood of being elected as master, most likely first.
 * Non-master nodes are not removed but are rather put in the end
 */
 public List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {


@@ -342,7 +342,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
 // sort the nodes by likelihood of being an active master
 List<DiscoveryNode> sortedNodesToPing = electMasterService.sortByMasterLikelihood(nodesToPingSet);
- // new add the the unicast targets first
+ // new add the unicast targets first
 List<DiscoveryNode> nodesToPing = CollectionUtils.arrayAsArrayList(configuredTargetNodes);
 nodesToPing.addAll(sortedNodesToPing);


@@ -166,7 +166,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
 /**
 * Called by the response handler of the async action to fetch data. Verifies that its still working
 * on the same cache generation, otherwise the results are discarded. It then goes and fills the relevant data for
- * the shard (response + failures), issueing a reroute at the end of it to make sure there will be another round
+ * the shard (response + failures), issuing a reroute at the end of it to make sure there will be another round
 * of allocations taking this new data into account.
 */
 protected synchronized void processAsyncFetch(ShardId shardId, T[] responses, FailedNodeException[] failures) {


@@ -255,7 +255,7 @@ public abstract class MetaDataStateFormat<T> {
 List<PathAndStateId> files = new ArrayList<>();
 long maxStateId = -1;
 boolean maxStateIdIsLegacy = true;
- if (dataLocations != null) { // select all eligable files first
+ if (dataLocations != null) { // select all eligible files first
 for (Path dataLocation : dataLocations) {
 final Path stateDir = dataLocation.resolve(STATE_DIR_NAME);
 // now, iterate over the current versions, and find latest one


@@ -39,7 +39,7 @@ public class ESHttpContentDecompressor extends HttpContentDecompressor {
 // compression is enabled so handle the request according to the headers (compressed and uncompressed)
 return super.newContentDecoder(contentEncoding);
 } else {
- // if compression is disabled only allow "indentity" (uncompressed) requests
+ // if compression is disabled only allow "identity" (uncompressed) requests
 if (HttpHeaders.Values.IDENTITY.equals(contentEncoding)) {
 // nothing to handle here
 return null;
@@ -48,4 +48,4 @@
 }
 }
 }
 }


@@ -132,7 +132,7 @@ public final class CorsConfig {
 * xhr.withCredentials = true;
 * </pre>
 * The default value for 'withCredentials' is false in which case no cookies are sent.
- * Settning this to true will included cookies in cross origin requests.
+ * Setting this to true will included cookies in cross origin requests.
 *
 * @return {@code true} if cookies are supported.
 */
@@ -205,7 +205,7 @@
 * and this setting will check that the Origin is valid and if it is not valid no
 * further processing will take place, and a error will be returned to the calling client.
 *
- * @return {@code true} if a CORS request should short-curcuit upon receiving an invalid Origin header.
+ * @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header.
 */
 public boolean isShortCircuit() {
 return shortCircuit;


@@ -133,7 +133,7 @@ public final class CorsConfigBuilder {
 /**
 * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded
 * from the local file system. Calling this method will enable a successful CORS response
- * with a wildcard for the the CORS response header 'Access-Control-Allow-Origin'.
+ * with a wildcard for the CORS response header 'Access-Control-Allow-Origin'.
 *
 * @return {@link CorsConfigBuilder} to support method chaining.
 */


@@ -143,7 +143,7 @@ public final class IndexModule {
 }
 /**
- * Adds an {@link IndexStore} type to this index module. Typically stores are registered with a refrence to
+ * Adds an {@link IndexStore} type to this index module. Typically stores are registered with a reference to
 * it's constructor:
 * <pre>
 * indexModule.addIndexStore("my_store_type", MyStore::new);


@@ -691,7 +691,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
 }
 boolean mustReschedule() {
- // don't re-schedule if its closed or if we dont' have a single shard here..., we are done
+ // don't re-schedule if its closed or if we don't have a single shard here..., we are done
 return indexService.closed.get() == false
 && closed.get() == false && interval.millis() > 0;
 }


@@ -329,7 +329,7 @@ public final class IndexSettings {
 public boolean isShadowReplicaIndex() { return isShadowReplicaIndex; }
 /**
- * Returns the node settings. The settings retured from {@link #getSettings()} are a merged version of the
+ * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the
 * index settings and the node settings where node settings are overwritten by index settings.
 */
 public Settings getNodeSettings() {


@@ -22,7 +22,7 @@ package org.elasticsearch.index.engine;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.index.translog.Translog;
- /** Holds a deleted version, which just adds a timestmap to {@link VersionValue} so we know when we can expire the deletion. */
+ /** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. */
 class DeleteVersionValue extends VersionValue {
 private final long time;


@@ -29,7 +29,7 @@ import java.io.IOException;
 * An engine is already closed.
 * <p>
 * Note, the relationship between shard and engine indicates that engine closed is shard closed, and
- * we might get something slipping through the the shard and into the engine while the shard is closing.
+ * we might get something slipping through the shard and into the engine while the shard is closing.
 *
 *
 */


@ -321,7 +321,7 @@ public final class OrdinalsBuilder implements Closeable {
} }
/** /**
* Retruns the current ordinal or <tt>0</tt> if this build has not been advanced via * Returns the current ordinal or <tt>0</tt> if this build has not been advanced via
* {@link #nextOrdinal()}. * {@link #nextOrdinal()}.
*/ */
public long currentOrdinal() { public long currentOrdinal() {
@ -457,7 +457,7 @@ public final class OrdinalsBuilder implements Closeable {
* This method iterates all terms in the given {@link TermsEnum} and * This method iterates all terms in the given {@link TermsEnum} and
* associates each term's ordinal with the term's documents. The caller must * associates each term's ordinal with the term's documents. The caller must
* exhaust the returned {@link BytesRefIterator} which returns all values * exhaust the returned {@link BytesRefIterator} which returns all values
* where the first returned value is associted with the ordinal <tt>1</tt> * where the first returned value is associated with the ordinal <tt>1</tt>
* etc. * etc.
* <p> * <p>
* If the {@link TermsEnum} contains prefix coded numerical values the terms * If the {@link TermsEnum} contains prefix coded numerical values the terms

View File

@ -85,7 +85,7 @@ public final class ExtractQueryTermsService {
/** /**
* Extracts all query terms from the provided query and adds them to the specified list. * Extracts all query terms from the provided query and adds them to the specified list.
* *
* From boolean query with no should clauses or phrase queries only the the longest term are selected, * From a boolean query with no should clauses, or from a phrase query, only the longest terms are selected,
* since those terms are likely to be the rarest. Boolean query's must_not clauses are always ignored. * since those terms are likely to be the rarest. Boolean query's must_not clauses are always ignored.
* *
* If no query terms can be extracted from a part of the query then term extraction is stopped and * If no query terms can be extracted from a part of the query then term extraction is stopped and
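A minimal standalone sketch of the selection heuristic this javadoc describes, under the simplifying assumption that we only rank the terms of one phrase; the real ExtractQueryTermsService walks a whole query tree:

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    class LongestTermHeuristic {
        // Pick the longest term as a cheap proxy for the rarest term.
        static String selectLongestTerm(List<String> phraseTerms) {
            return phraseTerms.stream()
                    .max(Comparator.comparingInt(String::length))
                    .orElseThrow(() -> new IllegalArgumentException("no terms"));
        }

        public static void main(String[] args) {
            // "extraordinary" wins: longest, and plausibly the rarest in an index.
            System.out.println(selectLongestTerm(Arrays.asList("the", "extraordinary", "fox")));
        }
    }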

View File

@ -41,7 +41,7 @@ public class PercolateStats implements Streamable, ToXContent {
private long numQueries; private long numQueries;
/** /**
* Noop constructor for serialazation purposes. * Noop constructor for serialization purposes.
*/ */
public PercolateStats() { public PercolateStats() {
} }

View File

@ -165,7 +165,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
/** /**
* Sets the minimum number of high-frequency query terms that need to match in order to * Sets the minimum number of high-frequency query terms that need to match in order to
* produce a hit when there are no low frequen terms. * produce a hit when there are no low-frequency terms.
*/ */
public CommonTermsQueryBuilder highFreqMinimumShouldMatch(String highFreqMinimumShouldMatch) { public CommonTermsQueryBuilder highFreqMinimumShouldMatch(String highFreqMinimumShouldMatch) {
this.highFreqMinimumShouldMatch = highFreqMinimumShouldMatch; this.highFreqMinimumShouldMatch = highFreqMinimumShouldMatch;
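For illustration, a hedged usage sketch of this setter: the chaining is real because the method returns the builder, but the (String, Object) constructor shape and the field/text values are assumptions.

    import org.elasticsearch.index.query.CommonTermsQueryBuilder;

    class CommonTermsUsageSketch {
        // Require at least 2 of the high-frequency terms to match
        // (field name and text are hypothetical).
        static CommonTermsQueryBuilder build() {
            return new CommonTermsQueryBuilder("body", "the quick brown fox")
                    .highFreqMinimumShouldMatch("2");
        }
    }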

View File

@ -154,7 +154,7 @@ public class FuzzyQueryBuilder extends AbstractQueryBuilder<FuzzyQueryBuilder> i
} }
private FuzzyQueryBuilder() { private FuzzyQueryBuilder() {
// for protoype // for prototype
this.fieldName = null; this.fieldName = null;
this.value = null; this.value = null;
} }

View File

@ -251,7 +251,7 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
GeoPoint luceneBottomRight = new GeoPoint(bottomRight); GeoPoint luceneBottomRight = new GeoPoint(bottomRight);
if (GeoValidationMethod.isCoerce(validationMethod)) { if (GeoValidationMethod.isCoerce(validationMethod)) {
// Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for // Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for
// the complete longitude range so need to set longitude to the complete longditude range // the complete longitude range so we need to set longitude to the complete longitude range
double right = luceneBottomRight.getLon(); double right = luceneBottomRight.getLon();
double left = luceneTopLeft.getLon(); double left = luceneTopLeft.getLon();
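The special case reads more easily with numbers; a standalone sketch with hypothetical coordinates (the real code works on GeoPoint objects after coercion):

    class LongitudeWrapSketch {
        public static void main(String[] args) {
            double left = 170.0;
            double right = 530.0; // 170 + 360 after coercion: the box wraps the whole globe
            if (right - left == 360.0 && right > left) {
                // The box covers every longitude, so normalize to the full range.
                left = -180.0;
                right = 180.0;
            }
            System.out.println("[" + left + ", " + right + "]"); // [-180.0, 180.0]
        }
    }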

View File

@ -197,7 +197,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
return this.optimizeBbox; return this.optimizeBbox;
} }
/** Set validaton method for geo coordinates. */ /** Set validation method for geo coordinates. */
public void setValidationMethod(GeoValidationMethod method) { public void setValidationMethod(GeoValidationMethod method) {
this.validationMethod = method; this.validationMethod = method;
} }

View File

@ -634,7 +634,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
} }
/** /**
* The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the fied. * The analyzer that will be used to analyze the text. Defaults to the analyzer associated with the field.
*/ */
public MoreLikeThisQueryBuilder analyzer(String analyzer) { public MoreLikeThisQueryBuilder analyzer(String analyzer) {
this.analyzer = analyzer; this.analyzer = analyzer;
@ -703,7 +703,7 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
* Converts an array of String ids to an Item[]. * Converts an array of String ids to an Item[].
* @param ids the ids to convert * @param ids the ids to convert
* @return the new items array * @return the new items array
* @deprecated construct the items array externaly and use it in the constructor / setter * @deprecated construct the items array externally and use it in the constructor / setter
*/ */
@Deprecated @Deprecated
public static Item[] ids(String... ids) { public static Item[] ids(String... ids) {

View File

@ -29,7 +29,7 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
/** /**
* Parser for the The More Like This Query (MLT Query) which finds documents that are "like" a given set of documents. * Parser for the More Like This Query (MLT Query) which finds documents that are "like" a given set of documents.
* *
* The documents are provided as a set of strings and/or a list of {@link Item}. * The documents are provided as a set of strings and/or a list of {@link Item}.
*/ */

View File

@ -284,7 +284,7 @@ public class MultiMatchQuery extends MatchQuery {
} catch (RuntimeException ex) { } catch (RuntimeException ex) {
// we can't parse it just use the incoming value -- it will // we can't parse it just use the incoming value -- it will
// just have a DF of 0 at the end of the day and will be ignored // just have a DF of 0 at the end of the day and will be ignored
// Note that this is like lenient = true allways // Note that this is like lenient = true always
} }
return new Term(field, value); return new Term(field, value);
} }

View File

@ -176,7 +176,7 @@ public final class ShardPath {
totFreeSpace += nodePath.fileStore.getUsableSpace(); totFreeSpace += nodePath.fileStore.getUsableSpace();
} }
// Very rough heurisic of how much disk space we expect the shard will use over its lifetime, the max of current average // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
// shard size across the cluster and 5% of the total available free space on this node: // shard size across the cluster and 5% of the total available free space on this node:
long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0)); long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0));
@ -215,7 +215,7 @@ public final class ShardPath {
// TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know // TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know
// how large they will be once they're done copying, instead of a silly guess for such cases: // how large they will be once they're done copying, instead of a silly guess for such cases:
// Very rough heurisic of how much disk space we expect the shard will use over its lifetime, the max of current average // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
// shard size across the cluster and 5% of the total available free space on this node: // shard size across the cluster and 5% of the total available free space on this node:
long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0)); long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0));
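The heuristic is easy to check with numbers; a standalone sketch with hypothetical sizes:

    class ShardSizeHeuristic {
        // Estimated lifetime shard size: the max of the cluster-wide average shard
        // size and 5% (1/20) of the free space on this node.
        static long estimateShardSizeInBytes(long avgShardSizeInBytes, long totFreeSpace) {
            return Math.max(avgShardSizeInBytes, (long) (totFreeSpace / 20.0));
        }

        public static void main(String[] args) {
            long avg = 2L << 30;    // 2 GiB average shard size (hypothetical)
            long free = 100L << 30; // 100 GiB free on this node (hypothetical)
            // 5% of 100 GiB is 5 GiB, which beats the 2 GiB average.
            System.out.println(estimateShardSizeInBytes(avg, free)); // 5368709120
        }
    }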

View File

@ -64,7 +64,7 @@ final class StoreRecovery {
* @param indexShard the index shard instance to recover the shard into * @param indexShard the index shard instance to recover the shard into
* @param indexShouldExists <code>true</code> iff the index should exist on disk, i.e. the shard has been allocated previously on this shard's store. * @param indexShouldExists <code>true</code> iff the index should exist on disk, i.e. the shard has been allocated previously on this shard's store.
* @param localNode the reference to the local node * @param localNode the reference to the local node
* @return <code>true</code> if the the shard has been recovered successfully, <code>false</code> if the recovery * @return <code>true</code> if the shard has been recovered successfully, <code>false</code> if the recovery
* has been ignored due to a concurrent modification or if the cluster state has changed due to async updates. * has been ignored due to a concurrent modification or if the cluster state has changed due to async updates.
* @see Store * @see Store
*/ */
@ -86,7 +86,7 @@ final class StoreRecovery {
* previously created index snapshot into an existing initializing shard. * previously created index snapshot into an existing initializing shard.
* @param indexShard the index shard instance to recover the snapshot from * @param indexShard the index shard instance to recover the snapshot from
* @param repository the repository holding the physical files the shard should be recovered from * @param repository the repository holding the physical files the shard should be recovered from
* @return <code>true</code> if the the shard has been recovered successfully, <code>false</code> if the recovery * @return <code>true</code> if the shard has been recovered successfully, <code>false</code> if the recovery
* has been ignored due to a concurrent modification or if the cluster state has changed due to async updates. * has been ignored due to a concurrent modification or if the cluster state has changed due to async updates.
*/ */
boolean recoverFromRepository(final IndexShard indexShard, IndexShardRepository repository, DiscoveryNode localNode) { boolean recoverFromRepository(final IndexShard indexShard, IndexShardRepository repository, DiscoveryNode localNode) {

View File

@ -728,7 +728,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
} }
/** /**
* This is a BWC layer to ensure we update the snapshots metdata with the corresponding hashes before we compare them. * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
* The new logic for StoreFileMetaData reads the entire <tt>.si</tt> and <tt>segments.n</tt> files to strengthen the * The new logic for StoreFileMetaData reads the entire <tt>.si</tt> and <tt>segments.n</tt> files to strengthen the
* comparison of the files on a per-segment / per-commit level. * comparison of the files on a per-segment / per-commit level.
*/ */

View File

@ -42,7 +42,7 @@ import static java.util.Collections.unmodifiableMap;
/** /**
* Contains information about all snapshots for the given shard in the repository * Contains information about all snapshots for the given shard in the repository
* <p> * <p>
* This class is used to find files that were already snapshoted and clear out files that no longer referenced by any * This class is used to find files that were already snapshotted and clear out files that are no longer referenced by any
* snapshots * snapshots
*/ */
public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, ToXContent, FromXContentBuilder<BlobStoreIndexShardSnapshots> { public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, ToXContent, FromXContentBuilder<BlobStoreIndexShardSnapshots> {

View File

@ -97,7 +97,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
/* /*
* TODO * TODO
* - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs need this) but we can refactor as we go * - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs need this) but we can refactor as we go
* - use a simple BufferedOuputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we need to be able to do random access reads even from the buffer * - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into its superclass... the tricky bit is we need to be able to do random access reads even from the buffer
* - we need random exceptions on the FileSystem API tests for all this. * - we need random exceptions on the FileSystem API tests for all this.
* - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already fsynced far enough * - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already fsynced far enough
*/ */

View File

@ -515,7 +515,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
if (success == false) { if (success == false) {
addPendingDelete(index, indexSettings); addPendingDelete(index, indexSettings);
} }
// this is a pure protection to make sure this index doesn't get re-imported as a dangeling index. // this is a pure protection to make sure this index doesn't get re-imported as a dangling index.
// we should in the future write a tombstone rather than wiping the metadata. // we should in the future write a tombstone rather than wiping the metadata.
MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index.getName())); MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index.getName()));
} }

View File

@ -403,7 +403,7 @@ public class RecoverySourceHandler {
} catch (IllegalIndexShardStateException e) { } catch (IllegalIndexShardStateException e) {
// we can ignore this exception since, on the other node, when it moved to phase3 // we can ignore this exception since, on the other node, when it moved to phase3
// it will also send shard started, which might cause the index shard we work against // it will also send shard started, which might cause the index shard we work against
// to move be closed by the time we get to the the relocated method // to be closed by the time we get to the relocated method
} }
} }
stopWatch.stop(); stopWatch.stop();

View File

@ -68,7 +68,7 @@ import java.util.concurrent.atomic.AtomicInteger;
*/ */
public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable { public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable {
// TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService there is no need for a seperate public service // TODO this class can be folded into IndicesService, and partially into IndicesClusterStateService; there is no need for a separate public service
public static final Setting<TimeValue> INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER); public static final Setting<TimeValue> INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists";
private static final EnumSet<IndexShardState> ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED); private static final EnumSet<IndexShardState> ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED);

View File

@ -41,7 +41,7 @@ public class PipelineExecutionService {
this.threadPool = threadPool; this.threadPool = threadPool;
} }
public void execute(IndexRequest request, Consumer<Throwable> failureHandler, Consumer<Boolean> completionHandler) { public void executeIndexRequest(IndexRequest request, Consumer<Throwable> failureHandler, Consumer<Boolean> completionHandler) {
Pipeline pipeline = getPipeline(request.getPipeline()); Pipeline pipeline = getPipeline(request.getPipeline());
threadPool.executor(ThreadPool.Names.INDEX).execute(new AbstractRunnable() { threadPool.executor(ThreadPool.Names.INDEX).execute(new AbstractRunnable() {
@ -58,10 +58,10 @@ public class PipelineExecutionService {
}); });
} }
public void execute(Iterable<ActionRequest<?>> actionRequests, public void executeBulkRequest(Iterable<ActionRequest<?>> actionRequests,
BiConsumer<IndexRequest, Throwable> itemFailureHandler, BiConsumer<IndexRequest, Throwable> itemFailureHandler,
Consumer<Throwable> completionHandler) { Consumer<Throwable> completionHandler) {
threadPool.executor(ThreadPool.Names.INDEX).execute(new AbstractRunnable() { threadPool.executor(ThreadPool.Names.BULK).execute(new AbstractRunnable() {
@Override @Override
public void onFailure(Throwable t) { public void onFailure(Throwable t) {
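This hunk carries the one behavioral change in the section: execute is split into executeIndexRequest and executeBulkRequest, and bulk processing moves from the INDEX thread pool to the BULK thread pool. A standalone sketch of that dispatch pattern, with plain executors standing in for the ThreadPool and Runnables standing in for pipeline work:

    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.function.Consumer;

    class PipelineDispatchSketch {
        private final ExecutorService indexExecutor = Executors.newFixedThreadPool(4); // stands in for INDEX
        private final ExecutorService bulkExecutor = Executors.newFixedThreadPool(4);  // stands in for BULK

        // A single index request keeps running on the index executor.
        void executeIndexRequest(Runnable pipeline, Consumer<Throwable> failureHandler) {
            indexExecutor.execute(() -> {
                try {
                    pipeline.run();
                } catch (Throwable t) {
                    failureHandler.accept(t);
                }
            });
        }

        // A bulk of requests is now routed to the bulk executor instead.
        void executeBulkRequest(List<Runnable> pipelines, Consumer<Throwable> itemFailureHandler) {
            bulkExecutor.execute(() -> {
                for (Runnable pipeline : pipelines) {
                    try {
                        pipeline.run();
                    } catch (Throwable t) {
                        itemFailureHandler.accept(t); // per-item failures don't abort the bulk
                    }
                }
            });
        }
    }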

View File

@ -254,7 +254,7 @@ public final class IngestDocument {
* If the path identifies a list, the value will be appended to the existing list. * If the path identifies a list, the value will be appended to the existing list.
* If the path identifies a scalar, the scalar will be converted to a list and * If the path identifies a scalar, the scalar will be converted to a list and
* the provided value will be added to the newly created list. * the provided value will be added to the newly created list.
* Supports multiple values too provided in forms of list, in that case all the values will be appeneded to the * Also supports multiple values provided in the form of a list, in which case all the values will be appended to the
* existing (or newly created) list. * existing (or newly created) list.
* @param path The path within the document in dot-notation * @param path The path within the document in dot-notation
* @param value The value or values to append to the existing ones * @param value The value or values to append to the existing ones
@ -270,7 +270,7 @@ public final class IngestDocument {
* If the path identifies a list, the value will be appended to the existing list. * If the path identifies a list, the value will be appended to the existing list.
* If the path identifies a scalar, the scalar will be converted to a list and * If the path identifies a scalar, the scalar will be converted to a list and
* the provided value will be added to the newly created list. * the provided value will be added to the newly created list.
* Supports multiple values too provided in forms of list, in that case all the values will be appeneded to the * Also supports multiple values provided in the form of a list, in which case all the values will be appended to the
* existing (or newly created) list. * existing (or newly created) list.
* @param fieldPathTemplate Resolves to the path with dot-notation within the document * @param fieldPathTemplate Resolves to the path with dot-notation within the document
* @param valueSource The value source that will produce the value or values to append to the existing ones * @param valueSource The value source that will produce the value or values to append to the existing ones
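A standalone sketch of the append semantics described here, with a flat Map standing in for IngestDocument and dot-notation path resolution left out:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class AppendSketch {
        // Lists grow in place; a scalar is first converted to a one-element list;
        // a collection of values is appended element by element.
        @SuppressWarnings("unchecked")
        static void appendFieldValue(Map<String, Object> doc, String field, Object value) {
            Object existing = doc.get(field);
            List<Object> list;
            if (existing instanceof List) {
                list = (List<Object>) existing;
            } else {
                list = new ArrayList<>();
                if (existing != null) {
                    list.add(existing); // scalar becomes a list
                }
                doc.put(field, list);
            }
            if (value instanceof Collection) {
                list.addAll((Collection<Object>) value); // all provided values are appended
            } else {
                list.add(value);
            }
        }

        public static void main(String[] args) {
            Map<String, Object> doc = new HashMap<>();
            doc.put("tags", "a");
            appendFieldValue(doc, "tags", List.of("b", "c"));
            System.out.println(doc); // {tags=[a, b, c]}
        }
    }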

View File

@ -284,7 +284,7 @@ class InstallPluginCommand extends CliTool.Command {
List<PluginsService.Bundle> bundles = PluginsService.getPluginBundles(pluginsDir); List<PluginsService.Bundle> bundles = PluginsService.getPluginBundles(pluginsDir);
// if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins // if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins
// thats always the first bundle // that's always the first bundle
if (isolated == false) { if (isolated == false) {
jars.addAll(bundles.get(0).urls); jars.addAll(bundles.get(0).urls);
} }

View File

@ -159,7 +159,7 @@ public class PluginsService extends AbstractComponent {
} }
} }
// we don't log jars in lib/ we really shouldnt log modules, // we don't log jars in lib/; we really shouldn't log modules,
// but for now: just be transparent so we can debug any potential issues // but for now: just be transparent so we can debug any potential issues
Set<String> moduleNames = new HashSet<>(); Set<String> moduleNames = new HashSet<>();
Set<String> jvmPluginNames = new HashSet<>(); Set<String> jvmPluginNames = new HashSet<>();

View File

@ -528,7 +528,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
} }
/** /**
* In v2.0.0 we changed the matadata file format * In v2.0.0 we changed the metadata file format
* @return true if the legacy version should be used, false otherwise * @return true if the legacy version should be used, false otherwise
*/ */
public static boolean legacyMetaData(Version version) { public static boolean legacyMetaData(Version version) {

View File

@ -88,7 +88,7 @@ public class RestNodesInfoAction extends BaseRestHandler {
final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds); final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds);
nodesInfoRequest.timeout(request.param("timeout")); nodesInfoRequest.timeout(request.param("timeout"));
// shortcut, dont do checks if only all is specified // shortcut, don't do checks if only all is specified
if (metrics.size() == 1 && metrics.contains("_all")) { if (metrics.size() == 1 && metrics.contains("_all")) {
nodesInfoRequest.all(); nodesInfoRequest.all();
} else { } else {

View File

@ -66,7 +66,7 @@ public class ScriptModule extends AbstractModule {
} }
/** /**
* This method is called after all modules have been processed but before we actually validate all settings. This allwos the * This method is called after all modules have been processed but before we actually validate all settings. This allows the
* script extensions to add all their settings. * script extensions to add all their settings.
*/ */
public void prepareSettings(SettingsModule settingsModule) { public void prepareSettings(SettingsModule settingsModule) {

View File

@ -247,7 +247,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
throw new ScriptException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled"); throw new ScriptException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled");
} }
// TODO: fix this through some API or something, thats wrong // TODO: fix this through some API or something, that's wrong
// special exception to prevent expressions from compiling as update or mapping scripts // special exception to prevent expressions from compiling as update or mapping scripts
boolean expression = "expression".equals(script.getLang()); boolean expression = "expression".equals(script.getLang());
boolean notSupported = scriptContext.getKey().equals(ScriptContext.Standard.UPDATE.getKey()); boolean notSupported = scriptContext.getKey().equals(ScriptContext.Standard.UPDATE.getKey());

View File

@ -133,7 +133,7 @@ public abstract class AggregatorBase extends Aggregator {
} }
/** /**
* Can be overriden by aggregator implementation to be called back when the collection phase starts. * Can be overridden by aggregator implementations to be called back when the collection phase starts.
*/ */
protected void doPreCollection() throws IOException { protected void doPreCollection() throws IOException {
} }
@ -252,7 +252,7 @@ public abstract class AggregatorBase extends Aggregator {
protected void doClose() {} protected void doClose() {}
/** /**
* Can be overriden by aggregator implementation to be called back when the collection phase ends. * Can be overridden by aggregator implementations to be called back when the collection phase ends.
*/ */
protected void doPostCollection() throws IOException { protected void doPostCollection() throws IOException {
} }
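A hypothetical subclass illustrating the two hooks; only the hook names and signatures come from the API above, everything else is invented for illustration:

    import java.io.IOException;

    class TimingAggregatorSketch /* would extend AggregatorBase */ {
        private long startNanos;

        protected void doPreCollection() throws IOException {
            startNanos = System.nanoTime(); // runs once, before collection starts
        }

        protected void doPostCollection() throws IOException {
            long tookNanos = System.nanoTime() - startNanos; // runs once, after collection ends
            System.out.println("collection took " + tookNanos + "ns");
        }
    }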

View File

@ -92,7 +92,7 @@ public class NestedAggregator extends SingleBucketAggregator {
// So the trick is to set it at the last moment, just before it is needed, and we can use its child filter as the // So the trick is to set it at the last moment, just before it is needed, and we can use its child filter as the
// parent filter. // parent filter.
// Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the the assumption // Additional NOTE: Before this logic was performed in the setNextReader(...) method, but the assumption
// that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during // that aggs instances are constructed in reverse doesn't hold when buckets are constructed lazily during
// aggs execution // aggs execution
Query parentFilterNotCached = findClosestNestedPath(parent()); Query parentFilterNotCached = findClosestNestedPath(parent());

View File

@ -66,7 +66,7 @@ final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory.L
int precision = HyperLogLogPlusPlus.DEFAULT_PRECISION; int precision = HyperLogLogPlusPlus.DEFAULT_PRECISION;
while (parent != null) { while (parent != null) {
if (parent instanceof SingleBucketAggregator == false) { if (parent instanceof SingleBucketAggregator == false) {
// if the parent creates buckets, we substract 5 to the precision, // if the parent creates buckets, we subtract 5 from the precision,
// which will effectively divide the memory usage of each counter by 32 // which will effectively divide the memory usage of each counter by 32
precision -= 5; precision -= 5;
} }
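The arithmetic behind "divide the memory usage of each counter by 32": HyperLogLog++ keeps on the order of 2^precision registers, so subtracting 5 from the precision shrinks that count by 2^5 = 32. A standalone sketch, assuming a starting precision of 14:

    class PrecisionMemorySketch {
        public static void main(String[] args) {
            int precision = 14;            // assumed default precision
            long before = 1L << precision; // 16384 registers
            precision -= 5;                // one bucket-creating parent above this agg
            long after = 1L << precision;  // 512 registers
            System.out.println(before / after); // 32
        }
    }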

View File

@ -47,7 +47,7 @@ import java.nio.ByteOrder;
* requires more space and makes hyperloglog (which is less accurate) used sooner, * requires more space and makes hyperloglog (which is less accurate) used sooner,
* this is also considerably faster. * this is also considerably faster.
* *
* Trying to understand what this class does whithout having read the paper is * Trying to understand what this class does without having read the paper is
* considered adventurous. * considered adventurous.
*/ */
public final class HyperLogLogPlusPlus implements Releasable { public final class HyperLogLogPlusPlus implements Releasable {

View File

@ -169,7 +169,7 @@ public class HoltWintersModel extends MovAvgModel {
this.pad = pad; this.pad = pad;
// Only pad if we are multiplicative and padding is enabled // Only pad if we are multiplicative and padding is enabled
// The padding amount is not currently user-configurable...i dont see a reason to expose it? // The padding amount is not currently user-configurable... I don't see a reason to expose it?
this.padding = seasonalityType.equals(SeasonalityType.MULTIPLICATIVE) && pad ? 0.0000000001 : 0; this.padding = seasonalityType.equals(SeasonalityType.MULTIPLICATIVE) && pad ? 0.0000000001 : 0;
} }

View File

@ -442,7 +442,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} }
/** /**
* Gets the hightlighter builder for this request. * Gets the highlighter builder for this request.
*/ */
public HighlightBuilder highlighter() { public HighlightBuilder highlighter() {
return highlightBuilder; return highlightBuilder;
@ -679,7 +679,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} }
/** /**
* Sets the boost a specific index will receive when the query is executeed * Sets the boost a specific index will receive when the query is executed
* against it. * against it.
* *
* @param index * @param index
@ -697,7 +697,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
/** /**
* Gets the boost specific indices will receive when the query is * Gets the boost specific indices will receive when the query is
* executeed against them. * executed against them.
*/ */
public ObjectFloatHashMap<String> indexBoost() { public ObjectFloatHashMap<String> indexBoost() {
return indexBoost; return indexBoost;
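A hedged usage sketch of the per-index boost: the chained (String, float) setter shape is assumed from the accessors shown here, and the index names are hypothetical.

    import org.elasticsearch.search.builder.SearchSourceBuilder;

    class IndexBoostUsageSketch {
        // Prefer hits from "recent" over "archive" when searching both indices.
        static SearchSourceBuilder build() {
            return new SearchSourceBuilder()
                    .indexBoost("recent", 2.0f)
                    .indexBoost("archive", 0.5f);
        }
    }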

View File

@ -41,7 +41,7 @@ import java.util.Comparator;
import java.util.List; import java.util.List;
/** /**
* Simple helper class for {@link FastVectorHighlighter} {@link FragmentsBuilder} implemenations. * Simple helper class for {@link FastVectorHighlighter} {@link FragmentsBuilder} implementations.
*/ */
public final class FragmentBuilderHelper { public final class FragmentBuilderHelper {

View File

@ -27,7 +27,7 @@ import java.io.IOException;
import java.util.Iterator; import java.util.Iterator;
/* /*
* Can iterate over the positions of a term an arbotrary number of times. * Can iterate over the positions of a term an arbitrary number of times.
* */ * */
public class CachedPositionIterator extends PositionIterator { public class CachedPositionIterator extends PositionIterator {

View File

@ -236,7 +236,7 @@ public class IndexFieldTerm implements Iterable<TermPosition> {
/* /*
* A user might decide inside a script to call get with _POSITIONS and then * A user might decide inside a script to call get with _POSITIONS and then
* a second time with _PAYLOADS. If the positions were recorded but the * a second time with _PAYLOADS. If the positions were recorded but the
* payloads were not, the user will not have access to them. Therfore, throw * payloads were not, the user will not have access to them. Therefore, throw
* exception here explaining how to call get(). * exception here explaining how to call get().
*/ */
public void validateFlags(int flags2) { public void validateFlags(int flags2) {

View File

@ -34,7 +34,7 @@ import java.util.List;
* - doSetNextReader() * - doSetNextReader()
* - needsScores() * - needsScores()
* *
* InternalProfiler facilitates the linking of the the Collector graph * InternalProfiler facilitates the linking of the Collector graph
*/ */
public class InternalProfileCollector implements Collector { public class InternalProfileCollector implements Collector {

View File

@ -51,7 +51,7 @@ public final class ProfileBreakdown {
*/ */
private final long[] timings; private final long[] timings;
/** Scrach to store the current timing type. */ /** Scratch to store the current timing type. */
private TimingType currentTimingType; private TimingType currentTimingType;
/** /**

View File

@ -31,7 +31,7 @@ public final class Profilers {
private final ContextIndexSearcher searcher; private final ContextIndexSearcher searcher;
private final List<Profiler> profilers; private final List<Profiler> profilers;
/** Sole constructor. This {@link Profilers} instance will initiall wrap one {@link Profiler}. */ /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link Profiler}. */
public Profilers(ContextIndexSearcher searcher) { public Profilers(ContextIndexSearcher searcher) {
this.searcher = searcher; this.searcher = searcher;
this.profilers = new ArrayList<>(); this.profilers = new ArrayList<>();

View File

@ -45,7 +45,7 @@ public interface Rescorer {
* Modifies the result of the previously executed search ({@link TopDocs}) * Modifies the result of the previously executed search ({@link TopDocs})
* in place based on the given {@link RescoreSearchContext}. * in place based on the given {@link RescoreSearchContext}.
* *
* @param topDocs the result of the previously exectued search * @param topDocs the result of the previously executed search
* @param context the current {@link SearchContext}. This will never be <code>null</code>. * @param context the current {@link SearchContext}. This will never be <code>null</code>.
* @param rescoreContext the {@link RescoreSearchContext}. This will never be <code>null</code> * @param rescoreContext the {@link RescoreSearchContext}. This will never be <code>null</code>
* @throws IOException if an {@link IOException} occurs during rescoring * @throws IOException if an {@link IOException} occurs during rescoring
@ -66,7 +66,7 @@ public interface Rescorer {
Explanation sourceExplanation) throws IOException; Explanation sourceExplanation) throws IOException;
/** /**
* Parses the {@link RescoreSearchContext} for this impelementation * Parses the {@link RescoreSearchContext} for this implementation
* *
* @param parser the parser to read the context from * @param parser the parser to read the context from
* @param context the current shard context * @param context the current shard context
@ -76,7 +76,7 @@ public interface Rescorer {
public RescoreSearchContext parse(XContentParser parser, QueryShardContext context) throws IOException; public RescoreSearchContext parse(XContentParser parser, QueryShardContext context) throws IOException;
/** /**
* Extracts all terms needed to exectue this {@link Rescorer}. This method * Extracts all terms needed to execute this {@link Rescorer}. This method
* is executed in a distributed frequency collection roundtrip for * is executed in a distributed frequency collection roundtrip for
* {@link SearchType#DFS_QUERY_AND_FETCH} and * {@link SearchType#DFS_QUERY_AND_FETCH} and
* {@link SearchType#DFS_QUERY_THEN_FETCH} * {@link SearchType#DFS_QUERY_THEN_FETCH}
@ -84,8 +84,8 @@ public interface Rescorer {
public void extractTerms(SearchContext context, RescoreSearchContext rescoreContext, Set<Term> termsSet); public void extractTerms(SearchContext context, RescoreSearchContext rescoreContext, Set<Term> termsSet);
/* /*
* TODO: At this point we only have one implemenation which modifies the * TODO: At this point we only have one implementation which modifies the
* TopDocs given. Future implemenations might return actual resutls that * TopDocs given. Future implementations might return actual results that
* contain information about the rescore context. For example, a pairwise * contain information about the rescore context. For example, a pairwise
* reranker might return the feature vector for the top N window in order to * reranker might return the feature vector for the top N window in order to
* merge results on the caller's side. For now we don't have a return type at * merge results on the caller's side. For now we don't have a return type at

Some files were not shown because too many files have changed in this diff.