parent 08aa715d2e
commit 4059e4ac86

@@ -82,7 +82,7 @@ if not errorlevel 1 goto managed
 echo Failed starting service manager for '%SERVICE_ID%'
 goto:eof
 :managed
-echo Succesfully started service manager for '%SERVICE_ID%'.
+echo Successfully started service manager for '%SERVICE_ID%'.
 goto:eof
 
 :doRemove

@@ -242,7 +242,7 @@ def build_release(run_tests=False, dry_run=True, cpus=1, bwc_version=None):
 'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
 'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
 if bwc_version:
-print('Running Backwards compatibilty tests against version [%s]' % (bwc_version))
+print('Running Backwards compatibility tests against version [%s]' % (bwc_version))
 run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
 run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
 run_mvn('clean %s -DskipTests' % (target))

@@ -32,7 +32,7 @@ def all_installed_gems
 end
 
 all_installed_gems.select {|y| y.gem_dir.include?('vendor') }.sort {|v, u| v.name <=> u.name }.each do |x|
-puts '='*80 #seperator
+puts '='*80 #separator
 if(x.license) #ah gem has license information
 puts "%s,%s,%s,%s,%s"%[x.name, x.version, x.license, x.homepage, x.email]
 else

@@ -23,7 +23,7 @@ org.apache.lucene.index.IndexReader#decRef()
 org.apache.lucene.index.IndexReader#incRef()
 org.apache.lucene.index.IndexReader#tryIncRef()
 
-@defaultMessage QueryWrapperFilter is cachable by default - use Queries#wrap instead
+@defaultMessage QueryWrapperFilter is cacheable by default - use Queries#wrap instead
 org.apache.lucene.search.QueryWrapperFilter#<init>(org.apache.lucene.search.Query)
 
 @defaultMessage Pass the precision step from the mappings explicitly instead

@@ -79,7 +79,7 @@ The number of terms in a field cannot be accessed using the `_index` variable. S
 Term statistics for a field can be accessed with a subscript operator like
 this: `_index['FIELD']['TERM']`. This will never return null, even if term or field does not exist.
 If you do not need the term frequency, call `_index['FIELD'].get('TERM', 0)`
-to avoid uneccesary initialization of the frequencies. The flag will have only
+to avoid unnecessary initialization of the frequencies. The flag will have only
 affect is your set the `index_options` to `docs` (see <<mapping-core-types, mapping documentation>>).
 
 

@@ -18,7 +18,7 @@ as defined for the field mapping.
 ==== Filter Format
 
 The Filter supports two ways of defining the Filter shape, either by
-providing a whole shape defintion, or by referencing the name of a shape
+providing a whole shape definition, or by referencing the name of a shape
 pre-indexed in another index. Both formats are defined below with
 examples.
 

@@ -189,7 +189,7 @@ negates the character class. The allowed forms are:
 [^-abc] # any character except '-' or 'a' or 'b' or 'c'
 [^abc\-] # any character except '-' or 'a' or 'b' or 'c'
 
-Note that the dash `"-"` indicates a range of characeters, unless it is
+Note that the dash `"-"` indicates a range of characters, unless it is
 the first character or if it is escaped with a backslash.
 
 For string `"abcd"`:

@@ -30,4 +30,4 @@ The `include` and `exclude` clauses can be any span type query. The
 `exclude` clause is the span query whose matches must not overlap those
 returned.
 
-In the above example all documents with the term hoya are filtered except the ones that have 'la' preceeding them.
+In the above example all documents with the term hoya are filtered except the ones that have 'la' preceding them.

@@ -329,7 +329,7 @@ Google normalized distance as described in "The Google Similarity Distance", Ci
 ===== Which one is best?
 
 
-Roughly, `mutual_information` prefers high frequent terms even if they occur also frequently in the background. For example, in an analysis of natural language text this might lead to selection of stop words. `mutual_information` is unlikely to select very rare terms like misspellings. `gnd` prefers terms with a high co-occurence and avoids selection of stopwords. It might be better suited for synonym detection. However, `gnd` has a tendency to select very rare terms that are, for example, a result of misspelling. `chi_square` and `jlh` are somewhat in-between.
+Roughly, `mutual_information` prefers high frequent terms even if they occur also frequently in the background. For example, in an analysis of natural language text this might lead to selection of stop words. `mutual_information` is unlikely to select very rare terms like misspellings. `gnd` prefers terms with a high co-occurrence and avoids selection of stopwords. It might be better suited for synonym detection. However, `gnd` has a tendency to select very rare terms that are, for example, a result of misspelling. `chi_square` and `jlh` are somewhat in-between.
 
 It is hard to say which one of the different heuristics will be the best choice as it depends on what the significant terms are used for (see for example [Yang and Pedersen, "A Comparative Study on Feature Selection in Text Categorization", 1997](http://courses.ischool.berkeley.edu/i256/f06/papers/yang97comparative.pdf) for a study on using significant terms for feature selection for text classification).
 

@@ -13,7 +13,7 @@ suggestions are required in order to present to the end-user. The
 to select entire corrected phrases instead of individual tokens weighted
 based on `ngram-language` models. In practice this suggester will be
 able to make better decisions about which tokens to pick based on
-co-occurence and frequencies.
+co-occurrence and frequencies.
 
 ==== API Example
 

@@ -47,7 +47,7 @@ path.data: ["/mnt/first", "/mnt/second"]
 [[default-paths]]
 === Default Paths
 
-Below are the default paths that elasticsearch will use, if not explictly changed.
+Below are the default paths that elasticsearch will use, if not explicitly changed.
 
 [float]
 ==== deb and rpm

@@ -208,7 +208,7 @@ As many elasticsearch tests are checking for a similar output, like the amount o
 `assertSecondHit()`:: Asserts the second hit hits the specified matcher
 `assertThirdHit()`:: Asserts the third hits hits the specified matcher
 `assertSearchHit()`:: Assert a certain element in a search response hits the specified matcher
-`assertNoFailures()`:: Asserts that no shard failures have occured in the response
+`assertNoFailures()`:: Asserts that no shard failures have occurred in the response
 `assertFailures()`:: Asserts that shard failures have happened during a search request
 `assertHighlight()`:: Assert specific highlights matched
 `assertSuggestion()`:: Assert for specific suggestions

@@ -29,7 +29,7 @@
 "type" : "enum",
 "options" : ["sync","async"],
 "default" : "sync",
-"description" : "Explicitely set the replication type"
+"description" : "Explicitly set the replication type"
 },
 "routing": {
 "type" : "string",

@@ -45,7 +45,7 @@
 },
 "min_term_freq": {
 "type" : "number",
-"description" : "The term frequency as percent: terms with lower occurence in the source document will be ignored"
+"description" : "The term frequency as percent: terms with lower occurrence in the source document will be ignored"
 },
 "min_word_length": {
 "type" : "number",

@@ -24,7 +24,7 @@ setup:
 - match: {explanations: []}
 
 ---
-"Explain API for non-existant node & shard":
+"Explain API for non-existent node & shard":
 
 - do:
 cluster.state:

@@ -135,7 +135,7 @@ public final class ExceptionsHelper {
 public static <T extends Throwable> void rethrowAndSuppress(List<T> exceptions) throws T {
 T main = null;
 for (T ex : exceptions) {
-main = useOrSupress(main, ex);
+main = useOrSuppress(main, ex);
 }
 if (main != null) {
 throw main;

@@ -149,14 +149,14 @@ public final class ExceptionsHelper {
 public static <T extends Throwable> void maybeThrowRuntimeAndSuppress(List<T> exceptions) {
 T main = null;
 for (T ex : exceptions) {
-main = useOrSupress(main, ex);
+main = useOrSuppress(main, ex);
 }
 if (main != null) {
 throw new ElasticsearchException(main.getMessage(), main);
 }
 }
 
-public static <T extends Throwable> T useOrSupress(T first, T second) {
+public static <T extends Throwable> T useOrSuppress(T first, T second) {
 if (first == null) {
 return second;
 } else {

@@ -248,7 +248,7 @@ public final class TermVectorFields extends Fields {
 // realloc.
 growBuffers();
 // finally, read the values into the arrays
-// curentPosition etc. so that we can just iterate
+// currentPosition etc. so that we can just iterate
 // later
 writeInfos(perFieldTermVectorInput);
 return spare.get();

@@ -517,7 +517,7 @@ public class Base64 {
 * anywhere along their length by specifying
 * <var>srcOffset</var> and <var>destOffset</var>.
 * This method does not check to make sure your arrays
-* are large enough to accomodate <var>srcOffset</var> + 3 for
+* are large enough to accommodate <var>srcOffset</var> + 3 for
 * the <var>source</var> array or <var>destOffset</var> + 4 for
 * the <var>destination</var> array.
 * The actual number of significant bytes in your array is

@@ -1038,7 +1038,7 @@ public class Base64 {
 * anywhere along their length by specifying
 * <var>srcOffset</var> and <var>destOffset</var>.
 * This method does not check to make sure your arrays
-* are large enough to accomodate <var>srcOffset</var> + 4 for
+* are large enough to accommodate <var>srcOffset</var> + 4 for
 * the <var>source</var> array or <var>destOffset</var> + 3 for
 * the <var>destination</var> array.
 * This method returns the actual number of bytes that

@@ -270,7 +270,7 @@ public class Strings {
 }
 
 /**
-* Trim all occurences of the supplied leading character from the given String.
+* Trim all occurrences of the supplied leading character from the given String.
 *
 * @param str the String to check
 * @param leadingCharacter the leading character to be trimmed

@@ -326,7 +326,7 @@ public class Strings {
 }
 
 /**
-* Replace all occurences of a substring within a string with
+* Replace all occurrences of a substring within a string with
 * another string.
 *
 * @param inString String to examine

@@ -131,7 +131,7 @@ class BindingProcessor extends AbstractProcessor {
 return null;
 }
 
-// This cast is safe after the preceeding check.
+// This cast is safe after the preceding check.
 final BindingImpl<T> binding;
 try {
 binding = injector.createUnitializedBinding(key, scoping, source, errors);

@@ -114,7 +114,7 @@ import java.util.Set;
 * <p/>
 * <h3>Backwards compatibility using {@literal @}AssistedInject</h3>
 * Instead of the {@literal @}Inject annotation, you may annotate the constructed classes with
-* {@literal @}{@link AssistedInject}. This triggers a limited backwards-compatability mode.
+* {@literal @}{@link AssistedInject}. This triggers a limited backwards-compatibility mode.
 * <p/>
 * <p>Instead of matching factory method arguments to constructor parameters using their names, the
 * <strong>parameters are matched by their order</strong>. The first factory method argument is

@@ -17,7 +17,7 @@
 package org.elasticsearch.common.inject.internal;
 
 /**
-* Wraps an exception that occured during a computation in a different thread.
+* Wraps an exception that occurred during a computation in a different thread.
 *
 * @author Bob Lee
 */

@@ -17,7 +17,7 @@
 package org.elasticsearch.common.inject.internal;
 
 /**
-* Wraps an exception that occured during a computation.
+* Wraps an exception that occurred during a computation.
 */
 public class ComputationException extends RuntimeException {
 

@@ -35,7 +35,7 @@ import java.util.*;
 
 /**
 * A collection of error messages. If this type is passed as a method parameter, the method is
-* considered to have executed succesfully only if new errors were not added to this collection.
+* considered to have executed successfully only if new errors were not added to this collection.
 * <p/>
 * <p>Errors can be chained to provide additional context. To add context, call {@link #withSource}
 * to create a new Errors instance that contains additional context. All messages added to the

@@ -94,7 +94,7 @@ public class ProviderMethod<T> implements ProviderWithDependencies<T> {
 }
 
 try {
-// We know this cast is safe becase T is the method's return type.
+// We know this cast is safe because T is the method's return type.
 @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable"})
 T result = (T) method.invoke(instance, parameters);
 return result;

@@ -64,7 +64,7 @@ public interface ElementVisitor<V> {
 <T> V visit(MembersInjectorLookup<T> lookup);
 
 /**
-* Visit an error message and the context in which it occured.
+* Visit an error message and the context in which it occurred.
 */
 V visit(Message message);
 

@@ -89,18 +89,18 @@ public final class InjectionPoint {
 }
 
 private ImmutableList<Dependency<?>> forMember(Member member, TypeLiteral<?> type,
-Annotation[][] paramterAnnotations) {
+Annotation[][] parameterAnnotations) {
 Errors errors = new Errors(member);
-Iterator<Annotation[]> annotationsIterator = Arrays.asList(paramterAnnotations).iterator();
+Iterator<Annotation[]> annotationsIterator = Arrays.asList(parameterAnnotations).iterator();
 
 List<Dependency<?>> dependencies = Lists.newArrayList();
 int index = 0;
 
 for (TypeLiteral<?> parameterType : type.getParameterTypes(member)) {
 try {
-Annotation[] parameterAnnotations = annotationsIterator.next();
-Key<?> key = Annotations.getKey(parameterType, member, parameterAnnotations, errors);
-dependencies.add(newDependency(key, Nullability.allowsNull(parameterAnnotations), index));
+Annotation[] paramAnnotations = annotationsIterator.next();
+Key<?> key = Annotations.getKey(parameterType, member, paramAnnotations, errors);
+dependencies.add(newDependency(key, Nullability.allowsNull(paramAnnotations), index));
 index++;
 } catch (ErrorsException e) {
 errors.merge(e.getErrors());

@@ -29,7 +29,7 @@ import java.util.List;
 import static com.google.common.base.Preconditions.checkNotNull;
 
 /**
-* An error message and the context in which it occured. Messages are usually created internally by
+* An error message and the context in which it occurred. Messages are usually created internally by
 * Guice and its extensions. Messages can be created explicitly in a module using {@link
 * org.elasticsearch.common.inject.Binder#addError(Throwable) addError()} statements:
 * <pre>

@@ -68,7 +68,7 @@ public class LoggerMessageFormat {
 return sbuf.toString();
 }
 } else {
-if (isEscapedDelimeter(messagePattern, j)) {
+if (isEscapedDelimiter(messagePattern, j)) {
 if (!isDoubleEscaped(messagePattern, j)) {
 L--; // DELIM_START was escaped, thus should not be incremented
 sbuf.append(messagePattern.substring(i, j - 1));

@@ -95,13 +95,13 @@ public class LoggerMessageFormat {
 return sbuf.toString();
 }
 
-static boolean isEscapedDelimeter(String messagePattern,
-int delimeterStartIndex) {
+static boolean isEscapedDelimiter(String messagePattern,
+int delimiterStartIndex) {
 
-if (delimeterStartIndex == 0) {
+if (delimiterStartIndex == 0) {
 return false;
 }
-char potentialEscape = messagePattern.charAt(delimeterStartIndex - 1);
+char potentialEscape = messagePattern.charAt(delimiterStartIndex - 1);
 if (potentialEscape == ESCAPE_CHAR) {
 return true;
 } else {

@@ -109,8 +109,8 @@ public class LoggerMessageFormat {
 }
 }
 
-static boolean isDoubleEscaped(String messagePattern, int delimeterStartIndex) {
-if (delimeterStartIndex >= 2 && messagePattern.charAt(delimeterStartIndex - 2) == ESCAPE_CHAR) {
+static boolean isDoubleEscaped(String messagePattern, int delimiterStartIndex) {
+if (delimiterStartIndex >= 2 && messagePattern.charAt(delimiterStartIndex - 2) == ESCAPE_CHAR) {
 return true;
 } else {
 return false;

@@ -124,7 +124,7 @@ public class Queries {
 return result;
 }
 
-/* otherwise, simple expresion */
+/* otherwise, simple expression */
 
 if (-1 < spec.indexOf('%')) {
 /* percentage - assume the % was the last char. If not, let Integer.parseInt fail. */

@@ -30,7 +30,7 @@ public class HttpPipeliningHandler extends SimpleChannelHandler {
 
 /**
 * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel
-* connection. This is required as events cannot queue up indefintely; we would run out of
+* connection. This is required as events cannot queue up indefinitely; we would run out of
 * memory if this was the case.
 */
 public HttpPipeliningHandler(final int maxEventsHeld) {

@@ -294,7 +294,7 @@ public class ShardIndexingService extends AbstractIndexShardComponent {
 assert startOfThrottleMillis > 0 : "Bad state of startOfThrottleMillis";
 long throttleTimeMillis = System.currentTimeMillis() - startOfThrottleMillis;
 if (throttleTimeMillis >= 0) {
-//A timeslip may have occured but never want to add a negative number
+//A timeslip may have occurred but never want to add a negative number
 throttleTimeMillisMetric.inc(throttleTimeMillis);
 }
 }

@@ -64,7 +64,7 @@ public class TypeFilterParser implements FilterParser {
 parser.nextToken();
 
 Filter filter;
-//LUCENE 4 UPGRADE document mapper should use bytesref aswell?
+//LUCENE 4 UPGRADE document mapper should use bytesref as well?
 DocumentMapper documentMapper = parseContext.mapperService().documentMapper(type.utf8ToString());
 if (documentMapper == null) {
 filter = new TermFilter(new Term(TypeFieldMapper.NAME, type));

@@ -85,7 +85,7 @@ import java.util.Locale;
 * parameters origin and scale.
 * <p>
 * To write a new scoring function, create a new class that inherits from this
-* one and implement the getDistanceFuntion(). Furthermore, to create a builder,
+* one and implement the getDistanceFunction(). Furthermore, to create a builder,
 * override the getName() in {@link DecayFunctionBuilder}.
 * <p>
 * See {@link GaussDecayFunctionBuilder} and {@link GaussDecayFunctionParser}

@@ -51,7 +51,7 @@ import java.util.Arrays;
 public class FunctionScoreQueryParser implements QueryParser {
 
 public static final String NAME = "function_score";
-ScoreFunctionParserMapper funtionParserMapper;
+ScoreFunctionParserMapper functionParserMapper;
 // For better readability of error message
 static final String MISPLACED_FUNCTION_MESSAGE_PREFIX = "You can either define \"functions\":[...] or a single function, not both. ";
 static final String MISPLACED_BOOST_FUNCTION_MESSAGE_SUFFIX = " Did you mean \"boost\" instead?";

@@ -59,8 +59,8 @@ public class FunctionScoreQueryParser {
 public static final ParseField WEIGHT_FIELD = new ParseField("weight");
 
 @Inject
-public FunctionScoreQueryParser(ScoreFunctionParserMapper funtionParserMapper) {
-this.funtionParserMapper = funtionParserMapper;
+public FunctionScoreQueryParser(ScoreFunctionParserMapper functionParserMapper) {
+this.functionParserMapper = functionParserMapper;
 }
 
 @Override

|
|||
// we try to parse a score function. If there is no score
|
||||
// function for the current field name,
|
||||
// functionParserMapper.get() will throw an Exception.
|
||||
scoreFunction = funtionParserMapper.get(parseContext.index(), currentFieldName).parse(parseContext, parser);
|
||||
scoreFunction = functionParserMapper.get(parseContext.index(), currentFieldName).parse(parseContext, parser);
|
||||
}
|
||||
if (functionArrayFound) {
|
||||
String errorString = "Found \"functions\": [...] already, now encountering \"" + currentFieldName + "\".";
|
||||
|
@ -202,9 +202,9 @@ public class FunctionScoreQueryParser implements QueryParser {
|
|||
filter = parseContext.parseInnerFilter();
|
||||
} else {
|
||||
// do not need to check null here,
|
||||
// funtionParserMapper throws exception if parser
|
||||
// functionParserMapper throws exception if parser
|
||||
// non-existent
|
||||
ScoreFunctionParser functionParser = funtionParserMapper.get(parseContext.index(), currentFieldName);
|
||||
ScoreFunctionParser functionParser = functionParserMapper.get(parseContext.index(), currentFieldName);
|
||||
scoreFunction = functionParser.parse(parseContext, parser);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -57,7 +57,7 @@ public class IncludeNestedDocsQuery extends Query {
|
|||
this.parentFilter = parentFilter;
|
||||
}
|
||||
|
||||
// For rewritting
|
||||
// For rewriting
|
||||
IncludeNestedDocsQuery(Query rewrite, Query originalQuery, IncludeNestedDocsQuery previousInstance) {
|
||||
this.origParentQuery = originalQuery;
|
||||
this.parentQuery = rewrite;
|
||||
|
|
|
@ -24,7 +24,7 @@ import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
/**
|
||||
* Abstract implemenetation of {@link SimilarityProvider} providing common behaviour
|
||||
* Abstract implementation of {@link SimilarityProvider} providing common behaviour
|
||||
*/
|
||||
public abstract class AbstractSimilarityProvider implements SimilarityProvider {
|
||||
|
||||
|
|
|
@ -230,7 +230,7 @@ abstract class QueryCollector extends SimpleCollector {
|
|||
|
||||
MatchAndSort(ESLogger logger, PercolateContext context, boolean isNestedDoc) {
|
||||
super(logger, context, isNestedDoc);
|
||||
// TODO: Use TopFieldCollector.create(...) for ascending and decending scoring?
|
||||
// TODO: Use TopFieldCollector.create(...) for ascending and descending scoring?
|
||||
topDocsCollector = TopScoreDocCollector.create(context.size(), false);
|
||||
}
|
||||
|
||||
|
|
|
@ -98,8 +98,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
|
|||
public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
||||
|
||||
public static final String NORMS_LOADING_KEY = "index.norms.loading";
|
||||
private static final String DEFAUTL_KEEPALIVE_COMPONENENT_KEY = "default_keep_alive";
|
||||
public static final String DEFAUTL_KEEPALIVE_KEY = "search." + DEFAUTL_KEEPALIVE_COMPONENENT_KEY;
|
||||
private static final String DEFAULT_KEEPALIVE_COMPONENENT_KEY = "default_keep_alive";
|
||||
public static final String DEFAULT_KEEPALIVE_KEY = "search." + DEFAULT_KEEPALIVE_COMPONENENT_KEY;
|
||||
private static final String KEEPALIVE_INTERVAL_COMPONENENT_KEY = "keep_alive_interval";
|
||||
public static final String KEEPALIVE_INTERVAL_KEY = "search." + KEEPALIVE_INTERVAL_COMPONENENT_KEY;
|
||||
|
||||
|
@@ -155,7 +155,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
 
 TimeValue keepAliveInterval = componentSettings.getAsTime(KEEPALIVE_INTERVAL_COMPONENENT_KEY, timeValueMinutes(1));
 // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
-this.defaultKeepAlive = componentSettings.getAsTime(DEFAUTL_KEEPALIVE_COMPONENENT_KEY, timeValueMinutes(5)).millis();
+this.defaultKeepAlive = componentSettings.getAsTime(DEFAULT_KEEPALIVE_COMPONENENT_KEY, timeValueMinutes(5)).millis();
 
 Map<String, SearchParseElement> elementParsers = new HashMap<>();
 elementParsers.putAll(dfsPhase.parseElements());

@@ -86,7 +86,7 @@ public class PositionIterator implements Iterator<TermPosition> {
 public Iterator<TermPosition> reset() {
 if (resetted) {
 throw new ElasticsearchException(
-"Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely.");
+"Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly.");
 }
 resetted = true;
 return this;

@@ -49,7 +49,7 @@ import java.util.*;
 public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider {
 
 // for serialization
-public static final int SERIALIZE_PRESERVE_SEPERATORS = 1;
+public static final int SERIALIZE_PRESERVE_SEPARATORS = 1;
 public static final int SERIALIZE_HAS_PAYLOADS = 2;
 public static final int SERIALIZE_PRESERVE_POSITION_INCREMENTS = 4;
 

@@ -166,7 +166,7 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider
 output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
 output.writeInt(maxGraphExpansions); // can be negative
 int options = 0;
-options |= preserveSep ? SERIALIZE_PRESERVE_SEPERATORS : 0;
+options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
 options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
 options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
 output.writeVInt(options);

@@ -210,7 +210,7 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider
 int maxSurfaceFormsPerAnalyzedForm = input.readVInt();
 int maxGraphExpansions = input.readInt();
 int options = input.readVInt();
-boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPERATORS) != 0;
+boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPARATORS) != 0;
 boolean hasPayloads = (options & SERIALIZE_HAS_PAYLOADS) != 0;
 boolean preservePositionIncrements = (options & SERIALIZE_PRESERVE_POSITION_INCREMENTS) != 0;
 

@@ -440,7 +440,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
 /**
 * Returns status of shards currently finished snapshots
 * <p>
-* This method is executed on master node and it's complimentary to the {@link #currentSnapshotShards(SnapshotId)} becuase it
+* This method is executed on master node and it's complimentary to the {@link #currentSnapshotShards(SnapshotId)} because it
 * returns simliar information but for already finished snapshots.
 * </p>
 *

@@ -128,7 +128,7 @@ public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCas
 }
 
 
-randomAllocationDecider.allwaysSayYes = true;
+randomAllocationDecider.alwaysSayYes = true;
 logger.info("now say YES to everything");
 int iterations = 0;
 do {

@@ -172,7 +172,7 @@ public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCas
 this.random = random;
 }
 
-public boolean allwaysSayYes = false;
+public boolean alwaysSayYes = false;
 
 @Override
 public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {

@@ -180,7 +180,7 @@ public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCas
 }
 
 private Decision getRandomDecision() {
-if (allwaysSayYes) {
+if (alwaysSayYes) {
 return Decision.YES;
 }
 switch (random.nextInt(10)) {

@@ -82,7 +82,7 @@ public class BooleansTests extends ElasticsearchTestCase {
 }
 }
 
-public void testIsExplict() {
+public void testIsExplicit() {
 assertThat(Booleans.isExplicitFalse(randomFrom("true", "on", "yes", "1", "foo", null)), is(false));
 assertThat(Booleans.isExplicitFalse(randomFrom("false", "off", "no", "0")), is(true));
 assertThat(Booleans.isExplicitTrue(randomFrom("true", "on", "yes", "1")), is(true));

@@ -578,14 +578,14 @@ public abstract class AbstractStringFieldDataTests extends AbstractFieldDataImpl
 // 3 b/c 1 segment level caches and 1 top level cache
 assertThat(indicesFieldDataCache.getCache().size(), equalTo(4l));
 
-IndexOrdinalsFieldData cachedInstace = null;
+IndexOrdinalsFieldData cachedInstance = null;
 for (Accountable ramUsage : indicesFieldDataCache.getCache().asMap().values()) {
 if (ramUsage instanceof IndexOrdinalsFieldData) {
-cachedInstace = (IndexOrdinalsFieldData) ramUsage;
+cachedInstance = (IndexOrdinalsFieldData) ramUsage;
 break;
 }
 }
-assertThat(cachedInstace, sameInstance(globalOrdinals));
+assertThat(cachedInstance, sameInstance(globalOrdinals));
 topLevelReader.close();
 // Now only 3 segment level entries, only the toplevel reader has been closed, but the segment readers are still used by IW
 assertThat(indicesFieldDataCache.getCache().size(), equalTo(3l));

@@ -2477,7 +2477,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest {
 }
 
 @Test
-public void testWeight1fStillProducesWeighFuction() throws IOException {
+public void testWeight1fStillProducesWeighFunction() throws IOException {
 IndexQueryParserService queryParser = queryParser();
 String queryString = jsonBuilder().startObject()
 .startObject("function_score")

@@ -339,7 +339,7 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest {
 defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
 assertThat(defaultMapping, not(hasKey("date_detection")));
 
-// now test you can change stuff that are normally unchangable
+// now test you can change stuff that are normally unchangeable
 logger.info("Creating _default_ mappings with an analyzed field");
 putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
 JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)

@@ -574,13 +574,13 @@ public class IndexLookupTests extends ElasticsearchIntegrationTest {
 assertThat(sr.getHits().hits().length, equalTo(0));
 ShardSearchFailure[] shardFails = sr.getShardFailures();
 for (ShardSearchFailure fail : shardFails) {
-assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely."),
+assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
 Matchers.greaterThan(-1));
 }
 } catch (SearchPhaseExecutionException ex) {
 assertThat(
 "got " + ex.getDetailedMessage(),
-ex.getDetailedMessage().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitely."),
+ex.getDetailedMessage().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
 Matchers.greaterThan(-1));
 }
 }

@@ -361,7 +361,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
 }
 
 @Test
-public void testDisabledPreserveSeperators() throws Exception {
+public void testDisabledPreserveSeparators() throws Exception {
 completionMappingBuilder.preserveSeparators(false);
 createIndexAndMapping(completionMappingBuilder);
 

@@ -385,7 +385,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
 }
 
 @Test
-public void testEnabledPreserveSeperators() throws Exception {
+public void testEnabledPreserveSeparators() throws Exception {
 completionMappingBuilder.preserveSeparators(true);
 createIndexAndMapping(completionMappingBuilder);
 

@@ -561,7 +561,7 @@ public class ContextSuggestSearchTests extends ElasticsearchIntegrationTest {
 // now index a document without a color field
 try {
 index(INDEX, "service", "2", jsonBuilder().startObject().startObject("suggest").field("input", "backback").endObject().endObject());
-fail("index operation was not supposed to be succesful");
+fail("index operation was not supposed to be successful");
 } catch (ElasticsearchIllegalArgumentException e) {
 assertThat(e.getMessage(), containsString("one or more prefixes needed"));
 }

@@ -63,7 +63,7 @@ import java.util.TreeMap;
 public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvider {
 
 // for serialization
-public static final int SERIALIZE_PRESERVE_SEPERATORS = 1;
+public static final int SERIALIZE_PRESERVE_SEPARATORS = 1;
 public static final int SERIALIZE_HAS_PAYLOADS = 2;
 public static final int SERIALIZE_PRESERVE_POSITION_INCREMENTS = 4;
 

@@ -182,7 +182,7 @@ public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvide
 output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
 output.writeInt(maxGraphExpansions); // can be negative
 int options = 0;
-options |= preserveSep ? SERIALIZE_PRESERVE_SEPERATORS : 0;
+options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
 options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
 options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
 output.writeVInt(options);

@@ -216,7 +216,7 @@ public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvide
 int maxSurfaceFormsPerAnalyzedForm = input.readVInt();
 int maxGraphExpansions = input.readInt();
 int options = input.readVInt();
-boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPERATORS) != 0;
+boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPARATORS) != 0;
 boolean hasPayloads = (options & SERIALIZE_HAS_PAYLOADS) != 0;
 boolean preservePositionIncrements = (options & SERIALIZE_PRESERVE_POSITION_INCREMENTS) != 0;
 sizeInBytes += fst.ramBytesUsed();

@@ -500,18 +500,18 @@ public abstract class ElasticsearchTestCase extends AbstractRandomizedTest {
 * Retruns the tests compatibility version.
 */
 public Version compatibilityVersion() {
-return compatibiltyVersion(getClass());
+return compatibilityVersion(getClass());
 }
 
-private Version compatibiltyVersion(Class<?> clazz) {
+private Version compatibilityVersion(Class<?> clazz) {
 if (clazz == Object.class || clazz == ElasticsearchIntegrationTest.class) {
 return globalCompatibilityVersion();
 }
 CompatibilityVersion annotation = clazz.getAnnotation(CompatibilityVersion.class);
 if (annotation != null) {
-return Version.smallest(Version.fromId(annotation.version()), compatibiltyVersion(clazz.getSuperclass()));
+return Version.smallest(Version.fromId(annotation.version()), compatibilityVersion(clazz.getSuperclass()));
 }
-return compatibiltyVersion(clazz.getSuperclass());
+return compatibilityVersion(clazz.getSuperclass());
 }
 
 private static String compatibilityVersionProperty() {

@@ -374,7 +374,7 @@ public final class InternalTestCluster extends TestCluster {
 builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)));
 }
 if (random.nextBoolean()) { // sometimes set a
-builder.put(SearchService.DEFAUTL_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)));
+builder.put(SearchService.DEFAULT_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)));
 }
 if (random.nextBoolean()) {
 // change threadpool types to make sure we don't have components that rely on the type of thread pools

@@ -692,7 +692,7 @@ public class UpdateTests extends ElasticsearchIntegrationTest {
 long start = System.currentTimeMillis();
 do {
 long msRemaining = timeOut.getMillis() - (System.currentTimeMillis() - start);
-logger.info("[{}] going to try and aquire [{}] in [{}]ms [{}] available to aquire right now",name, maxRequests,msRemaining, requestsOutstanding.availablePermits());
+logger.info("[{}] going to try and acquire [{}] in [{}]ms [{}] available to acquire right now",name, maxRequests,msRemaining, requestsOutstanding.availablePermits());
 try {
 requestsOutstanding.tryAcquire(maxRequests, msRemaining, TimeUnit.MILLISECONDS );
 return;

@@ -718,7 +718,7 @@ public class UpdateTests extends ElasticsearchIntegrationTest {
 ut.join(); //Threads should have finished because of the latch.await
 }
 
-//If are no errors every request recieved a response otherwise the test would have timedout
+//If are no errors every request received a response otherwise the test would have timedout
 //aquiring the request outstanding semaphores.
 for (Throwable throwable : failures) {
 logger.info("Captured failure on concurrent update:", throwable);