Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-02-17 10:25:15 +00:00)

Commit 55cf49420c: Merge branch 'master' into more_tribe_node_settings
@@ -68,7 +68,7 @@ public class PluginBuildPlugin extends BuildPlugin {
testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
// we "upgrade" these optional deps to provided for plugins, since they will run
// with a full elasticsearch server that includes optional deps
provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
provided "com.vividsolutions:jts:${project.versions.jts}"
provided "log4j:log4j:${project.versions.log4j}"
provided "log4j:apache-log4j-extras:${project.versions.log4j}"
@@ -68,11 +68,17 @@ class PluginPropertiesTask extends Copy {
}

Map generateSubstitutions() {
def stringSnap = { version ->
if (version.endsWith("-SNAPSHOT")) {
return version.substring(0, version.length() - 9)
}
return version
}
return [
'name': extension.name,
'description': extension.description,
'version': extension.version,
'elasticsearchVersion': VersionProperties.elasticsearch,
'version': stringSnap(extension.version),
'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
'javaVersion': project.targetCompatibility as String,
'isolated': extension.isolated as String,
'classname': extension.classname
@@ -1486,7 +1486,6 @@
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DateRangeTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]FunctionScoreTests.java" checks="LineLength" />
@@ -33,20 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
java.io.RandomAccessFile
java.nio.file.Path#toFile()

@defaultMessage Don't use deprecated lucene apis
org.apache.lucene.index.DocsEnum
org.apache.lucene.index.DocsAndPositionsEnum
org.apache.lucene.queries.TermFilter
org.apache.lucene.queries.TermsFilter
org.apache.lucene.search.Filter
org.apache.lucene.search.FilteredQuery
org.apache.lucene.search.TermRangeFilter
org.apache.lucene.search.NumericRangeFilter
org.apache.lucene.search.PrefixFilter
org.apache.lucene.search.QueryWrapperFilter
org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter
org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory)

java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
@@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.in
org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)

@defaultMessage Pass the precision step from the mappings explicitly instead
org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)

@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait()
@@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean)
org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
org.elasticsearch.common.io.PathUtils#get(java.net.URI)

@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float)

@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
@@ -1,8 +1,8 @@
elasticsearch = 3.0.0
lucene = 5.5.0
elasticsearch = 5.0.0
lucene = 6.0.0-snapshot-bea235f

# optional dependencies
spatial4j = 0.5
spatial4j = 0.6
jts = 1.13
jackson = 2.7.1
log4j = 1.2.17
@@ -42,6 +42,7 @@ dependencies {
compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"

@@ -71,7 +72,7 @@ dependencies {
compile 'org.hdrhistogram:HdrHistogram:2.1.6'

// lucene spatial
compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
compile "com.vividsolutions:jts:${versions.jts}", optional

// logging
@@ -168,11 +169,6 @@ thirdPartyAudit.excludes = [
'org.apache.commons.logging.Log',
'org.apache.commons.logging.LogFactory',

// from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
'org.apache.regexp.CharacterIterator',
'org.apache.regexp.RE',
'org.apache.regexp.REProgram',

// from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
'org.apache.tomcat.jni.Buffer',
'org.apache.tomcat.jni.Library',
@@ -210,7 +206,7 @@ thirdPartyAudit.excludes = [
'org.jboss.marshalling.MarshallingConfiguration',
'org.jboss.marshalling.Unmarshaller',

// from com.spatial4j.core.io.GeoJSONReader (spatial4j)
// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',

// from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
@@ -33,7 +33,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.ArrayList;
@@ -247,14 +246,15 @@ public abstract class BlendedTermQuery extends Query {
if (boosts != null) {
boost = boosts[i];
}
builder.append(ToStringUtils.boost(boost));
if (boost != 1f) {
builder.append('^').append(boost);
}
builder.append(", ");
}
if (terms.length > 0) {
builder.setLength(builder.length() - 2);
}
builder.append("])");
builder.append(ToStringUtils.boost(getBoost()));
return builder.toString();
}
@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
@@ -165,7 +166,7 @@ public class MapperQueryParser extends QueryParser {
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
} else {
return getFieldQuerySingle(field, queryText, quoted);
@@ -267,7 +268,7 @@ public class MapperQueryParser extends QueryParser {
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
} else {
return super.getFieldQuery(field, queryText, slop);
@@ -318,7 +319,7 @@ public class MapperQueryParser extends QueryParser {
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
}

@@ -380,7 +381,7 @@ public class MapperQueryParser extends QueryParser {
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
} else {
return getFuzzyQuerySingle(field, termStr, minSimilarity);
@@ -445,7 +446,7 @@ public class MapperQueryParser extends QueryParser {
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
} else {
return getPrefixQuerySingle(field, termStr);
@@ -520,7 +521,7 @@ public class MapperQueryParser extends QueryParser {
for (String token : tlist) {
clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
}
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
}

@@ -575,7 +576,7 @@ public class MapperQueryParser extends QueryParser {
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
} else {
return getWildcardQuerySingle(field, termStr);
@@ -704,7 +705,7 @@ public class MapperQueryParser extends QueryParser {
}
if (clauses.size() == 0) // happens for stopwords
return null;
return getBooleanQuery(clauses, true);
return getBooleanQueryCoordDisabled(clauses);
}
} else {
return getRegexpQuerySingle(field, termStr);
@@ -739,10 +740,24 @@ public class MapperQueryParser extends QueryParser {
setAnalyzer(oldAnalyzer);
}
}

/**
* @deprecated review all use of this, don't rely on coord
*/
@Deprecated
protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.setDisableCoord(true);
for (BooleanClause clause : clauses) {
builder.add(clause);
}
return fixNegativeQueryIfNeeded(builder.build());
}

@Override
protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
Query q = super.getBooleanQuery(clauses, disableCoord);
protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
Query q = super.getBooleanQuery(clauses);
if (q == null) {
return null;
}
@@ -769,7 +784,6 @@ public class MapperQueryParser extends QueryParser {
}
pq = builder.build();
//make sure that the boost hasn't been set beforehand, otherwise we'd lose it
assert q.getBoost() == 1f;
assert q instanceof BoostQuery == false;
return pq;
} else if (q instanceof MultiPhraseQuery) {
@@ -26,8 +26,7 @@ import java.io.IOException;
/**
 * Abstract decorator class of a DocIdSetIterator
 * implementation that provides on-demand filter/validation
 * mechanism on an underlying DocIdSetIterator. See {@link
 * FilteredDocIdSet}.
 * mechanism on an underlying DocIdSetIterator.
 */
public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator {
protected DocIdSetIterator _innerIter;
@@ -87,7 +87,7 @@ public class CustomFieldQuery extends FieldQuery {
if (numTerms > 16) {
for (Term[] currentPosTerm : terms) {
for (Term term : currentPosTerm) {
super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost());
super.flatten(new TermQuery(term), reader, flatQueries, 1F);
}
}
return;
@@ -104,7 +104,7 @@ public class CustomFieldQuery extends FieldQuery {
queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
}
Query query = queryBuilder.build();
this.flatten(query, reader, flatQueries, orig.getBoost());
this.flatten(query, reader, flatQueries, 1F);
} else {
Term[] t = terms.get(currentPos);
for (int i = 0; i < t.length; i++) {
@@ -35,212 +35,10 @@ import java.io.IOException;
@SuppressWarnings("deprecation")
public class Version {

// The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
// AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
// The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator
// AA values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
// the (internal) format of the id is there so we can easily do after/before checks on the id

// NOTE: indexes created with 3.6 use this constant for e.g. analysis chain emulation (imperfect)
public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;
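As a rough illustration of the XXYYZZAA id layout described in the comments above (this sketch is not part of the diff, and the helper name is hypothetical):

// Hypothetical helper showing how the internal id is assembled:
// XX = major, YY = minor, ZZ = revision, AA = alpha/beta/rc indicator (99 = GA release).
static int toVersionId(int major, int minor, int revision, int build) {
    return major * 1000000 + minor * 10000 + revision * 100 + build;
}
// Example: toVersionId(2, 3, 0, 99) == 2030099, which matches the V_2_3_0_ID constant below.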
public static final int V_0_18_0_ID = /*00*/180099;
|
||||
public static final Version V_0_18_0 = new Version(V_0_18_0_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_1_ID = /*00*/180199;
|
||||
public static final Version V_0_18_1 = new Version(V_0_18_1_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_2_ID = /*00*/180299;
|
||||
public static final Version V_0_18_2 = new Version(V_0_18_2_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_3_ID = /*00*/180399;
|
||||
public static final Version V_0_18_3 = new Version(V_0_18_3_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_4_ID = /*00*/180499;
|
||||
public static final Version V_0_18_4 = new Version(V_0_18_4_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_5_ID = /*00*/180599;
|
||||
public static final Version V_0_18_5 = new Version(V_0_18_5_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_6_ID = /*00*/180699;
|
||||
public static final Version V_0_18_6 = new Version(V_0_18_6_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_7_ID = /*00*/180799;
|
||||
public static final Version V_0_18_7 = new Version(V_0_18_7_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_18_8_ID = /*00*/180899;
|
||||
public static final Version V_0_18_8 = new Version(V_0_18_8_ID, LUCENE_3_EMULATION_VERSION);
|
||||
|
||||
public static final int V_0_19_0_RC1_ID = /*00*/190051;
|
||||
public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
|
||||
|
||||
public static final int V_0_19_0_RC2_ID = /*00*/190052;
|
||||
public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, LUCENE_3_EMULATION_VERSION);
|
||||
|
||||
public static final int V_0_19_0_RC3_ID = /*00*/190053;
|
||||
public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, LUCENE_3_EMULATION_VERSION);
|
||||
|
||||
public static final int V_0_19_0_ID = /*00*/190099;
|
||||
public static final Version V_0_19_0 = new Version(V_0_19_0_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_1_ID = /*00*/190199;
|
||||
public static final Version V_0_19_1 = new Version(V_0_19_1_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_2_ID = /*00*/190299;
|
||||
public static final Version V_0_19_2 = new Version(V_0_19_2_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_3_ID = /*00*/190399;
|
||||
public static final Version V_0_19_3 = new Version(V_0_19_3_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_4_ID = /*00*/190499;
|
||||
public static final Version V_0_19_4 = new Version(V_0_19_4_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_5_ID = /*00*/190599;
|
||||
public static final Version V_0_19_5 = new Version(V_0_19_5_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_6_ID = /*00*/190699;
|
||||
public static final Version V_0_19_6 = new Version(V_0_19_6_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_7_ID = /*00*/190799;
|
||||
public static final Version V_0_19_7 = new Version(V_0_19_7_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_8_ID = /*00*/190899;
|
||||
public static final Version V_0_19_8 = new Version(V_0_19_8_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_9_ID = /*00*/190999;
|
||||
public static final Version V_0_19_9 = new Version(V_0_19_9_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_10_ID = /*00*/191099;
|
||||
public static final Version V_0_19_10 = new Version(V_0_19_10_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_11_ID = /*00*/191199;
|
||||
public static final Version V_0_19_11 = new Version(V_0_19_11_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_12_ID = /*00*/191299;
|
||||
public static final Version V_0_19_12 = new Version(V_0_19_12_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_19_13_ID = /*00*/191399;
|
||||
public static final Version V_0_19_13 = new Version(V_0_19_13_ID, LUCENE_3_EMULATION_VERSION);
|
||||
|
||||
public static final int V_0_20_0_RC1_ID = /*00*/200051;
|
||||
public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_20_0_ID = /*00*/200099;
|
||||
public static final Version V_0_20_0 = new Version(V_0_20_0_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_20_1_ID = /*00*/200199;
|
||||
public static final Version V_0_20_1 = new Version(V_0_20_1_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_20_2_ID = /*00*/200299;
|
||||
public static final Version V_0_20_2 = new Version(V_0_20_2_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_20_3_ID = /*00*/200399;
|
||||
public static final Version V_0_20_3 = new Version(V_0_20_3_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_20_4_ID = /*00*/200499;
|
||||
public static final Version V_0_20_4 = new Version(V_0_20_4_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_20_5_ID = /*00*/200599;
|
||||
public static final Version V_0_20_5 = new Version(V_0_20_5_ID, LUCENE_3_EMULATION_VERSION);
|
||||
public static final int V_0_20_6_ID = /*00*/200699;
|
||||
public static final Version V_0_20_6 = new Version(V_0_20_6_ID, LUCENE_3_EMULATION_VERSION);
|
||||
|
||||
public static final int V_0_90_0_Beta1_ID = /*00*/900001;
|
||||
public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
|
||||
public static final int V_0_90_0_RC1_ID = /*00*/900051;
|
||||
public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
|
||||
public static final int V_0_90_0_RC2_ID = /*00*/900052;
|
||||
public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_2);
|
||||
public static final int V_0_90_0_ID = /*00*/900099;
|
||||
public static final Version V_0_90_0 = new Version(V_0_90_0_ID, org.apache.lucene.util.Version.LUCENE_4_2);
|
||||
public static final int V_0_90_1_ID = /*00*/900199;
|
||||
public static final Version V_0_90_1 = new Version(V_0_90_1_ID, org.apache.lucene.util.Version.LUCENE_4_3);
|
||||
public static final int V_0_90_2_ID = /*00*/900299;
|
||||
public static final Version V_0_90_2 = new Version(V_0_90_2_ID, org.apache.lucene.util.Version.LUCENE_4_3);
|
||||
public static final int V_0_90_3_ID = /*00*/900399;
|
||||
public static final Version V_0_90_3 = new Version(V_0_90_3_ID, org.apache.lucene.util.Version.LUCENE_4_4);
|
||||
public static final int V_0_90_4_ID = /*00*/900499;
|
||||
public static final Version V_0_90_4 = new Version(V_0_90_4_ID, org.apache.lucene.util.Version.LUCENE_4_4);
|
||||
public static final int V_0_90_5_ID = /*00*/900599;
|
||||
public static final Version V_0_90_5 = new Version(V_0_90_5_ID, org.apache.lucene.util.Version.LUCENE_4_4);
|
||||
public static final int V_0_90_6_ID = /*00*/900699;
|
||||
public static final Version V_0_90_6 = new Version(V_0_90_6_ID, org.apache.lucene.util.Version.LUCENE_4_5);
|
||||
public static final int V_0_90_7_ID = /*00*/900799;
|
||||
public static final Version V_0_90_7 = new Version(V_0_90_7_ID, org.apache.lucene.util.Version.LUCENE_4_5);
|
||||
public static final int V_0_90_8_ID = /*00*/900899;
|
||||
public static final Version V_0_90_8 = new Version(V_0_90_8_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_0_90_9_ID = /*00*/900999;
|
||||
public static final Version V_0_90_9 = new Version(V_0_90_9_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_0_90_10_ID = /*00*/901099;
|
||||
public static final Version V_0_90_10 = new Version(V_0_90_10_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_0_90_11_ID = /*00*/901199;
|
||||
public static final Version V_0_90_11 = new Version(V_0_90_11_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_0_90_12_ID = /*00*/901299;
|
||||
public static final Version V_0_90_12 = new Version(V_0_90_12_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_0_90_13_ID = /*00*/901399;
|
||||
public static final Version V_0_90_13 = new Version(V_0_90_13_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
|
||||
public static final int V_1_0_0_Beta1_ID = 1000001;
|
||||
public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_5);
|
||||
public static final int V_1_0_0_Beta2_ID = 1000002;
|
||||
public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_1_0_0_RC1_ID = 1000051;
|
||||
public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_1_0_0_RC2_ID = 1000052;
|
||||
public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_1_0_0_ID = 1000099;
|
||||
public static final Version V_1_0_0 = new Version(V_1_0_0_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_1_0_1_ID = 1000199;
|
||||
public static final Version V_1_0_1 = new Version(V_1_0_1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_1_0_2_ID = 1000299;
|
||||
public static final Version V_1_0_2 = new Version(V_1_0_2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_1_0_3_ID = 1000399;
|
||||
public static final Version V_1_0_3 = new Version(V_1_0_3_ID, org.apache.lucene.util.Version.LUCENE_4_6);
|
||||
public static final int V_1_1_0_ID = 1010099;
|
||||
public static final Version V_1_1_0 = new Version(V_1_1_0_ID, org.apache.lucene.util.Version.LUCENE_4_7);
|
||||
public static final int V_1_1_1_ID = 1010199;
|
||||
public static final Version V_1_1_1 = new Version(V_1_1_1_ID, org.apache.lucene.util.Version.LUCENE_4_7);
|
||||
public static final int V_1_1_2_ID = 1010299;
|
||||
public static final Version V_1_1_2 = new Version(V_1_1_2_ID, org.apache.lucene.util.Version.LUCENE_4_7);
|
||||
public static final int V_1_2_0_ID = 1020099;
|
||||
public static final Version V_1_2_0 = new Version(V_1_2_0_ID, org.apache.lucene.util.Version.LUCENE_4_8);
|
||||
public static final int V_1_2_1_ID = 1020199;
|
||||
public static final Version V_1_2_1 = new Version(V_1_2_1_ID, org.apache.lucene.util.Version.LUCENE_4_8);
|
||||
public static final int V_1_2_2_ID = 1020299;
|
||||
public static final Version V_1_2_2 = new Version(V_1_2_2_ID, org.apache.lucene.util.Version.LUCENE_4_8);
|
||||
public static final int V_1_2_3_ID = 1020399;
|
||||
public static final Version V_1_2_3 = new Version(V_1_2_3_ID, org.apache.lucene.util.Version.LUCENE_4_8);
|
||||
public static final int V_1_2_4_ID = 1020499;
|
||||
public static final Version V_1_2_4 = new Version(V_1_2_4_ID, org.apache.lucene.util.Version.LUCENE_4_8);
|
||||
public static final int V_1_3_0_ID = 1030099;
|
||||
public static final Version V_1_3_0 = new Version(V_1_3_0_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_1_ID = 1030199;
|
||||
public static final Version V_1_3_1 = new Version(V_1_3_1_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_2_ID = 1030299;
|
||||
public static final Version V_1_3_2 = new Version(V_1_3_2_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_3_ID = 1030399;
|
||||
public static final Version V_1_3_3 = new Version(V_1_3_3_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_4_ID = 1030499;
|
||||
public static final Version V_1_3_4 = new Version(V_1_3_4_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_5_ID = 1030599;
|
||||
public static final Version V_1_3_5 = new Version(V_1_3_5_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_6_ID = 1030699;
|
||||
public static final Version V_1_3_6 = new Version(V_1_3_6_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_7_ID = 1030799;
|
||||
public static final Version V_1_3_7 = new Version(V_1_3_7_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_8_ID = 1030899;
|
||||
public static final Version V_1_3_8 = new Version(V_1_3_8_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_3_9_ID = 1030999;
|
||||
public static final Version V_1_3_9 = new Version(V_1_3_9_ID, org.apache.lucene.util.Version.LUCENE_4_9);
|
||||
public static final int V_1_4_0_Beta1_ID = 1040001;
|
||||
public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_10_1);
|
||||
public static final int V_1_4_0_ID = 1040099;
|
||||
public static final Version V_1_4_0 = new Version(V_1_4_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
|
||||
public static final int V_1_4_1_ID = 1040199;
|
||||
public static final Version V_1_4_1 = new Version(V_1_4_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
|
||||
public static final int V_1_4_2_ID = 1040299;
|
||||
public static final Version V_1_4_2 = new Version(V_1_4_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
|
||||
public static final int V_1_4_3_ID = 1040399;
|
||||
public static final Version V_1_4_3 = new Version(V_1_4_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
|
||||
public static final int V_1_4_4_ID = 1040499;
|
||||
public static final Version V_1_4_4 = new Version(V_1_4_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
|
||||
public static final int V_1_4_5_ID = 1040599;
|
||||
public static final Version V_1_4_5 = new Version(V_1_4_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_5_0_ID = 1050099;
|
||||
public static final Version V_1_5_0 = new Version(V_1_5_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_5_1_ID = 1050199;
|
||||
public static final Version V_1_5_1 = new Version(V_1_5_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_5_2_ID = 1050299;
|
||||
public static final Version V_1_5_2 = new Version(V_1_5_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_6_0_ID = 1060099;
|
||||
public static final Version V_1_6_0 = new Version(V_1_6_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_6_1_ID = 1060199;
|
||||
public static final Version V_1_6_1 = new Version(V_1_6_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_6_2_ID = 1060299;
|
||||
public static final Version V_1_6_2 = new Version(V_1_6_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_0_ID = 1070099;
|
||||
public static final Version V_1_7_0 = new Version(V_1_7_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_1_ID = 1070199;
|
||||
public static final Version V_1_7_1 = new Version(V_1_7_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_2_ID = 1070299;
|
||||
public static final Version V_1_7_2 = new Version(V_1_7_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_3_ID = 1070399;
|
||||
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_4_ID = 1070499;
|
||||
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
public static final int V_1_7_5_ID = 1070599;
|
||||
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
|
||||
|
||||
public static final int V_2_0_0_beta1_ID = 2000001;
|
||||
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
@@ -264,9 +62,9 @@ public class Version {
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_3_0_0_ID = 3000099;
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final Version CURRENT = V_3_0_0;
public static final int V_5_0_0_ID = 5000099;
public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final Version CURRENT = V_5_0_0;

static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -279,8 +77,8 @@ public class Version {

public static Version fromId(int id) {
switch (id) {
case V_3_0_0_ID:
return V_3_0_0;
case V_5_0_0_ID:
return V_5_0_0;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_0_ID:
@@ -303,198 +101,6 @@ public class Version {
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
case V_1_7_5_ID:
|
||||
return V_1_7_5;
|
||||
case V_1_7_4_ID:
|
||||
return V_1_7_4;
|
||||
case V_1_7_3_ID:
|
||||
return V_1_7_3;
|
||||
case V_1_7_2_ID:
|
||||
return V_1_7_2;
|
||||
case V_1_7_1_ID:
|
||||
return V_1_7_1;
|
||||
case V_1_7_0_ID:
|
||||
return V_1_7_0;
|
||||
case V_1_6_2_ID:
|
||||
return V_1_6_2;
|
||||
case V_1_6_1_ID:
|
||||
return V_1_6_1;
|
||||
case V_1_6_0_ID:
|
||||
return V_1_6_0;
|
||||
case V_1_5_2_ID:
|
||||
return V_1_5_2;
|
||||
case V_1_5_1_ID:
|
||||
return V_1_5_1;
|
||||
case V_1_5_0_ID:
|
||||
return V_1_5_0;
|
||||
case V_1_4_5_ID:
|
||||
return V_1_4_5;
|
||||
case V_1_4_4_ID:
|
||||
return V_1_4_4;
|
||||
case V_1_4_3_ID:
|
||||
return V_1_4_3;
|
||||
case V_1_4_2_ID:
|
||||
return V_1_4_2;
|
||||
case V_1_4_1_ID:
|
||||
return V_1_4_1;
|
||||
case V_1_4_0_ID:
|
||||
return V_1_4_0;
|
||||
case V_1_4_0_Beta1_ID:
|
||||
return V_1_4_0_Beta1;
|
||||
case V_1_3_9_ID:
|
||||
return V_1_3_9;
|
||||
case V_1_3_8_ID:
|
||||
return V_1_3_8;
|
||||
case V_1_3_7_ID:
|
||||
return V_1_3_7;
|
||||
case V_1_3_6_ID:
|
||||
return V_1_3_6;
|
||||
case V_1_3_5_ID:
|
||||
return V_1_3_5;
|
||||
case V_1_3_4_ID:
|
||||
return V_1_3_4;
|
||||
case V_1_3_3_ID:
|
||||
return V_1_3_3;
|
||||
case V_1_3_2_ID:
|
||||
return V_1_3_2;
|
||||
case V_1_3_1_ID:
|
||||
return V_1_3_1;
|
||||
case V_1_3_0_ID:
|
||||
return V_1_3_0;
|
||||
case V_1_2_4_ID:
|
||||
return V_1_2_4;
|
||||
case V_1_2_3_ID:
|
||||
return V_1_2_3;
|
||||
case V_1_2_2_ID:
|
||||
return V_1_2_2;
|
||||
case V_1_2_1_ID:
|
||||
return V_1_2_1;
|
||||
case V_1_2_0_ID:
|
||||
return V_1_2_0;
|
||||
case V_1_1_2_ID:
|
||||
return V_1_1_2;
|
||||
case V_1_1_1_ID:
|
||||
return V_1_1_1;
|
||||
case V_1_1_0_ID:
|
||||
return V_1_1_0;
|
||||
case V_1_0_3_ID:
|
||||
return V_1_0_3;
|
||||
case V_1_0_2_ID:
|
||||
return V_1_0_2;
|
||||
case V_1_0_1_ID:
|
||||
return V_1_0_1;
|
||||
case V_1_0_0_ID:
|
||||
return V_1_0_0;
|
||||
case V_1_0_0_RC2_ID:
|
||||
return V_1_0_0_RC2;
|
||||
case V_1_0_0_RC1_ID:
|
||||
return V_1_0_0_RC1;
|
||||
case V_1_0_0_Beta2_ID:
|
||||
return V_1_0_0_Beta2;
|
||||
case V_1_0_0_Beta1_ID:
|
||||
return V_1_0_0_Beta1;
|
||||
case V_0_90_13_ID:
|
||||
return V_0_90_13;
|
||||
case V_0_90_12_ID:
|
||||
return V_0_90_12;
|
||||
case V_0_90_11_ID:
|
||||
return V_0_90_11;
|
||||
case V_0_90_10_ID:
|
||||
return V_0_90_10;
|
||||
case V_0_90_9_ID:
|
||||
return V_0_90_9;
|
||||
case V_0_90_8_ID:
|
||||
return V_0_90_8;
|
||||
case V_0_90_7_ID:
|
||||
return V_0_90_7;
|
||||
case V_0_90_6_ID:
|
||||
return V_0_90_6;
|
||||
case V_0_90_5_ID:
|
||||
return V_0_90_5;
|
||||
case V_0_90_4_ID:
|
||||
return V_0_90_4;
|
||||
case V_0_90_3_ID:
|
||||
return V_0_90_3;
|
||||
case V_0_90_2_ID:
|
||||
return V_0_90_2;
|
||||
case V_0_90_1_ID:
|
||||
return V_0_90_1;
|
||||
case V_0_90_0_ID:
|
||||
return V_0_90_0;
|
||||
case V_0_90_0_RC2_ID:
|
||||
return V_0_90_0_RC2;
|
||||
case V_0_90_0_RC1_ID:
|
||||
return V_0_90_0_RC1;
|
||||
case V_0_90_0_Beta1_ID:
|
||||
return V_0_90_0_Beta1;
|
||||
case V_0_20_6_ID:
|
||||
return V_0_20_6;
|
||||
case V_0_20_5_ID:
|
||||
return V_0_20_5;
|
||||
case V_0_20_4_ID:
|
||||
return V_0_20_4;
|
||||
case V_0_20_3_ID:
|
||||
return V_0_20_3;
|
||||
case V_0_20_2_ID:
|
||||
return V_0_20_2;
|
||||
case V_0_20_1_ID:
|
||||
return V_0_20_1;
|
||||
case V_0_20_0_ID:
|
||||
return V_0_20_0;
|
||||
case V_0_20_0_RC1_ID:
|
||||
return V_0_20_0_RC1;
|
||||
case V_0_19_0_RC1_ID:
|
||||
return V_0_19_0_RC1;
|
||||
case V_0_19_0_RC2_ID:
|
||||
return V_0_19_0_RC2;
|
||||
case V_0_19_0_RC3_ID:
|
||||
return V_0_19_0_RC3;
|
||||
case V_0_19_0_ID:
|
||||
return V_0_19_0;
|
||||
case V_0_19_1_ID:
|
||||
return V_0_19_1;
|
||||
case V_0_19_2_ID:
|
||||
return V_0_19_2;
|
||||
case V_0_19_3_ID:
|
||||
return V_0_19_3;
|
||||
case V_0_19_4_ID:
|
||||
return V_0_19_4;
|
||||
case V_0_19_5_ID:
|
||||
return V_0_19_5;
|
||||
case V_0_19_6_ID:
|
||||
return V_0_19_6;
|
||||
case V_0_19_7_ID:
|
||||
return V_0_19_7;
|
||||
case V_0_19_8_ID:
|
||||
return V_0_19_8;
|
||||
case V_0_19_9_ID:
|
||||
return V_0_19_9;
|
||||
case V_0_19_10_ID:
|
||||
return V_0_19_10;
|
||||
case V_0_19_11_ID:
|
||||
return V_0_19_11;
|
||||
case V_0_19_12_ID:
|
||||
return V_0_19_12;
|
||||
case V_0_19_13_ID:
|
||||
return V_0_19_13;
|
||||
case V_0_18_0_ID:
|
||||
return V_0_18_0;
|
||||
case V_0_18_1_ID:
|
||||
return V_0_18_1;
|
||||
case V_0_18_2_ID:
|
||||
return V_0_18_2;
|
||||
case V_0_18_3_ID:
|
||||
return V_0_18_3;
|
||||
case V_0_18_4_ID:
|
||||
return V_0_18_4;
|
||||
case V_0_18_5_ID:
|
||||
return V_0_18_5;
|
||||
case V_0_18_6_ID:
|
||||
return V_0_18_6;
|
||||
case V_0_18_7_ID:
|
||||
return V_0_18_7;
|
||||
case V_0_18_8_ID:
|
||||
return V_0_18_8;
|
||||
default:
return new Version(id, org.apache.lucene.util.Version.LATEST);
}
@@ -531,15 +137,23 @@
if (!Strings.hasLength(version)) {
return Version.CURRENT;
}
final boolean snapshot; // this is some BWC for 2.x and before indices
if (snapshot = version.endsWith("-SNAPSHOT")) {
version = version.substring(0, version.length() - 9);
}
String[] parts = version.split("\\.|\\-");
if (parts.length < 3 || parts.length > 4) {
throw new IllegalArgumentException("the version needs to contain major, minor, and revision, and optionally the build: " + version);
}

try {

final int rawMajor = Integer.parseInt(parts[0]);
if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
}
final int betaOffset = rawMajor < 5 ? 0 : 25;
//we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
final int major = Integer.parseInt(parts[0]) * 1000000;
final int major = rawMajor * 1000000;
final int minor = Integer.parseInt(parts[1]) * 10000;
final int revision = Integer.parseInt(parts[2]) * 100;

@@ -547,11 +161,17 @@
int build = 99;
if (parts.length == 4) {
String buildStr = parts[3];
if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
build = Integer.parseInt(buildStr.substring(4));
}
if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
if (buildStr.startsWith("alpha")) {
assert rawMajor >= 5 : "major must be >= 5 but was " + major;
build = Integer.parseInt(buildStr.substring(5));
assert build < 25 : "expected a beta build but " + build + " >= 25";
} else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
build = betaOffset + Integer.parseInt(buildStr.substring(4));
assert build < 50 : "expected a beta build but " + build + " >= 50";
} else if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
build = Integer.parseInt(buildStr.substring(2)) + 50;
} else {
throw new IllegalArgumentException("unable to parse version " + version);
}
}

@@ -614,13 +234,16 @@
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(major).append('.').append(minor).append('.').append(revision);
if (isBeta()) {
if (isAlpha()) {
sb.append("-alpha");
sb.append(build);
} else if (isBeta()) {
if (major >= 2) {
sb.append("-beta");
} else {
sb.append(".Beta");
}
sb.append(build);
sb.append(major < 5 ? build : build-25);
} else if (build < 99) {
if (major >= 2) {
sb.append("-rc");
@@ -656,7 +279,16 @@
}

public boolean isBeta() {
return build < 50;
return major < 5 ? build < 50 : build >= 25 && build < 50;
}

/**
* Returns true iff this version is an alpha version
* Note: This has been introduced in elasticsearch version 5. Previous versions will never
* have an alpha version.
*/
public boolean isAlpha() {
return major < 5 ? false : build < 25;
}

public boolean isRC() {
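As a worked example of the suffix handling above (assuming the parsing logic shown in this hunk, with betaOffset = 25 for 5.x versions), the build component comes out as follows:

// Illustration only, derived from the fromString() logic above:
// "5.0.0-alpha2" -> build = 2            (alpha: raw number, asserted to stay below 25)
// "5.0.0-beta1"  -> build = 25 + 1 = 26  (beta: offset by betaOffset, asserted below 50)
// "5.0.0-rc1"    -> build = 1 + 50 = 51  (rc: offset by 50)
// "5.0.0"        -> build = 99           (a GA release)
// "2.3.0-beta1"  -> build = 0 + 1 = 1    (pre-5.x versions use betaOffset = 0)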
@@ -197,9 +197,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
numberOfPendingTasks = in.readInt();
timedOut = in.readBoolean();
numberOfInFlightFetch = in.readInt();
if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
delayedUnassignedShards= in.readInt();
}
delayedUnassignedShards= in.readInt();
taskMaxWaitingTime = TimeValue.readTimeValue(in);
}

@@ -212,9 +210,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
out.writeInt(numberOfPendingTasks);
out.writeBoolean(timedOut);
out.writeInt(numberOfInFlightFetch);
if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
out.writeInt(delayedUnassignedShards);
}
out.writeInt(delayedUnassignedShards);
taskMaxWaitingTime.writeTo(out);
}
@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.ingest.core.IngestInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.process.ProcessInfo;
@@ -74,12 +75,15 @@ public class NodeInfo extends BaseNodeResponse {
@Nullable
private PluginsAndModules plugins;

NodeInfo() {
@Nullable
private IngestInfo ingest;

public NodeInfo() {
}

public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
super(node);
this.version = version;
this.build = build;
@@ -92,6 +96,7 @@ public class NodeInfo extends BaseNodeResponse {
this.transport = transport;
this.http = http;
this.plugins = plugins;
this.ingest = ingest;
}

/**
@@ -176,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
return this.plugins;
}

@Nullable
public IngestInfo getIngest() {
return ingest;
}

public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
NodeInfo nodeInfo = new NodeInfo();
nodeInfo.readFrom(in);
@@ -220,6 +230,10 @@ public class NodeInfo extends BaseNodeResponse {
plugins = new PluginsAndModules();
plugins.readFrom(in);
}
if (in.readBoolean()) {
ingest = new IngestInfo();
ingest.readFrom(in);
}
}

@Override
@@ -285,5 +299,11 @@ public class NodeInfo extends BaseNodeResponse {
out.writeBoolean(true);
plugins.writeTo(out);
}
if (ingest == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
ingest.writeTo(out);
}
}
}
@@ -38,6 +38,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
private boolean transport = true;
private boolean http = true;
private boolean plugins = true;
private boolean ingest = true;

public NodesInfoRequest() {
}
@@ -62,6 +63,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
transport = false;
http = false;
plugins = false;
ingest = false;
return this;
}

@@ -77,6 +79,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
transport = true;
http = true;
plugins = true;
ingest = true;
return this;
}

@@ -202,6 +205,22 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
return plugins;
}

/**
* Should information about ingest be returned
* @param ingest true if you want info
*/
public NodesInfoRequest ingest(boolean ingest) {
this.ingest = ingest;
return this;
}

/**
* @return true if information about ingest is requested
*/
public boolean ingest() {
return ingest;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -213,6 +232,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
transport = in.readBoolean();
http = in.readBoolean();
plugins = in.readBoolean();
ingest = in.readBoolean();
}

@Override
@@ -226,5 +246,6 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
out.writeBoolean(transport);
out.writeBoolean(http);
out.writeBoolean(plugins);
out.writeBoolean(ingest);
}
}

@@ -110,4 +110,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
request().plugins(plugins);
return this;
}

/**
* Should the node ingest info be returned.
*/
public NodesInfoRequestBuilder setIngest(boolean ingest) {
request().ingest(ingest);
return this;
}
}
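A minimal usage sketch of the new ingest flag (not part of this diff; it assumes a standard Client instance named client):

// Request node info and read back the per-node IngestInfo added above.
NodesInfoResponse resp = client.admin().cluster().prepareNodesInfo().setIngest(true).get();
for (NodeInfo info : resp.getNodes()) {
    IngestInfo ingest = info.getIngest(); // may be null when the ingest flag was not requested
}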
@@ -121,6 +121,9 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
if (nodeInfo.getPlugins() != null) {
nodeInfo.getPlugins().toXContent(builder, params);
}
if (nodeInfo.getIngest() != null) {
nodeInfo.getIngest().toXContent(builder, params);
}

builder.endObject();
}

@@ -80,7 +80,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
NodesInfoRequest request = nodeRequest.request;
return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
request.transport(), request.http(), request.plugins());
request.transport(), request.http(), request.plugins(), request.ingest());
}

@Override
@@ -95,7 +95,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
public NodeInfoRequest() {
}

NodeInfoRequest(String nodeId, NodesInfoRequest request) {
public NodeInfoRequest(String nodeId, NodesInfoRequest request) {
super(nodeId);
this.request = request;
}
@@ -98,7 +98,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta

@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true);
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
List<ShardStats> shardsStats = new ArrayList<>();
for (IndexService indexService : indicesService) {

@@ -166,7 +166,7 @@ public class CommonStats implements Streamable, ToXContent {
completion = indexShard.completionStats(flags.completionDataFields());
break;
case Segments:
segments = indexShard.segmentStats();
segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
break;
case Percolate:
percolate = indexShard.percolateStats();
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -38,6 +39,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
private String[] groups = null;
private String[] fieldDataFields = null;
private String[] completionDataFields = null;
private boolean includeSegmentFileSizes = false;

/**
@@ -62,6 +64,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
groups = null;
fieldDataFields = null;
completionDataFields = null;
includeSegmentFileSizes = false;
return this;
}

@@ -74,6 +77,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
groups = null;
fieldDataFields = null;
completionDataFields = null;
includeSegmentFileSizes = false;
return this;
}

@@ -137,6 +141,15 @@ public class CommonStatsFlags implements Streamable, Cloneable {
return this.completionDataFields;
}

public CommonStatsFlags includeSegmentFileSizes(boolean includeSegmentFileSizes) {
this.includeSegmentFileSizes = includeSegmentFileSizes;
return this;
}

public boolean includeSegmentFileSizes() {
return this.includeSegmentFileSizes;
}

public boolean isSet(Flag flag) {
return flags.contains(flag);
}
@@ -177,6 +190,9 @@ public class CommonStatsFlags implements Streamable, Cloneable {
out.writeStringArrayNullable(groups);
out.writeStringArrayNullable(fieldDataFields);
out.writeStringArrayNullable(completionDataFields);
if (out.getVersion().onOrAfter(Version.V_5_0_0)) {
out.writeBoolean(includeSegmentFileSizes);
}
}

@Override
@@ -192,6 +208,11 @@ public class CommonStatsFlags implements Streamable, Cloneable {
groups = in.readStringArray();
fieldDataFields = in.readStringArray();
completionDataFields = in.readStringArray();
if (in.getVersion().onOrAfter(Version.V_5_0_0)) {
includeSegmentFileSizes = in.readBoolean();
} else {
includeSegmentFileSizes = false;
}
}

@Override
@@ -265,6 +265,15 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
return flags.isSet(Flag.Recovery);
}

public boolean includeSegmentFileSizes() {
return flags.includeSegmentFileSizes();
}

public IndicesStatsRequest includeSegmentFileSizes(boolean includeSegmentFileSizes) {
flags.includeSegmentFileSizes(includeSegmentFileSizes);
return this;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);

@@ -166,4 +166,9 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
request.recovery(recovery);
return this;
}

public IndicesStatsRequestBuilder setIncludeSegmentFileSizes(boolean includeSegmentFileSizes) {
request.includeSegmentFileSizes(includeSegmentFileSizes);
return this;
}
}
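A short usage sketch for the new segment file-size flag (an assumed example, not from the diff, using a Client named client and a hypothetical index name):

// Ask only for segment stats, including the per-file size breakdown, for one index.
IndicesStatsResponse stats = client.admin().indices().prepareStats("my-index")
        .clear()
        .setSegments(true)
        .setIncludeSegmentFileSizes(true)
        .get();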
@@ -144,6 +144,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
}
if (request.segments()) {
flags.set(CommonStatsFlags.Flag.Segments);
flags.includeSegmentFileSizes(request.includeSegmentFileSizes());
}
if (request.completion()) {
flags.set(CommonStatsFlags.Flag.Completion);
@ -60,8 +60,11 @@ import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;

/**
 *
@ -73,27 +76,41 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
    private final ClusterService clusterService;
    private final TransportShardBulkAction shardBulkAction;
    private final TransportCreateIndexAction createIndexAction;
    private final LongSupplier relativeTimeProvider;

    @Inject
    public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
                               TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               AutoCreateIndex autoCreateIndex) {
        this(settings, threadPool, transportService, clusterService,
                shardBulkAction, createIndexAction,
                actionFilters, indexNameExpressionResolver,
                autoCreateIndex,
                System::nanoTime);
    }

    public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
                               TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
        super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
        Objects.requireNonNull(relativeTimeProvider);
        this.clusterService = clusterService;
        this.shardBulkAction = shardBulkAction;
        this.createIndexAction = createIndexAction;

        this.autoCreateIndex = autoCreateIndex;
        this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
        this.relativeTimeProvider = relativeTimeProvider;
    }

    @Override
    protected void doExecute(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
        final long startTime = System.currentTimeMillis();
        final long startTime = relativeTime();
        final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());

        if (autoCreateIndex.needToCheck()) {
        if (needToCheck()) {
            // Keep track of all unique indices and all unique types per index for the create index requests:
            final Map<String, Set<String>> indicesAndTypes = new HashMap<>();
            for (ActionRequest request : bulkRequest.requests) {
@ -112,7 +129,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            ClusterState state = clusterService.state();
            for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
                final String index = entry.getKey();
                if (autoCreateIndex.shouldAutoCreate(index, state)) {
                if (shouldAutoCreate(index, state)) {
                    CreateIndexRequest createIndexRequest = new CreateIndexRequest();
                    createIndexRequest.index(index);
                    for (String type : entry.getValue()) {
@ -163,6 +180,14 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        }
    }

    boolean needToCheck() {
        return autoCreateIndex.needToCheck();
    }

    boolean shouldAutoCreate(String index, ClusterState state) {
        return autoCreateIndex.shouldAutoCreate(index, state);
    }

    private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, ActionRequest request, String index, Throwable e) {
        if (request instanceof IndexRequest) {
            IndexRequest indexRequest = (IndexRequest) request;
@ -195,16 +220,15 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
     * @see #doExecute(BulkRequest, org.elasticsearch.action.ActionListener)
     */
    public void executeBulk(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
        final long startTime = System.currentTimeMillis();
        executeBulk(bulkRequest, startTime, listener, new AtomicArray<>(bulkRequest.requests.size()));
        final long startTimeNanos = relativeTime();
        executeBulk(bulkRequest, startTimeNanos, listener, new AtomicArray<>(bulkRequest.requests.size()));
    }

    private long buildTookInMillis(long startTime) {
        // protect ourselves against time going backwards
        return Math.max(1, System.currentTimeMillis() - startTime);
    private long buildTookInMillis(long startTimeNanos) {
        return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos);
    }

    private void executeBulk(final BulkRequest bulkRequest, final long startTime, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
    void executeBulk(final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
        final ClusterState clusterState = clusterService.state();
        // TODO use timeout to wait here if its blocked...
        clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);
@ -302,7 +326,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        }

        if (requestsByShard.isEmpty()) {
            listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
            listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
            return;
        }

@ -352,7 +376,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            }

            private void finishHim() {
                listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
                listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
            }
        });
    }
@ -398,7 +422,6 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
        return false;
    }


    private static class ConcreteIndices {
        private final ClusterState state;
        private final IndexNameExpressionResolver indexNameExpressionResolver;
@ -422,4 +445,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            return concreteIndex;
        }
    }

    private long relativeTime() {
        return relativeTimeProvider.getAsLong();
    }

}
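Illustrative sketch (not part of the commit): the change above replaces the wall clock with an injectable relative clock (a LongSupplier defaulting to System::nanoTime), so the bulk "took" time is monotonic and tests can control time. This is a minimal, self-contained analogue of that pattern, not the actual TransportBulkAction class.

import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

final class TookTimer {
    private final LongSupplier relativeTimeProvider; // e.g. System::nanoTime in production

    TookTimer(LongSupplier relativeTimeProvider) {
        this.relativeTimeProvider = Objects.requireNonNull(relativeTimeProvider);
    }

    long startNanos() {
        return relativeTimeProvider.getAsLong();
    }

    // Mirrors buildTookInMillis: a monotonic clock cannot go backwards, so no Math.max guard is needed.
    long tookInMillis(long startNanos) {
        return TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startNanos);
    }

    public static void main(String[] args) {
        // A test can inject a fake clock and advance it deterministically.
        long[] fakeNanos = {0L};
        TookTimer timer = new TookTimer(() -> fakeNanos[0]);
        long start = timer.startNanos();
        fakeNanos[0] += TimeUnit.MILLISECONDS.toNanos(42);
        System.out.println(timer.tookInMillis(start)); // prints 42
    }
}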
@ -20,6 +20,10 @@
|
||||
package org.elasticsearch.action.ingest;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
@ -27,24 +31,32 @@ import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.ingest.PipelineStore;
|
||||
import org.elasticsearch.ingest.core.IngestInfo;
|
||||
import org.elasticsearch.node.service.NodeService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, WritePipelineResponse> {
|
||||
|
||||
private final PipelineStore pipelineStore;
|
||||
private final ClusterService clusterService;
|
||||
private final TransportNodesInfoAction nodesInfoAction;
|
||||
|
||||
@Inject
|
||||
public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) {
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService,
|
||||
TransportNodesInfoAction nodesInfoAction) {
|
||||
super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.nodesInfoAction = nodesInfoAction;
|
||||
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
|
||||
}
|
||||
|
||||
@ -60,7 +72,28 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPip
|
||||
|
||||
@Override
|
||||
protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception {
|
||||
pipelineStore.put(clusterService, request, listener);
|
||||
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
|
||||
nodesInfoRequest.clear();
|
||||
nodesInfoRequest.ingest(true);
|
||||
nodesInfoAction.execute(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
|
||||
@Override
|
||||
public void onResponse(NodesInfoResponse nodeInfos) {
|
||||
try {
|
||||
Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
|
||||
for (NodeInfo nodeInfo : nodeInfos) {
|
||||
ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
|
||||
}
|
||||
pipelineStore.put(clusterService, ingestInfos, request, listener);
|
||||
} catch (Exception e) {
|
||||
onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -110,7 +110,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
}
|
||||
|
||||
|
||||
private class AsyncAction {
|
||||
class AsyncAction {
|
||||
|
||||
private final NodesRequest request;
|
||||
private final String[] nodesIds;
|
||||
@ -120,7 +120,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
private final AtomicInteger counter = new AtomicInteger();
|
||||
private final Task task;
|
||||
|
||||
private AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
|
||||
AsyncAction(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
|
||||
this.task = task;
|
||||
this.request = request;
|
||||
this.listener = listener;
|
||||
@ -135,7 +135,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
this.responses = new AtomicReferenceArray<>(this.nodesIds.length);
|
||||
}
|
||||
|
||||
private void start() {
|
||||
void start() {
|
||||
if (nodesIds.length == 0) {
|
||||
// nothing to notify
|
||||
threadPool.generic().execute(new Runnable() {
|
||||
@ -158,11 +158,6 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
|
||||
try {
|
||||
if (node == null) {
|
||||
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
|
||||
} else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
|
||||
// the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
|
||||
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we need to fix
|
||||
// those (and they randomize the client node usage, so tricky to find when)
|
||||
onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
|
||||
} else {
|
||||
ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
|
||||
if (task != null) {
|
||||
|
@ -235,12 +235,6 @@ public abstract class TransportTasksAction<
|
||||
try {
|
||||
if (node == null) {
|
||||
onFailure(idx, nodeId, new NoSuchNodeException(nodeId));
|
||||
} else if (!clusterService.localNode().shouldConnectTo(node) && !clusterService.localNode().equals(node)) {
|
||||
// the check "!clusterService.localNode().equals(node)" is to maintain backward comp. where before
|
||||
// we allowed to connect from "local" client node to itself, certain tests rely on it, if we remove it, we
|
||||
// need to fix
|
||||
// those (and they randomize the client node usage, so tricky to find when)
|
||||
onFailure(idx, nodeId, new NodeShouldNotConnectException(clusterService.localNode(), node));
|
||||
} else {
|
||||
NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
|
||||
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
|
||||
|
@ -22,7 +22,6 @@ package org.elasticsearch.bootstrap;
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.PidFile;
|
||||
@ -33,8 +32,6 @@ import org.elasticsearch.common.inject.CreationException;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.LogConfigurator;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
@ -42,17 +39,12 @@ import org.elasticsearch.monitor.os.OsProbe;
|
||||
import org.elasticsearch.monitor.process.ProcessProbe;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
import org.elasticsearch.transport.TransportSettings;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
|
||||
@ -142,6 +134,8 @@ final class Bootstrap {
|
||||
// we've already logged this.
|
||||
}
|
||||
|
||||
JNANatives.trySetMaxNumberOfThreads();
|
||||
|
||||
// init lucene random seed. it will use /dev/urandom where available:
|
||||
StringHelper.randomId();
|
||||
}
|
||||
@ -189,7 +183,8 @@ final class Bootstrap {
|
||||
.put(settings)
|
||||
.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true)
|
||||
.build();
|
||||
enforceOrLogLimits(nodeSettings);
|
||||
|
||||
BootstrapCheck.check(nodeSettings);
|
||||
|
||||
node = new Node(nodeSettings);
|
||||
}
|
||||
@ -349,50 +344,4 @@ final class Bootstrap {
|
||||
}
|
||||
}
|
||||
|
||||
static final Set<Setting> ENFORCE_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
|
||||
TransportSettings.BIND_HOST,
|
||||
TransportSettings.HOST,
|
||||
TransportSettings.PUBLISH_HOST,
|
||||
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
|
||||
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
|
||||
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING
|
||||
)));
|
||||
|
||||
private static boolean enforceLimits(Settings settings) {
|
||||
if (Build.CURRENT.isSnapshot()) {
|
||||
return false;
|
||||
}
|
||||
for (Setting setting : ENFORCE_SETTINGS) {
|
||||
if (setting.exists(settings)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void enforceOrLogLimits(Settings settings) { // pkg private for testing
|
||||
/* We enforce limits once any network host is configured. In this case we assume the node is running in production
|
||||
* and all production limit checks must pass. This should be extended as we go to settings like:
|
||||
* - discovery.zen.minimum_master_nodes
|
||||
* - discovery.zen.ping.unicast.hosts is set if we use zen disco
|
||||
* - ensure we can write in all data directories
|
||||
* - fail if mlockall failed and was configured
|
||||
* - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
|
||||
* - fail if the default cluster.name is used, if this is setup on network a real clustername should be used?*/
|
||||
final boolean enforceLimits = enforceLimits(settings);
|
||||
final ESLogger logger = Loggers.getLogger(Bootstrap.class);
|
||||
final long maxFileDescriptorCount = ProcessProbe.getInstance().getMaxFileDescriptorCount();
|
||||
if (maxFileDescriptorCount != -1) {
|
||||
final int fileDescriptorCountThreshold = (1 << 16);
|
||||
if (maxFileDescriptorCount < fileDescriptorCountThreshold) {
|
||||
if (enforceLimits){
|
||||
throw new IllegalStateException("max file descriptors [" + maxFileDescriptorCount
|
||||
+ "] for elasticsearch process likely too low, increase it to at least [" + fileDescriptorCountThreshold +"]");
|
||||
}
|
||||
logger.warn(
|
||||
"max file descriptors [{}] for elasticsearch process likely too low, consider increasing to at least [{}]",
|
||||
maxFileDescriptorCount, fileDescriptorCountThreshold);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -26,8 +26,8 @@ import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.cli.CliTool;
|
||||
import org.elasticsearch.common.cli.CliToolConfig;
|
||||
import org.elasticsearch.common.cli.Terminal;
|
||||
import org.elasticsearch.common.cli.UserError;
|
||||
import org.elasticsearch.common.cli.Terminal;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
@ -37,7 +37,6 @@ import java.util.Iterator;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
|
||||
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
|
||||
|
@ -0,0 +1,252 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.bootstrap;
|
||||
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.monitor.process.ProcessProbe;
|
||||
import org.elasticsearch.transport.TransportSettings;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* We enforce limits once any network host is configured. In this case we assume the node is running in production
|
||||
* and all production limit checks must pass. This should be extended as we go to settings like:
|
||||
* - discovery.zen.minimum_master_nodes
|
||||
* - discovery.zen.ping.unicast.hosts is set if we use zen disco
|
||||
* - ensure we can write in all data directories
|
||||
* - fail if vm.max_map_count is under a certain limit (not sure if this works cross platform)
|
||||
* - fail if the default cluster.name is used, if this is setup on network a real clustername should be used?
|
||||
*/
|
||||
final class BootstrapCheck {
|
||||
|
||||
private BootstrapCheck() {
|
||||
}
|
||||
|
||||
/**
|
||||
* checks the current limits against the snapshot or release build
|
||||
* checks
|
||||
*
|
||||
* @param settings the current node settings
|
||||
*/
|
||||
public static void check(final Settings settings) {
|
||||
check(enforceLimits(settings), checks(settings));
|
||||
}
|
||||
|
||||
/**
|
||||
* executes the provided checks and fails the node if
|
||||
* enforceLimits is true, otherwise logs warnings
|
||||
*
|
||||
* @param enforceLimits true if the checks should be enforced or
|
||||
* warned
|
||||
* @param checks the checks to execute
|
||||
*/
|
||||
// visible for testing
|
||||
static void check(final boolean enforceLimits, final List<Check> checks) {
|
||||
final ESLogger logger = Loggers.getLogger(BootstrapCheck.class);
|
||||
|
||||
for (final Check check : checks) {
|
||||
final boolean fail = check.check();
|
||||
if (fail) {
|
||||
if (enforceLimits) {
|
||||
throw new RuntimeException(check.errorMessage());
|
||||
} else {
|
||||
logger.warn(check.errorMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The set of settings such that if any are set for the node, then
|
||||
* the checks are enforced
|
||||
*
|
||||
* @return the enforcement settings
|
||||
*/
|
||||
// visible for testing
|
||||
static Set<Setting> enforceSettings() {
|
||||
return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
|
||||
TransportSettings.BIND_HOST,
|
||||
TransportSettings.HOST,
|
||||
TransportSettings.PUBLISH_HOST,
|
||||
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
|
||||
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
|
||||
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING
|
||||
)));
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests if the checks should be enforced
|
||||
*
|
||||
* @param settings the current node settings
|
||||
* @return true if the checks should be enforced
|
||||
*/
|
||||
// visible for testing
|
||||
static boolean enforceLimits(final Settings settings) {
|
||||
return enforceSettings().stream().anyMatch(s -> s.exists(settings));
|
||||
}
|
||||
|
||||
// the list of checks to execute
|
||||
private static List<Check> checks(final Settings settings) {
|
||||
final List<Check> checks = new ArrayList<>();
|
||||
final FileDescriptorCheck fileDescriptorCheck
|
||||
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
|
||||
checks.add(fileDescriptorCheck);
|
||||
checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings)));
|
||||
if (Constants.LINUX) {
|
||||
checks.add(new MaxNumberOfThreadsCheck());
|
||||
}
|
||||
return Collections.unmodifiableList(checks);
|
||||
}
|
||||
|
||||
/**
|
||||
* Encapsulates a limit check
|
||||
*/
|
||||
interface Check {
|
||||
|
||||
/**
|
||||
* test if the node fails the check
|
||||
*
|
||||
* @return true if the node failed the check
|
||||
*/
|
||||
boolean check();
|
||||
|
||||
/**
|
||||
* the message for a failed check
|
||||
*
|
||||
* @return the error message on check failure
|
||||
*/
|
||||
String errorMessage();
|
||||
|
||||
}
|
||||
|
||||
static class OsXFileDescriptorCheck extends FileDescriptorCheck {
|
||||
|
||||
public OsXFileDescriptorCheck() {
|
||||
// see constant OPEN_MAX defined in
|
||||
// /usr/include/sys/syslimits.h on OS X and its use in JVM
|
||||
// initialization in int os:init_2(void) defined in the JVM
|
||||
// code for BSD (contains OS X)
|
||||
super(10240);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
static class FileDescriptorCheck implements Check {
|
||||
|
||||
private final int limit;
|
||||
|
||||
FileDescriptorCheck() {
|
||||
this(1 << 16);
|
||||
}
|
||||
|
||||
protected FileDescriptorCheck(final int limit) {
|
||||
if (limit <= 0) {
|
||||
throw new IllegalArgumentException("limit must be positive but was [" + limit + "]");
|
||||
}
|
||||
this.limit = limit;
|
||||
}
|
||||
|
||||
public final boolean check() {
|
||||
final long maxFileDescriptorCount = getMaxFileDescriptorCount();
|
||||
return maxFileDescriptorCount != -1 && maxFileDescriptorCount < limit;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final String errorMessage() {
|
||||
return String.format(
|
||||
Locale.ROOT,
|
||||
"max file descriptors [%d] for elasticsearch process likely too low, increase to at least [%d]",
|
||||
getMaxFileDescriptorCount(),
|
||||
limit
|
||||
);
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
long getMaxFileDescriptorCount() {
|
||||
return ProcessProbe.getInstance().getMaxFileDescriptorCount();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
static class MlockallCheck implements Check {
|
||||
|
||||
private final boolean mlockallSet;
|
||||
|
||||
public MlockallCheck(final boolean mlockAllSet) {
|
||||
this.mlockallSet = mlockAllSet;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean check() {
|
||||
return mlockallSet && !isMemoryLocked();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String errorMessage() {
|
||||
return "memory locking requested for elasticsearch process but memory is not locked";
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
boolean isMemoryLocked() {
|
||||
return Natives.isMemoryLocked();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class MaxNumberOfThreadsCheck implements Check {
|
||||
|
||||
private final long maxNumberOfThreadsThreshold = 1 << 15;
|
||||
|
||||
@Override
|
||||
public boolean check() {
|
||||
return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String errorMessage() {
|
||||
return String.format(
|
||||
Locale.ROOT,
|
||||
"max number of threads [%d] for user [%s] likely too low, increase to at least [%d]",
|
||||
getMaxNumberOfThreads(),
|
||||
BootstrapInfo.getSystemProperties().get("user.name"),
|
||||
maxNumberOfThreadsThreshold);
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
long getMaxNumberOfThreads() {
|
||||
return JNANatives.MAX_NUMBER_OF_THREADS;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
@ -48,6 +48,9 @@ class JNANatives {
    // Set to true, in case policy can be applied to all threads of the process (even existing ones)
    // otherwise they are only inherited for new threads (ES app threads)
    static boolean LOCAL_SECCOMP_ALL = false;
    // set to the maximum number of threads that can be created for
    // the user ID that owns the running Elasticsearch process
    static long MAX_NUMBER_OF_THREADS = -1;

    static void tryMlockall() {
        int errno = Integer.MIN_VALUE;
@ -103,13 +106,29 @@
        }
    }

    static void trySetMaxNumberOfThreads() {
        if (Constants.LINUX) {
            // this is only valid on Linux and the value *is* different on OS X
            // see /usr/include/sys/resource.h on OS X
            // on Linux the resource RLIMIT_NPROC means *the number of threads*
            // this is in opposition to BSD-derived OSes
            final int rlimit_nproc = 6;

            final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit();
            if (JNACLibrary.getrlimit(rlimit_nproc, rlimit) == 0) {
                MAX_NUMBER_OF_THREADS = rlimit.rlim_cur.longValue();
            } else {
                logger.warn("unable to retrieve max number of threads [" + JNACLibrary.strerror(Native.getLastError()) + "]");
            }
        }
    }

    static String rlimitToString(long value) {
        assert Constants.LINUX || Constants.MAC_OS_X;
        if (value == JNACLibrary.RLIM_INFINITY) {
            return "unlimited";
        } else {
            // TODO, on java 8 use Long.toUnsignedString, since that's what it is.
            return Long.toString(value);
            return Long.toUnsignedString(value);
        }
    }
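Illustrative sketch (not part of the commit): how the probed RLIMIT_NPROC value above is consumed. It mirrors the MaxNumberOfThreadsCheck added earlier in this commit, but as a simplified standalone analogue with hypothetical names; -1 ("unknown") is deliberately not treated as a failure.

final class ThreadLimitProbeExample {
    static final long UNKNOWN = -1;         // value when the limit could not be read
    static final long THRESHOLD = 1 << 15;  // 32768, the same threshold as the commit's check

    static boolean tooLow(long maxNumberOfThreads) {
        // unknown limits are not enforced, only limits known to be below the threshold
        return maxNumberOfThreads != UNKNOWN && maxNumberOfThreads < THRESHOLD;
    }

    public static void main(String[] args) {
        System.out.println(tooLow(1024));    // true: limit is below the threshold
        System.out.println(tooLow(65536));   // false
        System.out.println(tooLow(UNKNOWN)); // false: unknown limits are not enforced
    }
}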
|
||||
|
||||
|
@ -913,11 +913,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                }
            }
        } else if ("warmers".equals(currentFieldName)) {
            // TODO: do this in 4.0:
            // TODO: do this in 6.0:
            // throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
            // ignore: warmers have been removed in 3.0 and are
            // ignore: warmers have been removed in 5.0 and are
            // simply ignored when upgrading from 2.x
            assert Version.CURRENT.major <= 3;
            assert Version.CURRENT.major <= 5;
            parser.skipChildren();
        } else {
            // check if its a custom index metadata

@ -209,16 +209,6 @@ public class DiscoveryNode implements Streamable, ToXContent {
        this.version = version;
    }

    /**
     * Should this node form a connection to the provided node.
     */
    public boolean shouldConnectTo(DiscoveryNode otherNode) {
        if (clientNode() && otherNode.clientNode()) {
            return false;
        }
        return true;
    }

    /**
     * The address that the node can be communicated with.
     */

@ -266,7 +266,7 @@ public final class ShardRouting implements Streamable, ToXContent {
            return false;
        }

        if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) {
        if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0)) {
            // when no shards with this id have ever been active for this index
            return false;
        }
|
||||
|
@ -310,7 +310,7 @@ public class AllocationService extends AbstractComponent {
|
||||
}
|
||||
|
||||
// move shards that no longer can be allocated
|
||||
changed |= moveShards(allocation);
|
||||
changed |= shardsAllocators.moveShards(allocation);
|
||||
|
||||
// rebalance
|
||||
changed |= shardsAllocators.rebalance(allocation);
|
||||
@ -327,46 +327,6 @@ public class AllocationService extends AbstractComponent {
|
||||
}
|
||||
}
|
||||
|
||||
private boolean moveShards(RoutingAllocation allocation) {
|
||||
boolean changed = false;
|
||||
|
||||
// create a copy of the shards interleaving between nodes, and check if they can remain
|
||||
List<ShardRouting> shards = new ArrayList<>();
|
||||
int index = 0;
|
||||
boolean found = true;
|
||||
final RoutingNodes routingNodes = allocation.routingNodes();
|
||||
while (found) {
|
||||
found = false;
|
||||
for (RoutingNode routingNode : routingNodes) {
|
||||
if (index >= routingNode.size()) {
|
||||
continue;
|
||||
}
|
||||
found = true;
|
||||
shards.add(routingNode.get(index));
|
||||
}
|
||||
index++;
|
||||
}
|
||||
for (int i = 0; i < shards.size(); i++) {
|
||||
ShardRouting shardRouting = shards.get(i);
|
||||
// we can only move started shards...
|
||||
if (!shardRouting.started()) {
|
||||
continue;
|
||||
}
|
||||
final RoutingNode routingNode = routingNodes.node(shardRouting.currentNodeId());
|
||||
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
|
||||
if (decision.type() == Decision.Type.NO) {
|
||||
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
|
||||
boolean moved = shardsAllocators.move(shardRouting, routingNode, allocation);
|
||||
if (!moved) {
|
||||
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
|
||||
} else {
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return changed;
|
||||
}
|
||||
|
||||
private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
|
||||
boolean changed = false;
|
||||
final RoutingNodes routingNodes = allocation.routingNodes();
|
||||
|
@ -42,6 +42,7 @@ import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.gateway.PriorityComparator;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
@ -49,6 +50,7 @@ import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
@ -119,9 +121,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||
public boolean moveShards(RoutingAllocation allocation) {
|
||||
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
|
||||
return balancer.move(shardRouting, node);
|
||||
return balancer.moveShards();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -489,56 +491,93 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
||||
}
|
||||
|
||||
/**
|
||||
* This function executes a move operation moving the given shard from
|
||||
* the given node to the minimal eligible node with respect to the
|
||||
* weight function. Iff the shard is moved the shard will be set to
|
||||
* Move started shards that can not be allocated to a node anymore
|
||||
*
|
||||
* For each shard to be moved this function executes a move operation
|
||||
* to the minimal eligible node with respect to the
|
||||
* weight function. If a shard is moved the shard will be set to
|
||||
* {@link ShardRoutingState#RELOCATING} and a shadow instance of this
|
||||
* shard is created with an incremented version in the state
|
||||
* {@link ShardRoutingState#INITIALIZING}.
|
||||
*
|
||||
* @return <code>true</code> iff the shard has successfully been moved.
|
||||
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
|
||||
*/
|
||||
public boolean move(ShardRouting shard, RoutingNode node ) {
|
||||
if (nodes.isEmpty() || !shard.started()) {
|
||||
/* with no nodes or a not started shard this is pointless */
|
||||
public boolean moveShards() {
|
||||
if (nodes.isEmpty()) {
|
||||
/* with no nodes this is pointless */
|
||||
return false;
|
||||
}
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Try moving shard [{}] from [{}]", shard, node);
|
||||
}
|
||||
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
|
||||
boolean changed = initialize(routingNodes, unassigned);
|
||||
if (!changed) {
|
||||
final ModelNode sourceNode = nodes.get(node.nodeId());
|
||||
assert sourceNode != null;
|
||||
final NodeSorter sorter = newNodeSorter();
|
||||
sorter.reset(shard.getIndexName());
|
||||
final ModelNode[] nodes = sorter.modelNodes;
|
||||
assert sourceNode.containsShard(shard);
|
||||
/*
|
||||
* the sorter holds the minimum weight node first for the shards index.
|
||||
* We now walk through the nodes until we find a node to allocate the shard.
|
||||
* This is not guaranteed to be balanced after this operation we still try best effort to
|
||||
* allocate on the minimal eligible node.
|
||||
*/
|
||||
|
||||
for (ModelNode currentNode : nodes) {
|
||||
if (currentNode.getNodeId().equals(node.nodeId())) {
|
||||
// Create a copy of the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
|
||||
// shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
|
||||
// offloading the shards.
|
||||
List<ShardRouting> shards = new ArrayList<>();
|
||||
int index = 0;
|
||||
boolean found = true;
|
||||
while (found) {
|
||||
found = false;
|
||||
for (RoutingNode routingNode : routingNodes) {
|
||||
if (index >= routingNode.size()) {
|
||||
continue;
|
||||
}
|
||||
RoutingNode target = currentNode.getRoutingNode(routingNodes);
|
||||
Decision allocationDecision = allocation.deciders().canAllocate(shard, target, allocation);
|
||||
Decision rebalanceDecision = allocation.deciders().canRebalance(shard, allocation);
|
||||
Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
|
||||
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
|
||||
sourceNode.removeShard(shard);
|
||||
ShardRouting targetRelocatingShard = routingNodes.relocate(shard, target.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
|
||||
currentNode.addShard(targetRelocatingShard, decision);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
|
||||
found = true;
|
||||
ShardRouting shardRouting = routingNode.get(index);
|
||||
// we can only move started shards...
|
||||
if (shardRouting.started()) {
|
||||
shards.add(shardRouting);
|
||||
}
|
||||
}
|
||||
index++;
|
||||
}
|
||||
if (shards.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
|
||||
boolean changed = initialize(routingNodes, unassigned);
|
||||
if (changed == false) {
|
||||
final NodeSorter sorter = newNodeSorter();
|
||||
final ModelNode[] modelNodes = sorter.modelNodes;
|
||||
for (ShardRouting shardRouting : shards) {
|
||||
final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
|
||||
assert sourceNode != null && sourceNode.containsShard(shardRouting);
|
||||
final RoutingNode routingNode = sourceNode.getRoutingNode(routingNodes);
|
||||
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
|
||||
if (decision.type() == Decision.Type.NO) {
|
||||
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
|
||||
sorter.reset(shardRouting.getIndexName());
|
||||
/*
|
||||
* the sorter holds the minimum weight node first for the shards index.
|
||||
* We now walk through the nodes until we find a node to allocate the shard.
|
||||
* This is not guaranteed to be balanced after this operation we still try best effort to
|
||||
* allocate on the minimal eligible node.
|
||||
*/
|
||||
boolean moved = false;
|
||||
for (ModelNode currentNode : modelNodes) {
|
||||
if (currentNode == sourceNode) {
|
||||
continue;
|
||||
}
|
||||
RoutingNode target = currentNode.getRoutingNode(routingNodes);
|
||||
Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
|
||||
Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation);
|
||||
if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
|
||||
Decision sourceDecision = sourceNode.removeShard(shardRouting);
|
||||
ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
|
||||
// re-add (now relocating shard) to source node
|
||||
sourceNode.addShard(shardRouting, sourceDecision);
|
||||
Decision targetDecision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
|
||||
currentNode.addShard(targetRelocatingShard, targetDecision);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
|
||||
}
|
||||
moved = true;
|
||||
changed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (moved == false) {
|
||||
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
|
||||
}
|
||||
changed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -19,7 +19,6 @@

package org.elasticsearch.cluster.routing.allocation.allocator;

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
@ -36,22 +35,22 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
public interface ShardsAllocator {

    /**
     * Applies changes on started nodes based on the implemented algorithm. For example if a
     * shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
     * Applies changes on started nodes based on the implemented algorithm. For example if a
     * shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
     * this allocator might apply some cleanups on the node that used to hold the shard.
     * @param allocation all started {@link ShardRouting shards}
     */
    void applyStartedShards(StartedRerouteAllocation allocation);

    /**
     * Applies changes on failed nodes based on the implemented algorithm.
     * Applies changes on failed nodes based on the implemented algorithm.
     * @param allocation all failed {@link ShardRouting shards}
     */
    void applyFailedShards(FailedRerouteAllocation allocation);

    /**
     * Assign all unassigned shards to nodes
     *
     * Assign all unassigned shards to nodes
     *
     * @param allocation current node allocation
     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
     */
@ -59,19 +58,17 @@ public interface ShardsAllocator {

    /**
     * Rebalancing number of shards on all nodes
     *
     *
     * @param allocation current node allocation
     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
     */
    boolean rebalance(RoutingAllocation allocation);

    /**
     * Moves a shard from the given node to other node.
     *
     * @param shardRouting the shard to move
     * @param node A node containing the shard
     * Move started shards that can not be allocated to a node anymore
     *
     * @param allocation current node allocation
     * @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
     */
    boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation);
    boolean moveShards(RoutingAllocation allocation);
}

@ -19,8 +19,6 @@

package org.elasticsearch.cluster.routing.allocation.allocator;

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
@ -96,7 +94,7 @@ public class ShardsAllocators extends AbstractComponent implements ShardsAllocat
    }

    @Override
    public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        return allocator.move(shardRouting, node, allocation);
    public boolean moveShards(RoutingAllocation allocation) {
        return allocator.moveShards(allocation);
    }
}
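Illustrative sketch (not part of the commit): the new moveShards contract lets the allocator pick its own iteration order, and the BalancedShardsAllocator change in this commit builds that order by interleaving shards across nodes (position 0 from every node, then position 1, and so on) so no single node dominates the front of the move queue. This is a self-contained, generic version of that interleaving, with hypothetical names and plain lists instead of routing nodes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class InterleaveExample {
    static <T> List<T> interleave(List<List<T>> perNode) {
        List<T> out = new ArrayList<>();
        int index = 0;
        boolean found = true;
        while (found) {
            found = false;
            for (List<T> node : perNode) {
                if (index >= node.size()) {
                    continue;
                }
                found = true;
                out.add(node.get(index));
            }
            index++;
        }
        return out;
    }

    public static void main(String[] args) {
        List<List<String>> nodes = Arrays.asList(
                Arrays.asList("a0", "a1", "a2"),
                Arrays.asList("b0"),
                Arrays.asList("c0", "c1"));
        System.out.println(interleave(nodes)); // [a0, b0, c0, a1, c1, a2]
    }
}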
|
||||
|
@ -570,9 +570,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
|
||||
// TODO, do this in parallel (and wait)
|
||||
for (DiscoveryNode node : nodesDelta.addedNodes()) {
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
transportService.connectToNode(node);
|
||||
} catch (Throwable e) {
|
||||
@ -824,9 +821,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
if (lifecycle.stoppedOrClosed()) {
|
||||
return;
|
||||
}
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
if (clusterState.nodes().nodeExists(node.id())) { // we double check existence of node since connectToNode might take time...
|
||||
if (!transportService.nodeConnected(node)) {
|
||||
try {
|
||||
@ -873,10 +867,6 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
return Strings.randomBase64UUID(random);
|
||||
}
|
||||
|
||||
private boolean nodeRequiresConnection(DiscoveryNode node) {
|
||||
return localNode().shouldConnectTo(node);
|
||||
}
|
||||
|
||||
private static class LocalNodeMasterListeners implements ClusterStateListener {
|
||||
|
||||
private final List<LocalNodeMasterListener> listeners = new CopyOnWriteArrayList<>();
|
||||
|
@ -23,6 +23,7 @@ import java.io.BufferedReader;
|
||||
import java.io.Console;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.PrintWriter;
|
||||
import java.nio.charset.Charset;
|
||||
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
@ -52,6 +53,13 @@ public abstract class Terminal {
|
||||
/** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */
|
||||
private Verbosity verbosity = Verbosity.NORMAL;
|
||||
|
||||
/** The newline used when calling println. */
|
||||
private final String lineSeparator;
|
||||
|
||||
protected Terminal(String lineSeparator) {
|
||||
this.lineSeparator = lineSeparator;
|
||||
}
|
||||
|
||||
/** Sets the verbosity of the terminal. */
|
||||
void setVerbosity(Verbosity verbosity) {
|
||||
this.verbosity = verbosity;
|
||||
@ -63,8 +71,8 @@ public abstract class Terminal {
|
||||
/** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
|
||||
public abstract char[] readSecret(String prompt);
|
||||
|
||||
/** Print a message directly to the terminal. */
|
||||
protected abstract void doPrint(String msg);
|
||||
/** Returns a Writer which can be used to write to the terminal directly. */
|
||||
public abstract PrintWriter getWriter();
|
||||
|
||||
/** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */
|
||||
public final void println(String msg) {
|
||||
@ -74,7 +82,8 @@ public abstract class Terminal {
|
||||
/** Prints a line to the terminal at {@code verbosity} level. */
|
||||
public final void println(Verbosity verbosity, String msg) {
|
||||
if (this.verbosity.ordinal() >= verbosity.ordinal()) {
|
||||
doPrint(msg + System.lineSeparator());
|
||||
getWriter().print(msg + lineSeparator);
|
||||
getWriter().flush();
|
||||
}
|
||||
}
|
||||
|
||||
@ -82,14 +91,17 @@ public abstract class Terminal {
|
||||
|
||||
private static final Console console = System.console();
|
||||
|
||||
ConsoleTerminal() {
|
||||
super(System.lineSeparator());
|
||||
}
|
||||
|
||||
static boolean isSupported() {
|
||||
return console != null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void doPrint(String msg) {
|
||||
console.printf("%s", msg);
|
||||
console.flush();
|
||||
public PrintWriter getWriter() {
|
||||
return console.writer();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -105,16 +117,25 @@ public abstract class Terminal {
|
||||
|
||||
private static class SystemTerminal extends Terminal {
|
||||
|
||||
private final PrintWriter writer = newWriter();
|
||||
|
||||
SystemTerminal() {
|
||||
super(System.lineSeparator());
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason = "Writer for System.out")
|
||||
private static PrintWriter newWriter() {
|
||||
return new PrintWriter(System.out);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressForbidden(reason = "System#out")
|
||||
public void doPrint(String msg) {
|
||||
System.out.print(msg);
|
||||
System.out.flush();
|
||||
public PrintWriter getWriter() {
|
||||
return writer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String readText(String text) {
|
||||
doPrint(text);
|
||||
getWriter().print(text);
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
|
||||
try {
|
||||
return reader.readLine();
|
||||
|
@ -29,7 +29,7 @@ public class ShapesAvailability {
    static {
        boolean xSPATIAL4J_AVAILABLE;
        try {
            Class.forName("com.spatial4j.core.shape.impl.PointImpl");
            Class.forName("org.locationtech.spatial4j.shape.impl.PointImpl");
            xSPATIAL4J_AVAILABLE = true;
        } catch (Throwable t) {
            xSPATIAL4J_AVAILABLE = false;
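Illustrative sketch (not part of the commit): the availability probe must reference the relocated org.locationtech.spatial4j package, otherwise the optional dependency would be wrongly reported as absent even when it is on the classpath. A minimal standalone version of the same Class.forName probe pattern:

final class OptionalDependencyProbe {
    static boolean isPresent(String className) {
        try {
            Class.forName(className);
            return true;
        } catch (Throwable t) {
            return false;
        }
    }

    public static void main(String[] args) {
        System.out.println(isPresent("org.locationtech.spatial4j.shape.impl.PointImpl")); // new coordinates
        System.out.println(isPresent("com.spatial4j.core.shape.impl.PointImpl"));         // old coordinates, now absent
    }
}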
|
||||
|
@ -19,9 +19,9 @@
|
||||
|
||||
package org.elasticsearch.common.geo;
|
||||
|
||||
import com.spatial4j.core.context.SpatialContext;
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import com.spatial4j.core.shape.ShapeCollection;
|
||||
import org.locationtech.spatial4j.context.SpatialContext;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.ShapeCollection;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Circle;
|
||||
import org.locationtech.spatial4j.shape.Circle;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Rectangle;
|
||||
import org.locationtech.spatial4j.shape.Rectangle;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.geo.XShapeCollection;
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
import com.vividsolutions.jts.geom.Geometry;
|
||||
import com.vividsolutions.jts.geom.GeometryFactory;
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
import com.vividsolutions.jts.geom.Geometry;
|
||||
import com.vividsolutions.jts.geom.LineString;
|
||||
|
@ -19,8 +19,8 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Point;
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.Point;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.common.geo.XShapeCollection;
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.common.geo.XShapeCollection;
|
||||
|
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Point;
|
||||
import org.locationtech.spatial4j.shape.Point;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -19,8 +19,8 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.exception.InvalidShapeException;
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import org.locationtech.spatial4j.exception.InvalidShapeException;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
import com.vividsolutions.jts.geom.Geometry;
|
||||
import com.vividsolutions.jts.geom.GeometryFactory;
|
||||
|
@ -19,10 +19,10 @@
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.context.jts.JtsSpatialContext;
|
||||
import com.spatial4j.core.exception.InvalidShapeException;
|
||||
import com.spatial4j.core.shape.Shape;
|
||||
import com.spatial4j.core.shape.jts.JtsGeometry;
|
||||
import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
|
||||
import org.locationtech.spatial4j.exception.InvalidShapeException;
|
||||
import org.locationtech.spatial4j.shape.Shape;
|
||||
import org.locationtech.spatial4j.shape.jts.JtsGeometry;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
import com.vividsolutions.jts.geom.Geometry;
|
||||
import com.vividsolutions.jts.geom.GeometryFactory;
|
||||
@ -81,9 +81,9 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
|
||||
* this normally isn't allowed.
|
||||
*/
|
||||
protected final boolean multiPolygonMayOverlap = false;
|
||||
/** @see com.spatial4j.core.shape.jts.JtsGeometry#validate() */
|
||||
/** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#validate() */
|
||||
protected final boolean autoValidateJtsGeometry = true;
|
||||
/** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */
|
||||
/** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#index() */
|
||||
protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it.
|
||||
|
||||
protected ShapeBuilder() {
|
||||
|
@ -32,7 +32,7 @@ public abstract class ESLoggerFactory {
    public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
        new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
    public static final Setting<LogLevel> LOG_LEVEL_SETTING =
        Setting.dynamicKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
        Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);

    public static ESLogger getLogger(String prefix, String name) {
        prefix = prefix == null ? null : prefix.intern();
|
||||
|
@ -1,74 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.lucene;
|
||||
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.Weight;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Base implementation for a query which is cacheable at the index level but
|
||||
* not the segment level as usually expected.
|
||||
*/
|
||||
public abstract class IndexCacheableQuery extends Query {
|
||||
|
||||
private Object readerCacheKey;
|
||||
|
||||
@Override
|
||||
public Query rewrite(IndexReader reader) throws IOException {
|
||||
if (reader.getCoreCacheKey() != this.readerCacheKey) {
|
||||
IndexCacheableQuery rewritten = (IndexCacheableQuery) clone();
|
||||
rewritten.readerCacheKey = reader.getCoreCacheKey();
|
||||
return rewritten;
|
||||
}
|
||||
return super.rewrite(reader);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
return super.equals(obj)
|
||||
&& readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return 31 * super.hashCode() + Objects.hashCode(readerCacheKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
|
||||
if (readerCacheKey == null) {
|
||||
throw new IllegalStateException("Rewrite first");
|
||||
}
|
||||
if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) {
|
||||
throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting");
|
||||
}
|
||||
return doCreateWeight(searcher, needsScores);
|
||||
}
|
||||
|
||||
/** Create a {@link Weight} for this query.
|
||||
* @see Query#createWeight(IndexSearcher, boolean)
|
||||
*/
|
||||
public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException;
|
||||
}
|
@ -88,7 +88,7 @@ import java.util.Objects;
public class Lucene {
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
public static final String LATEST_CODEC = "Lucene54";
public static final String LATEST_CODEC = "Lucene60";

static {
Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class);
@ -236,13 +236,8 @@ public class Lucene {
protected Object doBody(String segmentFileName) throws IOException {
try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
final int format = input.readInt();
final int actualFormat;
if (format == CodecUtil.CODEC_MAGIC) {
// 4.0+
actualFormat = CodecUtil.checkHeaderNoMagic(input, "segments", SegmentInfos.VERSION_40, Integer.MAX_VALUE);
if (actualFormat >= SegmentInfos.VERSION_48) {
CodecUtil.checksumEntireFile(input);
}
CodecUtil.checksumEntireFile(input);
}
// legacy....
}
@ -382,7 +377,7 @@ public class Lucene {
writeMissingValue(out, comparatorSource.missingValue(sortField.getReverse()));
} else {
writeSortType(out, sortField.getType());
writeMissingValue(out, sortField.missingValue);
writeMissingValue(out, sortField.getMissingValue());
}
out.writeBoolean(sortField.getReverse());
}
@ -684,7 +679,7 @@ public class Lucene {
segmentsFileName = infos.getSegmentsFileName();
this.dir = dir;
userData = infos.getUserData();
files = Collections.unmodifiableCollection(infos.files(dir, true));
files = Collections.unmodifiableCollection(infos.files(true));
generation = infos.getGeneration();
segmentCount = infos.size();
}
@ -42,6 +42,7 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity.SimWeight;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
@ -186,9 +187,13 @@ public final class AllTermQuery extends Query {
float boost;
if (payload == null) {
boost = 1;
} else {
assert payload.length == 4;
} else if (payload.length == 1) {
boost = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
} else if (payload.length == 4) {
// TODO: for bw compat only, remove this in 6.0
boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
} else {
throw new IllegalStateException("Payloads are expected to have a length of 1 or 4 but got: " + payload);
}
payloadBoost += boost;
}
@ -221,7 +226,7 @@ public final class AllTermQuery extends Query {

@Override
public String toString(String field) {
return new TermQuery(term).toString(field) + ToStringUtils.boost(getBoost());
return new TermQuery(term).toString(field);
}

}
@ -25,11 +25,10 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;

import java.io.IOException;

import static org.apache.lucene.analysis.payloads.PayloadHelper.encodeFloat;

/**
 *
 */
@ -39,7 +38,7 @@ public final class AllTokenStream extends TokenFilter {
return new AllTokenStream(analyzer.tokenStream(allFieldName, allEntries), allEntries);
}

private final BytesRef payloadSpare = new BytesRef(new byte[4]);
private final BytesRef payloadSpare = new BytesRef(new byte[1]);

private final AllEntries allEntries;

@ -64,7 +63,7 @@ public final class AllTokenStream extends TokenFilter {
}
final float boost = allEntries.boost(offsetAttribute.startOffset());
if (boost != 1.0f) {
encodeFloat(boost, payloadSpare.bytes, payloadSpare.offset);
payloadSpare.bytes[0] = SmallFloat.floatToByte315(boost);
payloadAttribute.setPayload(payloadSpare);
} else {
payloadAttribute.setPayload(null);
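The two hunks above switch the _all field boost payload from the old 4-byte PayloadHelper float to a single SmallFloat byte: AllTokenStream now writes one byte per boosted token, and AllTermQuery accepts both lengths while indices written before the change are still around. A rough round-trip sketch (illustrative value, not from this commit):

// hypothetical illustration of the 1-byte "byte315" encoding used above
float boost = 2.5f;
byte encoded = SmallFloat.floatToByte315(boost);    // what AllTokenStream stores in the payload
float decoded = SmallFloat.byte315ToFloat(encoded); // what AllTermQuery reads back
// decoded is only an approximation of 2.5f, which is why the 4-byte branch is kept
// for backwards compatibility with older indices.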
@ -30,7 +30,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@ -76,7 +75,7 @@ public class FilterableTermsEnum extends TermsEnum {
this.docsEnumFlag = docsEnumFlag;
if (filter == null) {
// Important - need to use the doc count that includes deleted docs
// or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
// or we have this issue: https://github.com/elastic/elasticsearch/issues/7951
numDocs = reader.maxDoc();
}
List<LeafReaderContext> leaves = reader.leaves();
@ -118,9 +117,7 @@ public class FilterableTermsEnum extends TermsEnum {
};
}

BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
builder.or(docs);
bits = builder.build().bits();
bits = BitSet.of(docs, context.reader().maxDoc());

// Count how many docs are in our filtered set
// TODO make this lazy-loaded only for those that need it?
@ -30,7 +30,6 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.ArrayList;
@ -51,7 +50,7 @@ public class MultiPhrasePrefixQuery extends Query {
/**
 * Sets the phrase slop for this query.
 *
 * @see org.apache.lucene.search.PhraseQuery#setSlop(int)
 * @see org.apache.lucene.search.PhraseQuery.Builder#setSlop(int)
 */
public void setSlop(int s) {
slop = s;
@ -64,7 +63,7 @@ public class MultiPhrasePrefixQuery extends Query {
/**
 * Sets the phrase slop for this query.
 *
 * @see org.apache.lucene.search.PhraseQuery#getSlop()
 * @see org.apache.lucene.search.PhraseQuery.Builder#getSlop()
 */
public int getSlop() {
return slop;
@ -73,7 +72,7 @@ public class MultiPhrasePrefixQuery extends Query {
/**
 * Add a single term at the next position in the phrase.
 *
 * @see org.apache.lucene.search.PhraseQuery#add(Term)
 * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term)
 */
public void add(Term term) {
add(new Term[]{term});
@ -83,7 +82,7 @@ public class MultiPhrasePrefixQuery extends Query {
 * Add multiple terms at the next position in the phrase. Any of the terms
 * may match.
 *
 * @see org.apache.lucene.search.PhraseQuery#add(Term)
 * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term)
 */
public void add(Term[] terms) {
int position = 0;
@ -98,7 +97,7 @@ public class MultiPhrasePrefixQuery extends Query {
 *
 * @param terms the terms
 * @param position the position of the terms provided as argument
 * @see org.apache.lucene.search.PhraseQuery#add(Term, int)
 * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term, int)
 */
public void add(Term[] terms, int position) {
if (termArrays.size() == 0)
@ -231,8 +230,6 @@ public class MultiPhrasePrefixQuery extends Query {
buffer.append(slop);
}

buffer.append(ToStringUtils.boost(getBoost()));

return buffer.toString();
}

@ -23,7 +23,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.PrefixQuery;
@ -132,11 +131,7 @@ public class Queries {
builder.add(clause);
}
builder.setMinimumNumberShouldMatch(msm);
BooleanQuery bq = builder.build();
if (query.getBoost() != 1f) {
return new BoostQuery(bq, query.getBoost());
}
return bq;
return builder.build();
} else {
return query;
}
@ -29,7 +29,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -102,7 +101,7 @@ public class FiltersFunctionScoreQuery extends Query {
}
}

Query subQuery;
final Query subQuery;
final FilterFunction[] filterFunctions;
final ScoreMode scoreMode;
final float maxBoost;
@ -136,9 +135,7 @@ public class FiltersFunctionScoreQuery extends Query {
Query newQ = subQuery.rewrite(reader);
if (newQ == subQuery)
return this;
FiltersFunctionScoreQuery bq = (FiltersFunctionScoreQuery) this.clone();
bq.subQuery = newQ;
return bq;
return new FiltersFunctionScoreQuery(newQ, scoreMode, filterFunctions, maxBoost, minScore, combineFunction);
}

@Override
@ -355,7 +352,6 @@ public class FiltersFunctionScoreQuery extends Query {
sb.append("{filter(").append(filterFunction.filter).append("), function [").append(filterFunction.function).append("]}");
}
sb.append("])");
sb.append(ToStringUtils.boost(getBoost()));
return sb.toString();
}

@ -28,7 +28,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.Objects;
@ -41,7 +40,7 @@ public class FunctionScoreQuery extends Query {

public static final float DEFAULT_MAX_BOOST = Float.MAX_VALUE;

Query subQuery;
final Query subQuery;
final ScoreFunction function;
final float maxBoost;
final CombineFunction combineFunction;
@ -84,9 +83,7 @@ public class FunctionScoreQuery extends Query {
if (newQ == subQuery) {
return this;
}
FunctionScoreQuery bq = (FunctionScoreQuery) this.clone();
bq.subQuery = newQ;
return bq;
return new FunctionScoreQuery(newQ, function, minScore, combineFunction, maxBoost);
}

@Override
@ -205,7 +202,6 @@ public class FunctionScoreQuery extends Query {
public String toString(String field) {
StringBuilder sb = new StringBuilder();
sb.append("function score (").append(subQuery.toString(field)).append(",function=").append(function).append(')');
sb.append(ToStringUtils.boost(getBoost()));
return sb.toString();
}

@ -30,7 +30,7 @@ public class FilterIndexOutput extends IndexOutput {
protected final IndexOutput out;

public FilterIndexOutput(String resourceDescription, IndexOutput out) {
super(resourceDescription);
super(resourceDescription, out.getName());
this.out = out;
}

@ -296,12 +296,25 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
}
for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
if (entry.getValue().match(key)) {
assert assertMatcher(key, 1);
return entry.getValue().getConcreteSetting(key);
}
}
return null;
}

private boolean assertMatcher(String key, int numComplexMatchers) {
List<Setting<?>> list = new ArrayList<>();
for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
if (entry.getValue().match(key)) {
list.add(entry.getValue().getConcreteSetting(key));
}
}
assert list.size() == numComplexMatchers : "Expected " + numComplexMatchers + " complex matchers to match key [" +
key + "] but got: " + list.toString();
return true;
}

/**
 * Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
 */
@ -66,7 +66,7 @@ import java.util.stream.Collectors;
 * </pre>
 */
public class Setting<T> extends ToXContentToBytes {
private final String key;
private final Key key;
protected final Function<Settings, String> defaultValue;
private final Function<String, T> parser;
private final boolean dynamic;
@ -80,7 +80,7 @@ public class Setting<T> extends ToXContentToBytes {
 * @param dynamic true iff this setting can be dynamically updateable
 * @param scope the scope of this setting
 */
public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null";
this.key = key;
this.defaultValue = defaultValue;
@ -89,6 +89,18 @@ public class Setting<T> extends ToXContentToBytes {
this.scope = scope;
}

/**
 * Creates a new Setting instance
 * @param key the settings key for this setting.
 * @param defaultValue a default value function that returns the default values string representation.
 * @param parser a parser that parses the string rep into a complex datatype.
 * @param dynamic true iff this setting can be dynamically updateable
 * @param scope the scope of this setting
 */
public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
this(new SimpleKey(key), defaultValue, parser, dynamic, scope);
}

/**
 * Creates a new Setting instance
 * @param key the settings key for this setting.
@ -109,6 +121,13 @@ public class Setting<T> extends ToXContentToBytes {
 * @see #isGroupSetting()
 */
public final String getKey() {
return key.toString();
}

/**
 * Returns the original representation of a setting key.
 */
public final Key getRawKey() {
return key;
}

@ -159,7 +178,7 @@ public class Setting<T> extends ToXContentToBytes {
 * Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
 */
public final boolean exists(Settings settings) {
return settings.get(key) != null;
return settings.get(getKey()) != null;
}

/**
@ -186,7 +205,7 @@ public class Setting<T> extends ToXContentToBytes {
 * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value.
 */
public String getRaw(Settings settings) {
return settings.get(key, defaultValue.apply(settings));
return settings.get(getKey(), defaultValue.apply(settings));
}

/**
@ -194,14 +213,14 @@ public class Setting<T> extends ToXContentToBytes {
 * given key is part of the settings group.
 * @see #isGroupSetting()
 */
public boolean match(String toTest) {
return key.equals(toTest);
public final boolean match(String toTest) {
return key.match(toTest);
}

@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("key", key);
builder.field("key", key.toString());
builder.field("type", scope.name());
builder.field("dynamic", dynamic);
builder.field("is_group_setting", isGroupSetting());
@ -387,6 +406,14 @@ public class Setting<T> extends ToXContentToBytes {
return value;
}

public static TimeValue parseTimeValue(String s, TimeValue minValue, String key) {
TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
if (timeValue.millis() < minValue.millis()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return timeValue;
}

public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
}
@ -431,19 +458,13 @@ public class Setting<T> extends ToXContentToBytes {
Function<String, List<T>> parser = (s) ->
parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());

return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
return new Setting<List<T>>(new ListKey(key), (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
@Override
public String getRaw(Settings settings) {
String[] array = settings.getAsArray(key, null);
String[] array = settings.getAsArray(getKey(), null);
return array == null ? defaultValue.apply(settings) : arrayToParsableString(array);
}

@Override
public boolean match(String toTest) {
return pattern.matcher(toTest).matches();
}

@Override
boolean hasComplexMatcher() {
return true;
@ -486,11 +507,7 @@ public class Setting<T> extends ToXContentToBytes {
}

public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) {
if (key.endsWith(".") == false) {
throw new IllegalArgumentException("key must end with a '.'");
}
return new Setting<Settings>(key, "", (s) -> null, dynamic, scope) {

return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) {
@Override
public boolean isGroupSetting() {
return true;
@ -498,12 +515,7 @@ public class Setting<T> extends ToXContentToBytes {

@Override
public Settings get(Settings settings) {
return settings.getByPrefix(key);
}

@Override
public boolean match(String toTest) {
return Regex.simpleMatch(key + "*", toTest);
return settings.getByPrefix(getKey());
}

@Override
@ -549,13 +561,7 @@ public class Setting<T> extends ToXContentToBytes {
}

public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, defaultValue, (s) -> {
TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
if (timeValue.millis() < minValue.millis()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return timeValue;
}, dynamic, scope);
return new Setting<>(key, defaultValue, (s) -> parseTimeValue(s, minValue, key), dynamic, scope);
}

public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
@ -595,10 +601,27 @@ public class Setting<T> extends ToXContentToBytes {

/**
 * This setting type allows to validate settings that have the same type and a common prefix. For instance feature.${type}=[true|false]
 * can easily be added with this setting. Yet, dynamic key settings don't support updaters our of the box unless {@link #getConcreteSetting(String)}
 * is used to pull the updater.
 * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless
 * {@link #getConcreteSetting(String)} is used to pull the updater.
 */
public static <T> Setting<T> dynamicKeySetting(String key, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
public static <T> Setting<T> prefixKeySetting(String prefix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, dynamic, scope);
}

/**
 * This setting type allows to validate settings that have the same type and a common prefix and suffix. For instance
 * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters
 * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater.
 */
public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, dynamic, scope);
}

public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, dynamic, scope);
}

public static <T> Setting<T> affixKeySetting(AffixKey key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return new Setting<T>(key, defaultValue, parser, dynamic, scope) {

@Override
@ -606,14 +629,9 @@ public class Setting<T> extends ToXContentToBytes {
return true;
}

@Override
public boolean match(String toTest) {
return toTest.startsWith(getKey());
}

@Override
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
throw new UnsupportedOperationException("dynamic settings can't be updated use #getConcreteSetting for updating");
throw new UnsupportedOperationException("Affix settings can't be updated. Use #getConcreteSetting for updating.");
}

@Override
@ -621,9 +639,145 @@ public class Setting<T> extends ToXContentToBytes {
if (match(key)) {
return new Setting<>(key, defaultValue, parser, dynamic, scope);
} else {
throw new IllegalArgumentException("key must match setting but didn't ["+key +"]");
throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't.");
}
}
};
}

public interface Key {
boolean match(String key);
}

public static class SimpleKey implements Key {
protected final String key;

public SimpleKey(String key) {
this.key = key;
}

@Override
public boolean match(String key) {
return this.key.equals(key);
}

@Override
public String toString() {
return key;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SimpleKey simpleKey = (SimpleKey) o;
return Objects.equals(key, simpleKey.key);
}

@Override
public int hashCode() {
return Objects.hash(key);
}
}

public static final class GroupKey extends SimpleKey {
public GroupKey(String key) {
super(key);
if (key.endsWith(".") == false) {
throw new IllegalArgumentException("key must end with a '.'");
}
}

@Override
public boolean match(String toTest) {
return Regex.simpleMatch(key + "*", toTest);
}
}

public static final class ListKey extends SimpleKey {
private final Pattern pattern;

public ListKey(String key) {
super(key);
this.pattern = Pattern.compile(Pattern.quote(key) + "(\\.\\d+)?");
}

@Override
public boolean match(String toTest) {
return pattern.matcher(toTest).matches();
}
}

public static final class AffixKey implements Key {
public static AffixKey withPrefix(String prefix) {
return new AffixKey(prefix, null);
}

public static AffixKey withAdfix(String prefix, String suffix) {
return new AffixKey(prefix, suffix);
}

private final String prefix;
private final String suffix;

public AffixKey(String prefix, String suffix) {
assert prefix != null || suffix != null: "Either prefix or suffix must be non-null";
this.prefix = prefix;
this.suffix = suffix;
}

@Override
public boolean match(String key) {
boolean match = true;
if (prefix != null) {
match = key.startsWith(prefix);
}
if (suffix != null) {
match = match && key.endsWith(suffix);
}
return match;
}

public SimpleKey toConcreteKey(String missingPart) {
StringBuilder key = new StringBuilder();
if (prefix != null) {
key.append(prefix);
}
key.append(missingPart);
if (suffix != null) {
key.append(".");
key.append(suffix);
}
return new SimpleKey(key.toString());
}

@Override
public String toString() {
StringBuilder sb = new StringBuilder();
if (prefix != null) {
sb.append(prefix);
}
if (suffix != null) {
sb.append("*");
sb.append(suffix);
sb.append(".");
}
return sb.toString();
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AffixKey that = (AffixKey) o;
return Objects.equals(prefix, that.prefix) &&
Objects.equals(suffix, that.suffix);
}

@Override
public int hashCode() {
return Objects.hash(prefix, suffix);
}
}
}
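For context on the refactoring above, concrete keys, group keys, list keys, and prefix/suffix ("affix") keys now all go through the Key interface instead of ad-hoc match() overrides on each factory method. A minimal usage sketch of the new prefix-key API (the setting name and scope are assumptions for illustration, not part of this commit):

// hypothetical example of the prefix-key setting introduced above
Setting<Boolean> featureSetting =
    Setting.prefixKeySetting("feature.", "false", Boolean::parseBoolean, false, Setting.Scope.CLUSTER);
Setting<Boolean> concrete = featureSetting.getConcreteSetting("feature.my_plugin");
boolean enabled = concrete.get(Settings.builder().put("feature.my_plugin", "true").build());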
@ -41,9 +41,9 @@ public class BigArrays {

/** Page size in bytes: 16KB */
public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_BYTE;
public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_INT;
public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_LONG;
public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES;
public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Integer.BYTES;
public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Long.BYTES;
public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;

/** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. */
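Switching to the JDK's *.BYTES constants above does not change the page geometry; with the 16KB page the values still work out to:

// PAGE_SIZE_IN_BYTES = 1 << 14                    = 16384
// BYTE_PAGE_SIZE     = 16384 / 1                  = 16384 elements per page
// INT_PAGE_SIZE      = 16384 / Integer.BYTES (4)  = 4096 elements per page
// LONG_PAGE_SIZE     = 16384 / Long.BYTES (8)     = 2048 elements per page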
@ -490,7 +490,7 @@ public class BigArrays {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, BYTE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_BYTE);
final long newSize = overSize(minSize, BYTE_PAGE_SIZE, 1);
return resize(array, newSize);
}

@ -573,7 +573,7 @@ public class BigArrays {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_INT);
final long newSize = overSize(minSize, INT_PAGE_SIZE, Integer.BYTES);
return resize(array, newSize);
}

@ -623,7 +623,7 @@ public class BigArrays {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG);
final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
return resize(array, newSize);
}

@ -670,7 +670,7 @@ public class BigArrays {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG);
final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
return resize(array, newSize);
}

@ -717,7 +717,7 @@ public class BigArrays {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_FLOAT);
final long newSize = overSize(minSize, INT_PAGE_SIZE, Float.BYTES);
return resize(array, newSize);
}

@ -127,7 +127,7 @@ final class BigByteArray extends AbstractBigArray implements ByteArray {

@Override
protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_BYTE;
return 1;
}

/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

@ -71,7 +71,7 @@ final class BigDoubleArray extends AbstractBigArray implements DoubleArray {

@Override
protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_INT;
return Integer.BYTES;
}

/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

@ -71,7 +71,7 @@ final class BigFloatArray extends AbstractBigArray implements FloatArray {

@Override
protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_FLOAT;
return Float.BYTES;
}

/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

@ -88,7 +88,7 @@ final class BigIntArray extends AbstractBigArray implements IntArray {

@Override
protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_INT;
return Integer.BYTES;
}

/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

@ -70,7 +70,7 @@ final class BigLongArray extends AbstractBigArray implements LongArray {

@Override
protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_LONG;
return Long.BYTES;
}

/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

@ -65,7 +65,7 @@ final class BigObjectArray<T> extends AbstractBigArray implements ObjectArray<T>

@Override
protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_INT;
return Integer.BYTES;
}

/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

@ -388,7 +388,7 @@ public class BloomFilter {
}

public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_LONG * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
return Long.BYTES * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
}
}

@ -333,7 +333,7 @@ public class CollectionUtils {
assert indices.length >= numValues;
if (numValues > 1) {
new InPlaceMergeSorter() {
final Comparator<BytesRef> comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
final Comparator<BytesRef> comparator = Comparator.naturalOrder();
@Override
protected int compare(int i, int j) {
return comparator.compare(bytes.get(scratch, indices[i]), bytes.get(scratch1, indices[j]));
@ -0,0 +1,105 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.logging.ESLogger;

import java.util.Objects;

/**
 * {@code AbstractLifecycleRunnable} is a service-lifecycle aware {@link AbstractRunnable}.
 * <p>
 * This simplifies the running and rescheduling of {@link Lifecycle}-based {@code Runnable}s.
 */
public abstract class AbstractLifecycleRunnable extends AbstractRunnable {
/**
 * The monitored lifecycle for the associated service.
 */
private final Lifecycle lifecycle;
/**
 * The service's logger (note: this is passed in!).
 */
private final ESLogger logger;

/**
 * {@link AbstractLifecycleRunnable} must be aware of the actual {@code lifecycle} to react properly.
 *
 * @param lifecycle The lifecycle to react too
 * @param logger The logger to use when logging
 * @throws NullPointerException if any parameter is {@code null}
 */
public AbstractLifecycleRunnable(Lifecycle lifecycle, ESLogger logger) {
this.lifecycle = Objects.requireNonNull(lifecycle, "lifecycle must not be null");
this.logger = Objects.requireNonNull(logger, "logger must not be null");
}

/**
 * {@inheritDoc}
 * <p>
 * This invokes {@link #doRunInLifecycle()} <em>only</em> if the {@link #lifecycle} is not stopped or closed. Otherwise it exits
 * immediately.
 */
@Override
protected final void doRun() throws Exception {
// prevent execution if the service is stopped
if (lifecycle.stoppedOrClosed()) {
logger.trace("lifecycle is stopping. exiting");
return;
}

doRunInLifecycle();
}

/**
 * Perform runnable logic, but only if the {@link #lifecycle} is <em>not</em> stopped or closed.
 *
 * @throws InterruptedException if the run method throws an {@link InterruptedException}
 */
protected abstract void doRunInLifecycle() throws Exception;

/**
 * {@inheritDoc}
 * <p>
 * This overrides the default behavior of {@code onAfter} to add the caveat that it only runs if the {@link #lifecycle} is <em>not</em>
 * stopped or closed.
 * <p>
 * Note: this does not guarantee that it won't be stopped concurrently as it invokes {@link #onAfterInLifecycle()},
 * but it's a solid attempt at preventing it. For those that use this for rescheduling purposes, the next invocation would be
 * effectively cancelled immediately if that's the case.
 *
 * @see #onAfterInLifecycle()
 */
@Override
public final void onAfter() {
if (lifecycle.stoppedOrClosed() == false) {
onAfterInLifecycle();
}
}

/**
 * This method is invoked in the finally block of the run method, but it is only executed if the {@link #lifecycle} is <em>not</em>
 * stopped or closed.
 * <p>
 * This method is most useful for rescheduling the next iteration of the current runnable.
 */
protected void onAfterInLifecycle() {
// nothing by default
}
}
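A minimal sketch of how the new class above could be used for a self-rescheduling task; the sampler name, interval, and ThreadPool wiring are assumptions for illustration, not part of this commit:

// hypothetical subclass, not part of this commit
public class NodeStatsSampler extends AbstractLifecycleRunnable {
    private final ThreadPool threadPool;

    public NodeStatsSampler(Lifecycle lifecycle, ESLogger logger, ThreadPool threadPool) {
        super(lifecycle, logger);
        this.threadPool = threadPool;
    }

    @Override
    protected void doRunInLifecycle() throws Exception {
        // do the periodic work here; only invoked while the owning service is running
    }

    @Override
    protected void onAfterInLifecycle() {
        // reschedule the next iteration; skipped automatically once the lifecycle stops
        threadPool.schedule(TimeValue.timeValueSeconds(30), ThreadPool.Names.GENERIC, this);
    }

    @Override
    public void onFailure(Throwable t) {
        // AbstractRunnable requires an explicit failure handler
    }
}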
@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.rest.RestStatus;
@ -206,7 +207,20 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
}
} else {
if (recovered.compareAndSet(false, true)) {
threadPool.generic().execute(() -> gateway.performStateRecovery(recoveryListener));
threadPool.generic().execute(new AbstractRunnable() {
@Override
public void onFailure(Throwable t) {
logger.warn("Recovery failed", t);
// we reset `recovered` in the listener don't reset it here otherwise there might be a race
// that resets it to false while a new recover is already running?
recoveryListener.onFailure("state recovery failed: " + t.getMessage());
}

@Override
protected void doRun() throws Exception {
gateway.performStateRecovery(recoveryListener);
}
});
}
}
}
@ -116,7 +116,7 @@ public abstract class MetaDataStateFormat<T> {
final Path finalStatePath = stateLocation.resolve(fileName);
try {
final String resourceDesc = "MetaDataStateFormat.write(path=\"" + tmpStatePath + "\")";
try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc, Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) {
try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc, fileName, Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) {
CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION);
out.writeInt(format.index());
out.writeLong(version);
@ -113,7 +113,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
final boolean enoughAllocationsFound;

if (lastActiveAllocationIds.isEmpty()) {
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_3_0_0) : "trying to allocated a primary with an empty allocation id set, but index is new";
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0) : "trying to allocated a primary with an empty allocation id set, but index is new";
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
@ -123,7 +123,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
} else {
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_3_0_0, nodeShardsResult.allocationsFound, shard);
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0, nodeShardsResult.allocationsFound, shard);
} else {
assert lastActiveAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
@ -20,8 +20,8 @@
package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
@ -300,7 +300,7 @@ public class Analysis {
 * <p>Although most analyzers generate character terms (CharTermAttribute),
 * some token only contain binary terms (BinaryTermAttribute,
 * CharTermAttribute being a special type of BinaryTermAttribute), such as
 * {@link NumericTokenStream} and unsuitable for highlighting and
 * {@link LegacyNumericTokenStream} and unsuitable for highlighting and
 * more-like-this queries which expect character terms.</p>
 */
public static boolean isCharacterTokenStream(TokenStream tokenStream) {
@ -127,7 +127,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
}
if (analyzers.containsKey("default_index")) {
final Version createdVersion = indexSettings.getIndexVersionCreated();
if (createdVersion.onOrAfter(Version.V_3_0_0)) {
if (createdVersion.onOrAfter(Version.V_5_0_0)) {
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index().getName() + "]");
} else {
deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index().getName());
@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -43,14 +41,11 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
public static final int SIDE_BACK = 2;
private final int side;

private org.elasticsearch.Version esVersion;

public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
this.side = parseSide(settings.get("side", "front"));
this.esVersion = org.elasticsearch.Version.indexCreated(indexSettings.getSettings());
}

static int parseSide(String side) {
@ -70,15 +65,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
result = new ReverseStringFilter(result);
}

if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
/*
 * We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version.
 * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version.
 */
result = new EdgeNGramTokenFilter(result, minGram, maxGram);
} else {
result = new Lucene43EdgeNGramTokenFilter(result, minGram, maxGram);
}
result = new EdgeNGramTokenFilter(result, minGram, maxGram);

// side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect
if (side == SIDE_BACK) {
@ -21,9 +21,7 @@ package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -33,55 +31,33 @@ import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenC
/**
 *
 */
@SuppressWarnings("deprecation")
public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {

private final int minGram;

private final int maxGram;

private final Lucene43EdgeNGramTokenizer.Side side;

private final CharMatcher matcher;

protected org.elasticsearch.Version esVersion;


public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
this.side = Lucene43EdgeNGramTokenizer.Side.getSide(settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
this.esVersion = indexSettings.getIndexVersionCreated();
}

@Override
public Tokenizer create() {
if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
/*
 * We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version.
 * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version.
 */
if (side == Lucene43EdgeNGramTokenizer.Side.BACK) {
throw new IllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use"
+ " an older compatibility version (<=4.2) but beware that it might cause highlighting bugs."
+ " To obtain the same behavior as the previous version please use \"edgeNGram\" filter which still supports side=back"
+ " in combination with a \"keyword\" tokenizer");
}
final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // always use 4.4 or higher
if (matcher == null) {
return new EdgeNGramTokenizer(minGram, maxGram);
} else {
return new EdgeNGramTokenizer(minGram, maxGram) {
@Override
protected boolean isTokenChar(int chr) {
return matcher.isTokenChar(chr);
}
};
}
if (matcher == null) {
return new EdgeNGramTokenizer(minGram, maxGram);
} else {
return new Lucene43EdgeNGramTokenizer(side, minGram, maxGram);
return new EdgeNGramTokenizer(minGram, maxGram) {
@Override
protected boolean isTokenChar(int chr) {
return matcher.isTokenChar(chr);
}
};
}
}
}
@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
@ -40,9 +39,6 @@ import org.elasticsearch.index.IndexSettings;
 * <li>{@value #KEEP_WORDS_PATH_KEY} an reference to a file containing the words
 * / tokens to keep. Note: this is an alternative to {@value #KEEP_WORDS_KEY} if
 * both are set an exception will be thrown.</li>
 * <li>{@value #ENABLE_POS_INC_KEY} <code>true</code> iff the filter should
 * maintain position increments for dropped tokens. The default is
 * <code>true</code>.</li>
 * <li>{@value #KEEP_WORDS_CASE_KEY} to use case sensitive keep words. The
 * default is <code>false</code> which corresponds to case-sensitive.</li>
 * </ul>
@ -51,10 +47,11 @@ import org.elasticsearch.index.IndexSettings;
 */
public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet keepWords;
private final boolean enablePositionIncrements;
private static final String KEEP_WORDS_KEY = "keep_words";
private static final String KEEP_WORDS_PATH_KEY = KEEP_WORDS_KEY + "_path";
private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc

// unsupported ancient option
private static final String ENABLE_POS_INC_KEY = "enable_position_increments";

public KeepWordFilterFactory(IndexSettings indexSettings,
@ -68,26 +65,14 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `"
+ KEEP_WORDS_PATH_KEY + "` to be configured");
}
if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) {
throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use"
+ " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
if (settings.get(ENABLE_POS_INC_KEY) != null) {
throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
}
enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);

this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY);

}

@Override
public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_4)) {
return new KeepWordFilter(tokenStream, keepWords);
} else {
@SuppressWarnings("deprecation")
final TokenStream filter = new Lucene43KeepWordFilter(enablePositionIncrements, tokenStream, keepWords);
return filter;
}
return new KeepWordFilter(tokenStream, keepWords);
}


}
@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.LengthFilter;
import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,28 +32,21 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory {

private final int min;
private final int max;
private final boolean enablePositionIncrements;

// ancient unsupported option
private static final String ENABLE_POS_INC_KEY = "enable_position_increments";

public LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
min = settings.getAsInt("min", 0);
max = settings.getAsInt("max", Integer.MAX_VALUE);
if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) {
throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use"
+ " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
if (settings.get(ENABLE_POS_INC_KEY) != null) {
throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
}
enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
}

@Override
public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_4)) {
return new LengthFilter(tokenStream, min, max);
} else {
@SuppressWarnings("deprecation")
final TokenStream filter = new Lucene43LengthFilter(enablePositionIncrements, tokenStream, min, max);
return filter;
}
return new LengthFilter(tokenStream, min, max);
}
}
@ -20,9 +20,7 @@
package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.Lucene43NGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -44,14 +42,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
}

@SuppressWarnings("deprecation")
@Override
public TokenStream create(TokenStream tokenStream) {
final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // we supported it since 4.3
if (version.onOrAfter(Version.LUCENE_4_3)) {
return new NGramTokenFilter(tokenStream, minGram, maxGram);
} else {
return new Lucene43NGramTokenFilter(tokenStream, minGram, maxGram);
}
return new NGramTokenFilter(tokenStream, minGram, maxGram);
}
}
@ -19,7 +19,7 @@
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
|
||||
import org.apache.lucene.analysis.NumericTokenStream;
|
||||
import org.apache.lucene.analysis.LegacyNumericTokenStream;
|
||||
import org.joda.time.format.DateTimeFormatter;
|
||||
|
||||
import java.io.IOException;
|
||||
@ -30,11 +30,11 @@ import java.io.IOException;
|
||||
public class NumericDateTokenizer extends NumericTokenizer {
|
||||
|
||||
public NumericDateTokenizer(int precisionStep, char[] buffer, DateTimeFormatter dateTimeFormatter) throws IOException {
|
||||
super(new NumericTokenStream(precisionStep), buffer, dateTimeFormatter);
|
||||
super(new LegacyNumericTokenStream(precisionStep), buffer, dateTimeFormatter);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void setValue(NumericTokenStream tokenStream, String value) {
|
||||
protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
|
||||
tokenStream.setLongValue(((DateTimeFormatter) extra).parseMillis(value));
|
||||
}
|
||||
}
|
@ -19,7 +19,7 @@

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;

import java.io.IOException;

@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericDoubleTokenizer extends NumericTokenizer {

    public NumericDoubleTokenizer(int precisionStep, char[] buffer) throws IOException {
        super(new NumericTokenStream(precisionStep), buffer, null);
        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
    }

    @Override
    protected void setValue(NumericTokenStream tokenStream, String value) {
    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
        tokenStream.setDoubleValue(Double.parseDouble(value));
    }
}
@ -19,7 +19,7 @@

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;

import java.io.IOException;

@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericFloatTokenizer extends NumericTokenizer {

    public NumericFloatTokenizer(int precisionStep, char[] buffer) throws IOException {
        super(new NumericTokenStream(precisionStep), buffer, null);
        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
    }

    @Override
    protected void setValue(NumericTokenStream tokenStream, String value) {
    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
        tokenStream.setFloatValue(Float.parseFloat(value));
    }
}
@ -19,7 +19,7 @@

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;

import java.io.IOException;

@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericIntegerTokenizer extends NumericTokenizer {

    public NumericIntegerTokenizer(int precisionStep, char[] buffer) throws IOException {
        super(new NumericTokenStream(precisionStep), buffer, null);
        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
    }

    @Override
    protected void setValue(NumericTokenStream tokenStream, String value) {
    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
        tokenStream.setIntValue(Integer.parseInt(value));
    }
}
@ -19,7 +19,7 @@

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;

import java.io.IOException;

@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericLongTokenizer extends NumericTokenizer {

    public NumericLongTokenizer(int precisionStep, char[] buffer) throws IOException {
        super(new NumericTokenStream(precisionStep), buffer, null);
        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
    }

    @Override
    protected void setValue(NumericTokenStream tokenStream, String value) {
    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
        tokenStream.setLongValue(Long.parseLong(value));
    }
}
@ -19,7 +19,7 @@

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeFactory;
@ -45,12 +45,12 @@ public abstract class NumericTokenizer extends Tokenizer {
        };
    }

    private final NumericTokenStream numericTokenStream;
    private final LegacyNumericTokenStream numericTokenStream;
    private final char[] buffer;
    protected final Object extra;
    private boolean started;

    protected NumericTokenizer(NumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException {
    protected NumericTokenizer(LegacyNumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException {
        super(delegatingAttributeFactory(numericTokenStream));
        this.numericTokenStream = numericTokenStream;
        // Add attributes from the numeric token stream, this works fine because the attribute factory delegates to numericTokenStream
@ -95,5 +95,5 @@ public abstract class NumericTokenizer extends Tokenizer {
        numericTokenStream.close();
    }

    protected abstract void setValue(NumericTokenStream tokenStream, String value);
    protected abstract void setValue(LegacyNumericTokenStream tokenStream, String value);
}
@ -40,13 +40,7 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analy
    public PatternAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        super(indexSettings, name, settings);

        Version esVersion = indexSettings.getIndexVersionCreated();
        final CharArraySet defaultStopwords;
        if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
            defaultStopwords = CharArraySet.EMPTY_SET;
        } else {
            defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
        }
        final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
        boolean lowercase = settings.getAsBoolean("lowercase", true);
        CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);

@ -28,7 +28,6 @@ import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
@ -64,12 +63,7 @@ public final class SnowballAnalyzer extends Analyzer {
     and a {@link SnowballFilter} */
    @Override
    public TokenStreamComponents createComponents(String fieldName) {
        final Tokenizer tokenizer;
        if (getVersion().onOrAfter(Version.LUCENE_4_7_0)) {
            tokenizer = new StandardTokenizer();
        } else {
            tokenizer = new StandardTokenizer40();
        }
        final Tokenizer tokenizer = new StandardTokenizer();
        TokenStream result = tokenizer;
        // remove the possessive 's for english stemmers
        if (name.equals("English") || name.equals("Porter") || name.equals("Lovins"))
@ -33,18 +33,10 @@ import org.elasticsearch.index.IndexSettings;
public class StandardAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardAnalyzer> {

    private final StandardAnalyzer standardAnalyzer;
    private final Version esVersion;

    public StandardAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        super(indexSettings, name, settings);
        this.esVersion = indexSettings.getIndexVersionCreated();
        final CharArraySet defaultStopwords;
        if (esVersion.onOrAfter(Version.V_1_0_0_Beta1)) {
            defaultStopwords = CharArraySet.EMPTY_SET;
        } else {
            defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
        }

        final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
        CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
        int maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
        standardAnalyzer = new StandardAnalyzer(stopWords);
@ -26,10 +26,8 @@ import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
import org.apache.lucene.util.Version;

public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {

@ -47,12 +45,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {

    @Override
    protected TokenStreamComponents createComponents(final String fieldName) {
        final Tokenizer src;
        if (getVersion().onOrAfter(Version.LUCENE_4_7_0)) {
            src = new StandardTokenizer();
        } else {
            src = new StandardTokenizer40();
        }
        final Tokenizer src = new StandardTokenizer();
        TokenStream tok = new StandardFilter(src);
        tok = new LowerCaseFilter(tok);
        if (!stopwords.isEmpty()) {
@ -32,17 +32,10 @@ import org.elasticsearch.index.IndexSettings;
public class StandardHtmlStripAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardHtmlStripAnalyzer> {

    private final StandardHtmlStripAnalyzer analyzer;
    private final Version esVersion;

    public StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
        super(indexSettings, name, settings);
        this.esVersion = indexSettings.getIndexVersionCreated();
        final CharArraySet defaultStopwords;
        if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
            defaultStopwords = CharArraySet.EMPTY_SET;
        } else {
            defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
        }
        final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
        CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
        analyzer = new StandardHtmlStripAnalyzer(stopWords);
        analyzer.setVersion(version);
@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -41,14 +39,8 @@ public class StandardTokenizerFactory extends AbstractTokenizerFactory {

    @Override
    public Tokenizer create() {
        if (version.onOrAfter(Version.LUCENE_4_7_0)) {
            StandardTokenizer tokenizer = new StandardTokenizer();
            tokenizer.setMaxTokenLength(maxTokenLength);
            return tokenizer;
        } else {
            StandardTokenizer40 tokenizer = new StandardTokenizer40();
            tokenizer.setMaxTokenLength(maxTokenLength);
            return tokenizer;
        }
        StandardTokenizer tokenizer = new StandardTokenizer();
        tokenizer.setMaxTokenLength(maxTokenLength);
        return tokenizer;
    }
}
@ -122,11 +122,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {

        // English stemmers
        } else if ("english".equalsIgnoreCase(language)) {
            if (indexVersion.onOrAfter(Version.V_1_3_0)) {
                return new PorterStemFilter(tokenStream);
            } else {
                return new SnowballFilter(tokenStream, new EnglishStemmer());
            }
            return new PorterStemFilter(tokenStream);
        } else if ("light_english".equalsIgnoreCase(language) || "lightEnglish".equalsIgnoreCase(language)
                || "kstem".equalsIgnoreCase(language)) {
            return new KStemFilter(tokenStream);
@ -135,11 +131,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {
        } else if ("porter".equalsIgnoreCase(language)) {
            return new PorterStemFilter(tokenStream);
        } else if ("porter2".equalsIgnoreCase(language)) {
            if (indexVersion.onOrAfter(Version.V_1_3_0)) {
                return new SnowballFilter(tokenStream, new EnglishStemmer());
            } else {
                return new SnowballFilter(tokenStream, new PorterStemmer());
            }
            return new SnowballFilter(tokenStream, new EnglishStemmer());
        } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) {
            return new EnglishMinimalStemFilter(tokenStream);
        } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) {
@ -20,7 +20,6 @@
package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.Lucene43StopFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.util.CharArraySet;
@ -42,7 +41,6 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {

    private final boolean ignoreCase;

    private final boolean enablePositionIncrements;
    private final boolean removeTrailing;

    public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
@ -50,21 +48,15 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
        this.ignoreCase = settings.getAsBoolean("ignore_case", false);
        this.removeTrailing = settings.getAsBoolean("remove_trailing", true);
        this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
        if (version.onOrAfter(Version.LUCENE_4_4) && settings.get("enable_position_increments") != null) {
            throw new IllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams."
                    + " Please fix your analysis chain or use an older compatibility version (<= 4.3).");
        if (settings.get("enable_position_increments") != null) {
            throw new IllegalArgumentException("enable_position_increments is not supported anymore. Please fix your analysis chain");
        }
        this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true);
    }

    @Override
    public TokenStream create(TokenStream tokenStream) {
        if (removeTrailing) {
            if (version.onOrAfter(Version.LUCENE_4_4)) {
                return new StopFilter(tokenStream, stopWords);
            } else {
                return new Lucene43StopFilter(enablePositionIncrements, tokenStream, stopWords);
            }
            return new StopFilter(tokenStream, stopWords);
        } else {
            return new SuggestStopFilter(tokenStream, stopWords);
        }
Some files were not shown because too many files have changed in this diff