Merge branch 'master' into break

commit 0fdadf4737

@@ -1,5 +1,5 @@
 elasticsearch = 5.0.0-alpha6
-lucene = 6.1.0
+lucene = 6.2.0

 # optional dependencies
 spatial4j = 0.6

@@ -19,11 +19,11 @@
 package org.apache.lucene.analysis.miscellaneous;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.util.CharArraySet;

 import java.io.IOException;

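The import churn in this hunk, and in the analysis files that follow, comes from a Lucene 6.2 relocation this commit tracks: support classes such as CharArraySet, LowerCaseFilter, StopFilter and StopwordAnalyzerBase now live in org.apache.lucene.analysis in lucene-core instead of the old org.apache.lucene.analysis.util and org.apache.lucene.analysis.core packages. A minimal sketch of the relocated class in use (illustrative example, not part of this commit):

import org.apache.lucene.analysis.CharArraySet;

import java.util.Arrays;

public final class StopSetExample {
    public static void main(String[] args) {
        // second constructor argument: ignore case when matching entries
        CharArraySet stopWords = new CharArraySet(Arrays.asList("a", "an", "the"), true);
        System.out.println(stopWords.contains("The")); // prints: true
    }
}
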
@@ -84,7 +84,7 @@ public class Version {
     public static final int V_5_0_0_alpha5_ID = 5000005;
     public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
     public static final int V_5_0_0_alpha6_ID = 5000006;
-    public static final Version V_5_0_0_alpha6 = new Version(V_5_0_0_alpha6_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
+    public static final Version V_5_0_0_alpha6 = new Version(V_5_0_0_alpha6_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
     public static final Version CURRENT = V_5_0_0_alpha6;

     static {

@@ -88,7 +88,7 @@ import java.util.Objects;
 public class Lucene {
     public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
     public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
-    public static final String LATEST_CODEC = "Lucene60";
+    public static final String LATEST_CODEC = "Lucene62";

     static {
         Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class);

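LATEST_CODEC above has to name a codec that Lucene actually registers via SPI. A hedged sketch of how that invariant can be probed (Codec.forName is a real Lucene 6.x API; the standalone check itself is illustrative):

import org.apache.lucene.codecs.Codec;

public final class CodecNameCheck {
    public static void main(String[] args) {
        // Codec.forName resolves the name via SPI and throws IllegalArgumentException
        // for unknown names, so a stale constant fails fast after a Lucene upgrade.
        Codec codec = Codec.forName("Lucene62");
        System.out.println(codec.getName()); // prints: Lucene62
    }
}
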
@@ -20,8 +20,9 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LegacyNumericTokenStream;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.ar.ArabicAnalyzer;
 import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
 import org.apache.lucene.analysis.br.BrazilianAnalyzer;
@@ -55,7 +56,6 @@ import org.apache.lucene.analysis.sv.SwedishAnalyzer;
 import org.apache.lucene.analysis.th.ThaiAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tr.TurkishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.FileSystemUtils;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.ar.ArabicAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arabic
     public ArabicAnalyzer get() {
         return this.arabicAnalyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.hy.ArmenianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arme
     public ArmenianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.eu.BasqueAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider<Basque
     public BasqueAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.br.BrazilianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bra
     public BrazilianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -22,7 +22,7 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.br.BrazilianStemFilter;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -42,4 +42,4 @@ public class BrazilianStemTokenFilterFactory extends AbstractTokenFilterFactory
     public TokenStream create(TokenStream tokenStream) {
         return new BrazilianStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bul
     public BulgarianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.ca.CatalanAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<Catal
     public CatalanAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.cjk.CJKAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -44,4 +44,4 @@ public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider<CJKAnalyz
     public CJKAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,10 +19,10 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
 import org.apache.lucene.analysis.commongrams.CommonGramsQueryFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.cz.CzechAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider<CzechAn
     public CzechAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.da.DanishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Danish
     public DanishAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.nl.DutchAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider<DutchAn
     public DutchAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,10 +19,10 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
 import org.apache.lucene.analysis.snowball.SnowballFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -45,4 +45,4 @@ public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory {
         tokenStream = new SetKeywordMarkerFilter(tokenStream, exclusions);
         return new SnowballFilter(tokenStream, new DutchStemmer());
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.analysis.util.ElisionFilter;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
@@ -47,4 +47,4 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implem
     public Object getMultiTermComponent() {
         return this;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.en.EnglishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Engli
     public EnglishAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -20,14 +20,14 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 import org.apache.lucene.analysis.miscellaneous.FingerprintFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.util.CharArraySet;

 /** OpenRefine Fingerprinting, which uses a Standard tokenizer and lowercase + stop + fingerprint + asciifolding filters */
 public final class FingerprintAnalyzer extends Analyzer {

@@ -20,7 +20,7 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.analysis.CharArraySet;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.fi.FinnishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Finni
     public FinnishAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.fr.FrenchAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider<French
     public FrenchAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,10 +19,10 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
 import org.apache.lucene.analysis.snowball.SnowballFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -45,4 +45,4 @@ public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory {
         tokenStream = new SetKeywordMarkerFilter(tokenStream, exclusions);
         return new SnowballFilter(tokenStream, new FrenchStemmer());
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.gl.GalicianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Gali
     public GalicianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.de.GermanAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider<German
     public GermanAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,10 +19,10 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.de.GermanStemFilter;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class GermanStemTokenFilterFactory extends AbstractTokenFilterFactory {
     public TokenStream create(TokenStream tokenStream) {
         return new GermanStemFilter(new SetKeywordMarkerFilter(tokenStream, exclusions));
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.hi.HindiAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider<HindiAn
     public HindiAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.hu.HungarianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Hun
     public HungarianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.id.IndonesianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider<In
     public IndonesianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.ga.IrishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class IrishAnalyzerProvider extends AbstractIndexAnalyzerProvider<IrishAn
     public IrishAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.it.ItalianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Itali
     public ItalianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,9 +19,9 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;

@@ -19,9 +19,9 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.lv.LatvianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Latvi
     public LatvianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.lt.LithuanianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Li
     public LithuanianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
 import org.apache.lucene.analysis.el.GreekLowerCaseFilter;
 import org.apache.lucene.analysis.ga.IrishLowerCaseFilter;
 import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.no.NorwegianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Nor
     public NorwegianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -20,12 +20,12 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.pattern.PatternTokenizer;
-import org.apache.lucene.analysis.util.CharArraySet;

 import java.util.regex.Pattern;

@@ -20,8 +20,8 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.pt.PortugueseAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Po
     public PortugueseAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.ro.RomanianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Roma
     public RomanianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.ru.RussianAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Russi
     public RussianAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
 /**
  * Factory for {@link ScandinavianFoldingFilter}
  */
-public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory {
+public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {

     public ScandinavianFoldingFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
@@ -38,4 +38,8 @@ public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory
         return new ScandinavianFoldingFilter(tokenStream);
     }

+    @Override
+    public Object getMultiTermComponent() {
+        return this;
+    }
 }

@@ -27,7 +27,7 @@ import org.elasticsearch.index.IndexSettings;
 /**
  * Factory for {@link ScandinavianNormalizationFilter}
  */
-public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterFactory {
+public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {

     public ScandinavianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
@@ -38,4 +38,8 @@ public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterF
         return new ScandinavianNormalizationFilter(tokenStream);
     }

+    @Override
+    public Object getMultiTermComponent() {
+        return this;
+    }
 }

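Both Scandinavian filter factories now implement MultiTermAwareComponent, Elasticsearch 5.x's marker for analysis components that are safe to apply while analyzing multi-term queries (prefix, wildcard, fuzzy). A simplified sketch of the pattern; the interface is paraphrased here, so treat the exact shape as an assumption:

// Paraphrased shape of the Elasticsearch 5.x interface; illustrative only.
interface MultiTermAwareComponent {
    // Returns the analysis component to apply at multi-term query time,
    // typically the factory itself when the filter works per character
    // and never changes the number of tokens.
    Object getMultiTermComponent();
}

class ExampleFoldingFilterFactory implements MultiTermAwareComponent {
    @Override
    public Object getMultiTermComponent() {
        return this; // folding preserves token count, hence multi-term safe
    }
}
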
@@ -20,16 +20,16 @@ package org.elasticsearch.index.analysis;
 */

 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
 import org.apache.lucene.analysis.snowball.SnowballFilter;
 import org.apache.lucene.analysis.standard.StandardFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.Version;

 /** Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link

@@ -18,11 +18,11 @@
 */
 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.de.GermanAnalyzer;
 import org.apache.lucene.analysis.fr.FrenchAnalyzer;
 import org.apache.lucene.analysis.nl.DutchAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.ckb.SoraniAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class SoraniAnalyzerProvider extends AbstractIndexAnalyzerProvider<Sorani
     public SoraniAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.es.SpanishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class SpanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Spani
     public SpanishAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,9 +19,9 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;

@@ -19,15 +19,15 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.LowerCaseFilter;
+import org.apache.lucene.analysis.StopFilter;
+import org.apache.lucene.analysis.StopwordAnalyzerBase;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
 import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.standard.StandardFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.util.CharArraySet;
-import org.apache.lucene.analysis.util.StopwordAnalyzerBase;

 public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider<StopAnal
     public StopAnalyzer get() {
         return this.stopAnalyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,10 +19,10 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.apache.lucene.analysis.core.StopFilter;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
 import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.sv.SwedishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class SwedishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Swedi
     public SwedishAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -20,9 +20,9 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LowerCaseFilter;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.synonym.SolrSynonymParser;
 import org.apache.lucene.analysis.synonym.SynonymFilter;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.tr.TurkishAnalyzer;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,4 +43,4 @@ public class TurkishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Turki
     public TurkishAnalyzer get() {
         return this.analyzer;
     }
-}
\ No newline at end of file
+}

@@ -19,10 +19,10 @@

 package org.elasticsearch.index.analysis;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;

@@ -19,8 +19,8 @@

 package org.elasticsearch.index.analysis.compound;

+import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase;
-import org.apache.lucene.analysis.util.CharArraySet;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;

@@ -21,8 +21,7 @@ package org.elasticsearch.index.codec;

 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.apache.lucene.codecs.lucene54.Lucene54Codec;
-import org.apache.lucene.codecs.lucene60.Lucene60Codec;
+import org.apache.lucene.codecs.lucene62.Lucene62Codec;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.logging.ESLogger;
@@ -48,8 +47,8 @@ public class CodecService {
     public CodecService(@Nullable MapperService mapperService, ESLogger logger) {
         final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder();
         if (mapperService == null) {
-            codecs.put(DEFAULT_CODEC, new Lucene60Codec());
-            codecs.put(BEST_COMPRESSION_CODEC, new Lucene60Codec(Mode.BEST_COMPRESSION));
+            codecs.put(DEFAULT_CODEC, new Lucene62Codec());
+            codecs.put(BEST_COMPRESSION_CODEC, new Lucene62Codec(Mode.BEST_COMPRESSION));
         } else {
             codecs.put(DEFAULT_CODEC,
                 new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));

@@ -22,7 +22,7 @@ package org.elasticsearch.index.codec;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene60.Lucene60Codec;
+import org.apache.lucene.codecs.lucene62.Lucene62Codec;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.mapper.CompletionFieldMapper;
@@ -39,7 +39,7 @@ import org.elasticsearch.index.mapper.MapperService;
  * configured for a specific field the default postings format is used.
  */
 // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version
-public class PerFieldMappingPostingFormatCodec extends Lucene60Codec {
+public class PerFieldMappingPostingFormatCodec extends Lucene62Codec {
     private final ESLogger logger;
     private final MapperService mapperService;

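PerFieldMappingPostingFormatCodec works because the Lucene6x codec family exposes a per-field hook. A minimal sketch of overriding it on Lucene62Codec; the field-name rule below is an invented example, not Elasticsearch's actual mapping logic:

import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene62.Lucene62Codec;

public class PerFieldExampleCodec extends Lucene62Codec {
    @Override
    public PostingsFormat getPostingsFormatForField(String field) {
        // route hypothetical suggest fields to a specific postings format,
        // everything else to the codec's default
        if (field.endsWith("_suggest")) {
            return PostingsFormat.forName("Lucene50");
        }
        return super.getPostingsFormatForField(field);
    }
}
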
@@ -239,10 +239,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
         }

         List<Object> values = searchLookup.source().extractRawValues(field);
-        if (!values.isEmpty()) {
-            for (int i = 0; i < values.size(); i++) {
-                values.set(i, fieldMapper.fieldType().valueForSearch(values.get(i)));
-            }
+        if (values.isEmpty() == false) {
             value = values;
         }
     }

@@ -20,6 +20,7 @@
 package org.elasticsearch.index.mapper;

 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.common.geo.GeoHashUtils;
 import org.apache.lucene.util.LegacyNumericUtils;
@@ -139,7 +140,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

         public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
             Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper,
-            KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
+            FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);

         public Y build(Mapper.BuilderContext context) {
             GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;
@@ -166,10 +167,17 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
                 }
                 geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
             }
-            KeywordFieldMapper geoHashMapper = null;
+            FieldMapper geoHashMapper = null;
             if (enableGeoHash || enableGeoHashPrefix) {
                 // TODO: possible also implicitly enable geohash if geohash precision is set
-                geoHashMapper = new KeywordFieldMapper.Builder(Names.GEOHASH).index(true).includeInAll(false).store(fieldType.stored()).build(context);
+                if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
+                    geoHashMapper = new KeywordFieldMapper.Builder(Names.GEOHASH)
+                        .index(true).includeInAll(false).store(fieldType.stored()).build(context);
+                } else {
+                    geoHashMapper = new StringFieldMapper.Builder(Names.GEOHASH)
+                        .tokenized(false).index(true).omitNorms(true).indexOptions(IndexOptions.DOCS)
+                        .includeInAll(false).store(fieldType.stored()).build(context);
+                }
                 geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
             }
             context.path().remove();
@@ -376,12 +384,12 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr

     protected FieldMapper lonMapper;

-    protected KeywordFieldMapper geoHashMapper;
+    protected FieldMapper geoHashMapper;

     protected Explicit<Boolean> ignoreMalformed;

     protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
-            FieldMapper latMapper, FieldMapper lonMapper, KeywordFieldMapper geoHashMapper,
+            FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
             MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
         this.latMapper = latMapper;
@@ -552,7 +560,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
     @Override
     public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
         BaseGeoPointFieldMapper updated = (BaseGeoPointFieldMapper) super.updateFieldType(fullNameToFieldType);
-        KeywordFieldMapper geoUpdated = geoHashMapper == null ? null : (KeywordFieldMapper) geoHashMapper.updateFieldType(fullNameToFieldType);
+        FieldMapper geoUpdated = geoHashMapper == null ? null : geoHashMapper.updateFieldType(fullNameToFieldType);
         FieldMapper latUpdated = latMapper == null ? null : latMapper.updateFieldType(fullNameToFieldType);
         FieldMapper lonUpdated = lonMapper == null ? null : lonMapper.updateFieldType(fullNameToFieldType);
         if (updated == this

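The geohash subfield above is now chosen by the version an index was created with: the keyword mapping for 5.0+ indices, the legacy string mapping otherwise. A self-contained sketch of that gating logic, using the numeric version-id scheme visible in Version.java earlier in this diff (the IDs below follow that scheme but are stand-ins, not the real constants):

public final class VersionGateExample {
    static String pickGeoHashMapping(int indexCreatedVersionId) {
        final int V_5_0_0_ALPHA1_ID = 5000001; // assumed, by analogy with 5000005/5000006 above
        return indexCreatedVersionId >= V_5_0_0_ALPHA1_ID ? "keyword" : "string";
    }

    public static void main(String[] args) {
        System.out.println(pickGeoHashMapping(5000006)); // keyword (5.0.0-alpha6 index)
        System.out.println(pickGeoHashMapping(2030099)); // string  (2.3.0-era index)
    }
}
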
@@ -73,7 +73,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
         @Override
         public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
                 MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper,
-                FieldMapper lonMapper, KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
+                FieldMapper lonMapper, FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
                 CopyTo copyTo) {
             fieldType.setTokenized(false);
             if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
@@ -104,7 +104,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {

     public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
             FieldMapper latMapper, FieldMapper lonMapper,
-            KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
+            FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
             ignoreMalformed, copyTo);
     }

@@ -160,6 +160,16 @@ public final class KeywordFieldMapper extends FieldMapper {
             failIfNoDocValues();
             return new DocValuesIndexFieldData.Builder();
         }
+
+        @Override
+        public Object valueForSearch(Object value) {
+            if (value == null) {
+                return null;
+            }
+            // keywords are internally stored as utf8 bytes
+            BytesRef binaryValue = (BytesRef) value;
+            return binaryValue.utf8ToString();
+        }
     }

     private Boolean includeInAll;
@@ -212,12 +222,14 @@ public final class KeywordFieldMapper extends FieldMapper {
             context.allEntries().addText(fieldType().name(), value, fieldType().boost());
         }

+        // convert to utf8 only once before feeding postings/dv/stored fields
+        final BytesRef binaryValue = new BytesRef(value);
         if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
-            Field field = new Field(fieldType().name(), value, fieldType());
+            Field field = new Field(fieldType().name(), binaryValue, fieldType());
             fields.add(field);
         }
         if (fieldType().hasDocValues()) {
-            fields.add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(value)));
+            fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue));
         }
     }

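The two KeywordFieldMapper hunks are symmetric: index time now encodes the string to UTF-8 bytes once and shares the same BytesRef across postings, doc values and stored fields, while valueForSearch() decodes it back for responses. A small standalone illustration of that round-trip:

import org.apache.lucene.util.BytesRef;

public final class BytesRefRoundTrip {
    public static void main(String[] args) {
        // encoded to UTF-8 once; the same instance can feed postings,
        // doc values and stored fields
        BytesRef binaryValue = new BytesRef("münchen");
        System.out.println(binaryValue.utf8ToString()); // decoded back: münchen
    }
}
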
@@ -58,7 +58,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
     }

     public static class Defaults extends BaseGeoPointFieldMapper.Defaults{
-        public static final Explicit<Boolean> COERCE = new Explicit(false, false);
+        public static final Explicit<Boolean> COERCE = new Explicit<>(false, false);

         public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType();

@@ -100,7 +100,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
         @Override
         public LegacyGeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
                 MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper,
-                KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
+                FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
                 CopyTo copyTo) {
             fieldType.setTokenized(false);
             setupFieldType(context);
@@ -261,7 +261,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
     protected Explicit<Boolean> coerce;

     public LegacyGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
-            Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper, KeywordFieldMapper geoHashMapper,
+            Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
             MultiFields multiFields, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
             ignoreMalformed, copyTo);

@@ -168,7 +168,7 @@ public class ObjectMapper extends Mapper implements Cloneable {
     public static class TypeParser implements Mapper.TypeParser {
         @Override
         public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-            ObjectMapper.Builder builder = createBuilder(name);
+            ObjectMapper.Builder builder = new Builder(name);
             parseNested(name, node, builder);
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
@@ -300,9 +300,6 @@ public class ObjectMapper extends Mapper implements Cloneable {

         }

-        protected Builder createBuilder(String name) {
-            return new Builder(name);
-        }
     }

     private final String fullPath;

@@ -20,6 +20,7 @@
 package org.elasticsearch.index.mapper;

+import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;
@@ -30,14 +31,13 @@ import org.elasticsearch.index.mapper.DynamicTemplate.XContentFieldType;

 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;

-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter;

 /**
@@ -57,79 +57,43 @@ public class RootObjectMapper extends ObjectMapper {

     public static class Builder extends ObjectMapper.Builder<Builder, RootObjectMapper> {

-        protected final List<DynamicTemplate> dynamicTemplates = new ArrayList<>();
-
-        // we use this to filter out seen date formats, because we might get duplicates during merging
-        protected Set<String> seenDateFormats = new HashSet<>();
-        protected List<FormatDateTimeFormatter> dynamicDateTimeFormatters = new ArrayList<>();
-
-        protected boolean dateDetection = Defaults.DATE_DETECTION;
-        protected boolean numericDetection = Defaults.NUMERIC_DETECTION;
+        protected Explicit<DynamicTemplate[]> dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false);
+        protected Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters = new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false);
+        protected Explicit<Boolean> dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false);
+        protected Explicit<Boolean> numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false);

         public Builder(String name) {
             super(name);
             this.builder = this;
         }

-        public Builder noDynamicDateTimeFormatter() {
-            this.dynamicDateTimeFormatters = null;
-            return builder;
-        }
-
-        public Builder dynamicDateTimeFormatter(Iterable<FormatDateTimeFormatter> dateTimeFormatters) {
-            for (FormatDateTimeFormatter dateTimeFormatter : dateTimeFormatters) {
-                if (!seenDateFormats.contains(dateTimeFormatter.format())) {
-                    seenDateFormats.add(dateTimeFormatter.format());
-                    this.dynamicDateTimeFormatters.add(dateTimeFormatter);
-                }
-            }
-            return builder;
-        }
-
-        public Builder add(DynamicTemplate dynamicTemplate) {
-            this.dynamicTemplates.add(dynamicTemplate);
+        public Builder dynamicDateTimeFormatter(Collection<FormatDateTimeFormatter> dateTimeFormatters) {
+            this.dynamicDateTimeFormatters = new Explicit<>(dateTimeFormatters.toArray(new FormatDateTimeFormatter[0]), true);
             return this;
         }

-        public Builder add(DynamicTemplate... dynamicTemplate) {
-            for (DynamicTemplate template : dynamicTemplate) {
-                this.dynamicTemplates.add(template);
-            }
+        public Builder dynamicTemplates(Collection<DynamicTemplate> templates) {
+            this.dynamicTemplates = new Explicit<>(templates.toArray(new DynamicTemplate[0]), true);
             return this;
         }


         @Override
         protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic,
                 Boolean includeInAll, Map<String, Mapper> mappers, @Nullable Settings settings) {
             assert !nested.isNested();
-            FormatDateTimeFormatter[] dates = null;
-            if (dynamicDateTimeFormatters == null) {
-                dates = new FormatDateTimeFormatter[0];
-            } else if (dynamicDateTimeFormatters.isEmpty()) {
-                // add the default one
-                dates = Defaults.DYNAMIC_DATE_TIME_FORMATTERS;
-            } else {
-                dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]);
-            }
             return new RootObjectMapper(name, enabled, dynamic, includeInAll, mappers,
-                dates,
-                dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]),
+                dynamicDateTimeFormatters,
+                dynamicTemplates,
                 dateDetection, numericDetection);
         }
     }

     public static class TypeParser extends ObjectMapper.TypeParser {

-        @Override
-        protected ObjectMapper.Builder createBuilder(String name) {
-            return new Builder(name);
-        }
-
         @Override
         public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {

-            ObjectMapper.Builder builder = createBuilder(name);
+            RootObjectMapper.Builder builder = new Builder(name);
             Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator();
             while (iterator.hasNext()) {
                 Map.Entry<String, Object> entry = iterator.next();
@@ -143,26 +107,22 @@ public class RootObjectMapper extends ObjectMapper {
             return builder;
         }

-        protected boolean processField(ObjectMapper.Builder builder, String fieldName, Object fieldNode,
+        protected boolean processField(RootObjectMapper.Builder builder, String fieldName, Object fieldNode,
                 Version indexVersionCreated) {
             if (fieldName.equals("date_formats") || fieldName.equals("dynamic_date_formats")) {
-                List<FormatDateTimeFormatter> dateTimeFormatters = new ArrayList<>();
                 if (fieldNode instanceof List) {
-                    for (Object node1 : (List) fieldNode) {
-                        if (node1.toString().startsWith("epoch_")) {
-                            throw new MapperParsingException("Epoch ["+ node1.toString() +"] is not supported as dynamic date format");
+                    List<FormatDateTimeFormatter> formatters = new ArrayList<>();
+                    for (Object formatter : (List<?>) fieldNode) {
+                        if (formatter.toString().startsWith("epoch_")) {
+                            throw new MapperParsingException("Epoch ["+ formatter +"] is not supported as dynamic date format");
                         }
-                        dateTimeFormatters.add(parseDateTimeFormatter(node1));
+                        formatters.add(parseDateTimeFormatter(formatter));
                     }
+                    builder.dynamicDateTimeFormatter(formatters);
                 } else if ("none".equals(fieldNode.toString())) {
-                    dateTimeFormatters = null;
+                    builder.dynamicDateTimeFormatter(Collections.emptyList());
                 } else {
-                    dateTimeFormatters.add(parseDateTimeFormatter(fieldNode));
-                }
-                if (dateTimeFormatters == null) {
-                    ((Builder) builder).noDynamicDateTimeFormatter();
-                } else {
-                    ((Builder) builder).dynamicDateTimeFormatter(dateTimeFormatters);
+                    builder.dynamicDateTimeFormatter(Collections.singleton(parseDateTimeFormatter(fieldNode)));
                 }
                 return true;
             } else if (fieldName.equals("dynamic_templates")) {
@@ -175,7 +135,8 @@ public class RootObjectMapper extends ObjectMapper {
                 //      }
                 //  }
                 // ]
-                List tmplNodes = (List) fieldNode;
+                List<?> tmplNodes = (List<?>) fieldNode;
+                List<DynamicTemplate> templates = new ArrayList<>();
                 for (Object tmplNode : tmplNodes) {
                     Map<String, Object> tmpl = (Map<String, Object>) tmplNode;
                     if (tmpl.size() != 1) {
@@ -186,30 +147,30 @@ public class RootObjectMapper extends ObjectMapper {
                     Map<String, Object> templateParams = (Map<String, Object>) entry.getValue();
                     DynamicTemplate template = DynamicTemplate.parse(templateName, templateParams, indexVersionCreated);
                     if (template != null) {
-                        ((Builder) builder).add(template);
+                        templates.add(template);
                     }
                 }
+                builder.dynamicTemplates(templates);
                 return true;
             } else if (fieldName.equals("date_detection")) {
-                ((Builder) builder).dateDetection = lenientNodeBooleanValue(fieldNode);
+                ((Builder) builder).dateDetection = new Explicit<>(nodeBooleanValue(fieldNode), true);
                 return true;
             } else if (fieldName.equals("numeric_detection")) {
-                ((Builder) builder).numericDetection = lenientNodeBooleanValue(fieldNode);
+                ((Builder) builder).numericDetection = new Explicit<>(nodeBooleanValue(fieldNode), true);
                 return true;
             }
             return false;
         }
     }

-    private final FormatDateTimeFormatter[] dynamicDateTimeFormatters;
-
-    private final boolean dateDetection;
-    private final boolean numericDetection;
-
-    private volatile DynamicTemplate dynamicTemplates[];
+    private Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters;
+    private Explicit<Boolean> dateDetection;
+    private Explicit<Boolean> numericDetection;
+    private Explicit<DynamicTemplate[]> dynamicTemplates;

     RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Boolean includeInAll, Map<String, Mapper> mappers,
-            FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) {
+            Explicit<FormatDateTimeFormatter[]> dynamicDateTimeFormatters, Explicit<DynamicTemplate[]> dynamicTemplates,
+            Explicit<Boolean> dateDetection, Explicit<Boolean> numericDetection) {
         super(name, name, enabled, Nested.NO, dynamic, includeInAll, mappers);
         this.dynamicTemplates = dynamicTemplates;
         this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;
@@ -220,21 +181,26 @@
|
|||
@Override
|
||||
public ObjectMapper mappingUpdate(Mapper mapper) {
|
||||
RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper);
|
||||
// dynamic templates are irrelevant for dynamic mappings updates
|
||||
update.dynamicTemplates = new DynamicTemplate[0];
|
||||
// for dynamic updates, no need to carry root-specific options, we just
|
||||
// set everything to they implicit default value so that they are not
|
||||
// applied at merge time
|
||||
update.dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false);
|
||||
update.dynamicDateTimeFormatters = new Explicit<FormatDateTimeFormatter[]>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false);
|
||||
update.dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false);
|
||||
update.numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false);
|
||||
return update;
|
||||
}
|
||||
|
||||
public boolean dateDetection() {
|
||||
return this.dateDetection;
|
||||
return this.dateDetection.value();
|
||||
}
|
||||
|
||||
public boolean numericDetection() {
|
||||
return this.numericDetection;
|
||||
return this.numericDetection.value();
|
||||
}
|
||||
|
||||
public FormatDateTimeFormatter[] dynamicDateTimeFormatters() {
|
||||
return dynamicDateTimeFormatters;
|
||||
return dynamicDateTimeFormatters.value();
|
||||
}
|
||||
|
||||
public Mapper.Builder findTemplateBuilder(ParseContext context, String name, XContentFieldType matchType) {
|
||||
|
@ -264,7 +230,7 @@ public class RootObjectMapper extends ObjectMapper {
|
|||
|
||||
public DynamicTemplate findTemplate(ContentPath path, String name, XContentFieldType matchType) {
|
||||
final String pathAsString = path.pathAsText(name);
|
||||
for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
|
||||
for (DynamicTemplate dynamicTemplate : dynamicTemplates.value()) {
|
||||
if (dynamicTemplate.match(pathAsString, name, matchType)) {
|
||||
return dynamicTemplate;
|
||||
}
|
||||
|
@ -281,21 +247,18 @@ public class RootObjectMapper extends ObjectMapper {
|
|||
protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
|
||||
super.doMerge(mergeWith, updateAllTypes);
|
||||
RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
|
||||
// merge them
|
||||
List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
|
||||
for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
|
||||
boolean replaced = false;
|
||||
for (int i = 0; i < mergedTemplates.size(); i++) {
|
||||
if (mergedTemplates.get(i).name().equals(template.name())) {
|
||||
mergedTemplates.set(i, template);
|
||||
replaced = true;
|
||||
}
|
||||
}
|
||||
if (!replaced) {
|
||||
mergedTemplates.add(template);
|
||||
}
|
||||
if (mergeWithObject.numericDetection.explicit()) {
|
||||
this.numericDetection = mergeWithObject.numericDetection;
|
||||
}
|
||||
if (mergeWithObject.dateDetection.explicit()) {
|
||||
this.dateDetection = mergeWithObject.dateDetection;
|
||||
}
|
||||
if (mergeWithObject.dynamicDateTimeFormatters.explicit()) {
|
||||
this.dynamicDateTimeFormatters = mergeWithObject.dynamicDateTimeFormatters;
|
||||
}
|
||||
if (mergeWithObject.dynamicTemplates.explicit()) {
|
||||
this.dynamicTemplates = mergeWithObject.dynamicTemplates;
|
||||
}
|
||||
this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -305,19 +268,19 @@ public class RootObjectMapper extends ObjectMapper {
|
|||
|
||||
@Override
|
||||
protected void doXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
if (dynamicDateTimeFormatters != Defaults.DYNAMIC_DATE_TIME_FORMATTERS) {
|
||||
if (dynamicDateTimeFormatters.length > 0) {
|
||||
builder.startArray("dynamic_date_formats");
|
||||
for (FormatDateTimeFormatter dateTimeFormatter : dynamicDateTimeFormatters) {
|
||||
builder.value(dateTimeFormatter.format());
|
||||
}
|
||||
builder.endArray();
|
||||
final boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
|
||||
|
||||
if (dynamicDateTimeFormatters.explicit() || includeDefaults) {
|
||||
builder.startArray("dynamic_date_formats");
|
||||
for (FormatDateTimeFormatter dateTimeFormatter : dynamicDateTimeFormatters.value()) {
|
||||
builder.value(dateTimeFormatter.format());
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
|
||||
if (dynamicTemplates != null && dynamicTemplates.length > 0) {
|
||||
if (dynamicTemplates.explicit() || includeDefaults) {
|
||||
builder.startArray("dynamic_templates");
|
||||
for (DynamicTemplate dynamicTemplate : dynamicTemplates) {
|
||||
for (DynamicTemplate dynamicTemplate : dynamicTemplates.value()) {
|
||||
builder.startObject();
|
||||
builder.field(dynamicTemplate.name(), dynamicTemplate);
|
||||
builder.endObject();
|
||||
|
@ -325,11 +288,11 @@ public class RootObjectMapper extends ObjectMapper {
|
|||
builder.endArray();
|
||||
}
|
||||
|
||||
if (dateDetection != Defaults.DATE_DETECTION) {
|
||||
builder.field("date_detection", dateDetection);
|
||||
if (dateDetection.explicit() || includeDefaults) {
|
||||
builder.field("date_detection", dateDetection.value());
|
||||
}
|
||||
if (numericDetection != Defaults.NUMERIC_DETECTION) {
|
||||
builder.field("numeric_detection", numericDetection);
|
||||
if (numericDetection.explicit() || includeDefaults) {
|
||||
builder.field("numeric_detection", numericDetection.value());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
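The Explicit wrapper this file switches to is the small value-plus-flag holder from org.elasticsearch.common that records whether a setting came from the mapping source or is just a default. A minimal sketch of the idea, assuming only the value()/explicit() accessors used above:

// Sketch of the Explicit<T> pattern: a value plus a flag recording whether it was
// set explicitly in the mapping source or merely defaulted. Only explicit values
// are allowed to win when two mappings are merged.
public final class Explicit<T> {
    private final T value;
    private final boolean explicit;

    public Explicit(T value, boolean explicit) {
        this.value = value;
        this.explicit = explicit;
    }

    public T value() { return value; }
    public boolean explicit() { return explicit; }
}

This is why doMerge() above only copies a setting from mergeWithObject when that side reports it as explicit(), and why mappingUpdate() resets everything to non-explicit defaults so dynamic updates never clobber root options at merge time.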
@ -85,7 +85,7 @@ final class LocalShardSnapshot implements Closeable {
}

@Override
public void renameFile(String source, String dest) throws IOException {
public void rename(String source, String dest) throws IOException {
throw new UnsupportedOperationException("this directory is read-only");
}

@ -128,25 +128,14 @@ public final class SimilarityService extends AbstractIndexComponent {
static class PerFieldSimilarity extends PerFieldSimilarityWrapper {

private final Similarity defaultSimilarity;
private final Similarity baseSimilarity;
private final MapperService mapperService;

PerFieldSimilarity(Similarity defaultSimilarity, Similarity baseSimilarity, MapperService mapperService) {
super(baseSimilarity);
this.defaultSimilarity = defaultSimilarity;
this.baseSimilarity = baseSimilarity;
this.mapperService = mapperService;
}

@Override
public float coord(int overlap, int maxOverlap) {
return baseSimilarity.coord(overlap, maxOverlap);
}

@Override
public float queryNorm(float valueForNormalization) {
return baseSimilarity.queryNorm(valueForNormalization);
}

@Override
public Similarity get(String name) {
MappedFieldType fieldType = mapperService.fullName(name);
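A hedged reading of the deletion above: with the Lucene 6.2 upgrade, PerFieldSimilarityWrapper appears to take the base similarity as a constructor argument and perform the coord()/queryNorm() delegation itself, leaving the subclass with only the per-field lookup. Sketch, assuming that constructor:

// Sketch, assuming Lucene 6.2's PerFieldSimilarityWrapper(Similarity) constructor.
PerFieldSimilarity(Similarity defaultSimilarity, Similarity baseSimilarity, MapperService mapperService) {
    super(baseSimilarity);                      // wrapper now owns coord()/queryNorm() delegation
    this.defaultSimilarity = defaultSimilarity; // fallback when a field has no custom similarity
    this.mapperService = mapperService;         // still needed for the per-field get(name) lookup
}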
@ -245,7 +245,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref

/**
* Renames all the given files form the key of the map to the
* Renames all the given files from the key of the map to the
* value of the map. All successfully renamed files are removed from the map in-place.
*/
public void renameTempFilesSafe(Map<String, String> tempFileMap) throws IOException {

@ -282,10 +282,11 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
logger.debug("failed to delete file [{}]", ex, origFile);
}
// now, rename the files... and fail if it won't work
this.renameFile(tempFile, origFile);
directory.rename(tempFile, origFile);
final String remove = tempFileMap.remove(tempFile);
assert remove != null;
}
directory.syncMetaData();
} finally {
metadataLock.writeLock().unlock();
}

@ -297,11 +298,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
return statsCache.getOrRefresh();
}

public void renameFile(String from, String to) throws IOException {
ensureOpen();
directory.renameFile(from, to);
}

/**
* Increments the refCount of this Store instance. RefCounts are used to determine when a
* Store can be closed safely, i.e. as soon as there are no more references. Be sure to always call a
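The Store changes track a Lucene 6.2 API rename: Directory.renameFile(String, String) became Directory.rename(String, String), with durability still provided separately by syncMetaData(). A minimal sketch of the pattern renameTempFilesSafe() now relies on; the file names are hypothetical, for illustration only:

import java.io.IOException;
import org.apache.lucene.store.Directory;

class TempFilePromotion {
    // "recovery.tmp" and "segments_5" are hypothetical names.
    static void promote(Directory directory) throws IOException {
        directory.rename("recovery.tmp", "segments_5"); // Lucene 6.2 API; replaces renameFile()
        directory.syncMetaData();                       // the rename is only durable after this
    }
}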
@ -1177,7 +1177,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}

@Override
public void prepareCommit() throws IOException {
public long prepareCommit() throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (currentCommittingGeneration != NOT_SET_GENERATION) {

@ -1200,10 +1200,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
IOUtils.closeWhileHandlingException(this); // tragic event
throw e;
}
return 0L;
}

@Override
public void commit() throws IOException {
public long commit() throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (currentCommittingGeneration == NOT_SET_GENERATION) {

@ -1216,6 +1217,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
currentCommittingGeneration = NOT_SET_GENERATION;
trimUnreferencedReaders();
}
return 0;
}

void trimUnreferencedReaders() {
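prepareCommit() and commit() change from void to long here, with 0L placeholders in this merge; the two-phase shape of the commit stays the same. A hedged sketch of the calling sequence:

// Sketch, assuming a Translog instance named translog; the returned longs are
// generations, still the 0L placeholder in this merge.
long prepared = translog.prepareCommit(); // roll to a fresh generation, keep the old one readable
// ... flush and commit the Lucene index in between ...
long committed = translog.commit();       // seal the committing generation, trim unreferenced readers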
@ -19,6 +19,7 @@
package org.elasticsearch.indices.analysis;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;

@ -58,7 +59,6 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.sv.SwedishAnalyzer;
import org.apache.lucene.analysis.th.ThaiAnalyzer;
import org.apache.lucene.analysis.tr.TurkishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.Version;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.index.analysis.PatternAnalyzer;
@ -18,6 +18,9 @@
*/
package org.elasticsearch.indices.analysis;

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
import org.apache.lucene.analysis.ar.ArabicStemFilter;

@ -27,9 +30,7 @@ import org.apache.lucene.analysis.cjk.CJKWidthFilter;
import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.core.UpperCaseFilter;
import org.apache.lucene.analysis.cz.CzechStemFilter;
import org.apache.lucene.analysis.de.GermanNormalizationFilter;

@ -60,7 +61,6 @@ import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.ClassicFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.tr.ApostropheFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ElisionFilter;
import org.elasticsearch.Version;
import org.elasticsearch.index.analysis.DelimitedPayloadTokenFilterFactory;
@ -33,6 +33,7 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;

@ -1636,6 +1637,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
} else {
stream = new RateLimitingInputStream(partSliceStream, restoreRateLimiter, restoreRateLimitingTimeInNanos::inc);
}

// TODO: why does the target file sometimes already exist? Simon says: I think, this can happen if you fail a shard and
// it's not cleaned up yet, the restore process tries to reuse files
IOUtils.deleteFilesIgnoringExceptions(store.directory(), fileInfo.physicalName());

try (final IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) {
final byte[] buffer = new byte[BUFFER_SIZE];
int length;
@ -25,11 +25,9 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PackedLongValues;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;

@ -119,6 +117,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector {

@Override
public void preCollection() throws IOException {
collector.preCollection();
}

@Override

@ -145,7 +144,6 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector {
}
this.selectedBuckets = hash;

collector.preCollection();
boolean needsScores = collector.needsScores();
Weight weight = null;
if (needsScores) {
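Both deferring collectors in this commit now forward preCollection() as soon as the framework calls it, instead of invoking it lazily right before replaying the deferred documents. A sketch of the contract this preserves:

// Sketch: a deferring BucketCollector passes lifecycle calls straight through so the
// wrapped collector sees exactly one preCollection() before any collect() calls.
@Override
public void preCollection() throws IOException {
    collector.preCollection(); // forward once, up front; replay later only re-collects docs
}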
@ -48,7 +48,7 @@ import java.util.List;
* {@link BestDocsDeferringCollector#createTopDocsCollector(int)} is designed to
* be overridden and allows subclasses to choose a custom collector
* implementation for determining the top N matches.
*
*/

public class BestDocsDeferringCollector extends DeferringBucketCollector implements Releasable {

@ -61,7 +61,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme

/**
* Sole constructor.
*
* @param shardSize
* The number of top-scoring docs to collect for each bucket
*/

@ -111,6 +111,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme

@Override
public void preCollection() throws IOException {
deferred.preCollection();
}

@Override

@ -125,7 +126,6 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
}

private void runDeferredAggs() throws IOException {
deferred.preCollection();

List<ScoreDoc> allDocs = new ArrayList<>(shardSize);
for (int i = 0; i < perBucketSamples.size(); i++) {

@ -135,14 +135,14 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
}
perBucketSample.getMatches(allDocs);
}

// Sort the top matches by docID for the benefit of deferred collector
ScoreDoc[] docsArr = allDocs.toArray(new ScoreDoc[allDocs.size()]);
Arrays.sort(docsArr, new Comparator<ScoreDoc>() {
@Override
public int compare(ScoreDoc o1, ScoreDoc o2) {
if(o1.doc == o2.doc){
return o1.shardIndex - o2.shardIndex;
}
return o1.doc - o2.doc;
}

@ -256,7 +256,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
currentScore = scoreDoc.score;
currentDocId = rebased;
// We stored the bucket ID in Lucene's shardIndex property
// for convenience.
leafCollector.collect(rebased, scoreDoc.shardIndex);
}
}
@ -19,6 +19,7 @@

package org.elasticsearch.search.profile.aggregation;

import org.apache.lucene.search.Scorer;
import org.elasticsearch.search.aggregations.LeafBucketCollector;

import java.io.IOException;

@ -40,4 +41,9 @@ public class ProfilingLeafBucketCollector extends LeafBucketCollector {
profileBreakdown.stopAndRecordTime();
}

@Override
public void setScorer(Scorer scorer) throws IOException {
delegate.setScorer(scorer);
}

}
@ -41,6 +41,7 @@ import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.mapper.CompletionFieldMapper2x;

@ -127,7 +128,7 @@ public class Completion090PostingsFormat extends PostingsFormat {
boolean success = false;
try {
output = state.directory.createOutput(suggestFSTFile, state.context);
CodecUtil.writeHeader(output, CODEC_NAME, SUGGEST_VERSION_CURRENT);
CodecUtil.writeIndexHeader(output, CODEC_NAME, SUGGEST_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
/*
* we write the delegate postings format name so we can load it
* without getting an instance in the ctor

@ -165,7 +166,13 @@ public class Completion090PostingsFormat extends PostingsFormat {
public CompletionFieldsProducer(SegmentReadState state) throws IOException {
String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
IndexInput input = state.directory.openInput(suggestFSTFile, state.context);
version = CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT);
if (state.segmentInfo.getVersion().onOrAfter(Version.LUCENE_6_2_0)) {
// Lucene 6.2.0+ requires all index files to use index header, but prior to that we used an ordinary codec header:
version = CodecUtil.checkIndexHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT,
state.segmentInfo.getId(), state.segmentSuffix);
} else {
version = CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT);
}
FieldsProducer delegateProducer = null;
boolean success = false;
try {
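The suggest-FST file above moves from a plain codec header to a full index header on write, while reads stay version-gated so pre-6.2 segments remain readable. A hedged sketch of the matching write/read pair; CODEC_NAME, VERSION_START and VERSION_CURRENT are hypothetical constants:

// Write side: an index header ties the file to its segment id and suffix.
CodecUtil.writeIndexHeader(output, CODEC_NAME, VERSION_CURRENT, segmentInfo.getId(), segmentSuffix);
// Read side: verify the same identity; fall back to checkHeader() for pre-6.2 segments.
int version = CodecUtil.checkIndexHeader(input, CODEC_NAME, VERSION_START, VERSION_CURRENT,
        segmentInfo.getId(), segmentSuffix);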
@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" {
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.

grant codeBase "${codebase.lucene-core-6.1.0.jar}" {
grant codeBase "${codebase.lucene-core-6.2.0.jar}" {
// needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
// java 8 package
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";

@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.1.0.jar}" {
permission java.lang.RuntimePermission "accessDeclaredMembers";
};

grant codeBase "${codebase.lucene-misc-6.1.0.jar}" {
grant codeBase "${codebase.lucene-misc-6.2.0.jar}" {
// needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
permission java.nio.file.LinkPermission "hard";
};

@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};

grant codeBase "${codebase.lucene-test-framework-6.1.0.jar}" {
grant codeBase "${codebase.lucene-test-framework-6.2.0.jar}" {
// needed by RamUsageTester
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
// needed for testing hardlinks in StoreRecoveryTests since we install MockFS
@ -20,14 +20,14 @@
package org.elasticsearch.action.termvectors;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
@ -19,7 +19,7 @@

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.CharArraySet;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;

@ -20,7 +20,7 @@ package org.elasticsearch.index.analysis;
*/

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.CharArraySet;
import org.elasticsearch.test.ESTokenStreamTestCase;

public class FingerprintAnalyzerTests extends ESTokenStreamTestCase {

@ -19,11 +19,12 @@

package org.elasticsearch.index.analysis;

import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;

import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.test.ESTokenStreamTestCase;

@ -19,14 +19,14 @@

package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Settings.Builder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTokenStreamTestCase;

@ -18,9 +18,9 @@
*/
package org.elasticsearch.index.analysis.filter1;

import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;

@ -36,4 +36,4 @@ public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory {
public TokenStream create(TokenStream tokenStream) {
return new StopFilter(tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
}
}
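The import churn running through these files is mechanical: Lucene 6.2 moved a handful of core analysis classes out of the .util and .core subpackages into org.apache.lucene.analysis itself, which is exactly what the old/new import pairs above show. The recurring substitutions:

// Old (up to Lucene 6.1)                                  New (Lucene 6.2)
// org.apache.lucene.analysis.util.CharArraySet         -> org.apache.lucene.analysis.CharArraySet
// org.apache.lucene.analysis.core.StopFilter           -> org.apache.lucene.analysis.StopFilter
// org.apache.lucene.analysis.core.LowerCaseFilter      -> org.apache.lucene.analysis.LowerCaseFilter
// org.apache.lucene.analysis.util.StopwordAnalyzerBase -> org.apache.lucene.analysis.StopwordAnalyzerBase
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;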
@ -25,7 +25,7 @@ import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
import org.apache.lucene.codecs.lucene53.Lucene53Codec;
import org.apache.lucene.codecs.lucene54.Lucene54Codec;
import org.apache.lucene.codecs.lucene60.Lucene60Codec;
import org.apache.lucene.codecs.lucene62.Lucene62Codec;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;

@ -55,7 +55,7 @@ public class CodecTests extends ESTestCase {
public void testResolveDefaultCodecs() throws Exception {
CodecService codecService = createCodecService();
assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class));
assertThat(codecService.codec("default"), instanceOf(Lucene60Codec.class));
assertThat(codecService.codec("default"), instanceOf(Lucene62Codec.class));
assertThat(codecService.codec("Lucene54"), instanceOf(Lucene54Codec.class));
assertThat(codecService.codec("Lucene53"), instanceOf(Lucene53Codec.class));
assertThat(codecService.codec("Lucene50"), instanceOf(Lucene50Codec.class));
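With the default codec bumped to Lucene62, named codec resolution still goes through Lucene's SPI registry, which the test above exercises indirectly. A minimal sketch of the lookup:

// Sketch: resolve codecs by name via Lucene's SPI.
import org.apache.lucene.codecs.Codec;

Codec latest = Codec.forName("Lucene62"); // the new default for fresh indices
Codec older  = Codec.forName("Lucene54"); // older codecs stay registered for back compat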
@ -1017,12 +1017,6 @@ public class InternalEngineTests extends ESTestCase {
engine.index(new Engine.Index(newUid("2"), doc));
EngineConfig config = engine.config();
engine.close();
final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
directory.setPreventDoubleWrite(false);
}
engine = new InternalEngine(copy(config, EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
engine.recoverFromTranslog();
assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));

@ -1758,7 +1752,6 @@ public class InternalEngineTests extends ESTestCase {
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
directory.setPreventDoubleWrite(false);
boolean started = false;
final int numIters = randomIntBetween(10, 20);
for (int i = 0; i < numIters; i++) {

@ -1804,12 +1797,6 @@ public class InternalEngineTests extends ESTestCase {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
directory.setPreventDoubleWrite(false);
}
engine.close();
engine = new InternalEngine(engine.config());

@ -1928,12 +1915,6 @@ public class InternalEngineTests extends ESTestCase {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
directory.setPreventDoubleWrite(false);
}

TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
parser.mappingUpdate = dynamicUpdate();

@ -2051,12 +2032,6 @@ public class InternalEngineTests extends ESTestCase {
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
assertThat(topDocs.totalHits, equalTo(numDocs));
}
final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
if (directory != null) {
// since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
// this so we have to disable the check explicitly
directory.setPreventDoubleWrite(false);
}
Translog.TranslogGeneration generation = engine.getTranslog().getGeneration();
engine.close();

@ -21,6 +21,7 @@ package org.elasticsearch.index.mapper;

import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

@ -80,7 +81,7 @@ public class DynamicTemplatesTests extends ESSingleNodeTestCase {

IndexableField f = doc.getField("name");
assertThat(f.name(), equalTo("name"));
assertThat(f.stringValue(), equalTo("some name"));
assertThat(f.binaryValue(), equalTo(new BytesRef("some name")));
assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
assertThat(f.fieldType().tokenized(), equalTo(false));

@ -98,7 +99,7 @@ public class DynamicTemplatesTests extends ESSingleNodeTestCase {

f = doc.getField("multi1.org");
assertThat(f.name(), equalTo("multi1.org"));
assertThat(f.stringValue(), equalTo("multi 1"));
assertThat(f.binaryValue(), equalTo(new BytesRef("multi 1")));
assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
assertThat(f.fieldType().tokenized(), equalTo(false));

@ -116,7 +117,7 @@ public class DynamicTemplatesTests extends ESSingleNodeTestCase {

f = doc.getField("multi2.org");
assertThat(f.name(), equalTo("multi2.org"));
assertThat(f.stringValue(), equalTo("multi 2"));
assertThat(f.binaryValue(), equalTo(new BytesRef("multi 2")));
assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
assertThat(f.fieldType().tokenized(), equalTo(false));

@ -137,7 +138,7 @@ public class DynamicTemplatesTests extends ESSingleNodeTestCase {

IndexableField f = doc.getField("name");
assertThat(f.name(), equalTo("name"));
assertThat(f.stringValue(), equalTo("some name"));
assertThat(f.binaryValue(), equalTo(new BytesRef("some name")));
assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
assertThat(f.fieldType().tokenized(), equalTo(false));

@ -155,7 +156,7 @@ public class DynamicTemplatesTests extends ESSingleNodeTestCase {

f = doc.getField("multi1.org");
assertThat(f.name(), equalTo("multi1.org"));
assertThat(f.stringValue(), equalTo("multi 1"));
assertThat(f.binaryValue(), equalTo(new BytesRef("multi 1")));
assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
assertThat(f.fieldType().tokenized(), equalTo(false));

@ -173,7 +174,7 @@ public class DynamicTemplatesTests extends ESSingleNodeTestCase {

f = doc.getField("multi2.org");
assertThat(f.name(), equalTo("multi2.org"));
assertThat(f.stringValue(), equalTo("multi 2"));
assertThat(f.binaryValue(), equalTo(new BytesRef("multi 2")));
assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
assertThat(f.fieldType().tokenized(), equalTo(false));

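The assertion rewrites above, and in the mapper tests that follow, all track one change: keyword-style fields now index their terms as raw bytes, so tests read them back with binaryValue() as a BytesRef instead of stringValue(). In miniature:

// Sketch: reading an indexed keyword term back from a Lucene document.
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef;

IndexableField f = doc.getField("name");
BytesRef term = f.binaryValue();           // was: f.stringValue()
assert term.equals(new BytesRef("some name"));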
@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper;

import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;

@ -155,7 +156,7 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getField("field.field").stringValue(), is("foo"));

assertThat(doc.rootDoc().getField("field.field.raw"), notNullValue());
assertThat(doc.rootDoc().getField("field.field.raw").stringValue(), is("foo"));
assertThat(doc.rootDoc().getField("field.field.raw").binaryValue(), is(new BytesRef("foo")));
}

public void testExternalValuesWithMultifieldTwoLevels() throws Exception {
@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper;

import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;

@ -112,7 +113,11 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {

assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2)));
if (version.onOrAfter(Version.V_5_0_0_alpha1)) {
assertThat(doc.rootDoc().getBinaryValue("point.geohash"), equalTo(new BytesRef(stringEncode(1.3, 1.2))));
} else {
assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2)));
}
}

public void testLatLonInOneValueWithGeohash() throws Exception {

@ -132,7 +137,11 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {

assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2)));
if (version.onOrAfter(Version.V_5_0_0_alpha1)) {
assertThat(doc.rootDoc().getBinaryValue("point.geohash"), equalTo(new BytesRef(stringEncode(1.3, 1.2))));
} else {
assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2)));
}
}

public void testGeoHashIndexValue() throws Exception {

@ -152,7 +161,11 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {

assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2)));
if (version.onOrAfter(Version.V_5_0_0_alpha1)) {
assertThat(doc.rootDoc().getBinaryValue("point.geohash"), equalTo(new BytesRef(stringEncode(1.3, 1.2))));
} else {
assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2)));
}
}

public void testGeoHashValue() throws Exception {
@ -20,6 +20,7 @@
package org.elasticsearch.index.mapper;

import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;

@ -118,7 +119,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase {

assertThat(doc.rootDoc().getField("point.lat"), nullValue());
assertThat(doc.rootDoc().getField("point.lon"), nullValue());
assertThat(doc.rootDoc().get("point.geohash"), equalTo(stringEncode(1.3, 1.2)));
assertThat(doc.rootDoc().getBinaryValue("point.geohash"), equalTo(new BytesRef(stringEncode(1.3, 1.2))));
assertThat(doc.rootDoc().get("point"), notNullValue());
}

@ -78,7 +78,7 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
IndexableField[] fields = doc.rootDoc().getFields("field");
assertEquals(2, fields.length);

assertEquals("1234", fields[0].stringValue());
assertEquals(new BytesRef("1234"), fields[0].binaryValue());
IndexableFieldType fieldType = fields[0].fieldType();
assertThat(fieldType.omitNorms(), equalTo(true));
assertFalse(fieldType.tokenized());

@ -163,7 +163,7 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {

fields = doc.rootDoc().getFields("field");
assertEquals(2, fields.length);
assertEquals("uri", fields[0].stringValue());
assertEquals(new BytesRef("uri"), fields[0].binaryValue());
}

public void testEnableStore() throws IOException {
@ -21,6 +21,7 @@ package org.elasticsearch.index.mapper;

import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;

@ -89,7 +90,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase {

f = doc.getField("object1.multi1.string");
assertThat(f.name(), equalTo("object1.multi1.string"));
assertThat(f.stringValue(), equalTo("2010-01-01"));
assertThat(f.binaryValue(), equalTo(new BytesRef("2010-01-01")));

assertThat(docMapper.mappers().getMapper("name"), notNullValue());
assertThat(docMapper.mappers().getMapper("name"), instanceOf(TextFieldMapper.class));
@ -0,0 +1,161 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.mapper;

import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.util.Arrays;

public class RootObjectMapperTests extends ESSingleNodeTestCase {

public void testNumericDetection() throws Exception {
String mapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.field("numeric_detection", false)
.endObject()
.endObject().string();
MapperService mapperService = createIndex("test").mapperService();
DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping, mapper.mappingSource().toString());

// update with a different explicit value
String mapping2 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.field("numeric_detection", true)
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping2, mapper.mappingSource().toString());

// update with an implicit value: no change
String mapping3 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping2, mapper.mappingSource().toString());
}

public void testDateDetection() throws Exception {
String mapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.field("date_detection", true)
.endObject()
.endObject().string();
MapperService mapperService = createIndex("test").mapperService();
DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping, mapper.mappingSource().toString());

// update with a different explicit value
String mapping2 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.field("date_detection", false)
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping2, mapper.mappingSource().toString());

// update with an implicit value: no change
String mapping3 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping2, mapper.mappingSource().toString());
}

public void testDateFormatters() throws Exception {
String mapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.field("dynamic_date_formats", Arrays.asList("YYYY-MM-dd"))
.endObject()
.endObject().string();
MapperService mapperService = createIndex("test").mapperService();
DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping, mapper.mappingSource().toString());

// no update if formatters are not set explicitly
String mapping2 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping, mapper.mappingSource().toString());

String mapping3 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.field("dynamic_date_formats", Arrays.asList())
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping3, mapper.mappingSource().toString());
}

public void testDynamicTemplates() throws Exception {
String mapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.startArray("dynamic_templates")
.startObject()
.startObject("my_template")
.field("match_mapping_type", "string")
.startObject("mapping")
.field("type", "keyword")
.endObject()
.endObject()
.endObject()
.endArray()
.endObject()
.endObject().string();
MapperService mapperService = createIndex("test").mapperService();
DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping, mapper.mappingSource().toString());

// no update if templates are not set explicitly
String mapping2 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping, mapper.mappingSource().toString());

String mapping3 = XContentFactory.jsonBuilder()
.startObject()
.startObject("type")
.field("dynamic_templates", Arrays.asList())
.endObject()
.endObject().string();
mapper = mapperService.merge("type", new CompressedXContent(mapping3), MergeReason.MAPPING_UPDATE, false);
assertEquals(mapping3, mapper.mappingSource().toString());
}
}
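The three tests in the new file above pin down the same merge rule from each angle: an update only changes a root-level setting it explicitly carries, omitting a setting leaves the existing value alone, and an explicit empty list is itself an explicit value that resets the setting. Schematically:

// merge {}                           -> mapping source unchanged
// merge {"numeric_detection": true}  -> explicit value replaces the old one
// merge {"dynamic_date_formats": []} -> explicit empty list resets the setting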
@ -354,49 +354,6 @@ public class StoreTests extends ESTestCase {
IOUtils.close(store);
}

public void testRenameFile() throws IOException {
final ShardId shardId = new ShardId("index", "_na_", 1);
DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
{
IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
int iters = scaledRandomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
}
CodecUtil.writeFooter(output);
output.close();
}
store.renameFile("foo.bar", "bar.foo");
assertThat(numNonExtraFiles(store), is(1));
final long lastChecksum;
try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
lastChecksum = CodecUtil.checksumEntireFile(input);
}

try {
store.directory().openInput("foo.bar", IOContext.DEFAULT);
fail("file was renamed");
} catch (FileNotFoundException | NoSuchFileException ex) {
// expected
}
{
IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
int iters = scaledRandomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
}
CodecUtil.writeFooter(output);
output.close();
}
store.renameFile("foo.bar", "bar.foo");
assertThat(numNonExtraFiles(store), is(1));
assertDeleteContent(store, directoryService);
IOUtils.close(store);
}

public void testCheckIntegrity() throws IOException {
Directory dir = newDirectory();
long luceneFileLength = 0;

@ -519,9 +476,6 @@ public class StoreTests extends ESTestCase {
public LuceneManagedDirectoryService(Random random, boolean preventDoubleWrite) {
super(new ShardId(INDEX_SETTINGS.getIndex(), 1), INDEX_SETTINGS);
dir = StoreTests.newDirectory(random);
if (dir instanceof MockDirectoryWrapper) {
((MockDirectoryWrapper) dir).setPreventDoubleWrite(preventDoubleWrite);
}
this.random = random;
}

@ -963,11 +917,8 @@ public class StoreTests extends ESTestCase {
}
writer.commit();
writer.close();
MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
if (leaf != null) {
leaf.setPreventDoubleWrite(false); // I do this on purpose
}
SegmentInfos segmentCommitInfos = store.readLastCommittedSegmentsInfo();
store.directory().deleteFile(segmentCommitInfos.getSegmentsFileName());
try (IndexOutput out = store.directory().createOutput(segmentCommitInfos.getSegmentsFileName(), IOContext.DEFAULT)) {
// empty file
}
@ -19,7 +19,7 @@

package org.elasticsearch.indices.analysis;

import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
import org.apache.lucene.analysis.StopwordAnalyzerBase;

public class DummyAnalyzer extends StopwordAnalyzerBase {

@ -21,6 +21,8 @@ package org.elasticsearch.search.profile.aggregation;
|
|||
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
|
||||
import org.elasticsearch.search.aggregations.bucket.sampler.DiversifiedOrdinalsSamplerAggregator;
|
||||
import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator;
|
||||
import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator;
|
||||
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregator;
|
||||
|
@ -37,6 +39,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
|||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.diversifiedSampler;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
|
||||
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
|
||||
|
@ -187,6 +190,129 @@ public class AggregationProfilerIT extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testMultiLevelProfileBreadthFirst() {
|
||||
SearchResponse response = client().prepareSearch("idx").setProfile(true)
|
||||
.addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L).subAggregation(terms("terms")
|
||||
.collectMode(SubAggCollectionMode.BREADTH_FIRST).field(TAG_FIELD).subAggregation(avg("avg").field(NUMBER_FIELD))))
|
||||
.get();
|
||||
assertSearchResponse(response);
|
||||
Map<String, ProfileShardResult> profileResults = response.getProfileResults();
|
||||
assertThat(profileResults, notNullValue());
|
||||
assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
|
||||
for (ProfileShardResult profileShardResult : profileResults.values()) {
|
||||
assertThat(profileShardResult, notNullValue());
|
||||
AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
|
||||
assertThat(aggProfileResults, notNullValue());
|
||||
List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
|
||||
assertThat(aggProfileResultsList, notNullValue());
|
||||
assertThat(aggProfileResultsList.size(), equalTo(1));
            ProfileResult histoAggResult = aggProfileResultsList.get(0);
            assertThat(histoAggResult, notNullValue());
            assertThat(histoAggResult.getQueryName(),
                    equalTo("org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator"));
            assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
            assertThat(histoAggResult.getTime(), greaterThan(0L));
            Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
            assertThat(histoBreakdown, notNullValue());
            assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
            assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
            assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
            assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
            assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
            assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
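            // The REDUCE phase runs later on the coordinating node, so shard-level profiles report it as 0.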
            assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
            assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
            assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));

            ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0);
            assertThat(termsAggResult, notNullValue());
            assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName()));
            assertThat(termsAggResult.getLuceneDescription(), equalTo("terms"));
            assertThat(termsAggResult.getTime(), greaterThan(0L));
            Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown();
            assertThat(termsBreakdown, notNullValue());
            assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
            assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
            assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
            assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
            assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
            assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
            assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
            assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
            assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1));
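
            // breadth_first defers collection of this subtree; the deferred replay is still
            // attributed to the child aggregator's COLLECT timing, which the assertions below rely on.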
            ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
            assertThat(avgAggResult, notNullValue());
            assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName()));
            assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
            assertThat(avgAggResult.getTime(), greaterThan(0L));
            Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
            assertThat(avgBreakdown, notNullValue());
            assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
            assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
            assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
            assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
            assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
            assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
            assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
            assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
            assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
        }
    }
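
    // Profiles a diversified sampler (shard_size 10, at most 2 docs per value of STRING_FIELD)
    // with a max sub-aggregation; the test expects the ordinals-based
    // DiversifiedOrdinalsSamplerAggregator implementation to be chosen.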
    public void testDiversifiedAggProfile() {
        SearchResponse response = client().prepareSearch("idx").setProfile(true)
                .addAggregation(diversifiedSampler("diversify").shardSize(10).field(STRING_FIELD).maxDocsPerValue(2)
                        .subAggregation(max("max").field(NUMBER_FIELD)))
                .get();
        assertSearchResponse(response);
        Map<String, ProfileShardResult> profileResults = response.getProfileResults();
        assertThat(profileResults, notNullValue());
        assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
        for (ProfileShardResult profileShardResult : profileResults.values()) {
            assertThat(profileShardResult, notNullValue());
            AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
            assertThat(aggProfileResults, notNullValue());
            List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
            assertThat(aggProfileResultsList, notNullValue());
            assertThat(aggProfileResultsList.size(), equalTo(1));
            ProfileResult diversifyAggResult = aggProfileResultsList.get(0);
            assertThat(diversifyAggResult, notNullValue());
            assertThat(diversifyAggResult.getQueryName(),
                    equalTo(DiversifiedOrdinalsSamplerAggregator.class.getName()));
            assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify"));
            assertThat(diversifyAggResult.getTime(), greaterThan(0L));
            Map<String, Long> diversifyBreakdown = diversifyAggResult.getTimeBreakdown();
            assertThat(diversifyBreakdown, notNullValue());
            assertThat(diversifyBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
            assertThat(diversifyBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
            assertThat(diversifyBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
            assertThat(diversifyBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
            assertThat(diversifyBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
            assertThat(diversifyBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
            assertThat(diversifyBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
            assertThat(diversifyBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
            assertThat(diversifyAggResult.getProfiledChildren().size(), equalTo(1));
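
            // The max sub-aggregation is replayed only over the sampled documents, but its
            // breakdown is recorded like any other aggregator's.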
            ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0);
            assertThat(maxAggResult, notNullValue());
            assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName()));
            assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
            assertThat(maxAggResult.getTime(), greaterThan(0L));
            Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown();
            assertThat(maxBreakdown, notNullValue());
            assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
            assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
            assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
            assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
            assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
            assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
            assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
            assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
            assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
        }
    }
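
    // Profiles a larger aggregation tree over the same histogram; the rest of this hunk is
    // not shown in this truncated diff view.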
    public void testComplexProfile() {
        SearchResponse response = client().prepareSearch("idx").setProfile(true)
                .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)
Some files were not shown because too many files have changed in this diff.