Fix deprecation logging for lenient booleans

This commit fixes an issue with deprecation logging for lenient
booleans. The underlying problem is that adding deprecation logging for
lenient booleans introduced a static deprecation logger on the Settings
class. However, the Settings class is initialized very early and, in
CLI tools, can be initialized before logging is configured, which leads
to status logger error messages. Additionally, the deprecation logging
for many of the settings provides no useful context (for example, in
the token filter factories it only reports the name of the setting and
gives no indication of which token filter factory the setting comes
from). This commit addresses both issues by changing the call sites to
push a deprecation logger through to the lenient boolean parsing.

Relates #22696
Jason Tedor, 2017-01-19 12:30:33 -05:00 (committed by GitHub)
parent 881993de3a
commit 9781b88a38
32 changed files with 98 additions and 53 deletions
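
For readers skimming the diff below, the shape of the change at a typical call site is sketched here. The class and method names in the sketch are hypothetical; what is taken from the commit is the four-argument getAsBooleanLenientForPreEs6Indices signature and the DeprecationLogger/ESLoggerFactory construction pattern.

    import org.elasticsearch.Version;
    import org.elasticsearch.common.logging.DeprecationLogger;
    import org.elasticsearch.common.logging.ESLoggerFactory;
    import org.elasticsearch.common.settings.Settings;

    // Hypothetical component standing in for, e.g., a token filter factory.
    public class LenientBooleanCallSiteSketch {

        // The logger now lives with the component reading the setting rather than with
        // Settings, so it is created only when that component loads (after logging is
        // configured) and its name identifies where the deprecated usage came from.
        private static final DeprecationLogger DEPRECATION_LOGGER =
                new DeprecationLogger(ESLoggerFactory.getLogger(LenientBooleanCallSiteSketch.class));

        boolean readIgnoreCase(final Settings settings, final Version indexCreatedVersion) {
            // Before this commit the call took three arguments and Settings used its own
            // static deprecation logger; now the caller pushes a logger through.
            return settings.getAsBooleanLenientForPreEs6Indices(
                    indexCreatedVersion, "ignore_case", false, DEPRECATION_LOGGER);
        }
    }

In the token filter factory hunks below, the logger that gets pushed through appears to be one the factories already have available from their base class, so those call sites only gain an argument; places without such a logger (for example the similarity providers and the tests) construct one locally next to the call.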

View File

@ -41,6 +41,8 @@ import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -1256,6 +1258,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
}
}
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(IndexMetaData.class));
/**
* Returns <code>true</code> iff the given settings indicate that the index
* associated with these settings allocates it's shards on a shared
@ -1266,7 +1270,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
public boolean isOnSharedFilesystem(Settings settings) {
// don't use the setting directly, not to trigger verbose deprecation logging
return settings.getAsBooleanLenientForPreEs6Indices(
this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings));
this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings), deprecationLogger);
}
/**
@ -1280,7 +1284,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
public boolean isIndexUsingShadowReplicas(Settings settings) {
// don't use the setting directly, not to trigger verbose deprecation logging
return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false);
return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false, deprecationLogger);
}
/**

View File

@ -76,7 +76,6 @@ import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
* An immutable settings implementation.
*/
public final class Settings implements ToXContent {
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(Settings.class));
public static final Settings EMPTY = new Builder().build();
private static final Pattern ARRAY_PATTERN = Pattern.compile("(.*)\\.\\d+$");
@ -327,7 +326,11 @@ public final class Settings implements ToXContent {
* @deprecated Only used to provide automatic upgrades for pre 6.0 indices.
*/
@Deprecated
public Boolean getAsBooleanLenientForPreEs6Indices(Version indexVersion, String setting, Boolean defaultValue) {
public Boolean getAsBooleanLenientForPreEs6Indices(
final Version indexVersion,
final String setting,
final Boolean defaultValue,
final DeprecationLogger deprecationLogger) {
if (indexVersion.before(Version.V_6_0_0_alpha1_UNRELEASED)) {
//Only emit a warning if the setting's value is not a proper boolean
final String value = get(setting, "false");

View File

@ -479,9 +479,9 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
// don't use the setting directly, not to trigger verbose deprecation logging
return (metaData.isOnSharedFilesystem(metaData.getSettings()) || metaData.isOnSharedFilesystem(this.settings))
&& (metaData.getSettings().getAsBooleanLenientForPreEs6Indices(
metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false) ||
metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger) ||
this.settings.getAsBooleanLenientForPreEs6Indices
(metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false));
(metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger));
}
protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);

View File

@ -22,6 +22,7 @@ import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;

View File

@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeUnit;
@ -165,7 +166,7 @@ public final class MergePolicyConfig {
double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING);
double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING);
this.mergesEnabled = indexSettings.getSettings()
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), INDEX_MERGE_ENABLED, true);
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), INDEX_MERGE_ENABLED, true, new DeprecationLogger(logger));
if (mergesEnabled == false) {
logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
}

View File

@ -38,7 +38,7 @@ public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory i
public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(
indexSettings.getIndexVersionCreated(), PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);
indexSettings.getIndexVersionCreated(), PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL, deprecationLogger);
}
@Override

View File

@ -56,6 +56,8 @@ import org.apache.lucene.analysis.tr.TurkishAnalyzer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -79,6 +81,8 @@ import static java.util.Collections.unmodifiableMap;
public class Analysis {
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(Analysis.class));
public static Version parseAnalysisVersion(Settings indexSettings, Settings settings, Logger logger) {
// check for explicit version on the specific analyzer component
String sVersion = settings.get("version");
@ -179,13 +183,14 @@ public class Analysis {
}
public static CharArraySet parseArticles(Environment env, org.elasticsearch.Version indexCreatedVersion, Settings settings) {
boolean articlesCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, "articles_case", false);
boolean articlesCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, "articles_case", false, deprecationLogger);
return parseWords(env, settings, "articles", null, null, articlesCase);
}
public static CharArraySet parseStopWords(Environment env, org.elasticsearch.Version indexCreatedVersion, Settings settings,
CharArraySet defaultStopWords) {
boolean stopwordsCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, "stopwords_case", false);
boolean stopwordsCase =
settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, "stopwords_case", false, deprecationLogger);
return parseStopWords(env, settings, defaultStopWords, stopwordsCase);
}
@ -214,7 +219,8 @@ public class Analysis {
if (wordList == null) {
return null;
}
boolean ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, settingsPrefix + "_case", false);
boolean ignoreCase =
settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, settingsPrefix + "_case", false, deprecationLogger);
return new CharArraySet(wordList, ignoreCase);
}

View File

@ -50,7 +50,8 @@ public final class CJKBigramFilterFactory extends AbstractTokenFilterFactory {
public CJKBigramFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams", false);
outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(
indexSettings.getIndexVersionCreated(), "output_unigrams", false, deprecationLogger);
final String[] asArray = settings.getAsArray("ignored_scripts");
Set<String> scripts = new HashSet<>(Arrays.asList("han", "hiragana", "katakana", "hangul"));
if (asArray != null) {

View File

@ -37,8 +37,8 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
public CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
this.queryMode = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "query_mode", false);
this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger);
this.queryMode = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "query_mode", false, deprecationLogger);
this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase);
if (this.words == null) {

View File

@ -46,8 +46,9 @@ public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory {
throw new IllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale));
}
dedup = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "dedup", true);
longestOnly = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "longest_only", false);
dedup = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "dedup", true, deprecationLogger);
longestOnly =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "longest_only", false, deprecationLogger);
}
@Override

View File

@ -35,7 +35,8 @@ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory
public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
boolean ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
boolean ignoreCase =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger);
Set<?> rules = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "keywords");
if (rules == null) {
throw new IllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured");
@ -47,4 +48,5 @@ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory
public TokenStream create(TokenStream tokenStream) {
return new SetKeywordMarkerFilter(tokenStream, keywordLookup);
}
}

View File

@ -37,7 +37,7 @@ public class LimitTokenCountFilterFactory extends AbstractTokenFilterFactory {
super(indexSettings, name, settings);
this.maxTokenCount = settings.getAsInt("max_token_count", DEFAULT_MAX_TOKEN_COUNT);
this.consumeAllTokens = settings.getAsBooleanLenientForPreEs6Indices(
indexSettings.getIndexVersionCreated(), "consume_all_tokens", DEFAULT_CONSUME_ALL_TOKENS);
indexSettings.getIndexVersionCreated(), "consume_all_tokens", DEFAULT_CONSUME_ALL_TOKENS, deprecationLogger);
}
@Override

View File

@ -56,7 +56,8 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
this.replacement = replacement.charAt(0);
}
this.skip = settings.getAsInt("skip", PathHierarchyTokenizer.DEFAULT_SKIP);
this.reverse = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "reverse", false);
this.reverse =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "reverse", false, deprecationLogger);
}
@Override
@ -66,4 +67,5 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
}
return new PathHierarchyTokenizer(bufferSize, delimiter, replacement, skip);
}
}

View File

@ -36,7 +36,8 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analy
super(indexSettings, name, settings);
final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
boolean lowercase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "lowercase", true);
boolean lowercase =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "lowercase", true, deprecationLogger);
CharArraySet stopWords = Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, defaultStopwords);
String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);

View File

@ -45,7 +45,8 @@ public class PatternCaptureGroupTokenFilterFactory extends AbstractTokenFilterFa
patterns[i] = Pattern.compile(regexes[i]);
}
preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), PRESERVE_ORIG_KEY, true);
preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(
indexSettings.getIndexVersionCreated(), PRESERVE_ORIG_KEY, true, deprecationLogger);
}
@Override

View File

@ -43,7 +43,7 @@ public class PatternReplaceTokenFilterFactory extends AbstractTokenFilterFactory
}
this.pattern = Regex.compile(sPattern, settings.get("flags"));
this.replacement = settings.get("replacement", "");
this.all = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "all", true);
this.all = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "all", true, deprecationLogger);
}
@Override

View File

@ -33,8 +33,8 @@ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
super(indexSettings, name, settings);
Integer maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
Integer minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
Boolean outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams", true);
Boolean outputUnigramsIfNoShingles = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams_if_no_shingles", false);
Boolean outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams", true, deprecationLogger);
Boolean outputUnigramsIfNoShingles = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams_if_no_shingles", false, deprecationLogger);
String tokenSeparator = settings.get("token_separator", ShingleFilter.DEFAULT_TOKEN_SEPARATOR);
String fillerToken = settings.get("filler_token", ShingleFilter.DEFAULT_FILLER_TOKEN);
factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator, fillerToken);

View File

@ -40,8 +40,10 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
this.removeTrailing = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "remove_trailing", true);
this.ignoreCase =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger);
this.removeTrailing = settings.getAsBooleanLenientForPreEs6Indices(
indexSettings.getIndexVersionCreated(), "remove_trailing", true, deprecationLogger);
this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
if (settings.get("enable_position_increments") != null) {
throw new IllegalArgumentException("enable_position_increments is not supported anymore. Please fix your analysis chain");

View File

@ -61,8 +61,10 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
throw new IllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured");
}
this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
boolean expand = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "expand", true);
this.ignoreCase =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger);
boolean expand =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "expand", true, deprecationLogger);
String tokenizerName = settings.get("tokenizer", "whitespace");
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory =

View File

@ -32,7 +32,7 @@ public class UniqueTokenFilterFactory extends AbstractTokenFilterFactory {
public UniqueTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.onlyOnSamePosition = settings.getAsBooleanLenientForPreEs6Indices(
indexSettings.getIndexVersionCreated(), "only_on_same_position", false);
indexSettings.getIndexVersionCreated(), "only_on_same_position", false, deprecationLogger);
}
@Override

View File

@ -101,7 +101,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory
}
public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
if (settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), key, defaultValue)) {
if (settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), key, defaultValue, deprecationLogger)) {
return flag;
}
return 0;

View File

@ -45,7 +45,7 @@ public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTok
minSubwordSize = settings.getAsInt("min_subword_size", CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE);
maxSubwordSize = settings.getAsInt("max_subword_size", CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE);
onlyLongestMatch = settings
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "only_longest_match", false);
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "only_longest_match", false, deprecationLogger);
wordList = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "word_list");
if (wordList == null) {
throw new IllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly");

View File

@ -22,6 +22,8 @@ package org.elasticsearch.index.similarity;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.Version;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
/**
@ -43,8 +45,9 @@ public class BM25SimilarityProvider extends AbstractSimilarityProvider {
super(name);
float k1 = settings.getAsFloat("k1", 1.2f);
float b = settings.getAsFloat("b", 0.75f);
boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices(
Version.indexCreated(indexSettings), "discount_overlaps", true);
final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(getClass()));
boolean discountOverlaps =
settings.getAsBooleanLenientForPreEs6Indices(Version.indexCreated(indexSettings), "discount_overlaps", true, deprecationLogger);
this.similarity = new BM25Similarity(k1, b);
this.similarity.setDiscountOverlaps(discountOverlaps);
@ -57,4 +60,5 @@ public class BM25SimilarityProvider extends AbstractSimilarityProvider {
public Similarity get() {
return similarity;
}
}

View File

@ -21,6 +21,8 @@ package org.elasticsearch.index.similarity;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.elasticsearch.Version;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
/**
@ -39,7 +41,7 @@ public class ClassicSimilarityProvider extends AbstractSimilarityProvider {
public ClassicSimilarityProvider(String name, Settings settings, Settings indexSettings) {
super(name);
boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices(
Version.indexCreated(indexSettings), "discount_overlaps", true);
Version.indexCreated(indexSettings), "discount_overlaps", true, new DeprecationLogger(ESLoggerFactory.getLogger(getClass())));
this.similarity.setDiscountOverlaps(discountOverlaps);
}
@ -50,4 +52,5 @@ public class ClassicSimilarityProvider extends AbstractSimilarityProvider {
public ClassicSimilarity get() {
return similarity;
}
}

View File

@ -26,6 +26,8 @@ import org.apache.lucene.search.similarities.IndependenceSaturated;
import org.apache.lucene.search.similarities.IndependenceStandardized;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.Version;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import java.util.HashMap;
@ -59,7 +61,7 @@ public class DFISimilarityProvider extends AbstractSimilarityProvider {
public DFISimilarityProvider(String name, Settings settings, Settings indexSettings) {
super(name);
boolean discountOverlaps = settings.getAsBooleanLenientForPreEs6Indices(
Version.indexCreated(indexSettings), "discount_overlaps", true);
Version.indexCreated(indexSettings), "discount_overlaps", true, new DeprecationLogger(ESLoggerFactory.getLogger(getClass())));
Independence measure = parseIndependence(settings);
this.similarity = new DFISimilarity(measure);
this.similarity.setDiscountOverlaps(discountOverlaps);

View File

@ -21,6 +21,8 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.Version;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.loader.YamlSettingsLoader;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
@ -163,9 +165,11 @@ public class SettingsTests extends ESTestCase {
.put("foo", falsy)
.put("bar", truthy).build();
assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, "foo", null));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, "bar", null));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, "baz", true));
final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger("testLenientBooleanForPreEs6Index"));
assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, "foo", null, deprecationLogger));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, "bar", null, deprecationLogger));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.V_5_0_0, "baz", true, deprecationLogger));
List<String> expectedDeprecationWarnings = new ArrayList<>();
if (Booleans.isBoolean(falsy) == false) {
@ -191,10 +195,12 @@ public class SettingsTests extends ESTestCase {
.put("foo", falsy)
.put("bar", truthy).build();
final DeprecationLogger deprecationLogger =
new DeprecationLogger(ESLoggerFactory.getLogger("testInvalidLenientBooleanForCurrentIndexVersion"));
expectThrows(IllegalArgumentException.class,
() -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "foo", null));
() -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "foo", null, deprecationLogger));
expectThrows(IllegalArgumentException.class,
() -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "bar", null));
() -> settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "bar", null, deprecationLogger));
}
@SuppressWarnings("deprecation") //#getAsBooleanLenientForPreEs6Indices is the test subject
@ -203,9 +209,11 @@ public class SettingsTests extends ESTestCase {
.put("foo", "false")
.put("bar", "true").build();
assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "foo", null));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "bar", null));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "baz", true));
final DeprecationLogger deprecationLogger =
new DeprecationLogger(ESLoggerFactory.getLogger("testValidLenientBooleanForCurrentIndexVersion"));
assertFalse(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "foo", null, deprecationLogger));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "bar", null, deprecationLogger));
assertTrue(settings.getAsBooleanLenientForPreEs6Indices(Version.CURRENT, "baz", true, deprecationLogger));
}
public void testMultLevelGetPrefix() {

View File

@ -131,7 +131,7 @@ public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory {
}
}
Boolean caseLevel = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "caseLevel", null);
Boolean caseLevel = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "caseLevel", null, deprecationLogger);
if (caseLevel != null) {
rbc.setCaseLevel(caseLevel);
}
@ -147,7 +147,7 @@ public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory {
}
}
Boolean numeric = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "numeric", null);
Boolean numeric = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "numeric", null, deprecationLogger);
if (numeric != null) {
rbc.setNumericCollation(numeric);
}
@ -158,7 +158,7 @@ public class IcuCollationTokenFilterFactory extends AbstractTokenFilterFactory {
}
Boolean hiraganaQuaternaryMode = settings
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "hiraganaQuaternaryMode", null);
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "hiraganaQuaternaryMode", null, deprecationLogger);
if (hiraganaQuaternaryMode != null) {
rbc.setHiraganaQuaternary(hiraganaQuaternaryMode);
}

View File

@ -45,9 +45,9 @@ public class JapaneseStopTokenFilterFactory extends AbstractTokenFilterFactory{
public JapaneseStopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger);
this.removeTrailing = settings
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "remove_trailing", true);
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "remove_trailing", true, deprecationLogger);
this.stopWords = Analysis.parseWords(env, settings, "stopwords", JapaneseAnalyzer.getDefaultStopSet(), NAMED_STOP_WORDS, ignoreCase);
}

View File

@ -34,9 +34,9 @@ public class KuromojiIterationMarkCharFilterFactory extends AbstractCharFilterFa
public KuromojiIterationMarkCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name);
normalizeKanji = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "normalize_kanji",
JapaneseIterationMarkCharFilter.NORMALIZE_KANJI_DEFAULT);
JapaneseIterationMarkCharFilter.NORMALIZE_KANJI_DEFAULT, deprecationLogger);
normalizeKana = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "normalize_kana",
JapaneseIterationMarkCharFilter.NORMALIZE_KANA_DEFAULT);
JapaneseIterationMarkCharFilter.NORMALIZE_KANA_DEFAULT, deprecationLogger);
}
@Override

View File

@ -31,7 +31,8 @@ public class KuromojiReadingFormFilterFactory extends AbstractTokenFilterFactory
public KuromojiReadingFormFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
useRomaji = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "use_romaji", false);
useRomaji =
settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "use_romaji", false, deprecationLogger);
}
@Override

View File

@ -49,7 +49,7 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory {
mode = getMode(settings);
userDictionary = getUserDictionary(env, settings);
discartPunctuation = settings
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "discard_punctuation", true);
.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "discard_punctuation", true, deprecationLogger);
nBestCost = settings.getAsInt(NBEST_COST, -1);
nBestExamples = settings.get(NBEST_EXAMPLES);
}

View File

@ -60,7 +60,7 @@ public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory {
this.nametype = null;
this.ruletype = null;
this.maxcodelength = 0;
this.replace = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "replace", true);
this.replace = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "replace", true, deprecationLogger);
// weird, encoder is null at last step in SimplePhoneticAnalysisTests, so we set it to metaphone as default
String encodername = settings.get("encoder", "metaphone");
if ("metaphone".equalsIgnoreCase(encodername)) {