Make boolean conversion strict (#22200)
This PR removes all leniency in the conversion of Strings to booleans: "true" is converted to the boolean value `true`, "false" is converted to the boolean value `false`. Everything else raises an error.
commit aece89d6a1
parent ee5f8c4522
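To see the contract change in one place: before this PR, strings like "on", "yes", "1", or even typos coerced to booleans; now only the exact strings "true" and "false" are accepted. A minimal, self-contained sketch of the new behavior (the real implementation is org.elasticsearch.common.Booleans in the diff below; the demo class itself is hypothetical, but the error message matches the one introduced here):

    public class StrictBooleanDemo {
        static boolean parseBoolean(String value) {
            if ("false".equals(value)) {
                return false;
            }
            if ("true".equals(value)) {
                return true;
            }
            // Anything else -- "on", "off", "yes", "1", or a typo -- is now an error.
            throw new IllegalArgumentException(
                "Failed to parse value [" + value + "] as only [true] or [false] are allowed.");
        }

        public static void main(String[] args) {
            System.out.println(parseBoolean("true"));   // true
            System.out.println(parseBoolean("false"));  // false
            System.out.println(parseBoolean("on"));     // throws IllegalArgumentException
        }
    }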
@@ -255,7 +255,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]decider[/\\]AllocationDeciders.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]service[/\\]InternalClusterService.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]Base64.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]Booleans.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]Numbers.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]fs[/\\]FsBlobStore.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]url[/\\]URLBlobStore.java" checks="LineLength" />
@@ -562,8 +561,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]TransportActionFilterChainTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]WaitActiveShardCountIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]broadcast[/\\]node[/\\]TransportBroadcastByNodeActionTests.java" checks="LineLength" />
@@ -652,7 +649,6 @@
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]settings[/\\]ClusterSettingsIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsIT.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]structure[/\\]RoutingIteratorTests.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]BooleansTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreContainerTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreTests.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]breaker[/\\]MemoryCircuitBreakerTests.java" checks="LineLength" />
@@ -42,7 +42,7 @@ import static org.elasticsearch.common.Strings.EMPTY_ARRAY;
 import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
 import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 
 /**
  * Create snapshot request
@@ -366,14 +366,14 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
                     throw new IllegalArgumentException("malformed indices section, should be an array of strings");
                 }
             } else if (name.equals("partial")) {
-                partial(lenientNodeBooleanValue(entry.getValue()));
+                partial(nodeBooleanValue(entry.getValue(), "partial"));
             } else if (name.equals("settings")) {
                 if (!(entry.getValue() instanceof Map)) {
                     throw new IllegalArgumentException("malformed settings section, should indices an inner object");
                 }
                 settings((Map<String, Object>) entry.getValue());
             } else if (name.equals("include_global_state")) {
-                includeGlobalState = lenientNodeBooleanValue(entry.getValue());
+                includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state");
             }
         }
         indicesOptions(IndicesOptions.fromMap((Map<String, Object>) source, IndicesOptions.lenientExpandOpen()));
@@ -40,7 +40,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
 import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 
 /**
  * Restore snapshot request
@@ -481,16 +481,16 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
                     throw new IllegalArgumentException("malformed indices section, should be an array of strings");
                 }
             } else if (name.equals("partial")) {
-                partial(lenientNodeBooleanValue(entry.getValue()));
+                partial(nodeBooleanValue(entry.getValue(), "partial"));
             } else if (name.equals("settings")) {
                 if (!(entry.getValue() instanceof Map)) {
                     throw new IllegalArgumentException("malformed settings section");
                 }
                 settings((Map<String, Object>) entry.getValue());
             } else if (name.equals("include_global_state")) {
-                includeGlobalState = lenientNodeBooleanValue(entry.getValue());
+                includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state");
             } else if (name.equals("include_aliases")) {
-                includeAliases = lenientNodeBooleanValue(entry.getValue());
+                includeAliases = nodeBooleanValue(entry.getValue(), "include_aliases");
             } else if (name.equals("rename_pattern")) {
                 if (entry.getValue() instanceof String) {
                     renamePattern((String) entry.getValue());
@@ -23,7 +23,7 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
@@ -65,7 +65,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
     }
 
     @Override
-    protected boolean shouldExecuteReplication(Settings settings) {
+    protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) {
         return true;
     }
 }
@@ -24,7 +24,7 @@ import org.elasticsearch.action.support.replication.BasicReplicationRequest;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
@@ -68,7 +68,7 @@ public class TransportShardRefreshAction
     }
 
     @Override
-    protected boolean shouldExecuteReplication(Settings settings) {
+    protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) {
         return true;
     }
 }
@@ -379,7 +379,8 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
                 } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
                     versionType = VersionType.fromString(parser.text());
                 } else if ("_source".equals(currentFieldName)) {
-                    if (parser.isBooleanValue()) {
+                    // check lenient to avoid interpreting the value as string but parse strict in order to provoke an error early on.
+                    if (parser.isBooleanValueLenient()) {
                         fetchSourceContext = new FetchSourceContext(parser.booleanValue(), fetchSourceContext.includes(),
                             fetchSourceContext.excludes());
                     } else if (token == XContentParser.Token.VALUE_STRING) {
@@ -71,7 +71,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
         if (request.request().realtime && // if the realtime flag is set
                 request.request().preference() == null && // the preference flag is not already set
                 indexMeta != null && // and we have the index
-                IndexMetaData.isIndexUsingShadowReplicas(indexMeta.getSettings())) { // and the index uses shadow replicas
+                indexMeta.isIndexUsingShadowReplicas()) { // and the index uses shadow replicas
             // set the preference for the request to use "_primary" automatically
             request.request().preference(Preference.PRIMARY.type());
         }
@@ -115,7 +115,7 @@ public final class AutoCreateIndex {
         boolean autoCreateIndex;
         List<Tuple<String, Boolean>> expressions = new ArrayList<>();
         try {
-            autoCreateIndex = Booleans.parseBooleanExact(value);
+            autoCreateIndex = Booleans.parseBoolean(value);
         } catch (IllegalArgumentException ex) {
             try {
                 String[] patterns = Strings.commaDelimitedListToStringArray(value);
@@ -26,7 +26,7 @@ import org.elasticsearch.rest.RestRequest;
 import java.io.IOException;
 import java.util.Map;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
 
 /**
@@ -195,8 +195,8 @@ public class IndicesOptions {
 
         //note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use)
         return fromOptions(
-            lenientNodeBooleanValue(ignoreUnavailableString, defaultSettings.ignoreUnavailable()),
-            lenientNodeBooleanValue(allowNoIndicesString, defaultSettings.allowNoIndices()),
+            nodeBooleanValue(ignoreUnavailableString, "ignore_unavailable", defaultSettings.ignoreUnavailable()),
+            nodeBooleanValue(allowNoIndicesString, "allow_no_indices", defaultSettings.allowNoIndices()),
             expandWildcardsOpen,
             expandWildcardsClosed,
             defaultSettings.allowAliasesToMultipleIndices(),
@@ -279,7 +279,7 @@ public class IndicesOptions {
                 ", allow_no_indices=" + allowNoIndices() +
                 ", expand_wildcards_open=" + expandWildcardsOpen() +
                 ", expand_wildcards_closed=" + expandWildcardsClosed() +
-                ", allow_alisases_to_multiple_indices=" + allowAliasesToMultipleIndices() +
+                ", allow_aliases_to_multiple_indices=" + allowAliasesToMultipleIndices() +
                 ", forbid_closed_indices=" + forbidClosedIndices() +
                 ']';
     }
@@ -315,7 +315,7 @@ public abstract class TransportReplicationAction<
             } else {
                 setPhase(replicationTask, "primary");
                 final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex());
-                final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData.getSettings());
+                final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData);
                 final ActionListener<Response> listener = createResponseListener(primaryShardReference);
                 createReplicatedOperation(request,
                     ActionListener.wrap(result -> result.respond(listener), listener::onFailure),
@@ -910,8 +910,8 @@ public abstract class TransportReplicationAction<
      * Indicated whether this operation should be replicated to shadow replicas or not. If this method returns true the replication phase
      * will be skipped. For example writes such as index and delete don't need to be replicated on shadow replicas but refresh and flush do.
      */
-    protected boolean shouldExecuteReplication(Settings settings) {
-        return IndexMetaData.isIndexUsingShadowReplicas(settings) == false;
+    protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) {
+        return indexMetaData.isIndexUsingShadowReplicas() == false;
     }
 
     class ShardReference implements Releasable {
@@ -377,14 +377,13 @@ public class InternalClusterInfoService extends AbstractComponent
         MetaData meta = state.getMetaData();
         for (ShardStats s : stats) {
             IndexMetaData indexMeta = meta.index(s.getShardRouting().index());
-            Settings indexSettings = indexMeta == null ? null : indexMeta.getSettings();
             newShardRoutingToDataPath.put(s.getShardRouting(), s.getDataPath());
             long size = s.getStats().getStore().sizeInBytes();
             String sid = ClusterInfo.shardIdentifierFromRouting(s.getShardRouting());
             if (logger.isTraceEnabled()) {
                 logger.trace("shard: {} size: {}", sid, size);
             }
-            if (indexSettings != null && IndexMetaData.isIndexUsingShadowReplicas(indexSettings)) {
+            if (indexMeta != null && indexMeta.isIndexUsingShadowReplicas()) {
                 // Shards on a shared filesystem should be considered of size 0
                 if (logger.isTraceEnabled()) {
                     logger.trace("shard: {} is using shadow replicas and will be treated as size 0", sid);
@@ -33,7 +33,7 @@ final class AutoExpandReplicas {
     public static final Setting<AutoExpandReplicas> SETTING = new Setting<>(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "false", (value) -> {
         final int min;
         final int max;
-        if (Booleans.parseBoolean(value, true) == false) {
+        if (Booleans.isFalse(value)) {
            return new AutoExpandReplicas(0, 0, false);
        }
        final int dash = value.indexOf('-');
@@ -1263,9 +1263,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
      * is the returned value from
      * {@link #isIndexUsingShadowReplicas(org.elasticsearch.common.settings.Settings)}.
      */
-    public static boolean isOnSharedFilesystem(Settings settings) {
+    public boolean isOnSharedFilesystem(Settings settings) {
         // don't use the setting directly, not to trigger verbose deprecation logging
-        return settings.getAsBoolean(SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings));
+        return settings.getAsBooleanLenientForPreEs6Indices(
+            this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings));
     }
 
     /**
@@ -1273,9 +1274,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
      * with these settings uses shadow replicas. Otherwise <code>false</code>. The default
      * setting for this is <code>false</code>.
      */
-    public static boolean isIndexUsingShadowReplicas(Settings settings) {
+    public boolean isIndexUsingShadowReplicas() {
+        return isIndexUsingShadowReplicas(this.settings);
+    }
+
+    public boolean isIndexUsingShadowReplicas(Settings settings) {
         // don't use the setting directly, not to trigger verbose deprecation logging
-        return settings.getAsBoolean(SETTING_SHADOW_REPLICAS, false);
+        return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false);
     }
 
     /**
@@ -34,7 +34,7 @@ import org.elasticsearch.index.mapper.DocumentMapper;
 import java.io.IOException;
 import java.util.Map;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 
 /**
  * Mapping configuration for a type.
@@ -95,10 +95,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
         initMappers((Map<String, Object>) mappingMap.get(this.type));
     }
 
-    public MappingMetaData(Map<String, Object> mapping) throws IOException {
-        this(mapping.keySet().iterator().next(), mapping);
-    }
-
     public MappingMetaData(String type, Map<String, Object> mapping) throws IOException {
         this.type = type;
         XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().map(mapping);
@@ -127,7 +123,12 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
                 String fieldName = entry.getKey();
                 Object fieldNode = entry.getValue();
                 if (fieldName.equals("required")) {
-                    required = lenientNodeBooleanValue(fieldNode);
+                    try {
+                        required = nodeBooleanValue(fieldNode);
+                    } catch (IllegalArgumentException ex) {
+                        throw new IllegalArgumentException("Failed to create mapping for type [" + this.type() + "]. " +
+                            "Illegal value in field [_routing.required].", ex);
+                    }
                 }
             }
             this.routing = new Routing(required);
@@ -428,7 +428,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     .put(indexMetaData, false)
                     .build();
 
-                String maybeShadowIndicator = IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) ? "s" : "";
+                String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : "";
                 logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}",
                     request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(),
                     indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
@@ -140,7 +140,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
         }
 
         if (indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1) &&
-            IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) == false && // see #20650
+            indexMetaData.isIndexUsingShadowReplicas() == false && // see #20650
             shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false &&
             RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) == false &&
             inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false)
@@ -540,7 +540,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         if (failedShard.primary()) {
             // promote active replica to primary if active replica exists (only the case for shadow replicas)
             ShardRouting activeReplica = activeReplica(failedShard.shardId());
-            assert activeReplica == null || IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) :
+            assert activeReplica == null || indexMetaData.isIndexUsingShadowReplicas() :
                 "initializing primary [" + failedShard + "] with active replicas [" + activeReplica + "] only expected when " +
                 "using shadow replicas";
             if (activeReplica == null) {
@@ -599,7 +599,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica;
         ShardRouting primarySwappedCandidate = promoteActiveReplicaShardToPrimary(activeReplica);
         routingChangesObserver.replicaPromoted(activeReplica);
-        if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings())) {
+        if (indexMetaData.isIndexUsingShadowReplicas()) {
             ShardRouting initializedShard = reinitShadowPrimary(primarySwappedCandidate);
             routingChangesObserver.startedPrimaryReinitialized(primarySwappedCandidate, initializedShard);
         }
@@ -19,14 +19,128 @@
 
 package org.elasticsearch.common;
 
-public class Booleans {
+public final class Booleans {
+    private Booleans() {
+        throw new AssertionError("No instances intended");
+    }
+
+    /**
+     * Parses a char[] representation of a boolean value to <code>boolean</code>.
+     *
+     * @return <code>true</code> iff the sequence of chars is "true", <code>false</code> iff the sequence of chars is "false" or the
+     * provided default value iff either text is <code>null</code> or length == 0.
+     * @throws IllegalArgumentException if the string cannot be parsed to boolean.
+     */
+    public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) {
+        if (text == null || length == 0) {
+            return defaultValue;
+        } else {
+            return parseBoolean(new String(text, offset, length));
+        }
+    }
+
+    /**
+     * returns true iff the sequence of chars is one of "true","false".
+     *
+     * @param text sequence to check
+     * @param offset offset to start
+     * @param length length to check
+     */
+    public static boolean isBoolean(char[] text, int offset, int length) {
+        if (text == null || length == 0) {
+            return false;
+        }
+        return isBoolean(new String(text, offset, length));
+    }
+
+    public static boolean isBoolean(String value) {
+        return isFalse(value) || isTrue(value);
+    }
+
+    /**
+     * Parses a string representation of a boolean value to <code>boolean</code>.
+     *
+     * @return <code>true</code> iff the provided value is "true". <code>false</code> iff the provided value is "false".
+     * @throws IllegalArgumentException if the string cannot be parsed to boolean.
+     */
+    public static boolean parseBoolean(String value) {
+        if (isFalse(value)) {
+            return false;
+        }
+        if (isTrue(value)) {
+            return true;
+        }
+        throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed.");
+    }
+
+    /**
+     *
+     * @param value text to parse.
+     * @param defaultValue The default value to return if the provided value is <code>null</code>.
+     * @return see {@link #parseBoolean(String)}
+     */
+    public static boolean parseBoolean(String value, boolean defaultValue) {
+        if (Strings.hasText(value)) {
+            return parseBoolean(value);
+        }
+        return defaultValue;
+    }
+
+    public static Boolean parseBoolean(String value, Boolean defaultValue) {
+        if (Strings.hasText(value)) {
+            return parseBoolean(value);
+        }
+        return defaultValue;
+    }
+
     /**
      * Returns <code>false</code> if text is in <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>; else, true
+     *
+     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, Boolean)} instead.
      */
-    public static boolean parseBoolean(char[] text, int offset, int length, boolean defaultValue) {
-        // TODO: the leniency here is very dangerous: a simple typo will be misinterpreted and the user won't know.
-        // We should remove it and cutover to https://github.com/rmuir/booleanparser
+    @Deprecated
+    public static Boolean parseBooleanLenient(String value, Boolean defaultValue) {
+        if (value == null) { // only for the null case we do that here!
+            return defaultValue;
+        }
+        return parseBooleanLenient(value, false);
+    }
+
+    /**
+     * Returns <code>true</code> iff the value is neither of the following:
+     * <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>
+     * otherwise <code>false</code>
+     *
+     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(String, boolean)} instead.
+     */
+    @Deprecated
+    public static boolean parseBooleanLenient(String value, boolean defaultValue) {
+        if (value == null) {
+            return defaultValue;
+        }
+        return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no"));
+    }
+
+    /**
+     * @return <code>true</code> iff the value is <tt>false</tt>, otherwise <code>false</code>.
+     */
+    public static boolean isFalse(String value) {
+        return "false".equals(value);
+    }
+
+    /**
+     * @return <code>true</code> iff the value is <tt>true</tt>, otherwise <code>false</code>
+     */
+    public static boolean isTrue(String value) {
+        return "true".equals(value);
+    }
+
+    /**
+     * Returns <code>false</code> if text is in <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>; else, true
+     *
+     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #parseBoolean(char[], int, int, boolean)} instead
+     */
+    @Deprecated
+    public static boolean parseBooleanLenient(char[] text, int offset, int length, boolean defaultValue) {
         if (text == null || length == 0) {
             return defaultValue;
         }
@@ -40,7 +154,8 @@ public class Booleans {
             return !(text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f');
         }
         if (length == 5) {
-            return !(text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && text[offset + 4] == 'e');
+            return !(text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' &&
+                text[offset + 4] == 'e');
         }
         return true;
     }
@@ -51,8 +166,11 @@
      * @param text sequence to check
      * @param offset offset to start
      * @param length length to check
+     *
+     * @deprecated Only kept to provide automatic upgrades for pre 6.0 indices. Use {@link #isBoolean(char[], int, int)} instead.
      */
-    public static boolean isBoolean(char[] text, int offset, int length) {
+    @Deprecated
+    public static boolean isBooleanLenient(char[] text, int offset, int length) {
         if (text == null || length == 0) {
             return false;
         }
@@ -64,69 +182,16 @@ public class Booleans {
         }
         if (length == 3) {
             return (text[offset] == 'o' && text[offset + 1] == 'f' && text[offset + 2] == 'f') ||
-                (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's');
+                    (text[offset] == 'y' && text[offset + 1] == 'e' && text[offset + 2] == 's');
         }
         if (length == 4) {
             return (text[offset] == 't' && text[offset + 1] == 'r' && text[offset + 2] == 'u' && text[offset + 3] == 'e');
         }
         if (length == 5) {
-            return (text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' && text[offset + 4] == 'e');
+            return (text[offset] == 'f' && text[offset + 1] == 'a' && text[offset + 2] == 'l' && text[offset + 3] == 's' &&
+                    text[offset + 4] == 'e');
         }
         return false;
     }
 
-    /***
-     *
-     * @return true/false
-     * throws exception if string cannot be parsed to boolean
-     */
-    public static Boolean parseBooleanExact(String value) {
-        boolean isFalse = isExplicitFalse(value);
-        if (isFalse) {
-            return false;
-        }
-        boolean isTrue = isExplicitTrue(value);
-        if (isTrue) {
-            return true;
-        }
-
-        throw new IllegalArgumentException("Failed to parse value [" + value + "] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]");
-    }
-
-    public static Boolean parseBoolean(String value, Boolean defaultValue) {
-        if (value == null) { // only for the null case we do that here!
-            return defaultValue;
-        }
-        return parseBoolean(value, false);
-    }
-    /**
-     * Returns <code>true</code> iff the value is neither of the following:
-     * <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>
-     * otherwise <code>false</code>
-     */
-    public static boolean parseBoolean(String value, boolean defaultValue) {
-        if (value == null) {
-            return defaultValue;
-        }
-        return !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no"));
-    }
-
-    /**
-     * Returns <code>true</code> iff the value is either of the following:
-     * <tt>false</tt>, <tt>0</tt>, <tt>off</tt>, <tt>no</tt>
-     * otherwise <code>false</code>
-     */
-    public static boolean isExplicitFalse(String value) {
-        return value != null && (value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no"));
-    }
-
-    /**
-     * Returns <code>true</code> iff the value is either of the following:
-     * <tt>true</tt>, <tt>1</tt>, <tt>on</tt>, <tt>yes</tt>
-     * otherwise <code>false</code>
-     */
-    public static boolean isExplicitTrue(String value) {
-        return value != null && (value.equals("true") || value.equals("1") || value.equals("on") || value.equals("yes"));
-    }
-
 }
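A short test sketch contrasting the two paths the class now exposes (method names are the ones added above; the assertions are illustrative, not from the PR, and need to run with java -ea):

    import org.elasticsearch.common.Booleans;

    public class LenientVsStrictDemo {
        public static void main(String[] args) {
            // Strict path: only the exact strings "true" and "false" are booleans.
            assert Booleans.parseBoolean("true");
            assert Booleans.isBoolean("false");
            assert Booleans.isBoolean("off") == false;

            // Deprecated lenient path, kept only to upgrade pre-6.0 indices: anything
            // that is not "false"/"0"/"off"/"no" parses as true -- including typos.
            assert Booleans.parseBooleanLenient("off", true) == false;
            assert Booleans.parseBooleanLenient("fales", false); // the dangerous case the old TODO warned about
        }
    }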
core/src/main/java/org/elasticsearch/common/TriFunction.java (new file, 42 lines)
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+package org.elasticsearch.common;
+
+/**
+ * Represents a function that accepts three arguments and produces a result.
+ *
+ * @param <S> the type of the first argument
+ * @param <T> the type of the second argument
+ * @param <U> the type of the third argument
+ * @param <R> the return type
+ */
+@FunctionalInterface
+public interface TriFunction<S, T, U, R> {
+    /**
+     * Applies this function to the given arguments.
+     *
+     * @param s the first function argument
+     * @param t the second function argument
+     * @param u the third function argument
+     * @return the result
+     */
+    R apply(S s, T t, U u);
+}
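For context, a tiny usage sketch of the new interface (the similarity-provider wiring that motivates it appears in the IndexModule hunks further down; this example is illustrative only):

    import org.elasticsearch.common.TriFunction;

    public class TriFunctionDemo {
        public static void main(String[] args) {
            // Three inputs, one result -- the shape addSimilarity(String, TriFunction) now expects.
            TriFunction<String, Integer, Integer, String> describe =
                (name, shards, replicas) -> name + ": " + shards + " shards, " + replicas + " replicas";
            System.out.println(describe.apply("my-index", 5, 1));
        }
    }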
@@ -668,15 +668,15 @@ public class Setting<T> extends ToXContentToBytes {
     }
 
     public static Setting<Boolean> boolSetting(String key, boolean defaultValue, Property... properties) {
-        return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, properties);
+        return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBoolean, properties);
     }
 
     public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, Property... properties) {
-        return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties);
+        return new Setting<>(key, fallbackSetting, Booleans::parseBoolean, properties);
     }
 
     public static Setting<Boolean> boolSetting(String key, Function<Settings, String> defaultValueFn, Property... properties) {
-        return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties);
+        return new Setting<>(key, defaultValueFn, Booleans::parseBoolean, properties);
     }
 
     public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, Property... properties) {
@@ -26,6 +26,8 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.loader.SettingsLoader;
 import org.elasticsearch.common.settings.loader.SettingsLoaderFactory;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -74,6 +76,7 @@ import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
 * An immutable settings implementation.
 */
 public final class Settings implements ToXContent {
+    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(Settings.class));
 
     public static final Settings EMPTY = new Builder().build();
     private static final Pattern ARRAY_PATTERN = Pattern.compile("(.*)\\.\\d+$");
@@ -313,6 +316,32 @@ public final class Settings implements ToXContent {
         return Booleans.parseBoolean(get(setting), defaultValue);
     }
 
+    // TODO #22298: Delete this method and update call sites to <code>#getAsBoolean(String, Boolean)</code>.
+    /**
+     * Returns the setting value (as boolean) associated with the setting key. If it does not exist, returns the default value provided.
+     * If the index was created on Elasticsearch below 6.0, booleans will be parsed leniently otherwise they are parsed strictly.
+     *
+     * See {@link Booleans#isBooleanLenient(char[], int, int)} for the definition of a "lenient boolean"
+     * and {@link Booleans#isBoolean(char[], int, int)} for the definition of a "strict boolean".
+     *
+     * @deprecated Only used to provide automatic upgrades for pre 6.0 indices.
+     */
+    @Deprecated
+    public Boolean getAsBooleanLenientForPreEs6Indices(Version indexVersion, String setting, Boolean defaultValue) {
+        if (indexVersion.before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            //Only emit a warning if the setting's value is not a proper boolean
+            final String value = get(setting, "false");
+            if (Booleans.isBoolean(value) == false) {
+                @SuppressWarnings("deprecation")
+                boolean convertedValue = Booleans.parseBooleanLenient(get(setting), defaultValue);
+                deprecationLogger.deprecated("The value [{}] of setting [{}] is not coerced into boolean anymore. Please change " +
+                    "this value to [{}].", value, setting, String.valueOf(convertedValue));
+                return convertedValue;
+            }
+        }
+        return getAsBoolean(setting, defaultValue);
+    }
+
     /**
      * Returns the setting value (as time) associated with the setting key. If it does not exists,
      * returns the default value provided.
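The decision logic of getAsBooleanLenientForPreEs6Indices, restated as a standalone sketch (the Version and DeprecationLogger wiring is omitted and the helper below is hypothetical):

    public class UpgradePathDemo {
        static boolean getAsBooleanLenientForPre6(boolean indexCreatedBefore6, String value, boolean defaultValue) {
            if (value == null) {
                return defaultValue;
            }
            boolean isStrictBoolean = "true".equals(value) || "false".equals(value);
            if (indexCreatedBefore6 && isStrictBoolean == false) {
                // pre-6.0 index with a sloppy value: warn, then fall back to the old leniency
                boolean converted = !(value.equals("false") || value.equals("0") || value.equals("off") || value.equals("no"));
                System.err.println("deprecation: value [" + value + "] is not coerced into boolean anymore, change it to [" + converted + "]");
                return converted;
            }
            // everything else takes the strict path and may throw
            if ("true".equals(value)) {
                return true;
            }
            if ("false".equals(value)) {
                return false;
            }
            throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed.");
        }

        public static void main(String[] args) {
            System.out.println(getAsBooleanLenientForPre6(true, "on", false));  // true, plus a deprecation warning
            System.out.println(getAsBooleanLenientForPre6(false, "on", false)); // throws IllegalArgumentException
        }
    }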
@@ -52,7 +52,7 @@ public interface XContent {
     */
    static boolean isStrictDuplicateDetectionEnabled() {
        // Don't allow duplicate keys in JSON content by default but let the user opt out
-        return Booleans.parseBooleanExact(System.getProperty("es.xcontent.strict_duplicate_detection", "true"));
+        return Booleans.parseBoolean(System.getProperty("es.xcontent.strict_duplicate_detection", "true"));
    }
 
    /**
@@ -201,16 +201,32 @@ public interface XContentParser extends Releasable {
 
     double doubleValue() throws IOException;
 
+    /**
+     * @return true iff the current value is either boolean (<code>true</code> or <code>false</code>) or one of "false", "true".
+     */
+    boolean isBooleanValue() throws IOException;
+
+    boolean booleanValue() throws IOException;
+
+    // TODO #22298: Remove this method and replace all call sites with #isBooleanValue()
     /**
      * returns true if the current value is boolean in nature.
      * values that are considered booleans:
      * - boolean value (true/false)
      * - numeric integers (=0 is considered as false, !=0 is true)
      * - one of the following strings: "true","false","on","off","yes","no","1","0"
+     *
+     * @deprecated Just present for providing backwards compatibility. Use {@link #isBooleanValue()} instead.
      */
-    boolean isBooleanValue() throws IOException;
+    @Deprecated
+    boolean isBooleanValueLenient() throws IOException;
 
-    boolean booleanValue() throws IOException;
+    // TODO #22298: Remove this method and replace all call sites with #booleanValue()
+    /**
+     * @deprecated Just present for providing backwards compatibility. Use {@link #booleanValue()} instead.
+     */
+    @Deprecated
+    boolean booleanValueLenient() throws IOException;
 
     /**
      * Reads a plain binary value that was written via one of the following methods:
@@ -77,9 +77,6 @@ public abstract class AbstractXContentParser implements XContentParser {
         switch (currentToken()) {
             case VALUE_BOOLEAN:
                 return true;
-            case VALUE_NUMBER:
-                NumberType numberType = numberType();
-                return numberType == NumberType.LONG || numberType == NumberType.INT;
             case VALUE_STRING:
                 return Booleans.isBoolean(textCharacters(), textOffset(), textLength());
             default:
@@ -89,11 +86,37 @@ public abstract class AbstractXContentParser implements XContentParser {
 
     @Override
     public boolean booleanValue() throws IOException {
         Token token = currentToken();
+        if (token == Token.VALUE_STRING) {
+            return Booleans.parseBoolean(textCharacters(), textOffset(), textLength(), false /* irrelevant */);
+        }
+        return doBooleanValue();
+    }
+
+    @Override
+    @Deprecated
+    public boolean isBooleanValueLenient() throws IOException {
+        switch (currentToken()) {
+            case VALUE_BOOLEAN:
+                return true;
+            case VALUE_NUMBER:
+                NumberType numberType = numberType();
+                return numberType == NumberType.LONG || numberType == NumberType.INT;
+            case VALUE_STRING:
+                return Booleans.isBooleanLenient(textCharacters(), textOffset(), textLength());
+            default:
+                return false;
+        }
+    }
+
+    @Override
+    @Deprecated
+    public boolean booleanValueLenient() throws IOException {
+        Token token = currentToken();
         if (token == Token.VALUE_NUMBER) {
             return intValue() != 0;
         } else if (token == Token.VALUE_STRING) {
-            return Booleans.parseBoolean(textCharacters(), textOffset(), textLength(), false /* irrelevant */);
+            return Booleans.parseBooleanLenient(textCharacters(), textOffset(), textLength(), false /* irrelevant */);
         }
         return doBooleanValue();
     }
@@ -24,6 +24,7 @@ import org.apache.lucene.util.automaton.Automaton;
 import org.apache.lucene.util.automaton.CharacterRunAutomaton;
 import org.apache.lucene.util.automaton.Operations;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.regex.Regex;
@@ -412,39 +413,29 @@ public class XContentMapValues {
         return Long.parseLong(node.toString());
     }
 
-    /**
-     * This method is very lenient, use {@link #nodeBooleanValue} instead.
-     */
-    public static boolean lenientNodeBooleanValue(Object node, boolean defaultValue) {
-        if (node == null) {
-            return defaultValue;
+    public static boolean nodeBooleanValue(Object node, String name, boolean defaultValue) {
+        try {
+            return nodeBooleanValue(node, defaultValue);
+        } catch (IllegalArgumentException ex) {
+            throw new IllegalArgumentException("Could not convert [" + name + "] to boolean", ex);
         }
-        return lenientNodeBooleanValue(node);
     }
 
-    /**
-     * This method is very lenient, use {@link #nodeBooleanValue} instead.
-     */
-    public static boolean lenientNodeBooleanValue(Object node) {
-        if (node instanceof Boolean) {
-            return (Boolean) node;
+    public static boolean nodeBooleanValue(Object node, boolean defaultValue) {
+        String nodeValue = node == null ? null : node.toString();
+        return Booleans.parseBoolean(nodeValue, defaultValue);
+    }
+
+    public static boolean nodeBooleanValue(Object node, String name) {
+        try {
+            return nodeBooleanValue(node);
+        } catch (IllegalArgumentException ex) {
+            throw new IllegalArgumentException("Could not convert [" + name + "] to boolean", ex);
         }
-        if (node instanceof Number) {
-            return ((Number) node).intValue() != 0;
-        }
-        String value = node.toString();
-        return !(value.equals("false") || value.equals("0") || value.equals("off"));
     }
 
     public static boolean nodeBooleanValue(Object node) {
-        switch (node.toString()) {
-            case "true":
-                return true;
-            case "false":
-                return false;
-            default:
-                throw new IllegalArgumentException("Can't parse boolean value [" + node + "], expected [true] or [false]");
-        }
+        return Booleans.parseBoolean(node.toString());
     }
 
     public static TimeValue nodeTimeValue(Object node, TimeValue defaultValue) {
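The name-carrying overloads above exist so REST-layer failures point at the offending key. A hypothetical illustration of the resulting exception chain (parse and parseNamed stand in for Booleans.parseBoolean and nodeBooleanValue(Object, String)):

    public class NamedBooleanDemo {
        static boolean parse(String value) {
            if ("true".equals(value)) return true;
            if ("false".equals(value)) return false;
            throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed.");
        }

        static boolean parseNamed(String value, String name) {
            try {
                return parse(value);
            } catch (IllegalArgumentException ex) {
                // attach the field name so the user knows which key held the bad value
                throw new IllegalArgumentException("Could not convert [" + name + "] to boolean", ex);
            }
        }

        public static void main(String[] args) {
            parseNamed("yes", "include_global_state");
            // IllegalArgumentException: Could not convert [include_global_state] to boolean
            // Caused by: ... Failed to parse value [yes] as only [true] or [false] are allowed.
        }
    }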
@@ -477,9 +477,11 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
     */
    private boolean recoverOnAnyNode(IndexMetaData metaData) {
        // don't use the setting directly, not to trigger verbose deprecation logging
-        return (IndexMetaData.isOnSharedFilesystem(metaData.getSettings()) || IndexMetaData.isOnSharedFilesystem(this.settings))
-            && (metaData.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false) ||
-                this.settings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false));
+        return (metaData.isOnSharedFilesystem(metaData.getSettings()) || metaData.isOnSharedFilesystem(this.settings))
+            && (metaData.getSettings().getAsBooleanLenientForPreEs6Indices(
+                    metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false) ||
+                this.settings.getAsBooleanLenientForPreEs6Indices
+                    (metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false));
    }
 
    protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
@@ -23,6 +23,7 @@ import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.TriFunction;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -69,7 +70,7 @@ import java.util.function.Function;
 * IndexModule represents the central extension point for index level custom implementations like:
 * <ul>
 *      <li>{@link SimilarityProvider} - New {@link SimilarityProvider} implementations can be registered through
-*          {@link #addSimilarity(String, BiFunction)}while existing Providers can be referenced through Settings under the
+*          {@link #addSimilarity(String, TriFunction)}while existing Providers can be referenced through Settings under the
 *          {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the
 *          {@link BM25SimilarityProvider}, the configuration <tt>"index.similarity.my_similarity.type : "BM25"</tt> can be used.</li>
 *      <li>{@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link #addIndexStore(String, Function)}</li>
@@ -112,7 +113,7 @@ public final class IndexModule {
     final SetOnce<EngineFactory> engineFactory = new SetOnce<>();
     private SetOnce<IndexSearcherWrapperFactory> indexSearcherWrapper = new SetOnce<>();
     private final Set<IndexEventListener> indexEventListeners = new HashSet<>();
-    private final Map<String, BiFunction<String, Settings, SimilarityProvider>> similarities = new HashMap<>();
+    private final Map<String, TriFunction<String, Settings, Settings, SimilarityProvider>> similarities = new HashMap<>();
     private final Map<String, Function<IndexSettings, IndexStore>> storeTypes = new HashMap<>();
     private final SetOnce<BiFunction<IndexSettings, IndicesQueryCache, QueryCache>> forceQueryCacheProvider = new SetOnce<>();
     private final List<SearchOperationListener> searchOperationListeners = new ArrayList<>();
@@ -256,7 +257,7 @@ public final class IndexModule {
     * @param name Name of the SimilarityProvider
     * @param similarity SimilarityProvider to register
     */
-    public void addSimilarity(String name, BiFunction<String, Settings, SimilarityProvider> similarity) {
+    public void addSimilarity(String name, TriFunction<String, Settings, Settings, SimilarityProvider> similarity) {
        ensureNotFrozen();
        if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) {
            throw new IllegalArgumentException("similarity for name: [" + name + " is already registered");
@@ -343,8 +343,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
 
         logger.debug("creating shard_id {}", shardId);
         // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
-        final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
-            (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
+        final boolean canDeleteShardContent = this.indexSettings.isOnSharedFilesystem() == false ||
+            (primary && this.indexSettings.isOnSharedFilesystem());
         final Engine.Warmer engineWarmer = (searcher) -> {
             IndexShard shard = getShardOrNull(shardId.getId());
             if (shard != null) {
@@ -353,7 +353,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         };
         store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock,
             new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
-        if (useShadowEngine(primary, indexSettings)) {
+        if (useShadowEngine(primary, this.indexSettings)) {
             indexShard = new ShadowIndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService,
                 indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer,
                 searchOperationListeners);
@@ -381,8 +381,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         }
     }
 
-    static boolean useShadowEngine(boolean primary, Settings indexSettings) {
-        return primary == false && IndexMetaData.isIndexUsingShadowReplicas(indexSettings);
+    static boolean useShadowEngine(boolean primary, IndexSettings indexSettings) {
+        return primary == false && indexSettings.isShadowReplicaIndex();
     }
 
     @Override
@@ -229,7 +229,7 @@ public final class IndexSettings {
         nodeName = Node.NODE_NAME_SETTING.get(settings);
         this.indexMetaData = indexMetaData;
         numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
-        isShadowReplicaIndex = IndexMetaData.isIndexUsingShadowReplicas(settings);
+        isShadowReplicaIndex = indexMetaData.isIndexUsingShadowReplicas(settings);
 
         this.defaultField = DEFAULT_FIELD_SETTING.get(settings);
         this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings);
@@ -327,16 +327,7 @@ public final class IndexSettings {
     * filesystem.
     */
    public boolean isOnSharedFilesystem() {
-        return IndexMetaData.isOnSharedFilesystem(getSettings());
-    }
-
-    /**
-     * Returns <code>true</code> iff the given settings indicate that the index associated
-     * with these settings uses shadow replicas. Otherwise <code>false</code>. The default
-     * setting for this is <code>false</code>.
-     */
-    public boolean isIndexUsingShadowReplicas() {
-        return IndexMetaData.isOnSharedFilesystem(getSettings());
+        return indexMetaData.isOnSharedFilesystem(getSettings());
    }
 
    /**
@@ -164,7 +164,8 @@ public final class MergePolicyConfig {
         ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING);
         double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING);
         double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING);
-        this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true);
+        this.mergesEnabled = indexSettings.getSettings()
+            .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), INDEX_MERGE_ENABLED, true);
         if (mergesEnabled == false) {
             logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
         }
@@ -37,7 +37,8 @@ public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory i
 
     public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
-        preserveOriginal = settings.getAsBoolean(PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);
+        preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(
+            indexSettings.getIndexVersionCreated(), PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);
     }
 
     @Override
@@ -178,12 +178,15 @@ public class Analysis {
         return parseWords(env, settings, "common_words", defaultCommonWords, NAMED_STOP_WORDS, ignoreCase);
     }
 
-    public static CharArraySet parseArticles(Environment env, Settings settings) {
-        return parseWords(env, settings, "articles", null, null, settings.getAsBoolean("articles_case", false));
+    public static CharArraySet parseArticles(Environment env, org.elasticsearch.Version indexCreatedVersion, Settings settings) {
+        boolean articlesCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, "articles_case", false);
+        return parseWords(env, settings, "articles", null, null, articlesCase);
     }
 
-    public static CharArraySet parseStopWords(Environment env, Settings settings, CharArraySet defaultStopWords) {
-        return parseStopWords(env, settings, defaultStopWords, settings.getAsBoolean("stopwords_case", false));
+    public static CharArraySet parseStopWords(Environment env, org.elasticsearch.Version indexCreatedVersion, Settings settings,
+                                              CharArraySet defaultStopWords) {
+        boolean stopwordsCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, "stopwords_case", false);
+        return parseStopWords(env, settings, defaultStopWords, stopwordsCase);
     }
 
     public static CharArraySet parseStopWords(Environment env, Settings settings, CharArraySet defaultStopWords, boolean ignoreCase) {
@@ -205,12 +208,14 @@ public class Analysis {
         return setWords;
     }
 
-    public static CharArraySet getWordSet(Environment env, Settings settings, String settingsPrefix) {
+    public static CharArraySet getWordSet(Environment env, org.elasticsearch.Version indexCreatedVersion, Settings settings,
+                                          String settingsPrefix) {
         List<String> wordList = getWordList(env, settings, settingsPrefix);
         if (wordList == null) {
             return null;
         }
-        return new CharArraySet(wordList, settings.getAsBoolean(settingsPrefix + "_case", false));
+        boolean ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexCreatedVersion, settingsPrefix + "_case", false);
+        return new CharArraySet(wordList, ignoreCase);
     }
 
     /**
@@ -31,8 +31,10 @@ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arabic
 
     public ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        arabicAnalyzer = new ArabicAnalyzer(Analysis.parseStopWords(env, settings, ArabicAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        arabicAnalyzer = new ArabicAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ArabicAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         arabicAnalyzer.setVersion(version);
     }
 
@@ -31,8 +31,10 @@ public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arme
 
     public ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new ArmenianAnalyzer(Analysis.parseStopWords(env, settings, ArmenianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new ArmenianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ArmenianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 
@@ -31,8 +31,10 @@ public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider<Basque
 
     public BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new BasqueAnalyzer(Analysis.parseStopWords(env, settings, BasqueAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new BasqueAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BasqueAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 
@@ -31,8 +31,10 @@ public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bra
 
     public BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new BrazilianAnalyzer(Analysis.parseStopWords(env, settings, BrazilianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new BrazilianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BrazilianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 
@@ -31,8 +31,10 @@ public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bul
 
     public BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new BulgarianAnalyzer(Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new BulgarianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, BulgarianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 
@ -50,7 +50,7 @@ public final class CJKBigramFilterFactory extends AbstractTokenFilterFactory {
|
||||
|
||||
public CJKBigramFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
outputUnigrams = settings.getAsBoolean("output_unigrams", false);
|
||||
outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams", false);
|
||||
final String[] asArray = settings.getAsArray("ignored_scripts");
|
||||
Set<String> scripts = new HashSet<>(Arrays.asList("han", "hiragana", "katakana", "hangul"));
|
||||
if (asArray != null) {
|
||||
|
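The CJKBigram hunk shows the factory-side idiom that repeats through every analysis component below: thread indexSettings.getIndexVersionCreated() into the boolean getter instead of calling settings.getAsBoolean directly. A sketch of what that looks like in a custom filter factory; the factory class and the "some_flag" setting are invented for illustration, the getter and base class are the ones used in this diff.

    import org.apache.lucene.analysis.TokenStream;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.env.Environment;
    import org.elasticsearch.index.IndexSettings;
    import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;

    // Hypothetical factory; "some_flag" is an invented setting name.
    public class MyTokenFilterFactory extends AbstractTokenFilterFactory {
        private final boolean someFlag;

        public MyTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
            super(indexSettings, name, settings);
            // before: settings.getAsBoolean("some_flag", false), lenient for every index
            someFlag = settings.getAsBooleanLenientForPreEs6Indices(
                indexSettings.getIndexVersionCreated(), "some_flag", false);
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            return tokenStream; // a real factory would wrap the stream here
        }
    }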
@@ -31,8 +31,10 @@ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<Catal
 
     public CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new CatalanAnalyzer(Analysis.parseStopWords(env, settings, CatalanAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new CatalanAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, CatalanAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,7 +31,8 @@ public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider<CJKAnalyz
 
     public CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        CharArraySet stopWords = Analysis.parseStopWords(env, settings, CJKAnalyzer.getDefaultStopSet());
+        CharArraySet stopWords = Analysis.parseStopWords(
+            env, indexSettings.getIndexVersionCreated(), settings, CJKAnalyzer.getDefaultStopSet());
 
         analyzer = new CJKAnalyzer(stopWords);
         analyzer.setVersion(version);

@@ -37,8 +37,8 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
 
     public CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        this.ignoreCase = settings.getAsBoolean("ignore_case", false);
-        this.queryMode = settings.getAsBoolean("query_mode", false);
+        this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
+        this.queryMode = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "query_mode", false);
         this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase);
 
         if (this.words == null) {

@@ -31,8 +31,10 @@ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider<CzechAn
 
     public CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new CzechAnalyzer(Analysis.parseStopWords(env, settings, CzechAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new CzechAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, CzechAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Danish
 
     public DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new DanishAnalyzer(Analysis.parseStopWords(env, settings, DanishAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new DanishAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, DanishAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider<DutchAn
 
     public DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new DutchAnalyzer(Analysis.parseStopWords(env, settings, DutchAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new DutchAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, DutchAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -32,7 +32,7 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implem
 
     public ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        this.articles = Analysis.parseArticles(env, settings);
+        this.articles = Analysis.parseArticles(env, indexSettings.getIndexVersionCreated(), settings);
     }
 
     @Override

@@ -31,8 +31,10 @@ public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Engli
 
     public EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new EnglishAnalyzer(Analysis.parseStopWords(env, settings, EnglishAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new EnglishAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, EnglishAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -45,7 +45,7 @@ public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<A
 
         char separator = FingerprintTokenFilterFactory.parseSeparator(settings);
         int maxOutputSize = settings.getAsInt(MAX_OUTPUT_SIZE.getPreferredName(),DEFAULT_MAX_OUTPUT_SIZE);
-        CharArraySet stopWords = Analysis.parseStopWords(env, settings, DEFAULT_STOP_WORDS);
+        CharArraySet stopWords = Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, DEFAULT_STOP_WORDS);
 
         this.analyzer = new FingerprintAnalyzer(stopWords, separator, maxOutputSize);
     }

@@ -31,8 +31,10 @@ public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Finni
 
     public FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new FinnishAnalyzer(Analysis.parseStopWords(env, settings, FinnishAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new FinnishAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, FinnishAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider<French
 
     public FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new FrenchAnalyzer(Analysis.parseStopWords(env, settings, FrenchAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new FrenchAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, FrenchAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Gali
 
     public GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new GalicianAnalyzer(Analysis.parseStopWords(env, settings, GalicianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new GalicianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, GalicianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider<German
 
     public GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new GermanAnalyzer(Analysis.parseStopWords(env, settings, GermanAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new GermanAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, GermanAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -30,7 +30,8 @@ public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider<GreekAn
 
     public GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new GreekAnalyzer(Analysis.parseStopWords(env, settings, GreekAnalyzer.getDefaultStopSet()));
+        analyzer = new GreekAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, GreekAnalyzer.getDefaultStopSet()));
         analyzer.setVersion(version);
     }
 
@@ -38,4 +39,4 @@ public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider<GreekAn
     public GreekAnalyzer get() {
         return this.analyzer;
     }
 }
@@ -31,8 +31,10 @@ public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider<HindiAn
 
     public HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new HindiAnalyzer(Analysis.parseStopWords(env, settings, HindiAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new HindiAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, HindiAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Hun
 
     public HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new HungarianAnalyzer(Analysis.parseStopWords(env, settings, HungarianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new HungarianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, HungarianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -33,7 +33,7 @@ public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory {
     private final boolean dedup;
     private final boolean longestOnly;
 
-    public HunspellTokenFilterFactory(IndexSettings indexSettings, String name, Settings settings, HunspellService hunspellService) {
+    public HunspellTokenFilterFactory(IndexSettings indexSettings, String name, Settings settings, HunspellService hunspellService) {
         super(indexSettings, name, settings);
 
         String locale = settings.get("locale", settings.get("language", settings.get("lang", null)));
@@ -46,8 +46,8 @@ public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory {
             throw new IllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale));
         }
 
-        dedup = settings.getAsBoolean("dedup", true);
-        longestOnly = settings.getAsBoolean("longest_only", false);
+        dedup = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "dedup", true);
+        longestOnly = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "longest_only", false);
     }
 
     @Override

@@ -31,8 +31,10 @@ public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider<In
 
     public IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new IndonesianAnalyzer(Analysis.parseStopWords(env, settings, IndonesianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new IndonesianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, IndonesianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -34,8 +34,10 @@ public class IrishAnalyzerProvider extends AbstractIndexAnalyzerProvider<IrishAn
 
     public IrishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new IrishAnalyzer(Analysis.parseStopWords(env, settings, IrishAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new IrishAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, IrishAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Itali
 
     public ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new ItalianAnalyzer(Analysis.parseStopWords(env, settings, ItalianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new ItalianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ItalianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -50,7 +50,7 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
     private static final String KEEP_WORDS_KEY = "keep_words";
     private static final String KEEP_WORDS_PATH_KEY = KEEP_WORDS_KEY + "_path";
     private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc
 
 
     // unsupported ancient option
     private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
@@ -68,7 +68,7 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
         if (settings.get(ENABLE_POS_INC_KEY) != null) {
             throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
         }
-        this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY);
+        this.keepWords = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, KEEP_WORDS_KEY);
     }
 
     @Override

@@ -35,8 +35,8 @@ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory
     public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
 
-        boolean ignoreCase = settings.getAsBoolean("ignore_case", false);
-        Set<?> rules = Analysis.getWordSet(env, settings, "keywords");
+        boolean ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
+        Set<?> rules = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "keywords");
         if (rules == null) {
             throw new IllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured");
         }

@@ -31,8 +31,10 @@ public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Latvi
 
     public LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new LatvianAnalyzer(Analysis.parseStopWords(env, settings, LatvianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new LatvianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, LatvianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -36,11 +36,12 @@ public class LimitTokenCountFilterFactory extends AbstractTokenFilterFactory {
     public LimitTokenCountFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         this.maxTokenCount = settings.getAsInt("max_token_count", DEFAULT_MAX_TOKEN_COUNT);
-        this.consumeAllTokens = settings.getAsBoolean("consume_all_tokens", DEFAULT_CONSUME_ALL_TOKENS);
+        this.consumeAllTokens = settings.getAsBooleanLenientForPreEs6Indices(
+            indexSettings.getIndexVersionCreated(), "consume_all_tokens", DEFAULT_CONSUME_ALL_TOKENS);
     }
 
     @Override
     public TokenStream create(TokenStream tokenStream) {
         return new LimitTokenCountFilter(tokenStream, maxTokenCount, consumeAllTokens);
     }
 }

@@ -34,8 +34,10 @@ public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Li
 
     public LithuanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new LithuanianAnalyzer(Analysis.parseStopWords(env, settings, LithuanianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new LithuanianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, LithuanianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Nor
 
     public NorwegianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new NorwegianAnalyzer(Analysis.parseStopWords(env, settings, NorwegianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new NorwegianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, NorwegianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -56,7 +56,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
             this.replacement = replacement.charAt(0);
         }
         this.skip = settings.getAsInt("skip", PathHierarchyTokenizer.DEFAULT_SKIP);
-        this.reverse = settings.getAsBoolean("reverse", false);
+        this.reverse = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "reverse", false);
     }
 
     @Override
@@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.analysis.core.StopAnalyzer;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
@@ -38,8 +36,8 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analy
         super(indexSettings, name, settings);
 
         final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
-        boolean lowercase = settings.getAsBoolean("lowercase", true);
-        CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
+        boolean lowercase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "lowercase", true);
+        CharArraySet stopWords = Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, defaultStopwords);
 
         String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
         if (sPattern == null) {

@@ -45,7 +45,7 @@ public class PatternCaptureGroupTokenFilterFactory extends AbstractTokenFilterFa
             patterns[i] = Pattern.compile(regexes[i]);
         }
 
-        preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, true);
+        preserveOriginal = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), PRESERVE_ORIG_KEY, true);
     }
 
     @Override

@@ -43,7 +43,7 @@ public class PatternReplaceTokenFilterFactory extends AbstractTokenFilterFactory
         }
         this.pattern = Regex.compile(sPattern, settings.get("flags"));
         this.replacement = settings.get("replacement", "");
-        this.all = settings.getAsBoolean("all", true);
+        this.all = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "all", true);
     }
 
     @Override

@@ -30,7 +30,8 @@ public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Persi
 
     public PersianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new PersianAnalyzer(Analysis.parseStopWords(env, settings, PersianAnalyzer.getDefaultStopSet()));
+        analyzer = new PersianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, PersianAnalyzer.getDefaultStopSet()));
         analyzer.setVersion(version);
     }
 
@@ -38,4 +39,4 @@ public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Persi
     public PersianAnalyzer get() {
         return this.analyzer;
     }
 }

@@ -31,8 +31,10 @@ public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Po
 
     public PortugueseAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new PortugueseAnalyzer(Analysis.parseStopWords(env, settings, PortugueseAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new PortugueseAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, PortugueseAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Roma
 
     public RomanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new RomanianAnalyzer(Analysis.parseStopWords(env, settings, RomanianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new RomanianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, RomanianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Russi
 
     public RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new RussianAnalyzer(Analysis.parseStopWords(env, settings, RussianAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new RussianAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, RussianAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -33,8 +33,8 @@ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
         super(indexSettings, name, settings);
         Integer maxShingleSize = settings.getAsInt("max_shingle_size", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
         Integer minShingleSize = settings.getAsInt("min_shingle_size", ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
-        Boolean outputUnigrams = settings.getAsBoolean("output_unigrams", true);
-        Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false);
+        Boolean outputUnigrams = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams", true);
+        Boolean outputUnigramsIfNoShingles = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "output_unigrams_if_no_shingles", false);
         String tokenSeparator = settings.get("token_separator", ShingleFilter.DEFAULT_TOKEN_SEPARATOR);
         String fillerToken = settings.get("filler_token", ShingleFilter.DEFAULT_FILLER_TOKEN);
         factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator, fillerToken);

@@ -65,7 +65,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider<Snow
 
         String language = settings.get("language", settings.get("name", "English"));
         CharArraySet defaultStopwords = DEFAULT_LANGUAGE_STOPWORDS.getOrDefault(language, CharArraySet.EMPTY_SET);
-        CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
+        CharArraySet stopWords = Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, defaultStopwords);
 
         analyzer = new SnowballAnalyzer(language, stopWords);
         analyzer.setVersion(version);

@@ -34,8 +34,10 @@ public class SoraniAnalyzerProvider extends AbstractIndexAnalyzerProvider<Sorani
 
     public SoraniAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new SoraniAnalyzer(Analysis.parseStopWords(env, settings, SoraniAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new SoraniAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, SoraniAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,8 +31,10 @@ public class SpanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Spani
 
     public SpanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new SpanishAnalyzer(Analysis.parseStopWords(env, settings, SpanishAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new SpanishAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, SpanishAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -34,7 +34,7 @@ public class StandardAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stan
     public StandardAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
-        CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
+        CharArraySet stopWords = Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, defaultStopwords);
         int maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
         standardAnalyzer = new StandardAnalyzer(stopWords);
         standardAnalyzer.setVersion(version);

@@ -33,7 +33,7 @@ public class StandardHtmlStripAnalyzerProvider extends AbstractIndexAnalyzerProv
     public StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
-        CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
+        CharArraySet stopWords = Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, defaultStopwords);
         analyzer = new StandardHtmlStripAnalyzer(stopWords);
         analyzer.setVersion(version);
     }

@@ -31,7 +31,8 @@ public class StopAnalyzerProvider extends AbstractIndexAnalyzerProvider<StopAnal
 
     public StopAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        CharArraySet stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+        CharArraySet stopWords = Analysis.parseStopWords(
+            env, indexSettings.getIndexVersionCreated(), settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
         this.stopAnalyzer = new StopAnalyzer(stopWords);
         this.stopAnalyzer.setVersion(version);
     }
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -41,8 +40,8 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
 
     public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        this.ignoreCase = settings.getAsBoolean("ignore_case", false);
-        this.removeTrailing = settings.getAsBoolean("remove_trailing", true);
+        this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
+        this.removeTrailing = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "remove_trailing", true);
         this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
         if (settings.get("enable_position_increments") != null) {
             throw new IllegalArgumentException("enable_position_increments is not supported anymore. Please fix your analysis chain");

@@ -31,8 +31,10 @@ public class SwedishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Swedi
 
     public SwedishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new SwedishAnalyzer(Analysis.parseStopWords(env, settings, SwedishAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new SwedishAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, SwedishAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -61,8 +61,8 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
             throw new IllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured");
         }
 
-        this.ignoreCase = settings.getAsBoolean("ignore_case", false);
-        boolean expand = settings.getAsBoolean("expand", true);
+        this.ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false);
+        boolean expand = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "expand", true);
 
         String tokenizerName = settings.get("tokenizer", "whitespace");
         AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory =

@@ -30,7 +30,8 @@ public class ThaiAnalyzerProvider extends AbstractIndexAnalyzerProvider<ThaiAnal
 
     public ThaiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new ThaiAnalyzer(Analysis.parseStopWords(env, settings, ThaiAnalyzer.getDefaultStopSet()));
+        analyzer = new ThaiAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, ThaiAnalyzer.getDefaultStopSet()));
         analyzer.setVersion(version);
     }
 
@@ -38,4 +39,4 @@ public class ThaiAnalyzerProvider extends AbstractIndexAnalyzerProvider<ThaiAnal
     public ThaiAnalyzer get() {
         return this.analyzer;
     }
 }

@@ -31,8 +31,10 @@ public class TurkishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Turki
 
     public TurkishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        analyzer = new TurkishAnalyzer(Analysis.parseStopWords(env, settings, TurkishAnalyzer.getDefaultStopSet()),
-            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));
+        analyzer = new TurkishAnalyzer(
+            Analysis.parseStopWords(env, indexSettings.getIndexVersionCreated(), settings, TurkishAnalyzer.getDefaultStopSet()),
+            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
+        );
         analyzer.setVersion(version);
     }
 

@@ -31,7 +31,8 @@ public class UniqueTokenFilterFactory extends AbstractTokenFilterFactory {
 
     public UniqueTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
-        this.onlyOnSamePosition = settings.getAsBoolean("only_on_same_position", false);
+        this.onlyOnSamePosition = settings.getAsBooleanLenientForPreEs6Indices(
+            indexSettings.getIndexVersionCreated(), "only_on_same_position", false);
     }
 
     @Override

@@ -87,7 +87,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory
         // If set, causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil"
         flags |= getFlag(STEM_ENGLISH_POSSESSIVE, settings, "stem_english_possessive", true);
         // If not null is the set of tokens to protect from being delimited
-        Set<?> protectedWords = Analysis.getWordSet(env, settings, "protected_words");
+        Set<?> protectedWords = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "protected_words");
         this.protoWords = protectedWords == null ? null : CharArraySet.copy(protectedWords);
         this.flags = flags;
     }
@@ -101,7 +101,7 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory
     }
 
     public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
-        if (settings.getAsBoolean(key, defaultValue)) {
+        if (settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), key, defaultValue)) {
             return flag;
         }
         return 0;

@@ -44,8 +44,9 @@ public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTok
         minWordSize = settings.getAsInt("min_word_size", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
         minSubwordSize = settings.getAsInt("min_subword_size", CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE);
         maxSubwordSize = settings.getAsInt("max_subword_size", CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE);
-        onlyLongestMatch = settings.getAsBoolean("only_longest_match", false);
-        wordList = Analysis.getWordSet(env, settings, "word_list");
+        onlyLongestMatch = settings
+            .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "only_longest_match", false);
+        wordList = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "word_list");
         if (wordList == null) {
             throw new IllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly");
         }

@@ -40,7 +40,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue;
 import static org.elasticsearch.index.mapper.TypeParsers.parseTextField;
 
@@ -118,7 +117,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
             // the AllFieldMapper ctor in the builder since it is not valid. Here we validate
             // the doc values settings (old and new) are rejected
             Object docValues = node.get("doc_values");
-            if (docValues != null && lenientNodeBooleanValue(docValues)) {
+            if (docValues != null && TypeParsers.nodeBooleanValueLenient(name, "doc_values", docValues)) {
                 throw new MapperParsingException("Field [" + name +
                     "] is always tokenized and cannot have doc values");
             }
@@ -139,8 +138,8 @@ public class AllFieldMapper extends MetadataFieldMapper {
             String fieldName = entry.getKey();
             Object fieldNode = entry.getValue();
             if (fieldName.equals("enabled")) {
-                builder.enabled(lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED :
-                    EnabledAttributeMapper.DISABLED);
+                boolean enabled = TypeParsers.nodeBooleanValueLenient(name, "enabled", fieldNode);
+                builder.enabled(enabled ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED);
                 iterator.remove();
             }
         }
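From here on the mapping-side changes replace the static lenientNodeBooleanValue import with TypeParsers.nodeBooleanValue(name, property, node, parserContext). The following is a plausible sketch of the dispatch behind that method, assuming it mirrors the settings-side version gate; the body is an illustration, not the committed implementation, though the calls it delegates to all appear in this diff.

    // Sketch only: assumes nodeBooleanValueLenient and the two-argument strict
    // XContentMapValues.nodeBooleanValue shown elsewhere in this diff.
    public static boolean nodeBooleanValue(String fieldName, String propertyName,
                                           Object node, Mapper.TypeParser.ParserContext parserContext) {
        if (parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
            // strict: only Boolean values and the strings "true"/"false" pass
            return XContentMapValues.nodeBooleanValue(node, fieldName + "." + propertyName);
        }
        // mappings of pre-6.0 indices keep the lenient interpretation
        return nodeBooleanValueLenient(fieldName, propertyName, node);
    }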
@@ -125,7 +125,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
             Object propNode = entry.getValue();
 
             if (propName.equals(Names.IGNORE_MALFORMED)) {
-                builder.ignoreMalformed(XContentMapValues.lenientNodeBooleanValue(propNode));
+                builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, Names.IGNORE_MALFORMED, propNode, parserContext));
                 iterator.remove();
             }
         }

@@ -26,8 +26,10 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermRangeQuery;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.Booleans;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -44,13 +46,13 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
 import static org.elasticsearch.index.mapper.TypeParsers.parseField;
 
 /**
  * A field mapper for boolean fields.
  */
 public class BooleanFieldMapper extends FieldMapper {
+    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(BooleanFieldMapper.class));
 
     public static final String CONTENT_TYPE = "boolean";
 
@@ -108,7 +110,7 @@ public class BooleanFieldMapper extends FieldMapper {
                 if (propNode == null) {
                     throw new MapperParsingException("Property [null_value] cannot be null.");
                 }
-                builder.nullValue(lenientNodeBooleanValue(propNode));
+                builder.nullValue(TypeParsers.nodeBooleanValue(name, "null_value", propNode, parserContext));
                 iterator.remove();
             }
         }
@@ -231,7 +233,15 @@ public class BooleanFieldMapper extends FieldMapper {
                 value = fieldType().nullValue();
             }
         } else {
-            value = context.parser().booleanValue();
+            if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+                value = context.parser().booleanValue();
+            } else {
+                value = context.parser().booleanValueLenient();
+                if (context.parser().isBooleanValueLenient() != context.parser().isBooleanValue()) {
+                    String rawValue = context.parser().text();
+                    deprecationLogger.deprecated("Expected a boolean for property [{}] but got [{}]", fieldType().name(), rawValue);
+                }
+            }
         }
     }
 
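The BooleanFieldMapper hunk above is the one place where leniency depends on the document rather than the mapping. Generalized into a standalone helper, assuming the XContentParser methods shown in the hunk: booleanValueLenient keeps the old reading of a token, and the mismatch check catches values like "yes" or "1" that only the lenient path accepts.

    import java.io.IOException;
    import org.elasticsearch.Version;
    import org.elasticsearch.common.logging.DeprecationLogger;
    import org.elasticsearch.common.logging.Loggers;
    import org.elasticsearch.common.xcontent.XContentParser;

    // Sketch generalized from the hunk above; the class itself is hypothetical.
    class LenientDocBooleanParser {
        private static final DeprecationLogger deprecationLogger =
                new DeprecationLogger(Loggers.getLogger(LenientDocBooleanParser.class));

        static boolean parse(XContentParser parser, Version indexCreatedVersion) throws IOException {
            if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
                return parser.booleanValue(); // strict: throws on "yes", "1", "off", ...
            }
            boolean value = parser.booleanValueLenient();
            if (parser.isBooleanValueLenient() != parser.isBooleanValue()) {
                // a lenient-only token such as "yes": still accepted, but deprecated
                deprecationLogger.deprecated("Expected a boolean but got [{}]", parser.text());
            }
            return value;
        }
    }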
@@ -151,7 +151,7 @@ public class DateFieldMapper extends FieldMapper {
                 builder.nullValue(propNode.toString());
                 iterator.remove();
             } else if (propName.equals("ignore_malformed")) {
-                builder.ignoreMalformed(TypeParsers.nodeBooleanValue("ignore_malformed", propNode, parserContext));
+                builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, "ignore_malformed", propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("locale")) {
                 builder.locale(LocaleUtils.parse(propNode.toString()));

@@ -237,7 +237,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
             }
         }
 
-    private final Version indexCreatedVersion;
+    protected final Version indexCreatedVersion;
     protected MappedFieldType fieldType;
     protected final MappedFieldType defaultFieldType;
     protected MultiFields multiFields;

@@ -36,8 +36,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
-
 /**
  * A mapper that indexes the field names of a document under <code>_field_names</code>. This mapper is typically useful in order
  * to have fast <code>exists</code> and <code>missing</code> queries/filters.
@@ -107,7 +105,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
             String fieldName = entry.getKey();
             Object fieldNode = entry.getValue();
             if (fieldName.equals("enabled")) {
-                builder.enabled(lenientNodeBooleanValue(fieldNode));
+                builder.enabled(TypeParsers.nodeBooleanValue(name, "enabled", fieldNode, parserContext));
                 iterator.remove();
             }
         }

@@ -41,7 +41,6 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.QueryShardException;
 import org.locationtech.spatial4j.shape.Point;
@@ -54,9 +53,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
-
-
 /**
  * FieldMapper for indexing {@link org.locationtech.spatial4j.shape.Shape}s.
  * <p>
@@ -185,11 +181,12 @@ public class GeoShapeFieldMapper extends FieldMapper {
                 builder.fieldType().setStrategyName(fieldNode.toString());
                 iterator.remove();
             } else if (Names.COERCE.equals(fieldName)) {
-                builder.coerce(lenientNodeBooleanValue(fieldNode));
+                builder.coerce(TypeParsers.nodeBooleanValue(fieldName, Names.COERCE, fieldNode, parserContext));
                 iterator.remove();
             } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)
                     && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) {
-                builder.fieldType().setPointsOnly(XContentMapValues.lenientNodeBooleanValue(fieldNode));
+                boolean pointsOnly = TypeParsers.nodeBooleanValue(fieldName, Names.STRATEGY_POINTS_ONLY, fieldNode, parserContext);
+                builder.fieldType().setPointsOnly(pointsOnly);
                 iterator.remove();
             }
         }

@@ -114,7 +114,7 @@ public class IpFieldMapper extends FieldMapper {
                 builder.nullValue(InetAddresses.forString(propNode.toString()));
                 iterator.remove();
             } else if (propName.equals("ignore_malformed")) {
-                builder.ignoreMalformed(TypeParsers.nodeBooleanValue("ignore_malformed", propNode, parserContext));
+                builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, "ignore_malformed", propNode, parserContext));
                 iterator.remove();
             } else if (TypeParsers.parseMultiField(builder, name, parserContext, propName, propNode)) {
                 iterator.remove();

@@ -136,10 +136,10 @@ public final class KeywordFieldMapper extends FieldMapper {
                 builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
                 iterator.remove();
             } else if (propName.equals("norms")) {
-                builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode) == false);
+                builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, "norms") == false);
                 iterator.remove();
             } else if (propName.equals("eager_global_ordinals")) {
-                builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode));
+                builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(propNode, "eager_global_ordinals"));
                 iterator.remove();
             } else if (propName.equals("normalizer")) {
                 if (propNode != null) {

@@ -150,10 +150,10 @@ public class NumberFieldMapper extends FieldMapper {
                 builder.nullValue(type.parse(propNode, false));
                 iterator.remove();
             } else if (propName.equals("ignore_malformed")) {
-                builder.ignoreMalformed(TypeParsers.nodeBooleanValue("ignore_malformed", propNode, parserContext));
+                builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, "ignore_malformed", propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("coerce")) {
-                builder.coerce(TypeParsers.nodeBooleanValue("coerce", propNode, parserContext));
+                builder.coerce(TypeParsers.nodeBooleanValue(name, "coerce", propNode, parserContext));
                 iterator.remove();
             }
         }

@@ -42,8 +42,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
-
 public class ObjectMapper extends Mapper implements Cloneable {
 
     public static final String CONTENT_TYPE = "object";
@@ -167,7 +165,7 @@ public class ObjectMapper extends Mapper implements Cloneable {
     @Override
     public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
         ObjectMapper.Builder builder = new Builder(name);
-        parseNested(name, node, builder);
+        parseNested(name, node, builder, parserContext);
         for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
             Map.Entry<String, Object> entry = iterator.next();
             String fieldName = entry.getKey();
@@ -185,11 +183,12 @@ public class ObjectMapper extends Mapper implements Cloneable {
                 if (value.equalsIgnoreCase("strict")) {
                     builder.dynamic(Dynamic.STRICT);
                 } else {
-                    builder.dynamic(lenientNodeBooleanValue(fieldNode) ? Dynamic.TRUE : Dynamic.FALSE);
+                    boolean dynamic = TypeParsers.nodeBooleanValue(fieldName, "dynamic", fieldNode, parserContext);
+                    builder.dynamic(dynamic ? Dynamic.TRUE : Dynamic.FALSE);
                 }
                 return true;
             } else if (fieldName.equals("enabled")) {
-                builder.enabled(lenientNodeBooleanValue(fieldNode));
+                builder.enabled(TypeParsers.nodeBooleanValue(fieldName, "enabled", fieldNode, parserContext));
                 return true;
             } else if (fieldName.equals("properties")) {
                 if (fieldNode instanceof Collection && ((Collection) fieldNode).isEmpty()) {
@@ -201,13 +200,14 @@ public class ObjectMapper extends Mapper implements Cloneable {
             }
             return true;
         } else if (fieldName.equals("include_in_all")) {
-            builder.includeInAll(lenientNodeBooleanValue(fieldNode));
+            builder.includeInAll(TypeParsers.nodeBooleanValue(fieldName, "include_in_all", fieldNode, parserContext));
             return true;
         }
         return false;
     }
 
-    protected static void parseNested(String name, Map<String, Object> node, ObjectMapper.Builder builder) {
+    protected static void parseNested(String name, Map<String, Object> node, ObjectMapper.Builder builder,
+                                      ParserContext parserContext) {
         boolean nested = false;
         boolean nestedIncludeInParent = false;
         boolean nestedIncludeInRoot = false;
@@ -224,12 +224,12 @@ public class ObjectMapper extends Mapper implements Cloneable {
             }
             fieldNode = node.get("include_in_parent");
             if (fieldNode != null) {
-                nestedIncludeInParent = lenientNodeBooleanValue(fieldNode);
+                nestedIncludeInParent = TypeParsers.nodeBooleanValue(name, "include_in_parent", fieldNode, parserContext);
                 node.remove("include_in_parent");
             }
             fieldNode = node.get("include_in_root");
             if (fieldNode != null) {
-                nestedIncludeInRoot = lenientNodeBooleanValue(fieldNode);
+                nestedIncludeInRoot = TypeParsers.nodeBooleanValue(name, "include_in_root", fieldNode, parserContext);
                 node.remove("include_in_root");
             }
             if (nested) {
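To see what the ObjectMapper changes mean for users: quoted pseudo-booleans in a mapping now go through TypeParsers.nodeBooleanValue, so they are rejected when the index was created on or after 6.0, while "dynamic" keeps its special-cased "strict" string. A hypothetical mapping fragment for illustration:

    // Hypothetical object mapping; on a pre-6.0 index the two quoted
    // pseudo-booleans still parse (with deprecation warnings), on 6.0+ they throw.
    public class StrictMappingExample {
        public static final String OBJECT_MAPPING = "{"
                + "\"type\": \"object\","
                + "\"dynamic\": \"strict\","    // still allowed: special-cased string
                + "\"enabled\": \"yes\","       // rejected on 6.0+: not "true"/"false"
                + "\"include_in_all\": \"0\""   // rejected on 6.0+
                + "}";
    }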
@@ -122,7 +122,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
                 }
                 iterator.remove();
             } else if (fieldName.equals("eager_global_ordinals")) {
-                builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(fieldNode));
+                builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(fieldNode, "eager_global_ordinals"));
                 iterator.remove();
             }
         }

@@ -166,7 +166,7 @@ public class RangeFieldMapper extends FieldMapper {
                 throw new MapperParsingException("Property [null_value] is not supported for [" + this.type.name
                     + "] field types.");
             } else if (propName.equals("coerce")) {
-                builder.coerce(TypeParsers.nodeBooleanValue("coerce", propNode, parserContext));
+                builder.coerce(TypeParsers.nodeBooleanValue(name, "coerce", propNode, parserContext));
                 iterator.remove();
             } else if (propName.equals("locale")) {
                 builder.locale(LocaleUtils.parse(propNode.toString()));
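Taken together, the behavioural contract of this change can be exercised with the sketch parser from the Analysis section above (illustrative values; the real error message may differ):

    public class StrictBooleanDemo {
        public static void main(String[] args) {
            // pre-6.0 index: lenient, "no" quietly meant false
            System.out.println(LenientBooleanSketch.parse(true, "no", true));    // false
            // 6.0+ index: strict, only "true" and "false" are accepted
            System.out.println(LenientBooleanSketch.parse(false, "true", true)); // true
            LenientBooleanSketch.parse(false, "no", true); // throws IllegalArgumentException
        }
    }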
Some files were not shown because too many files have changed in this diff.