Merge branch 'master' into sort-serialization-scriptsort

commit f0074668db
@@ -49,6 +49,15 @@ class ClusterConfiguration {
     @Input
     String jvmArgs = System.getProperty('tests.jvm.argline', '')
 
+    /**
+     * The seed nodes port file. In the case the cluster has more than one node we use a seed node
+     * to form the cluster. The file is null if there is no seed node yet available.
+     *
+     * Note: this can only be null if the cluster has only one node or if the first node is not yet
+     * configured. All nodes but the first node should see a non null value.
+     */
+    File seedNodePortsFile
+
     /**
      * A closure to call before the cluster is considered ready. The closure is passed the node info,
      * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
@@ -119,4 +128,12 @@ class ClusterConfiguration {
         }
         extraConfigFiles.put(path, sourceFile)
     }
+
+    /** Returns an address and port suitable for a uri to connect to this cluster's seed node over the transport protocol */
+    String seedNodeTransportUri() {
+        if (seedNodePortsFile != null) {
+            return seedNodePortsFile.readLines("UTF-8").get(0)
+        }
+        return null;
+    }
 }
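
The two hunks above are file-based plumbing: the first node's transport ports file doubles as the cluster's seed address, and seedNodeTransportUri() simply returns its first line. A minimal Java sketch of that read path (class and parameter names are hypothetical, for illustration only):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

class SeedNodeUriSketch {
    // Mirrors seedNodeTransportUri(): the ports file holds one bound "host:port" per line,
    // and the first line is used as the seed address.
    static String seedNodeTransportUri(Path portsFile) throws IOException {
        if (portsFile != null && Files.exists(portsFile)) {
            List<String> lines = Files.readAllLines(portsFile, StandardCharsets.UTF_8);
            return lines.isEmpty() ? null : lines.get(0); // e.g. "127.0.0.1:9500"
        }
        return null;
    }
}
```
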
@@ -58,6 +58,13 @@ class ClusterFormationTasks {
         List<NodeInfo> nodes = []
         for (int i = 0; i < config.numNodes; ++i) {
             NodeInfo node = new NodeInfo(config, i, project, task)
+            if (i == 0) {
+                if (config.seedNodePortsFile != null) {
+                    // we might allow this in the future to be set but for now we are the only authority to set this!
+                    throw new GradleException("seedNodePortsFile has a non-null value but first node has not been initialized")
+                }
+                config.seedNodePortsFile = node.transportPortsFile;
+            }
             nodes.add(node)
             startTasks.add(configureNode(project, task, node))
         }
@@ -220,20 +227,22 @@ class ClusterFormationTasks {
             'node.testattr'                : 'test',
             'repositories.url.allowed_urls': 'http://snapshot.test*'
         ]
-        if (node.config.numNodes == 1) {
-            esConfig['http.port'] = node.config.httpPort
-            esConfig['transport.tcp.port'] = node.config.transportPort
-        } else {
-            // TODO: fix multi node so it doesn't use hardcoded ports
-            esConfig['http.port'] = 9400 + node.nodeNum
-            esConfig['transport.tcp.port'] = 9500 + node.nodeNum
-            esConfig['discovery.zen.ping.unicast.hosts'] = (0..<node.config.numNodes).collect{"localhost:${9500 + it}"}.join(',')
-
-        }
+        esConfig['http.port'] = node.config.httpPort
+        esConfig['transport.tcp.port'] = node.config.transportPort
         esConfig.putAll(node.config.settings)
 
         Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
         writeConfig.doFirst {
+            if (node.nodeNum > 0) { // multi-node cluster case, we have to wait for the seed node to startup
+                ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
+                    resourceexists {
+                        file(file: node.config.seedNodePortsFile.toString())
+                    }
+                }
+                // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast
+                // host and join the cluster via that.
+                esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\""
+            }
             File configFile = new File(node.confDir, 'elasticsearch.yml')
             logger.info("Configuring ${configFile}")
             configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
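
This hunk drops the hardcoded 9400/9500 port scheme: every node now uses its configured ports, and nodes other than the seed block in writeConfig.doFirst until the seed's ports file appears before wiring it in as the sole unicast host. The ant.waitfor call amounts to a poll-with-deadline; a plain Java sketch of the same wait (helper names are hypothetical):

```java
import java.nio.file.Files;
import java.nio.file.Path;

class WaitForSeedSketch {
    // Equivalent of the ant.waitfor block above: check every 500 ms, give up after 20 s.
    static boolean waitForFile(Path file) throws InterruptedException {
        long deadline = System.currentTimeMillis() + 20_000;
        while (System.currentTimeMillis() < deadline) {
            if (Files.exists(file)) {
                return true;
            }
            Thread.sleep(500);
        }
        return false;
    }
}
```
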
@@ -1,5 +1,5 @@
 elasticsearch = 5.0.0
-lucene = 6.0.0-snapshot-bea235f
+lucene = 6.0.0-snapshot-f0aa4fc
 
 # optional dependencies
 spatial4j = 0.6
@@ -49,7 +49,7 @@ dependencies {
   compile 'org.elasticsearch:securesm:1.0'
 
   // utilities
-  compile 'commons-cli:commons-cli:1.3.1'
+  compile 'net.sf.jopt-simple:jopt-simple:4.9'
   compile 'com.carrotsearch:hppc:0.7.1'
 
   // time handling, remove with java 8 time
@@ -787,8 +787,9 @@ public class MapperQueryParser extends QueryParser {
                 assert q instanceof BoostQuery == false;
                 return pq;
             } else if (q instanceof MultiPhraseQuery) {
-                ((MultiPhraseQuery) q).setSlop(slop);
-                return q;
+                MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q);
+                builder.setSlop(slop);
+                return builder.build();
             } else {
                 return q;
             }
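
In the Lucene snapshot this merge picks up (6.0.0-snapshot-f0aa4fc, per the version.properties hunk above), MultiPhraseQuery no longer exposes a setSlop mutator; queries are assembled through MultiPhraseQuery.Builder, whose copy constructor clones an existing query so only the slop changes. Isolated as a helper (hypothetical class name, same API as the hunk uses):

```java
import org.apache.lucene.search.MultiPhraseQuery;

class SlopSketch {
    // Rebuild an existing MultiPhraseQuery with a new slop, as the hunk above does.
    static MultiPhraseQuery withSlop(MultiPhraseQuery q, int slop) {
        MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder(q); // copies terms and positions
        builder.setSlop(slop);
        return builder.build();
    }
}
```
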
@@ -34,7 +34,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.List;
 
 /**
  *
@@ -68,7 +67,7 @@ public class CustomFieldQuery extends FieldQuery {
             flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
         } else if (sourceQuery instanceof MultiPhraseQuery) {
             MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery);
-            convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
+            convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
         } else if (sourceQuery instanceof BlendedTermQuery) {
             final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
             flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
@@ -77,7 +76,7 @@ public class CustomFieldQuery extends FieldQuery {
         }
     }
 
-    private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List<Term[]> terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
+    private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, Term[][] terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
         if (currentPos == 0) {
             // if we have more than 16 terms
             int numTerms = 0;
@@ -97,16 +96,16 @@ public class CustomFieldQuery extends FieldQuery {
          * we walk all possible ways and for each path down the MPQ we create a PhraseQuery this is what FieldQuery supports.
          * It seems expensive but most queries will be pretty small.
          */
-        if (currentPos == terms.size()) {
+        if (currentPos == terms.length) {
             PhraseQuery.Builder queryBuilder = new PhraseQuery.Builder();
             queryBuilder.setSlop(orig.getSlop());
             for (int i = 0; i < termsIdx.length; i++) {
-                queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
+                queryBuilder.add(terms[i][termsIdx[i]], pos[i]);
             }
             Query query = queryBuilder.build();
             this.flatten(query, reader, flatQueries, 1F);
         } else {
-            Term[] t = terms.get(currentPos);
+            Term[] t = terms[currentPos];
             for (int i = 0; i < t.length; i++) {
                 termsIdx[currentPos] = i;
                 convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries);
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -46,7 +47,8 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
     private final MetaDataIndexStateService indexStateService;
     private final DestructiveOperations destructiveOperations;
     private volatile boolean closeIndexEnabled;
-    public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING =
+        Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope);
 
     @Inject
     public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
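
This is the first of many hunks applying the same mechanical migration: the old Setting factories took a positional dynamic boolean plus a Setting.Scope enum; the new ones take Setting.Property varargs, so the flags are named at the call site. Side by side (the new line is taken verbatim from the hunk above):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

class SettingMigrationSketch {
    // Old style (removed):
    //   Setting.boolSetting("cluster.indices.close.enable", true, /*dynamic*/ true, Setting.Scope.CLUSTER)
    // New style (added): Property.Dynamic replaces the boolean, Property.NodeScope replaces Scope.CLUSTER.
    public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING =
        Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope);
}
```
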
@@ -27,6 +27,7 @@ import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.mapper.MapperService;
 
@@ -39,7 +40,8 @@ import java.util.List;
  */
 public final class AutoCreateIndex {
 
-    public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING = new Setting<>("action.auto_create_index", "true", AutoCreate::new, false, Setting.Scope.CLUSTER);
+    public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING =
+        new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope);
 
     private final boolean dynamicMappingDisabled;
     private final IndexNameExpressionResolver resolver;
@@ -23,6 +23,7 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 
 /**
@@ -33,7 +34,8 @@ public final class DestructiveOperations extends AbstractComponent {
     /**
      * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
      */
-    public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> REQUIRES_NAME_SETTING =
+        Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope);
     private volatile boolean destructiveRequiresName;
 
     @Inject
@@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -37,7 +38,8 @@ import java.util.function.Supplier;
 public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
     extends TransportMasterNodeAction<Request, Response> {
 
-    public static final Setting<Boolean> FORCE_LOCAL_SETTING = Setting.boolSetting("action.master.force_local", false, false, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> FORCE_LOCAL_SETTING =
+        Setting.boolSetting("action.master.force_local", false, Property.NodeScope);
 
     private final boolean forceLocal;
 
@@ -19,15 +19,22 @@
 
 package org.elasticsearch.bootstrap;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.file.Path;
+import java.util.Locale;
+import java.util.concurrent.CountDownLatch;
+
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.StringHelper;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
+import org.elasticsearch.cli.ExitCodes;
+import org.elasticsearch.cli.Terminal;
 import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.cli.CliTool;
-import org.elasticsearch.common.cli.Terminal;
 import org.elasticsearch.common.inject.CreationException;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.LogConfigurator;
@@ -40,13 +47,6 @@ import org.elasticsearch.monitor.process.ProcessProbe;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.file.Path;
-import java.util.Locale;
-import java.util.concurrent.CountDownLatch;
-
 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 
 /**
@@ -222,11 +222,11 @@ final class Bootstrap {
         // Set the system property before anything has a chance to trigger its use
         initLoggerPrefix();
 
-        BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser();
-        CliTool.ExitStatus status = bootstrapCLIParser.execute(args);
+        BootstrapCliParser parser = new BootstrapCliParser();
+        int status = parser.main(args, Terminal.DEFAULT);
 
-        if (CliTool.ExitStatus.OK != status) {
-            exit(status.status());
+        if (parser.shouldRun() == false || status != ExitCodes.OK) {
+            exit(status);
         }
 
         INSTANCE = new Bootstrap();
@@ -307,14 +307,6 @@ final class Bootstrap {
         System.err.close();
     }
 
-    @SuppressForbidden(reason = "System#err")
-    private static void sysError(String line, boolean flush) {
-        System.err.println(line);
-        if (flush) {
-            System.err.flush();
-        }
-    }
-
     private static void checkForCustomConfFile() {
         String confFileSetting = System.getProperty("es.default.config");
         checkUnsetAndMaybeExit(confFileSetting, "es.default.config");
@@ -1,183 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.bootstrap;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.Option;
-import org.elasticsearch.Build;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.cli.CliTool;
-import org.elasticsearch.common.cli.CliToolConfig;
-import org.elasticsearch.common.cli.UserError;
-import org.elasticsearch.common.cli.Terminal;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.monitor.jvm.JvmInfo;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Properties;
-
-import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
-import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
-
-final class BootstrapCLIParser extends CliTool {
-
-    private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class)
-        .cmds(Start.CMD, Version.CMD)
-        .build();
-
-    public BootstrapCLIParser() {
-        super(CONFIG);
-    }
-
-    public BootstrapCLIParser(Terminal terminal) {
-        super(CONFIG, terminal);
-    }
-
-    @Override
-    protected Command parse(String cmdName, CommandLine cli) throws Exception {
-        switch (cmdName.toLowerCase(Locale.ROOT)) {
-            case Start.NAME:
-                return Start.parse(terminal, cli);
-            case Version.NAME:
-                return Version.parse(terminal, cli);
-            default:
-                assert false : "should never get here, if the user enters an unknown command, an error message should be shown before parse is called";
-                return null;
-        }
-    }
-
-    static class Version extends CliTool.Command {
-
-        private static final String NAME = "version";
-
-        private static final CliToolConfig.Cmd CMD = cmd(NAME, Version.class).build();
-
-        public static Command parse(Terminal terminal, CommandLine cli) {
-            return new Version(terminal);
-        }
-
-        public Version(Terminal terminal) {
-            super(terminal);
-        }
-
-        @Override
-        public ExitStatus execute(Settings settings, Environment env) throws Exception {
-            terminal.println("Version: " + org.elasticsearch.Version.CURRENT
-                + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
-                + ", JVM: " + JvmInfo.jvmInfo().version());
-            return ExitStatus.OK_AND_EXIT;
-        }
-    }
-
-    static class Start extends CliTool.Command {
-
-        private static final String NAME = "start";
-
-        private static final CliToolConfig.Cmd CMD = cmd(NAME, Start.class)
-            .options(
-                optionBuilder("d", "daemonize").hasArg(false).required(false),
-                optionBuilder("p", "pidfile").hasArg(true).required(false),
-                optionBuilder("V", "version").hasArg(false).required(false),
-                Option.builder("D").argName("property=value").valueSeparator('=').numberOfArgs(2)
-            )
-            .stopAtNonOption(true) // needed to parse the --foo.bar options, so this parser must be lenient
-            .build();
-
-        // TODO: don't use system properties as a way to do this, its horrible...
-        @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
-        public static Command parse(Terminal terminal, CommandLine cli) throws UserError {
-            if (cli.hasOption("V")) {
-                return Version.parse(terminal, cli);
-            }
-
-            if (cli.hasOption("d")) {
-                System.setProperty("es.foreground", "false");
-            }
-
-            String pidFile = cli.getOptionValue("pidfile");
-            if (!Strings.isNullOrEmpty(pidFile)) {
-                System.setProperty("es.pidfile", pidFile);
-            }
-
-            if (cli.hasOption("D")) {
-                Properties properties = cli.getOptionProperties("D");
-                for (Map.Entry<Object, Object> entry : properties.entrySet()) {
-                    String key = (String) entry.getKey();
-                    String propertyName = key.startsWith("es.") ? key : "es." + key;
-                    System.setProperty(propertyName, entry.getValue().toString());
-                }
-            }
-
-            // hacky way to extract all the fancy extra args, there is no CLI tool helper for this
-            Iterator<String> iterator = cli.getArgList().iterator();
-            final Map<String, String> properties = new HashMap<>();
-            while (iterator.hasNext()) {
-                String arg = iterator.next();
-                if (!arg.startsWith("--")) {
-                    if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
-                        throw new UserError(ExitStatus.USAGE,
-                            "Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
-                        );
-                    } else {
-                        throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] does not start with --");
-                    }
-                }
-                // if there is no = sign, we have to get the next argument
-                arg = arg.replace("--", "");
-                if (arg.contains("=")) {
-                    String[] splitArg = arg.split("=", 2);
-                    String key = splitArg[0];
-                    String value = splitArg[1];
-                    properties.put("es." + key, value);
-                } else {
-                    if (iterator.hasNext()) {
-                        String value = iterator.next();
-                        if (value.startsWith("--")) {
-                            throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
-                        }
-                        properties.put("es." + arg, value);
-                    } else {
-                        throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
-                    }
-                }
-            }
-            for (Map.Entry<String, String> entry : properties.entrySet()) {
-                System.setProperty(entry.getKey(), entry.getValue());
-            }
-            return new Start(terminal);
-        }
-
-        public Start(Terminal terminal) {
-            super(terminal);
-
-        }
-
-        @Override
-        public ExitStatus execute(Settings settings, Environment env) throws Exception {
-            return ExitStatus.OK;
-        }
-    }
-
-}
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+import java.util.Arrays;
+
+import joptsimple.OptionSet;
+import joptsimple.OptionSpec;
+import org.elasticsearch.Build;
+import org.elasticsearch.cli.Command;
+import org.elasticsearch.cli.ExitCodes;
+import org.elasticsearch.cli.UserError;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.monitor.jvm.JvmInfo;
+
+final class BootstrapCliParser extends Command {
+
+    private final OptionSpec<Void> versionOption;
+    private final OptionSpec<Void> daemonizeOption;
+    private final OptionSpec<String> pidfileOption;
+    private final OptionSpec<String> propertyOption;
+    private boolean shouldRun = false;
+
+    BootstrapCliParser() {
+        super("Starts elasticsearch");
+        // TODO: in jopt-simple 5.0, make this mutually exclusive with all other options
+        versionOption = parser.acceptsAll(Arrays.asList("V", "version"),
+            "Prints elasticsearch version information and exits");
+        daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"),
+            "Starts Elasticsearch in the background");
+        // TODO: in jopt-simple 5.0 this option type can be a Path
+        pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
+            "Creates a pid file in the specified path on start")
+            .withRequiredArg();
+        propertyOption = parser.accepts("D", "Configures an Elasticsearch setting")
+            .withRequiredArg();
+    }
+
+    // TODO: don't use system properties as a way to do this, its horrible...
+    @SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
+    @Override
+    protected void execute(Terminal terminal, OptionSet options) throws Exception {
+        if (options.has(versionOption)) {
+            terminal.println("Version: " + org.elasticsearch.Version.CURRENT
+                + ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+                + ", JVM: " + JvmInfo.jvmInfo().version());
+            return;
+        }
+
+        // TODO: don't use sysprops for any of these! pass the args through to bootstrap...
+        if (options.has(daemonizeOption)) {
+            System.setProperty("es.foreground", "false");
+        }
+        String pidFile = pidfileOption.value(options);
+        if (Strings.isNullOrEmpty(pidFile) == false) {
+            System.setProperty("es.pidfile", pidFile);
+        }
+
+        for (String property : propertyOption.values(options)) {
+            String[] keyValue = property.split("=", 2);
+            if (keyValue.length != 2) {
+                throw new UserError(ExitCodes.USAGE, "Malformed elasticsearch setting, must be of the form key=value");
+            }
+            String key = keyValue[0];
+            if (key.startsWith("es.") == false) {
+                key = "es." + key;
+            }
+            System.setProperty(key, keyValue[1]);
+        }
+        shouldRun = true;
+    }
+
+    boolean shouldRun() {
+        return shouldRun;
+    }
+}
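
The new parser plugs into the generic Command.main loop (the Command class added below) and reports a plain int exit code instead of CliTool.ExitStatus. Its call site, quoted from the Bootstrap hunk above, for reference:

```java
BootstrapCliParser parser = new BootstrapCliParser();
int status = parser.main(args, Terminal.DEFAULT);

if (parser.shouldRun() == false || status != ExitCodes.OK) {
    exit(status);
}
```
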
@@ -20,7 +20,7 @@
 package org.elasticsearch.bootstrap;
 
 import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Scope;
+import org.elasticsearch.common.settings.Setting.Property;
 
 public final class BootstrapSettings {
 
@@ -29,10 +29,13 @@ public final class BootstrapSettings {
 
     // TODO: remove this hack when insecure defaults are removed from java
     public static final Setting<Boolean> SECURITY_FILTER_BAD_DEFAULTS_SETTING =
-            Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER);
+            Setting.boolSetting("security.manager.filter_bad_defaults", true, Property.NodeScope);
 
-    public static final Setting<Boolean> MLOCKALL_SETTING = Setting.boolSetting("bootstrap.mlockall", false, false, Scope.CLUSTER);
-    public static final Setting<Boolean> SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, false, Scope.CLUSTER);
-    public static final Setting<Boolean> CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, false, Scope.CLUSTER);
+    public static final Setting<Boolean> MLOCKALL_SETTING =
+        Setting.boolSetting("bootstrap.mlockall", false, Property.NodeScope);
+    public static final Setting<Boolean> SECCOMP_SETTING =
+        Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
+    public static final Setting<Boolean> CTRLHANDLER_SETTING =
+        Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
 
 }
@@ -32,7 +32,7 @@ public final class Elasticsearch {
     /**
     * Main entry point for starting elasticsearch
     */
-    public static void main(String[] args) throws StartupError {
+    public static void main(String[] args) throws Exception {
        try {
            Bootstrap.init(args);
        } catch (Throwable t) {
@@ -19,13 +19,13 @@
 
 package org.elasticsearch.cache.recycler;
 
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.recycler.AbstractRecyclerC;
 import org.elasticsearch.common.recycler.Recycler;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.BigArrays;
@@ -43,13 +43,19 @@ import static org.elasticsearch.common.recycler.Recyclers.none;
 /** A recycler of fixed-size pages. */
 public class PageCacheRecycler extends AbstractComponent implements Releasable {
 
-    public static final Setting<Type> TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER);
-    public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER);
-    public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER);
-    public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER);
-    public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Type> TYPE_SETTING =
+        new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope);
+    public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING =
+        Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope);
+    public static final Setting<Double> WEIGHT_BYTES_SETTING =
+        Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope);
+    public static final Setting<Double> WEIGHT_LONG_SETTING =
+        Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, Property.NodeScope);
+    public static final Setting<Double> WEIGHT_INT_SETTING =
+        Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, Property.NodeScope);
     // object pages are less useful to us so we give them a lower weight by default
-    public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER);
+    public static final Setting<Double> WEIGHT_OBJECTS_SETTING =
+        Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, Property.NodeScope);
 
     private final Recycler<byte[]> bytePage;
     private final Recycler<int[]> intPage;
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cli;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import joptsimple.OptionException;
+import joptsimple.OptionParser;
+import joptsimple.OptionSet;
+import joptsimple.OptionSpec;
+import org.elasticsearch.common.SuppressForbidden;
+
+/**
+ * An action to execute within a cli.
+ */
+public abstract class Command {
+
+    /** A description of the command, used in the help output. */
+    protected final String description;
+
+    /** The option parser for this command. */
+    protected final OptionParser parser = new OptionParser();
+
+    private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp();
+    private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output");
+    private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output");
+
+    public Command(String description) {
+        this.description = description;
+    }
+
+    /** Parses options for this command from args and executes it. */
+    public final int main(String[] args, Terminal terminal) throws Exception {
+        try {
+            mainWithoutErrorHandling(args, terminal);
+        } catch (OptionException e) {
+            printHelp(terminal);
+            terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
+            return ExitCodes.USAGE;
+        } catch (UserError e) {
+            terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage());
+            return e.exitCode;
+        }
+        return ExitCodes.OK;
+    }
+
+    /**
+     * Executes the command, but all errors are thrown.
+     */
+    void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception {
+        final OptionSet options = parser.parse(args);
+
+        if (options.has(helpOption)) {
+            printHelp(terminal);
+            return;
+        }
+
+        if (options.has(silentOption)) {
+            if (options.has(verboseOption)) {
+                // mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it
+                throw new UserError(ExitCodes.USAGE, "Cannot specify -s and -v together");
+            }
+            terminal.setVerbosity(Terminal.Verbosity.SILENT);
+        } else if (options.has(verboseOption)) {
+            terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
+        } else {
+            terminal.setVerbosity(Terminal.Verbosity.NORMAL);
+        }
+
+        execute(terminal, options);
+    }
+
+    /** Prints a help message for the command to the terminal. */
+    private void printHelp(Terminal terminal) throws IOException {
+        terminal.println(description);
+        terminal.println("");
+        printAdditionalHelp(terminal);
+        parser.printHelpOn(terminal.getWriter());
+    }
+
+    /** Prints additional help information, specific to the command */
+    protected void printAdditionalHelp(Terminal terminal) {}
+
+    @SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
+    protected static void exit(int status) {
+        System.exit(status);
+    }
+
+    /**
+     * Executes this command.
+     *
+     * Any runtime user errors (like an input file that does not exist), should throw a {@link UserError}. */
+    protected abstract void execute(Terminal terminal, OptionSet options) throws Exception;
+}
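
Concrete commands only implement execute(Terminal, OptionSet); parsing, help, verbosity, and error-to-exit-code mapping all live in main above. A minimal hypothetical subclass, using only the APIs declared in this package:

```java
import joptsimple.OptionSet;
import joptsimple.OptionSpec;

// Hypothetical example command, not part of the commit.
final class EchoCommand extends Command {

    private final OptionSpec<String> textOption;

    EchoCommand() {
        super("Echoes back its argument");
        textOption = parser.accepts("text", "the text to print").withRequiredArg();
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options) throws Exception {
        if (options.has(textOption) == false) {
            throw new UserError(ExitCodes.USAGE, "Missing required option [text]");
        }
        terminal.println(textOption.value(options));
    }
}
```
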
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cli;
+
+/**
+ * POSIX exit codes.
+ */
+public class ExitCodes {
+    public static final int OK = 0;
+    public static final int USAGE = 64; /* command line usage error */
+    public static final int DATA_ERROR = 65; /* data format error */
+    public static final int NO_INPUT = 66; /* cannot open input */
+    public static final int NO_USER = 67; /* addressee unknown */
+    public static final int NO_HOST = 68; /* host name unknown */
+    public static final int UNAVAILABLE = 69; /* service unavailable */
+    public static final int CODE_ERROR = 70; /* internal software error */
+    public static final int CANT_CREATE = 73; /* can't create (user) output file */
+    public static final int IO_ERROR = 74; /* input/output error */
+    public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */
+    public static final int PROTOCOL = 76; /* remote error in protocol */
+    public static final int NOPERM = 77; /* permission denied */
+    public static final int CONFIG = 78; /* configuration error */
+
+    private ExitCodes() { /* no instance, just constants */ }
+}
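
These constants follow BSD's sysexits.h numbering, which is why the values are sparse (71 and 72 are sysexits codes not mapped here). A command surfaces them by throwing UserError, which Command.main converts into the process exit code; a hypothetical validation sketch:

```java
import java.nio.file.Files;
import java.nio.file.Path;

class ConfigCheckSketch {
    // Hypothetical check: fail with the sysexits-style CONFIG code (78).
    static void ensureExists(Path configPath) throws UserError {
        if (Files.exists(configPath) == false) {
            throw new UserError(ExitCodes.CONFIG, "config file [" + configPath + "] does not exist");
        }
    }
}
```
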
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cli;
+
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import joptsimple.NonOptionArgumentSpec;
+import joptsimple.OptionSet;
+
+/**
+ * A cli tool which is made up of multiple subcommands.
+ */
+public class MultiCommand extends Command {
+
+    protected final Map<String, Command> subcommands = new LinkedHashMap<>();
+
+    private final NonOptionArgumentSpec<String> arguments = parser.nonOptions("command");
+
+    public MultiCommand(String description) {
+        super(description);
+        parser.posixlyCorrect(true);
+    }
+
+    @Override
+    protected void printAdditionalHelp(Terminal terminal) {
+        if (subcommands.isEmpty()) {
+            throw new IllegalStateException("No subcommands configured");
+        }
+        terminal.println("Commands");
+        terminal.println("--------");
+        for (Map.Entry<String, Command> subcommand : subcommands.entrySet()) {
+            terminal.println(subcommand.getKey() + " - " + subcommand.getValue().description);
+        }
+        terminal.println("");
+    }
+
+    @Override
+    protected void execute(Terminal terminal, OptionSet options) throws Exception {
+        if (subcommands.isEmpty()) {
+            throw new IllegalStateException("No subcommands configured");
+        }
+        String[] args = arguments.values(options).toArray(new String[0]);
+        if (args.length == 0) {
+            throw new UserError(ExitCodes.USAGE, "Missing command");
+        }
+        Command subcommand = subcommands.get(args[0]);
+        if (subcommand == null) {
+            throw new UserError(ExitCodes.USAGE, "Unknown command [" + args[0] + "]");
+        }
+        subcommand.mainWithoutErrorHandling(Arrays.copyOfRange(args, 1, args.length), terminal);
+    }
+}
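
Subcommands register themselves in the protected map; execute then peels off the first non-option argument and dispatches via mainWithoutErrorHandling, so a subcommand failure surfaces through the outer main. A hypothetical assembly (assumes the same org.elasticsearch.cli package, since subcommands is protected; EchoCommand is the sketch shown after Command above):

```java
class ToolSketch {
    public static void main(String[] args) throws Exception {
        MultiCommand tool = new MultiCommand("a demo tool with subcommands");
        tool.subcommands.put("echo", new EchoCommand());
        System.exit(tool.main(args, Terminal.DEFAULT));
    }
}
```
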
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.common.cli;
+package org.elasticsearch.cli;
 
 import java.io.BufferedReader;
 import java.io.Console;
@@ -29,7 +29,7 @@ import java.nio.charset.Charset;
 import org.elasticsearch.common.SuppressForbidden;
 
 /**
- * A Terminal wraps access to reading input and writing output for a {@link CliTool}.
+ * A Terminal wraps access to reading input and writing output for a cli.
  *
  * The available methods are similar to those of {@link Console}, with the ability
  * to read either normal text or a password, and the ability to print a line
@@ -61,7 +61,7 @@ public abstract class Terminal {
     }
 
     /** Sets the verbosity of the terminal. */
-    void setVerbosity(Verbosity verbosity) {
+    public void setVerbosity(Verbosity verbosity) {
         this.verbosity = verbosity;
     }
 
@@ -89,35 +89,35 @@ public abstract class Terminal {
 
     private static class ConsoleTerminal extends Terminal {
 
-        private static final Console console = System.console();
+        private static final Console CONSOLE = System.console();
 
         ConsoleTerminal() {
             super(System.lineSeparator());
         }
 
         static boolean isSupported() {
-            return console != null;
+            return CONSOLE != null;
         }
 
         @Override
         public PrintWriter getWriter() {
-            return console.writer();
+            return CONSOLE.writer();
         }
 
         @Override
         public String readText(String prompt) {
-            return console.readLine("%s", prompt);
+            return CONSOLE.readLine("%s", prompt);
         }
 
         @Override
         public char[] readSecret(String prompt) {
-            return console.readPassword("%s", prompt);
+            return CONSOLE.readPassword("%s", prompt);
         }
     }
 
     private static class SystemTerminal extends Terminal {
 
-        private final PrintWriter writer = newWriter();
+        private static final PrintWriter WRITER = newWriter();
 
         SystemTerminal() {
             super(System.lineSeparator());
@@ -130,7 +130,7 @@ public abstract class Terminal {
 
         @Override
         public PrintWriter getWriter() {
-            return writer;
+            return WRITER;
         }
 
         @Override
@@ -17,19 +17,19 @@
  * under the License.
  */
 
-package org.elasticsearch.common.cli;
+package org.elasticsearch.cli;
 
 /**
- * An exception representing a user fixable problem in {@link CliTool} usage.
+ * An exception representing a user fixable problem in {@link Command} usage.
  */
 public class UserError extends Exception {
 
     /** The exit status the cli should use when catching this user error. */
-    public final CliTool.ExitStatus exitStatus;
+    public final int exitCode;
 
     /** Constructs a UserError with an exit status and message to show the user. */
-    public UserError(CliTool.ExitStatus exitStatus, String msg) {
+    public UserError(int exitCode, String msg) {
         super(msg);
-        this.exitStatus = exitStatus;
+        this.exitCode = exitCode;
     }
 }
@@ -19,12 +19,8 @@
 
 package org.elasticsearch.client;
 
-import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.ActionRequest;
-import org.elasticsearch.action.ActionRequestBuilder;
-import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
@@ -87,6 +83,7 @@ import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 
 import java.util.Map;
@@ -114,7 +111,7 @@ public interface Client extends ElasticsearchClient, Releasable {
             default:
                 throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]");
         }
-    }, false, Setting.Scope.CLUSTER);
+    }, Property.NodeScope);
 
     /**
      * The admin client that can be used to perform administrative operations.
@@ -34,6 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
@@ -100,10 +101,14 @@ public class TransportClientNodesService extends AbstractComponent {
     private volatile boolean closed;
 
 
-    public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
-    public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
-    public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);
-    public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL =
+        Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Property.NodeScope);
+    public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT =
+        Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Property.NodeScope);
+    public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME =
+        Setting.boolSetting("client.transport.ignore_cluster_name", false, Property.NodeScope);
+    public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF =
+        Setting.boolSetting("client.transport.sniff", false, Property.NodeScope);
 
     @Inject
     public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
@@ -58,6 +58,7 @@ import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.ExtensionPoint;
 import org.elasticsearch.gateway.GatewayAllocator;
@@ -74,7 +75,8 @@ public class ClusterModule extends AbstractModule {
 
     public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard";
     public static final String BALANCED_ALLOCATOR = "balanced"; // default
-    public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), false, Setting.Scope.CLUSTER);
+    public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING =
+        new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
     public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
         Collections.unmodifiableList(Arrays.asList(
             SameShardAllocationDecider.class,
@@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 
 import java.io.IOException;
@@ -37,7 +38,7 @@ public class ClusterName implements Streamable {
             throw new IllegalArgumentException("[cluster.name] must not be empty");
         }
         return s;
-    }, false, Setting.Scope.CLUSTER);
+    }, Property.NodeScope);
 
 
     public static final ClusterName DEFAULT = new ClusterName(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).intern());
@@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@@ -64,8 +65,12 @@ import java.util.concurrent.TimeUnit;
 */
 public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
 
-    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER);
-    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING =
+        Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10),
+            Property.Dynamic, Property.NodeScope);
+    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING =
+        Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15),
+            Property.Dynamic, Property.NodeScope);
 
     private volatile TimeValue updateFrequency;
 
@@ -35,6 +35,10 @@ import org.elasticsearch.transport.TransportService;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ScheduledFuture;
 
+import static org.elasticsearch.common.settings.Setting.Property;
+import static org.elasticsearch.common.settings.Setting.positiveTimeSetting;
+
+
 /**
  * This component is responsible for connecting to nodes once they are added to the cluster state, and disconnect when they are
  * removed. Also, it periodically checks that all connections are still open and if needed restores them.
@@ -45,7 +49,7 @@ import java.util.concurrent.ScheduledFuture;
 public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> {
 
     public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
-        Setting.positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
+        positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope);
     private final ThreadPool threadPool;
     private final TransportService transportService;
 
@@ -28,6 +28,7 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.mapper.MapperService;
@@ -41,7 +42,9 @@ import java.util.concurrent.TimeoutException;
 */
 public class MappingUpdatedAction extends AbstractComponent {
 
-    public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING =
+        Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30),
+            Property.Dynamic, Property.NodeScope);
 
     private IndicesAdminClient client;
     private volatile TimeValue dynamicMappingUpdateTimeout;
@@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 
 /**
  * This class acts as a functional wrapper around the <tt>index.auto_expand_replicas</tt> setting.
@@ -56,7 +57,7 @@ final class AutoExpandReplicas {
             }
         }
         return new AutoExpandReplicas(min, max, true);
-    }, true, Setting.Scope.INDEX);
+    }, Property.Dynamic, Property.IndexScope);
 
     private final int minReplicas;
     private final int maxReplicas;
@@ -38,6 +38,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.loader.SettingsLoader;
 import org.elasticsearch.common.xcontent.FromXContentBuilder;
@@ -152,28 +153,36 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
 
     public static final String INDEX_SETTING_PREFIX = "index.";
     public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
-    public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX);
+    public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING =
+        Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, Property.IndexScope);
     public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
-    public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX);
+    public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING =
+        Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope);
     public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
-    public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING =
+        Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope);
 
     public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
-    public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING =
+        Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, Property.IndexScope);
 
     public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
     public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
     public static final String SETTING_READ_ONLY = "index.blocks.read_only";
-    public static final Setting<Boolean> INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_READ_ONLY_SETTING =
+        Setting.boolSetting(SETTING_READ_ONLY, false, Property.Dynamic, Property.IndexScope);
 
     public static final String SETTING_BLOCKS_READ = "index.blocks.read";
-    public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING =
+        Setting.boolSetting(SETTING_BLOCKS_READ, false, Property.Dynamic, Property.IndexScope);
 
     public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
-    public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING =
+        Setting.boolSetting(SETTING_BLOCKS_WRITE, false, Property.Dynamic, Property.IndexScope);
 
     public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
-    public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX);
+    public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING =
+        Setting.boolSetting(SETTING_BLOCKS_METADATA, false, Property.Dynamic, Property.IndexScope);
 
     public static final String SETTING_VERSION_CREATED = "index.version.created";
     public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
@ -182,18 +191,24 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
|
||||
public static final String SETTING_CREATION_DATE = "index.creation_date";
|
||||
public static final String SETTING_PRIORITY = "index.priority";
|
||||
public static final Setting<Integer> INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX);
|
||||
public static final Setting<Integer> INDEX_PRIORITY_SETTING =
|
||||
Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope);
|
||||
public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
|
||||
public static final String SETTING_INDEX_UUID = "index.uuid";
|
||||
public static final String SETTING_DATA_PATH = "index.data_path";
|
||||
public static final Setting<String> INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX);
|
||||
public static final Setting<String> INDEX_DATA_PATH_SETTING =
|
||||
new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope);
|
||||
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
|
||||
public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX);
|
||||
public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING =
|
||||
Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, Property.Dynamic, Property.IndexScope);
|
||||
public static final String INDEX_UUID_NA_VALUE = "_na_";
|
||||
|
||||
public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);
|
||||
public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX);
|
||||
public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX);
|
||||
public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
|
||||
Setting.groupSetting("index.routing.allocation.require.", Property.Dynamic, Property.IndexScope);
|
||||
public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
|
||||
Setting.groupSetting("index.routing.allocation.include.", Property.Dynamic, Property.IndexScope);
|
||||
public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
|
||||
Setting.groupSetting("index.routing.allocation.exclude.", Property.Dynamic, Property.IndexScope);
|
||||
|
||||
public static final IndexMetaData PROTO = IndexMetaData.builder("")
|
||||
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
|
||||
|
|
|
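The pattern this commit applies across all of these files: the old `Setting` factories took a positional boolean for "dynamic" plus a `Setting.Scope` enum, while the new style passes `Setting.Property` flags as varargs. A minimal before/after sketch (the setting key `index.example.value` is hypothetical, for illustration only):

    // before: dynamism as a positional boolean, scope as an enum argument
    Setting<Integer> old =
        Setting.intSetting("index.example.value", 42, 0, true, Setting.Scope.INDEX);

    // after: both facts expressed as Property flags, as in the declarations above
    Setting<Integer> migrated =
        Setting.intSetting("index.example.value", 42, 0, Property.Dynamic, Property.IndexScope);

Non-dynamic settings simply omit `Property.Dynamic` (compare `index.number_of_shards` above), so the flag list doubles as documentation.
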
@ -41,6 +41,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.FromXContentBuilder;

@ -139,7 +140,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
    }

    public static final Setting<Boolean> SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER);
    public static final Setting<Boolean> SETTING_READ_ONLY_SETTING =
        Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope);

    public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));

@ -25,6 +25,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;

@ -40,7 +41,7 @@ public class DiscoveryNodeService extends AbstractComponent {
    public static final Setting<Long> NODE_ID_SEED_SETTING =
        // don't use node.id.seed so it won't be seen as an attribute
        Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
        Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope);
    private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<>();
    private final Version version;

@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;

@ -44,7 +45,9 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
    public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");
    private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);

    public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING =
        Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic,
            Property.IndexScope);

    /**
     * Reason why the shard is in unassigned state.

@ -36,6 +36,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.PriorityComparator;

@ -67,9 +68,13 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
 */
public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {

    public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER);
    public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER);
    public static final Setting<Float> THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER);
    public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING =
        Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope);
    public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING =
        Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope);
    public static final Setting<Float> THRESHOLD_SETTING =
        Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f,
            Property.Dynamic, Property.NodeScope);

    private volatile WeightFunction weightFunction;
    private volatile float threshold;

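These balance factors are all `Property.Dynamic`, which only takes effect if the owning component registers an update consumer. A sketch of how such a setting is typically consumed (the constructor shape is illustrative, not the exact code of this class):

    @Inject
    public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) {
        super(settings);
        setThreshold(THRESHOLD_SETTING.get(settings)); // initial value from node settings
        // invoked again whenever the value changes via the cluster-settings API
        clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold);
    }

    private void setThreshold(float threshold) {
        this.threshold = threshold; // volatile field, see above
    }
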
@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import java.util.HashMap;

@ -77,8 +78,11 @@ public class AwarenessAllocationDecider extends AllocationDecider {
    public static final String NAME = "awareness";

    public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER);
    public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER);
    public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING =
        new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , Property.Dynamic,
            Property.NodeScope);
    public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING =
        Setting.groupSetting("cluster.routing.allocation.awareness.force.", Property.Dynamic, Property.NodeScope);

    private String[] awarenessAttributes;

@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import java.util.Locale;

@ -48,7 +49,9 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
    public static final String NAME = "cluster_rebalance";
    public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER);
    public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING =
        new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT),
            ClusterRebalanceType::parseString, Property.Dynamic, Property.NodeScope);

    /**
     * An enum representation for the configured re-balance type.

@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

/**

@ -42,7 +43,9 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
    public static final String NAME = "concurrent_rebalance";

    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING =
        Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1,
            Property.Dynamic, Property.NodeScope);
    private volatile int clusterConcurrentRebalance;

    @Inject

@ -38,6 +38,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.RatioValue;

@ -81,11 +82,22 @@ public class DiskThresholdDecider extends AllocationDecider {
    private volatile boolean enabled;
    private volatile TimeValue rerouteInterval;

    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER);
    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER);
    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER);
    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);;
    public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);
    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING =
        Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, Property.Dynamic, Property.NodeScope);
    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING =
        new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%",
            (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"),
            Property.Dynamic, Property.NodeScope);
    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING =
        new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%",
            (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"),
            Property.Dynamic, Property.NodeScope);
    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING =
        Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true,
            Property.Dynamic, Property.NodeScope);;
    public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING =
        Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60),
            Property.Dynamic, Property.NodeScope);

    /**
     * Listens for a node to go over the high watermark and kicks off an empty

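The watermark settings stay `String`-typed because a watermark may be either a percentage or an absolute byte size; `validWatermarkSetting` only vets the raw value. A simplified, standalone sketch of that validation idea (the real implementation delegates to `RatioValue`/`ByteSizeValue` parsing; the regex and messages here are illustrative):

    static String validWatermark(String value, String settingName) {
        if (value.endsWith("%")) {
            double percent = Double.parseDouble(value.substring(0, value.length() - 1));
            if (percent < 0.0 || percent > 100.0) {
                throw new IllegalArgumentException(settingName + " must be between 0% and 100%, got " + value);
            }
        } else if (value.matches("\\d+(\\.\\d+)?(b|kb|mb|gb|tb)") == false) {
            // neither a percentage nor an absolute size such as "500mb"
            throw new IllegalArgumentException("unable to parse " + settingName + ": " + value);
        }
        return value; // the Setting stores the raw string; consumers re-parse it
    }
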
@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import java.util.Locale;

@ -60,11 +61,19 @@ public class EnableAllocationDecider extends AllocationDecider {
    public static final String NAME = "enable";

    public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER);
    public static final Setting<Allocation> INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX);
    public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING =
        new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse,
            Property.Dynamic, Property.NodeScope);
    public static final Setting<Allocation> INDEX_ROUTING_ALLOCATION_ENABLE_SETTING =
        new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse,
            Property.Dynamic, Property.IndexScope);

    public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER);
    public static final Setting<Rebalance> INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX);
    public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING =
        new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse,
            Property.Dynamic, Property.NodeScope);
    public static final Setting<Rebalance> INDEX_ROUTING_REBALANCE_ENABLE_SETTING =
        new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse,
            Property.Dynamic, Property.IndexScope);

    private volatile Rebalance enableRebalance;
    private volatile Allocation enableAllocation;

@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;

@ -60,9 +61,12 @@ public class FilterAllocationDecider extends AllocationDecider {
    public static final String NAME = "filter";

    public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER);
    public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER);
    public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER);
    public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING =
        Setting.groupSetting("cluster.routing.allocation.require.", Property.Dynamic, Property.NodeScope);
    public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING =
        Setting.groupSetting("cluster.routing.allocation.include.", Property.Dynamic, Property.NodeScope);
    public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =
        Setting.groupSetting("cluster.routing.allocation.exclude.", Property.Dynamic, Property.NodeScope);

    private volatile DiscoveryNodeFilters clusterRequireFilters;
    private volatile DiscoveryNodeFilters clusterIncludeFilters;

@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

/**

@ -59,13 +60,17 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
     * Controls the maximum number of shards per index on a single Elasticsearch
     * node. Negative values are interpreted as unlimited.
     */
    public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX);
    public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING =
        Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1,
            Property.Dynamic, Property.IndexScope);

    /**
     * Controls the maximum number of shards per node on a global level.
     * Negative values are interpreted as unlimited.
     */
    public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING =
        Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1,
            Property.Dynamic, Property.NodeScope);

    @Inject

@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

/**

@ -39,7 +40,9 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
    /**
     * Disables relocation of shards that are currently being snapshotted.
     */
    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER);
    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING =
        Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false,
            Property.Dynamic, Property.NodeScope);

    private volatile boolean enableRelocation = false;

@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

/**

@ -50,10 +51,25 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
    public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
    public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
    public static final String NAME = "throttling";
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), true, Setting.Scope.CLUSTER);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING =
        new Setting<>("cluster.routing.allocation.node_concurrent_recoveries",
            Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES),
            (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"),
            Property.Dynamic, Property.NodeScope);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING =
        Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries",
            DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0,
            Property.Dynamic, Property.NodeScope);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING =
        new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries",
            (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s),
            (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"),
            Property.Dynamic, Property.NodeScope);
    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING =
        new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries",
            (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s),
            (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"),
            Property.Dynamic, Property.NodeScope);

    private volatile int primariesInitialRecoveries;

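Note the `getRaw` indirection above: the incoming/outgoing recovery settings default to the current raw value of `node_concurrent_recoveries`, so updating the umbrella key moves both unless they are overridden individually. The pattern in isolation (keys hypothetical, constructor shape taken from the declarations above):

    // a setting whose default tracks another setting's value
    public static final Setting<Integer> COMBINED_SETTING =
        Setting.intSetting("example.recoveries.combined", 2, 0, Property.Dynamic, Property.NodeScope);
    public static final Setting<Integer> INCOMING_SETTING =
        new Setting<>("example.recoveries.incoming",
            (s) -> COMBINED_SETTING.getRaw(s),                             // default: the other key's raw value
            (s) -> Setting.parseInt(s, 0, "example.recoveries.incoming"),  // parser/validator
            Property.Dynamic, Property.NodeScope);
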
@ -50,6 +50,7 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;

@ -89,7 +90,9 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
 */
public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {

    public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
    public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING =
        Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30),
            Property.Dynamic, Property.NodeScope);

    public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
    private final ThreadPool threadPool;

@ -1,138 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.cli;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

/**
 * A helper command that checks if configured paths have been changed when running a CLI command.
 * It is only executed in case of specified paths by the command and if the paths underlying filesystem
 * supports posix permissions.
 *
 * If this is the case, a warn message is issued whenever an owner, a group or the file permissions is changed by
 * the command being executed and not configured back to its prior state, which should be the task of the command
 * being executed.
 *
 */
public abstract class CheckFileCommand extends CliTool.Command {

    public CheckFileCommand(Terminal terminal) {
        super(terminal);
    }

    /**
     * abstract method, which should implement the same logic as CliTool.Command.execute(), but is wrapped
     */
    public abstract CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception;

    /**
     * Returns the array of paths, that should be checked if the permissions, user or groups have changed
     * before and after execution of the command
     *
     */
    protected abstract Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception;

    @Override
    public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
        Path[] paths = pathsForPermissionsCheck(settings, env);

        if (paths == null || paths.length == 0) {
            return doExecute(settings, env);
        }

        Map<Path, Set<PosixFilePermission>> permissions = new HashMap<>(paths.length);
        Map<Path, String> owners = new HashMap<>(paths.length);
        Map<Path, String> groups = new HashMap<>(paths.length);

        if (paths != null && paths.length > 0) {
            for (Path path : paths) {
                try {
                    boolean supportsPosixPermissions = Environment.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class);
                    if (supportsPosixPermissions) {
                        PosixFileAttributes attributes = Files.readAttributes(path, PosixFileAttributes.class);
                        permissions.put(path, attributes.permissions());
                        owners.put(path, attributes.owner().getName());
                        groups.put(path, attributes.group().getName());
                    }
                } catch (IOException e) {
                    // silently swallow if not supported, no need to log things
                }
            }
        }

        CliTool.ExitStatus status = doExecute(settings, env);

        // check if permissions differ
        for (Map.Entry<Path, Set<PosixFilePermission>> entry : permissions.entrySet()) {
            if (!Files.exists(entry.getKey())) {
                continue;
            }

            Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
            Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
            if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
                terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + entry.getKey() + "] have changed "
                        + "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
                        + "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
                terminal.println(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!");
            }
        }

        // check if owner differs
        for (Map.Entry<Path, String> entry : owners.entrySet()) {
            if (!Files.exists(entry.getKey())) {
                continue;
            }

            String ownerBeforeWrite = entry.getValue();
            String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
            if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
                terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
            }
        }

        // check if group differs
        for (Map.Entry<Path, String> entry : groups.entrySet()) {
            if (!Files.exists(entry.getKey())) {
                continue;
            }

            String groupBeforeWrite = entry.getValue();
            String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
            if (!groupAfterWrite.equals(groupBeforeWrite)) {
                terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
            }
        }

        return status;
    }
}

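The removed command was a thin wrapper around the standard java.nio.file POSIX attribute API; the snapshot-and-compare it performed boils down to calls like these (standalone sketch, the path is illustrative):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.attribute.PosixFileAttributeView;
    import java.nio.file.attribute.PosixFileAttributes;
    import java.nio.file.attribute.PosixFilePermissions;

    public class PosixSnapshot {
        public static void main(String[] args) throws Exception {
            Path path = Paths.get("config/elasticsearch.yml"); // illustrative path
            if (Files.getFileStore(path).supportsFileAttributeView(PosixFileAttributeView.class)) {
                PosixFileAttributes attrs = Files.readAttributes(path, PosixFileAttributes.class);
                // owner, group and permissions, as captured before/after a command runs
                System.out.println(attrs.owner().getName() + ":" + attrs.group().getName()
                        + " " + PosixFilePermissions.toString(attrs.permissions())); // e.g. rw-r--r--
            }
        }
    }
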
@ -1,250 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.cli;

import org.apache.commons.cli.AlreadySelectedException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.MissingOptionException;
import org.apache.commons.cli.UnrecognizedOptionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;

import java.util.Locale;

import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;

/**
 * A base class for command-line interface tool.
 *
 * Two modes are supported:
 *
 * - Single command mode. The tool exposes a single command that can potentially accept arguments (eg. CLI options).
 * - Multi command mode. The tool support multiple commands, each for different tasks, each potentially accepts arguments.
 *
 * In a multi-command mode. The first argument must be the command name. For example, the plugin manager
 * can be seen as a multi-command tool with two possible commands: install and uninstall
 *
 * The tool is configured using a {@link CliToolConfig} which encapsulates the tool's commands and their
 * potential options. The tool also comes with out of the box simple help support (the -h/--help option is
 * automatically handled) where the help text is configured in a dedicated *.help files located in the same package
 * as the tool.
 */
public abstract class CliTool {

    // based on sysexits.h
    public enum ExitStatus {
        OK(0),
        OK_AND_EXIT(0),
        USAGE(64),          /* command line usage error */
        DATA_ERROR(65),     /* data format error */
        NO_INPUT(66),       /* cannot open input */
        NO_USER(67),        /* addressee unknown */
        NO_HOST(68),        /* host name unknown */
        UNAVAILABLE(69),    /* service unavailable */
        CODE_ERROR(70),     /* internal software error */
        CANT_CREATE(73),    /* can't create (user) output file */
        IO_ERROR(74),       /* input/output error */
        TEMP_FAILURE(75),   /* temp failure; user is invited to retry */
        PROTOCOL(76),       /* remote error in protocol */
        NOPERM(77),         /* permission denied */
        CONFIG(78);         /* configuration error */

        final int status;

        ExitStatus(int status) {
            this.status = status;
        }

        public int status() {
            return status;
        }
    }

    protected final Terminal terminal;
    protected final Environment env;
    protected final Settings settings;

    private final CliToolConfig config;

    protected CliTool(CliToolConfig config) {
        this(config, Terminal.DEFAULT);
    }

    protected CliTool(CliToolConfig config, Terminal terminal) {
        if (config.cmds().size() == 0) {
            throw new IllegalArgumentException("At least one command must be configured");
        }
        this.config = config;
        this.terminal = terminal;
        env = InternalSettingsPreparer.prepareEnvironment(EMPTY_SETTINGS, terminal);
        settings = env.settings();
    }

    public final ExitStatus execute(String... args) throws Exception {

        // first lets see if the user requests tool help. We're doing it only if
        // this is a multi-command tool. If it's a single command tool, the -h/--help
        // option will be taken care of on the command level
        if (!config.isSingle() && args.length > 0 && (args[0].equals("-h") || args[0].equals("--help"))) {
            config.printUsage(terminal);
            return ExitStatus.OK_AND_EXIT;
        }

        CliToolConfig.Cmd cmd;
        if (config.isSingle()) {
            cmd = config.single();
        } else {

            if (args.length == 0) {
                terminal.println(Terminal.Verbosity.SILENT, "ERROR: command not specified");
                config.printUsage(terminal);
                return ExitStatus.USAGE;
            }

            String cmdName = args[0];
            cmd = config.cmd(cmdName);
            if (cmd == null) {
                terminal.println(Terminal.Verbosity.SILENT, "ERROR: unknown command [" + cmdName + "]. Use [-h] option to list available commands");
                return ExitStatus.USAGE;
            }

            // we now remove the command name from the args
            if (args.length == 1) {
                args = new String[0];
            } else {
                String[] cmdArgs = new String[args.length - 1];
                System.arraycopy(args, 1, cmdArgs, 0, cmdArgs.length);
                args = cmdArgs;
            }
        }

        try {
            return parse(cmd, args).execute(settings, env);
        } catch (UserError error) {
            terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + error.getMessage());
            return error.exitStatus;
        }
    }

    public Command parse(String cmdName, String[] args) throws Exception {
        CliToolConfig.Cmd cmd = config.cmd(cmdName);
        return parse(cmd, args);
    }

    public Command parse(CliToolConfig.Cmd cmd, String[] args) throws Exception {
        CommandLineParser parser = new DefaultParser();
        CommandLine cli = parser.parse(CliToolConfig.OptionsSource.HELP.options(), args, true);
        if (cli.hasOption("h")) {
            return helpCmd(cmd);
        }
        try {
            cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
        } catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) {
            // intentionally drop the stack trace here as these are really user errors,
            // the stack trace into cli parsing lib is not important
            throw new UserError(ExitStatus.USAGE, e.toString());
        }

        if (cli.hasOption("v")) {
            terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
        } else if (cli.hasOption("s")) {
            terminal.setVerbosity(Terminal.Verbosity.SILENT);
        } else {
            terminal.setVerbosity(Terminal.Verbosity.NORMAL);
        }
        return parse(cmd.name(), cli);
    }

    protected Command.Help helpCmd(CliToolConfig.Cmd cmd) {
        return new Command.Help(cmd, terminal);
    }

    protected static Command.Exit exitCmd(ExitStatus status) {
        return new Command.Exit(null, status, null);
    }

    protected static Command.Exit exitCmd(ExitStatus status, Terminal terminal, String msg, Object... args) {
        return new Command.Exit(String.format(Locale.ROOT, msg, args), status, terminal);
    }

    protected abstract Command parse(String cmdName, CommandLine cli) throws Exception;

    public static abstract class Command {

        protected final Terminal terminal;

        protected Command(Terminal terminal) {
            this.terminal = terminal;
        }

        public abstract ExitStatus execute(Settings settings, Environment env) throws Exception;

        public static class Help extends Command {

            private final CliToolConfig.Cmd cmd;

            private Help(CliToolConfig.Cmd cmd, Terminal terminal) {
                super(terminal);
                this.cmd = cmd;
            }

            @Override
            public ExitStatus execute(Settings settings, Environment env) throws Exception {
                cmd.printUsage(terminal);
                return ExitStatus.OK_AND_EXIT;
            }
        }

        public static class Exit extends Command {
            private final String msg;
            private final ExitStatus status;

            private Exit(String msg, ExitStatus status, Terminal terminal) {
                super(terminal);
                this.msg = msg;
                this.status = status;
            }

            @Override
            public ExitStatus execute(Settings settings, Environment env) throws Exception {
                if (msg != null) {
                    if (status != ExitStatus.OK) {
                        terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + msg);
                    } else {
                        terminal.println(msg);
                    }
                }
                return status;
            }

            public ExitStatus status() {
                return status;
            }
        }
    }
}

@ -1,302 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.cli;

import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionGroup;
import org.apache.commons.cli.Options;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

/**
 *
 */
public class CliToolConfig {

    public static Builder config(String name, Class<? extends CliTool> toolType) {
        return new Builder(name, toolType);
    }

    private final Class<? extends CliTool> toolType;
    private final String name;
    private final Map<String, Cmd> cmds;

    private static final HelpPrinter helpPrinter = new HelpPrinter();

    private CliToolConfig(String name, Class<? extends CliTool> toolType, Cmd[] cmds) {
        this.name = name;
        this.toolType = toolType;
        final Map<String, Cmd> cmdsMapping = new HashMap<>();
        for (int i = 0; i < cmds.length; i++) {
            cmdsMapping.put(cmds[i].name, cmds[i]);
        }
        this.cmds = Collections.unmodifiableMap(cmdsMapping);
    }

    public boolean isSingle() {
        return cmds.size() == 1;
    }

    public Cmd single() {
        assert isSingle() : "Requesting single command on a multi-command tool";
        return cmds.values().iterator().next();
    }

    public Class<? extends CliTool> toolType() {
        return toolType;
    }

    public String name() {
        return name;
    }

    public Collection<Cmd> cmds() {
        return cmds.values();
    }

    public Cmd cmd(String name) {
        return cmds.get(name);
    }

    public void printUsage(Terminal terminal) {
        helpPrinter.print(this, terminal);
    }

    public static class Builder {

        public static Cmd.Builder cmd(String name, Class<? extends CliTool.Command> cmdType) {
            return new Cmd.Builder(name, cmdType);
        }

        public static OptionBuilder option(String shortName, String longName) {
            return new OptionBuilder(shortName, longName);
        }

        public static Option.Builder optionBuilder(String shortName, String longName) {
            return Option.builder(shortName).argName(longName).longOpt(longName);
        }

        public static OptionGroupBuilder optionGroup(boolean required) {
            return new OptionGroupBuilder(required);
        }

        private final Class<? extends CliTool> toolType;
        private final String name;
        private Cmd[] cmds;

        private Builder(String name, Class<? extends CliTool> toolType) {
            this.name = name;
            this.toolType = toolType;
        }

        public Builder cmds(Cmd.Builder... cmds) {
            this.cmds = new Cmd[cmds.length];
            for (int i = 0; i < cmds.length; i++) {
                this.cmds[i] = cmds[i].build();
                this.cmds[i].toolName = name;
            }
            return this;
        }

        public Builder cmds(Cmd... cmds) {
            for (int i = 0; i < cmds.length; i++) {
                cmds[i].toolName = name;
            }
            this.cmds = cmds;
            return this;
        }

        public CliToolConfig build() {
            return new CliToolConfig(name, toolType, cmds);
        }
    }

    public static class Cmd {

        private String toolName;
        private final String name;
        private final Class<? extends CliTool.Command> cmdType;
        private final Options options;
        private final boolean stopAtNonOption;

        private Cmd(String name, Class<? extends CliTool.Command> cmdType, Options options, boolean stopAtNonOption) {
            this.name = name;
            this.cmdType = cmdType;
            this.options = options;
            this.stopAtNonOption = stopAtNonOption;
            OptionsSource.VERBOSITY.populate(options);
        }

        public Class<? extends CliTool.Command> cmdType() {
            return cmdType;
        }

        public String name() {
            return name;
        }

        public Options options() {
            return options;
        }

        public boolean isStopAtNonOption() {
            return stopAtNonOption;
        }

        public void printUsage(Terminal terminal) {
            helpPrinter.print(toolName, this, terminal);
        }

        public static class Builder {

            private final String name;
            private final Class<? extends CliTool.Command> cmdType;
            private Options options = new Options();
            private boolean stopAtNonOption = false;

            private Builder(String name, Class<? extends CliTool.Command> cmdType) {
                this.name = name;
                this.cmdType = cmdType;
            }

            public Builder options(OptionBuilder... optionBuilder) {
                for (int i = 0; i < optionBuilder.length; i++) {
                    options.addOption(optionBuilder[i].build());
                }
                return this;
            }

            public Builder options(Option.Builder... optionBuilders) {
                for (int i = 0; i < optionBuilders.length; i++) {
                    options.addOption(optionBuilders[i].build());
                }
                return this;
            }

            public Builder optionGroups(OptionGroupBuilder... optionGroupBuilders) {
                for (OptionGroupBuilder builder : optionGroupBuilders) {
                    options.addOptionGroup(builder.build());
                }
                return this;
            }

            /**
             * @param stopAtNonOption if <tt>true</tt> an unrecognized argument stops
             *     the parsing and the remaining arguments are added to the
             *     args list. If <tt>false</tt> an unrecognized
             *     argument triggers a ParseException.
             */
            public Builder stopAtNonOption(boolean stopAtNonOption) {
                this.stopAtNonOption = stopAtNonOption;
                return this;
            }

            public Cmd build() {
                return new Cmd(name, cmdType, options, stopAtNonOption);
            }
        }
    }

    public static class OptionBuilder {

        private final Option option;

        private OptionBuilder(String shortName, String longName) {
            option = new Option(shortName, "");
            option.setLongOpt(longName);
            option.setArgName(longName);
        }

        public OptionBuilder required(boolean required) {
            option.setRequired(required);
            return this;
        }

        public OptionBuilder hasArg(boolean optional) {
            option.setOptionalArg(optional);
            option.setArgs(1);
            return this;
        }

        public Option build() {
            return option;
        }
    }

    public static class OptionGroupBuilder {

        private OptionGroup group;

        private OptionGroupBuilder(boolean required) {
            group = new OptionGroup();
            group.setRequired(required);
        }

        public OptionGroupBuilder options(OptionBuilder... optionBuilders) {
            for (OptionBuilder builder : optionBuilders) {
                group.addOption(builder.build());
            }
            return this;
        }

        public OptionGroup build() {
            return group;
        }

    }

    static abstract class OptionsSource {

        static final OptionsSource HELP = new OptionsSource() {

            @Override
            void populate(Options options) {
                options.addOption(new OptionBuilder("h", "help").required(false).build());
            }
        };

        static final OptionsSource VERBOSITY = new OptionsSource() {
            @Override
            void populate(Options options) {
                OptionGroup verbosityGroup = new OptionGroup();
                verbosityGroup.setRequired(false);
                verbosityGroup.addOption(new OptionBuilder("s", "silent").required(false).build());
                verbosityGroup.addOption(new OptionBuilder("v", "verbose").required(false).build());
                options.addOptionGroup(verbosityGroup);
            }
        };

        private Options options;

        Options options() {
            if (options == null) {
                options = new Options();
                populate(options);
            }
            return options;
        }

        abstract void populate(Options options);

    }
}

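For context on what is being removed: tools described themselves through this builder API roughly as follows (a usage sketch; the tool and command names are hypothetical, with ExampleTool extending CliTool and the command classes extending CliTool.Command):

    import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
    import static org.elasticsearch.common.cli.CliToolConfig.Builder.option;

    // a two-command tool ("install" / "remove"); "install" takes an optional --url argument
    CliToolConfig config = CliToolConfig.config("example-tool", ExampleTool.class)
        .cmds(
            cmd("install", InstallCommand.class)
                .options(option("u", "url").hasArg(false).required(false)),
            cmd("remove", RemoveCommand.class))
        .build();
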
@ -1,57 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.cli;

import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.util.Callback;

import java.io.IOException;
import java.io.InputStream;

/**
 *
 */
public class HelpPrinter {

    private static final String HELP_FILE_EXT = ".help";

    public void print(CliToolConfig config, Terminal terminal) {
        print(config.toolType(), config.name(), terminal);
    }

    public void print(String toolName, CliToolConfig.Cmd cmd, Terminal terminal) {
        print(cmd.cmdType(), toolName + "-" + cmd.name(), terminal);
    }

    private static void print(Class clazz, String name, final Terminal terminal) {
        terminal.println(Terminal.Verbosity.SILENT, "");
        try (InputStream input = clazz.getResourceAsStream(name + HELP_FILE_EXT)) {
            Streams.readAllLines(input, new Callback<String>() {
                @Override
                public void handle(String line) {
                    terminal.println(Terminal.Verbosity.SILENT, line);
                }
            });
        } catch (IOException ioe) {
            throw new RuntimeException(ioe);
        }
        terminal.println(Terminal.Verbosity.SILENT, "");
    }
}

@ -21,6 +21,7 @@ package org.elasticsearch.common.logging;

import org.apache.log4j.Logger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

import java.util.Locale;

@ -30,9 +31,10 @@ import java.util.Locale;
public abstract class ESLoggerFactory {

    public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
        new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
        new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope);
    public static final Setting<LogLevel> LOG_LEVEL_SETTING =
        Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
        Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse,
            Property.Dynamic, Property.NodeScope);

    public static ESLogger getLogger(String prefix, String name) {
        prefix = prefix == null ? null : prefix.intern();

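A sketch of what the now-dynamic prefix setting admits; the key below is illustrative, not from the diff. Any key under "logger." resolves through the same prefix setting and mints a concrete per-logger setting.

    Setting<ESLoggerFactory.LogLevel> discoveryLevel =
        ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting("logger.org.elasticsearch.discovery");
    // the concrete setting inherits the prefix setting's properties
    assert discoveryLevel.isDynamic();
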
@ -22,7 +22,7 @@ package org.elasticsearch.common.logging;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.cli.Terminal;

/**
 * TerminalAppender logs event to Terminal.DEFAULT. It is used for example by the PluginCli.

@ -134,7 +134,7 @@ public class MultiPhrasePrefixQuery extends Query {
        if (termArrays.isEmpty()) {
            return new MatchNoDocsQuery();
        }
        MultiPhraseQuery query = new MultiPhraseQuery();
        MultiPhraseQuery.Builder query = new MultiPhraseQuery.Builder();
        query.setSlop(slop);
        int sizeMinus1 = termArrays.size() - 1;
        for (int i = 0; i < sizeMinus1; i++) {

@ -153,7 +153,7 @@ public class MultiPhrasePrefixQuery extends Query {
            return Queries.newMatchNoDocsQuery();
        }
        query.add(terms.toArray(Term.class), position);
        return query.rewrite(reader);
        return query.build();
    }

    private void getPrefixTerms(ObjectHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {

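For context, the hunks above track Lucene's move to an immutable org.apache.lucene.search.MultiPhraseQuery; a minimal sketch of the Builder API they now target, with illustrative terms:

    // construct a phrase with one multi-term position, then freeze it
    MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder();
    builder.setSlop(1);
    builder.add(new Term("body", "quick"));
    builder.add(new Term[] {new Term("body", "fox"), new Term("body", "foxes")});
    MultiPhraseQuery query = builder.build();
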
@ -28,8 +28,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;

@ -155,10 +155,11 @@ public class NetworkModule extends AbstractModule {
    public static final String LOCAL_TRANSPORT = "local";
    public static final String NETTY_TRANSPORT = "netty";

    public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString("http.type", false, Scope.CLUSTER);
    public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER);
    public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING = Setting.simpleString("transport.service.type", false, Scope.CLUSTER);
    public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", false, Scope.CLUSTER);
    public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString("http.type", Property.NodeScope);
    public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
    public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
        Setting.simpleString("transport.service.type", Property.NodeScope);
    public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", Property.NodeScope);

@ -22,6 +22,7 @@ package org.elasticsearch.common.network;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

@ -34,6 +35,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

/**
 *

@ -43,24 +45,33 @@ public class NetworkService extends AbstractComponent {
    /** By default, we bind to loopback interfaces */
    public static final String DEFAULT_NETWORK_HOST = "_local_";

    public static final Setting<List<String>> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST),
        s -> s, false, Setting.Scope.CLUSTER);
    public static final Setting<List<String>> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING,
        s -> s, false, Setting.Scope.CLUSTER);
    public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING,
        s -> s, false, Setting.Scope.CLUSTER);
    public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER);
    public static final Setting<List<String>> GLOBAL_NETWORK_HOST_SETTING =
        Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST), Function.identity(), Property.NodeScope);
    public static final Setting<List<String>> GLOBAL_NETWORK_BINDHOST_SETTING =
        Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope);
    public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING =
        Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope);
    public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, Property.NodeScope);

    public static final class TcpSettings {
        public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
        public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER);
        public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER);
        public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
        public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
        public static final Setting<Boolean> TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER);
        public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
        public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
        public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
        public static final Setting<Boolean> TCP_NO_DELAY =
            Setting.boolSetting("network.tcp.no_delay", true, Property.NodeScope);
        public static final Setting<Boolean> TCP_KEEP_ALIVE =
            Setting.boolSetting("network.tcp.keep_alive", true, Property.NodeScope);
        public static final Setting<Boolean> TCP_REUSE_ADDRESS =
            Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), Property.NodeScope);
        public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE =
            Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
        public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE =
            Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
        public static final Setting<Boolean> TCP_BLOCKING =
            Setting.boolSetting("network.tcp.blocking", false, Property.NodeScope);
        public static final Setting<Boolean> TCP_BLOCKING_SERVER =
            Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, Property.NodeScope);
        public static final Setting<Boolean> TCP_BLOCKING_CLIENT =
            Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, Property.NodeScope);
        public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT =
            Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope);
    }

    /**

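A sketch of the fallback wiring used above, with hypothetical keys: a listSetting built on another setting inherits that setting's value until it is set explicitly.

    Setting<List<String>> host = Setting.listSetting("sketch.host",
        Arrays.asList("_local_"), Function.identity(), Property.NodeScope);
    Setting<List<String>> bindHost = Setting.listSetting("sketch.bind_host",
        host, Function.identity(), Property.NodeScope);
    // with no explicit sketch.bind_host, bindHost resolves through host to ["_local_"]
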
@ -44,19 +44,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
    private final List<SettingUpdater<?>> settingUpdaters = new CopyOnWriteArrayList<>();
    private final Map<String, Setting<?>> complexMatchers;
    private final Map<String, Setting<?>> keySettings;
    private final Setting.Scope scope;
    private final Setting.Property scope;
    private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$");
    private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$");

    protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
    protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Property scope) {
        super(settings);
        this.lastSettingsApplied = Settings.EMPTY;
        this.scope = scope;
        Map<String, Setting<?>> complexMatchers = new HashMap<>();
        Map<String, Setting<?>> keySettings = new HashMap<>();
        for (Setting<?> setting : settingsSet) {
            if (setting.getScope() != scope) {
                throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope());
            if (setting.getProperties().contains(scope) == false) {
                throw new IllegalArgumentException("Setting must be a " + scope + " setting but has: " + setting.getProperties());
            }
            if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) {
                throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");

@ -96,7 +96,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
        return GROUP_KEY_PATTERN.matcher(key).matches();
    }

    public Setting.Scope getScope() {
    public Setting.Property getScope() {
        return this.scope;
    }

@ -342,8 +342,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
     * Returns the value for the given setting.
     */
    public <T> T get(Setting<T> setting) {
        if (setting.getScope() != scope) {
            throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]");
        if (setting.getProperties().contains(scope) == false) {
            throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] not in [" +
                setting.getProperties() + "]");
        }
        if (get(setting.getKey()) == null) {
            throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered");

@ -47,6 +47,7 @@ import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.discovery.DiscoveryModule;

@ -103,7 +104,7 @@ import java.util.function.Predicate;
 */
public final class ClusterSettings extends AbstractScopedSettings {
    public ClusterSettings(Settings nodeSettings, Set<Setting<?>> settingsSet) {
        super(nodeSettings, settingsSet, Setting.Scope.CLUSTER);
        super(nodeSettings, settingsSet, Property.NodeScope);
        addSettingsUpdater(new LoggingSettingUpdater(nodeSettings));
    }

@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;

@ -51,7 +52,7 @@ import java.util.function.Predicate;

/**
 * Encapsulates all valid index level settings.
 * @see org.elasticsearch.common.settings.Setting.Scope#INDEX
 * @see Property#IndexScope
 */
public final class IndexScopedSettings extends AbstractScopedSettings {

@ -136,22 +137,22 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        EngineConfig.INDEX_CODEC_SETTING,
        IndexWarmer.INDEX_NORMS_LOADING_SETTING,
        // validate that built-in similarities don't get redefined
        Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX, (s) -> {
        Setting.groupSetting("index.similarity.", (s) -> {
            Map<String, Settings> groups = s.getAsGroups();
            for (String key : SimilarityService.BUILT_IN.keySet()) {
                if (groups.containsKey(key)) {
                    throw new IllegalArgumentException("illegal value for [index.similarity."+ key + "] cannot redefine built-in similarity");
                }
            }
        }), // this allows similarity settings to be passed
        Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed
        }, Property.IndexScope), // this allows similarity settings to be passed
        Setting.groupSetting("index.analysis.", Property.IndexScope) // this allows analysis settings to be passed

    )));

    public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);

    public IndexScopedSettings(Settings settings, Set<Setting<?>> settingsSet) {
        super(settings, settingsSet, Setting.Scope.INDEX);
        super(settings, settingsSet, Property.IndexScope);
    }

    private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) {

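The similarity group above uses the reordered groupSetting(key, validator, properties) signature; a minimal sketch with an assumed key and rule, where the validator consumer runs against the settings subtree under the group prefix:

    Setting<Settings> group = Setting.groupSetting("sketch.group.", (s) -> {
        if (s.get("forbidden") != null) {
            throw new IllegalArgumentException("[sketch.group.forbidden] may not be set");
        }
    }, Property.IndexScope);
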
@ -25,7 +25,9 @@ import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;

@ -37,6 +39,10 @@ import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.Objects;

@ -49,12 +55,12 @@ import java.util.stream.Collectors;

/**
 * A setting. Encapsulates typical stuff like default value, parsing, and scope.
 * Some (dynamic=true) can by modified at run time using the API.
 * Some (SettingsProperty.Dynamic) can by modified at run time using the API.
 * All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
 * together with {@link AbstractScopedSettings}. This class contains several utility methods that makes it straight forward
 * to add settings for the majority of the cases. For instance a simple boolean settings can be defined like this:
 * <pre>{@code
 * public static final Setting<Boolean>; MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
 * public static final Setting<Boolean>; MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, SettingsProperty.NodeScope);}
 * </pre>
 * To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method.
 * <pre>

@ -65,32 +71,81 @@ import java.util.stream.Collectors;
 * public enum Color {
 *     RED, GREEN, BLUE;
 * }
 * public static final Setting<Color> MY_BOOLEAN = new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Scope.CLUSTER);
 * public static final Setting<Color> MY_BOOLEAN =
 *     new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, SettingsProperty.NodeScope);
 * }
 * </pre>
 */
public class Setting<T> extends ToXContentToBytes {

    public enum Property {
        /**
         * should be filtered in some api (mask password/credentials)
         */
        Filtered,

        /**
         * iff this setting can be dynamically updateable
         */
        Dynamic,

        /**
         * mark this setting as deprecated
         */
        Deprecated,

        /**
         * Node scope
         */
        NodeScope,

        /**
         * Index scope
         */
        IndexScope
    }

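The shape of the migration this enum enables, with an illustrative key: the trailing boolean dynamic flag and Scope argument collapse into a Property varargs.

    // before: Setting.boolSetting("sketch.flag", true, true, Setting.Scope.CLUSTER)
    // after:
    Setting<Boolean> SKETCH_FLAG =
        Setting.boolSetting("sketch.flag", true, Property.Dynamic, Property.NodeScope);
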
    private static final ESLogger logger = Loggers.getLogger(Setting.class);
    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);

    private final Key key;
    protected final Function<Settings, String> defaultValue;
    private final Function<String, T> parser;
    private final boolean dynamic;
    private final Scope scope;
    private final EnumSet<Property> properties;

    private static final EnumSet<Property> EMPTY_PROPERTIES = EnumSet.noneOf(Property.class);

    /**
     * Creates a new Setting instance
     * Creates a new Setting instance. When no scope is provided, we default to {@link Property#NodeScope}.
     * @param key the settings key for this setting.
     * @param defaultValue a default value function that returns the default values string representation.
     * @param parser a parser that parses the string rep into a complex datatype.
     * @param dynamic true iff this setting can be dynamically updateable
     * @param scope the scope of this setting
     * @param properties properties for this setting like scope, filtering...
     */
    public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
    public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) {
        assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null";
        this.key = key;
        this.defaultValue = defaultValue;
        this.parser = parser;
        this.dynamic = dynamic;
        this.scope = scope;
        if (properties == null) {
            throw new IllegalArgumentException("properties can not be null for setting [" + key + "]");
        }
        if (properties.length == 0) {
            this.properties = EMPTY_PROPERTIES;
        } else {
            this.properties = EnumSet.copyOf(Arrays.asList(properties));
        }
    }

    /**
     * Creates a new Setting instance
     * @param key the settings key for this setting.
     * @param defaultValue a default value.
     * @param parser a parser that parses the string rep into a complex datatype.
     * @param properties properties for this setting like scope, filtering...
     */
    public Setting(String key, String defaultValue, Function<String, T> parser, Property... properties) {
        this(key, s -> defaultValue, parser, properties);
    }

    /**
@ -98,11 +153,10 @@ public class Setting<T> extends ToXContentToBytes {
     * @param key the settings key for this setting.
     * @param defaultValue a default value function that returns the default values string representation.
     * @param parser a parser that parses the string rep into a complex datatype.
     * @param dynamic true iff this setting can be dynamically updateable
     * @param scope the scope of this setting
     * @param properties properties for this setting like scope, filtering...
     */
    public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        this(new SimpleKey(key), defaultValue, parser, dynamic, scope);
    public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) {
        this(new SimpleKey(key), defaultValue, parser, properties);
    }

    /**

@ -110,11 +164,10 @@ public class Setting<T> extends ToXContentToBytes {
     * @param key the settings key for this setting.
     * @param fallBackSetting a setting to fall back to if the current setting is not set.
     * @param parser a parser that parses the string rep into a complex datatype.
     * @param dynamic true iff this setting can be dynamically updateable
     * @param scope the scope of this setting
     * @param properties properties for this setting like scope, filtering...
     */
    public Setting(String key, Setting<T> fallBackSetting, Function<String, T> parser, boolean dynamic, Scope scope) {
        this(key, fallBackSetting::getRaw, parser, dynamic, scope);
    public Setting(String key, Setting<T> fallBackSetting, Function<String, T> parser, Property... properties) {
        this(key, fallBackSetting::getRaw, parser, properties);
    }

    /**
@ -136,17 +189,46 @@ public class Setting<T> extends ToXContentToBytes {
    }

    /**
     * Returns <code>true</code> iff this setting is dynamically updateable, otherwise <code>false</code>
     * Returns <code>true</code> if this setting is dynamically updateable, otherwise <code>false</code>
     */
    public final boolean isDynamic() {
        return dynamic;
        return properties.contains(Property.Dynamic);
    }

    /**
     * Returns the settings scope
     * Returns the setting properties
     * @see Property
     */
    public final Scope getScope() {
        return scope;
    public EnumSet<Property> getProperties() {
        return properties;
    }

    /**
     * Returns <code>true</code> if this setting must be filtered, otherwise <code>false</code>
     */
    public boolean isFiltered() {
        return properties.contains(Property.Filtered);
    }

    /**
     * Returns <code>true</code> if this setting has a node scope, otherwise <code>false</code>
     */
    public boolean hasNodeScope() {
        return properties.contains(Property.NodeScope);
    }

    /**
     * Returns <code>true</code> if this setting has an index scope, otherwise <code>false</code>
     */
    public boolean hasIndexScope() {
        return properties.contains(Property.IndexScope);
    }

    /**
     * Returns <code>true</code> if this setting is deprecated, otherwise <code>false</code>
     */
    public boolean isDeprecated() {
        return properties.contains(Property.Deprecated);
    }

    /**
@ -209,6 +291,12 @@ public class Setting<T> extends ToXContentToBytes {
     * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value.
     */
    public String getRaw(Settings settings) {
        // They're using the setting, so we need to tell them to stop
        if (this.isDeprecated() && this.exists(settings)) {
            // It would be convenient to show its replacement key, but replacement is often not so simple
            deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " +
                "See the breaking changes lists in the documentation for details", getKey());
        }
        return settings.get(getKey(), defaultValue.apply(settings));
    }

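A sketch of the new deprecation hook, with a hypothetical key: reading a Property.Deprecated setting that is actually present in the given Settings routes a warning through the DeprecationLogger in getRaw.

    Setting<String> legacy =
        Setting.simpleString("sketch.old.key", Property.Deprecated, Property.NodeScope);
    // a read of a supplied value for this key would log the deprecation warning
    assert legacy.isDeprecated();
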
@ -225,8 +313,7 @@ public class Setting<T> extends ToXContentToBytes {
    public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("key", key.toString());
        builder.field("type", scope.name());
        builder.field("dynamic", dynamic);
        builder.field("properties", properties);
        builder.field("is_group_setting", isGroupSetting());
        builder.field("default", defaultValue.apply(Settings.EMPTY));
        builder.endObject();

@ -248,14 +335,6 @@ public class Setting<T> extends ToXContentToBytes {
        return this;
    }

    /**
     * The settings scope - settings can either be cluster settings or per index settings.
     */
    public enum Scope {
        CLUSTER,
        INDEX;
    }

    /**
     * Build a new updater with a noop validator.
     */

@ -353,38 +432,34 @@ public class Setting<T> extends ToXContentToBytes {
    }

    public Setting(String key, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        this(key, (s) -> defaultValue, parser, dynamic, scope);
    public static Setting<Float> floatSetting(String key, float defaultValue, Property... properties) {
        return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, properties);
    }

    public static Setting<Float> floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope);
    }

    public static Setting<Float> floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) {
    public static Setting<Float> floatSetting(String key, float defaultValue, float minValue, Property... properties) {
        return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> {
            float value = Float.parseFloat(s);
            if (value < minValue) {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
            }
            return value;
        }, dynamic, scope);
        }, properties);
    }

    public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, int maxValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), dynamic, scope);
    public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, int maxValue, Property... properties) {
        return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), properties);
    }

    public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
    public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, Property... properties) {
        return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), properties);
    }

    public static Setting<Long> longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope);
    public static Setting<Long> longSetting(String key, long defaultValue, long minValue, Property... properties) {
        return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), properties);
    }

    public static Setting<String> simpleString(String key, boolean dynamic, Scope scope) {
        return new Setting<>(key, "", Function.identity(), dynamic, scope);
    public static Setting<String> simpleString(String key, Property... properties) {
        return new Setting<>(key, s -> "", Function.identity(), properties);
    }

    public static int parseInt(String s, int minValue, String key) {

@ -418,51 +493,58 @@ public class Setting<T> extends ToXContentToBytes {
        return timeValue;
    }

    public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
        return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
    public static Setting<Integer> intSetting(String key, int defaultValue, Property... properties) {
        return intSetting(key, defaultValue, Integer.MIN_VALUE, properties);
    }

    public static Setting<Boolean> boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope);
    public static Setting<Boolean> boolSetting(String key, boolean defaultValue, Property... properties) {
        return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, properties);
    }

    public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, boolean dynamic, Scope scope) {
        return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope);
    public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, Property... properties) {
        return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope);
    public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, Property... properties) {
        return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) {
        return byteSizeSetting(key, (s) -> value.toString(), dynamic, scope);
    public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, Property... properties) {
        return byteSizeSetting(key, (s) -> value.toString(), properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, Setting<ByteSizeValue> fallbackSettings, boolean dynamic, Scope scope) {
        return byteSizeSetting(key, fallbackSettings::getRaw, dynamic, scope);
    public static Setting<ByteSizeValue> byteSizeSetting(String key, Setting<ByteSizeValue> fallbackSettings,
                                                         Property... properties) {
        return byteSizeSetting(key, fallbackSettings::getRaw, properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, Function<Settings, String> defaultValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope);
    public static Setting<ByteSizeValue> byteSizeSetting(String key, Function<Settings, String> defaultValue,
                                                         Property... properties) {
        return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties);
    }

    public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
        return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope);
    public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
        return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
    }

    public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
        return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
    public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser,
                                                   Property... properties) {
        return listSetting(key, (s) -> defaultStringValue, singleValueParser, properties);
    }

    public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
        return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope);
    public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser,
                                                   Property... properties) {
        return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, properties);
    }

    public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
    public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue,
                                                   Function<String, T> singleValueParser, Property... properties) {
        Function<String, List<T>> parser = (s) ->
            parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());

        return new Setting<List<T>>(new ListKey(key), (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
        return new Setting<List<T>>(new ListKey(key),
            (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, properties) {
            private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
            @Override
            public String getRaw(Settings settings) {
                String[] array = settings.getAsArray(getKey(), null);

@ -509,11 +591,11 @@ public class Setting<T> extends ToXContentToBytes {
            throw new ElasticsearchException(ex);
        }
    }
    public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) {
        return groupSetting(key, dynamic, scope, (s) -> {});
    public static Setting<Settings> groupSetting(String key, Property... properties) {
        return groupSetting(key, (s) -> {}, properties);
    }
    public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope, Consumer<Settings> validator) {
        return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) {
    public static Setting<Settings> groupSetting(String key, Consumer<Settings> validator, Property... properties) {
        return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, properties) {
            @Override
            public boolean isGroupSetting() {
                return true;

@ -592,30 +674,37 @@ public class Setting<T> extends ToXContentToBytes {
        };
    }

    public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, defaultValue, (s) -> parseTimeValue(s, minValue, key), dynamic, scope);
    public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue,
                                                 Property... properties) {
        return new Setting<>(key, defaultValue, (s) -> {
            TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
            if (timeValue.millis() < minValue.millis()) {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
            }
            return timeValue;
        }, properties);
    }

    public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
        return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope);
    public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) {
        return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, properties);
    }

    public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
        return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope);
    public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, Property... properties) {
        return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), properties);
    }

    public static Setting<TimeValue> timeSetting(String key, Setting<TimeValue> fallbackSetting, boolean dynamic, Scope scope) {
        return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope);
    public static Setting<TimeValue> timeSetting(String key, Setting<TimeValue> fallbackSetting, Property... properties) {
        return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), properties);
    }

    public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) {
    public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, Property... properties) {
        return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
            final double d = Double.parseDouble(s);
            if (d < minValue) {
                throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
            }
            return d;
        }, dynamic, scope);
        }, properties);
    }

    @Override

@ -636,8 +725,9 @@ public class Setting<T> extends ToXContentToBytes {
     * can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless
     * {@link #getConcreteSetting(String)} is used to pull the updater.
     */
    public static <T> Setting<T> prefixKeySetting(String prefix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, dynamic, scope);
    public static <T> Setting<T> prefixKeySetting(String prefix, String defaultValue, Function<String, T> parser,
                                                  Property... properties) {
        return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, properties);
    }

    /**

@ -645,16 +735,19 @@ public class Setting<T> extends ToXContentToBytes {
     * storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters
     * out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater.
     */
    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, dynamic, scope);
    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue,
                                                 Function<String, T> parser, Property... properties) {
        return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, properties);
    }

    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, dynamic, scope);
    public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser,
                                                 Property... properties) {
        return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties);
    }

    public static <T> Setting<T> affixKeySetting(AffixKey key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
        return new Setting<T>(key, defaultValue, parser, dynamic, scope) {
    public static <T> Setting<T> affixKeySetting(AffixKey key, Function<Settings, String> defaultValue, Function<String, T> parser,
                                                 Property... properties) {
        return new Setting<T>(key, defaultValue, parser, properties) {

            @Override
            boolean isGroupSetting() {

@ -669,7 +762,7 @@ public class Setting<T> extends ToXContentToBytes {
            @Override
            public Setting<T> getConcreteSetting(String key) {
                if (match(key)) {
                    return new Setting<>(key, defaultValue, parser, dynamic, scope);
                    return new Setting<>(key, defaultValue, parser, properties);
                } else {
                    throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't.");
                }

|
@ -35,7 +35,7 @@ public class SettingsModule extends AbstractModule {
|
|||
|
||||
private final Settings settings;
|
||||
private final Set<String> settingsFilterPattern = new HashSet<>();
|
||||
private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
|
||||
private final Map<String, Setting<?>> nodeSettings = new HashMap<>();
|
||||
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
|
||||
private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
|
||||
|
||||
|
@ -52,7 +52,7 @@ public class SettingsModule extends AbstractModule {
|
|||
@Override
|
||||
protected void configure() {
|
||||
final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
|
||||
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
|
||||
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()));
|
||||
// by now we are fully configured, lets check node level settings for unregistered index settings
|
||||
indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
|
||||
final Predicate<String> acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate();
|
||||
|
@ -71,19 +71,28 @@ public class SettingsModule extends AbstractModule {
|
|||
* the setting during startup.
|
||||
*/
|
||||
public void registerSetting(Setting<?> setting) {
|
||||
switch (setting.getScope()) {
|
||||
case CLUSTER:
|
||||
if (clusterSettings.containsKey(setting.getKey())) {
|
||||
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
|
||||
}
|
||||
clusterSettings.put(setting.getKey(), setting);
|
||||
break;
|
||||
case INDEX:
|
||||
if (indexSettings.containsKey(setting.getKey())) {
|
||||
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
|
||||
}
|
||||
indexSettings.put(setting.getKey(), setting);
|
||||
break;
|
||||
if (setting.isFiltered()) {
|
||||
if (settingsFilterPattern.contains(setting.getKey()) == false) {
|
||||
registerSettingsFilter(setting.getKey());
|
||||
}
|
||||
}
|
||||
|
||||
// We validate scope settings. We should have one and only one scope.
|
||||
if (setting.hasNodeScope() && setting.hasIndexScope()) {
|
||||
throw new IllegalArgumentException("More than one scope has been added to the setting [" + setting.getKey() + "]");
|
||||
}
|
||||
if (setting.hasNodeScope()) {
|
||||
if (nodeSettings.containsKey(setting.getKey())) {
|
||||
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
|
||||
}
|
||||
nodeSettings.put(setting.getKey(), setting);
|
||||
} else if (setting.hasIndexScope()) {
|
||||
if (indexSettings.containsKey(setting.getKey())) {
|
||||
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
|
||||
}
|
||||
indexSettings.put(setting.getKey(), setting);
|
||||
} else {
|
||||
throw new IllegalArgumentException("No scope found for setting [" + setting.getKey() + "]");
|
||||
}
|
||||
}
|
||||
|
||||
|
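A sketch of the registration contract enforced above, with illustrative keys: a setting must carry exactly one of NodeScope or IndexScope.

    Setting<Boolean> ok = Setting.boolSetting("sketch.node.flag", true, Property.NodeScope);
    Setting<Boolean> bad = Setting.boolSetting("sketch.bad.flag", true,
        Property.NodeScope, Property.IndexScope);
    // registerSetting(ok) succeeds; registerSetting(bad) throws
    // IllegalArgumentException("More than one scope has been added to the setting [...]")
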
@ -101,21 +110,15 @@ public class SettingsModule extends AbstractModule {
        settingsFilterPattern.add(filter);
    }

    public void registerSettingsFilterIfMissing(String filter) {
        if (settingsFilterPattern.contains(filter) == false) {
            registerSettingsFilter(filter);
        }
    }

    /**
     * Check if a setting has already been registered
     */
    public boolean exists(Setting<?> setting) {
        switch (setting.getScope()) {
            case CLUSTER:
                return clusterSettings.containsKey(setting.getKey());
            case INDEX:
                return indexSettings.containsKey(setting.getKey());
        if (setting.hasNodeScope()) {
            return nodeSettings.containsKey(setting.getKey());
        }
        if (setting.hasIndexScope()) {
            return indexSettings.containsKey(setting.getKey());
        }
        throw new IllegalArgumentException("setting scope is unknown. This should never happen!");
    }

@ -20,6 +20,7 @@
package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import java.util.Arrays;

@ -41,7 +42,8 @@ public class EsExecutors {
     * Settings key to manually set the number of available processors.
     * This is used to adjust thread pools sizes etc. per node.
     */
    public static final Setting<Integer> PROCESSORS_SETTING = Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, false, Setting.Scope.CLUSTER) ;
    public static final Setting<Integer> PROCESSORS_SETTING =
        Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope);

    /**
     * Returns the number of processors available but at most <tt>32</tt>.

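A sketch of the bounded intSetting above, with an assumed key: the declared default is returned for empty settings, and the minimum of 1 is enforced by the parser whenever a value is actually supplied.

    Setting<Integer> processors =
        Setting.intSetting("sketch.processors", 4, 1, Property.NodeScope);
    int n = processors.get(Settings.EMPTY); // no value supplied, so the default (4) wins
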
@ -19,11 +19,11 @@
package org.elasticsearch.common.util.concurrent;

import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import java.io.Closeable;

@ -63,7 +63,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
public final class ThreadContext implements Closeable, Writeable<ThreadContext.ThreadContextStruct>{

    public static final String PREFIX = "request.headers";
    public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", false, Setting.Scope.CLUSTER);
    public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope);
    private final Map<String, String> defaultHeader;
    private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(Collections.emptyMap());
    private final ContextThreadLocal threadLocal;

@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;

@ -45,10 +46,11 @@ import java.util.function.Function;
 */
public class DiscoveryModule extends AbstractModule {

    public static final Setting<String> DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type",
        settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), false, Setting.Scope.CLUSTER);
    public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING = new Setting<>("discovery.zen.masterservice.type",
        "zen", Function.identity(), false, Setting.Scope.CLUSTER);
    public static final Setting<String> DISCOVERY_TYPE_SETTING =
        new Setting<>("discovery.type", settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(),
            Property.NodeScope);
    public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING =
        new Setting<>("discovery.zen.masterservice.type", "zen", Function.identity(), Property.NodeScope);

    private final Settings settings;
    private final Map<String, List<Class<? extends UnicastHostsProvider>>> unicastHostProviders = new HashMap<>();

@ -24,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.rest.RestStatus;

@ -42,16 +43,25 @@ public class DiscoverySettings extends AbstractComponent {
     * sets the timeout for a complete publishing cycle, including both sending and committing. the master
     * will continue to process the next cluster state update after this time has elapsed
     **/
    public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
    public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING =
        Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30),
            Property.Dynamic, Property.NodeScope);

    /**
     * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing
     * to receive responses within this window will cause the cluster state change to be rejected.
     */
    public static final Setting<TimeValue> COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER);
    public static final Setting<ClusterBlock> NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER);
    public static final Setting<Boolean> PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER);
    public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
    public static final Setting<TimeValue> COMMIT_TIMEOUT_SETTING =
        new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s),
            (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"),
            Property.Dynamic, Property.NodeScope);
    public static final Setting<ClusterBlock> NO_MASTER_BLOCK_SETTING =
        new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock,
            Property.Dynamic, Property.NodeScope);
    public static final Setting<Boolean> PUBLISH_DIFF_ENABLE_SETTING =
        Setting.boolSetting("discovery.zen.publish_diff.enable", true, Property.Dynamic, Property.NodeScope);
    public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING =
        Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), Property.NodeScope);

    private volatile ClusterBlock noMasterBlock;
    private volatile TimeValue publishTimeout;

@@ -46,6 +46,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@@ -86,17 +87,28 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 */
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {

public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
    settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
    settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> PING_TIMEOUT_SETTING =
    Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope);
public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING =
    Setting.timeSetting("discovery.zen.join_timeout",
        settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(),
        TimeValue.timeValueMillis(0), Property.NodeScope);
public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING =
    Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, Property.NodeScope);
public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING =
    Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), Property.NodeScope);
public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING =
    Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, Property.NodeScope);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING =
    Setting.boolSetting("discovery.zen.send_leave_request", true, Property.NodeScope);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING =
    Setting.boolSetting("discovery.zen.master_election.filter_client", true, Property.NodeScope);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING =
    Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
        settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0),
        Property.NodeScope);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING =
    Setting.boolSetting("discovery.zen.master_election.filter_data", false, Property.NodeScope);

public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
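Note how `JOIN_TIMEOUT_SETTING` and `MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING` keep their computed defaults across the migration: the default is a function of the node's `Settings`, derived from another setting, rather than a constant. A sketch of the same idiom with hypothetical keys (`example.base_timeout` and `example.derived_timeout` are made up for illustration):

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;

public final class DerivedDefaultExample {
    public static final Setting<TimeValue> BASE_TIMEOUT_SETTING =
        Setting.positiveTimeSetting("example.base_timeout", TimeValue.timeValueSeconds(3), Property.NodeScope);

    // The default supplier runs against the live Settings, so changing
    // example.base_timeout also moves example.derived_timeout (20x) unless
    // the derived key is set explicitly. The minimum allowed value is 0 ms.
    public static final Setting<TimeValue> DERIVED_TIMEOUT_SETTING =
        Setting.timeSetting("example.derived_timeout",
            settings -> TimeValue.timeValueMillis(BASE_TIMEOUT_SETTING.get(settings).millis() * 20).toString(),
            TimeValue.timeValueMillis(0), Property.NodeScope);
}
```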
@@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;

@@ -40,7 +41,8 @@ import java.util.List;
 */
public class ElectMasterService extends AbstractComponent {

public static final Setting<Integer> DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING =
    Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope);

// This is the minimum version a master needs to be on, otherwise it gets ignored
// This is based on the minimum compatible version of the current version this node is on
@@ -22,7 +22,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;
@@ -37,11 +37,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 */
public abstract class FaultDetection extends AbstractComponent {

public static final Setting<Boolean> CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, false, Scope.CLUSTER);
public static final Setting<TimeValue> PING_INTERVAL_SETTING = Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), false, Scope.CLUSTER);
public static final Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), false, Scope.CLUSTER);
public static final Setting<Integer> PING_RETRIES_SETTING = Setting.intSetting("discovery.zen.fd.ping_retries", 3, false, Scope.CLUSTER);
public static final Setting<Boolean> REGISTER_CONNECTION_LISTENER_SETTING = Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, false, Scope.CLUSTER);
public static final Setting<Boolean> CONNECT_ON_NETWORK_DISCONNECT_SETTING =
    Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope);
public static final Setting<TimeValue> PING_INTERVAL_SETTING =
    Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), Property.NodeScope);
public static final Setting<TimeValue> PING_TIMEOUT_SETTING =
    Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), Property.NodeScope);
public static final Setting<Integer> PING_RETRIES_SETTING =
    Setting.intSetting("discovery.zen.fd.ping_retries", 3, Property.NodeScope);
public static final Setting<Boolean> REGISTER_CONNECTION_LISTENER_SETTING =
    Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, Property.NodeScope);

protected final ThreadPool threadPool;
protected final ClusterName clusterName;
@@ -32,6 +32,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@@ -86,8 +87,11 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {

public static final String ACTION_NAME = "internal:discovery/zen/unicast";
public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING =
    Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(),
        Property.NodeScope);
public static final Setting<Integer> DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING =
    Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope);

// these limits are per-address
public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
@@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
@@ -46,15 +47,17 @@ import static org.elasticsearch.common.Strings.cleanPath;
// TODO: move PathUtils to be package-private here instead of
// public+forbidden api!
public class Environment {
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_CONF_SETTING = Setting.simpleString("path.conf", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PATH_DATA_SETTING = Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", false, Setting.Scope.CLUSTER);
public static final Setting<String> PIDFILE_SETTING = Setting.simpleString("pidfile", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope);
public static final Setting<String> PATH_CONF_SETTING = Setting.simpleString("path.conf", Property.NodeScope);
public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", Property.NodeScope);
public static final Setting<List<String>> PATH_DATA_SETTING =
    Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope);
public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", Property.NodeScope);
public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", Property.NodeScope);
public static final Setting<List<String>> PATH_REPO_SETTING =
    Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope);
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope);
public static final Setting<String> PIDFILE_SETTING = Setting.simpleString("pidfile", Property.NodeScope);

private final Settings settings;
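`path.data` and `path.repo` above are list-valued: `Setting.listSetting` takes a default list and a per-element parser (`Function.identity()` keeps each entry as a raw string). A small sketch under the same API, with an illustrative `example.paths` key:

```java
import java.util.Collections;
import java.util.List;
import java.util.function.Function;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public final class ListSettingExample {
    // Defaults to an empty list; each configured element is kept verbatim.
    public static final Setting<List<String>> EXAMPLE_PATHS_SETTING =
        Setting.listSetting("example.paths", Collections.emptyList(), Function.identity(), Property.NodeScope);

    static List<String> paths(Settings settings) {
        return EXAMPLE_PATHS_SETTING.get(settings); // [] when unset
    }
}
```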
@@ -36,7 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@@ -49,7 +49,6 @@ import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;

import java.io.Closeable;
import java.io.IOException;
@@ -137,20 +136,20 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
/**
 * Maximum number of data nodes that should run in an environment.
 */
public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1, false,
    Scope.CLUSTER);
public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1,
    Property.NodeScope);

/**
 * If true automatically append node id to custom data paths.
 */
public static final Setting<Boolean> ADD_NODE_ID_TO_CUSTOM_PATH = Setting.boolSetting("node.add_id_to_custom_path", true, false,
    Scope.CLUSTER);
public static final Setting<Boolean> ADD_NODE_ID_TO_CUSTOM_PATH =
    Setting.boolSetting("node.add_id_to_custom_path", true, Property.NodeScope);

/**
 * If true the [verbose] SegmentInfos.infoStream logging is sent to System.out.
 */
public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING = Setting
    .boolSetting("node.enable_lucene_segment_infos_trace", false, false, Scope.CLUSTER);
public static final Setting<Boolean> ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING =
    Setting.boolSetting("node.enable_lucene_segment_infos_trace", false, Property.NodeScope);

public static final String NODES_FOLDER = "nodes";
public static final String INDICES_FOLDER = "indices";
@@ -225,7 +224,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl

maybeLogPathDetails();
maybeLogHeapDetails();

applySegmentInfosTrace(settings);
assertCanWrite();
success = true;
@@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -52,20 +53,20 @@ import java.util.concurrent.atomic.AtomicBoolean;
 */
public class GatewayService extends AbstractLifecycleComponent<GatewayService> implements ClusterStateListener {

public static final Setting<Integer> EXPECTED_NODES_SETTING = Setting.intSetting(
    "gateway.expected_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING = Setting.intSetting(
    "gateway.expected_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING = Setting.intSetting(
    "gateway.expected_master_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting(
    "gateway.recover_after_time", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING = Setting.intSetting(
    "gateway.recover_after_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting(
    "gateway.recover_after_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting(
    "gateway.recover_after_master_nodes", 0, 0, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> EXPECTED_NODES_SETTING =
    Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope);
public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING =
    Setting.intSetting("gateway.expected_data_nodes", -1, -1, Property.NodeScope);
public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING =
    Setting.intSetting("gateway.expected_master_nodes", -1, -1, Property.NodeScope);
public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING =
    Setting.positiveTimeSetting("gateway.recover_after_time", TimeValue.timeValueMillis(0), Property.NodeScope);
public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING =
    Setting.intSetting("gateway.recover_after_nodes", -1, -1, Property.NodeScope);
public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING =
    Setting.intSetting("gateway.recover_after_data_nodes", -1, -1, Property.NodeScope);
public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING =
    Setting.intSetting("gateway.recover_after_master_nodes", 0, 0, Property.NodeScope);

public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
@@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
import org.elasticsearch.index.shard.ShardStateMetaData;
@@ -67,9 +68,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
    }
};

public static final Setting<String> NODE_INITIAL_SHARDS_SETTING = new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER, true, Setting.Scope.CLUSTER);
public static final Setting<String> NODE_INITIAL_SHARDS_SETTING =
    new Setting<>("gateway.initial_shards", (settings) -> settings.get("gateway.local.initial_shards", "quorum"), INITIAL_SHARDS_PARSER,
        Property.Dynamic, Property.NodeScope);
@Deprecated
public static final Setting<String> INDEX_RECOVERY_INITIAL_SHARDS_SETTING = new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER, true, Setting.Scope.INDEX);
public static final Setting<String> INDEX_RECOVERY_INITIAL_SHARDS_SETTING =
    new Setting<>("index.recovery.initial_shards", (settings) -> NODE_INITIAL_SHARDS_SETTING.get(settings) , INITIAL_SHARDS_PARSER,
        Property.Dynamic, Property.IndexScope);

public PrimaryShardAllocator(Settings settings) {
    super(settings);
@@ -20,42 +20,64 @@
package org.elasticsearch.http;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.transport.PortsRange;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

import java.util.List;
import java.util.function.Function;

import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.listSetting;

public final class HttpTransportSettings {

public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN = new Setting<String>("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_METHODS = new Setting<String>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_HEADERS = new Setting<String>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER);
public static final Setting<List<String>> SETTING_HTTP_HOST = listSetting("http.host", emptyList(), s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST = listSetting("http.publish_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> SETTING_HTTP_BIND_HOST = listSetting("http.bind_host", SETTING_HTTP_HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_CORS_ENABLED =
    Setting.boolSetting("http.cors.enabled", false, Property.NodeScope);
public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN =
    new Setting<String>("http.cors.allow-origin", "", (value) -> value, Property.NodeScope);
public static final Setting<Integer> SETTING_CORS_MAX_AGE =
    Setting.intSetting("http.cors.max-age", 1728000, Property.NodeScope);
public static final Setting<String> SETTING_CORS_ALLOW_METHODS =
    new Setting<String>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, Property.NodeScope);
public static final Setting<String> SETTING_CORS_ALLOW_HEADERS =
    new Setting<String>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, Property.NodeScope);
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS =
    Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope);
public static final Setting<Boolean> SETTING_PIPELINING =
    Setting.boolSetting("http.pipelining", true, Property.NodeScope);
public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS =
    Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope);
public static final Setting<Boolean> SETTING_HTTP_COMPRESSION =
    Setting.boolSetting("http.compression", false, Property.NodeScope);
public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL =
    Setting.intSetting("http.compression_level", 6, Property.NodeScope);
public static final Setting<List<String>> SETTING_HTTP_HOST =
    listSetting("http.host", emptyList(), Function.identity(), Property.NodeScope);
public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST =
    listSetting("http.publish_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope);
public static final Setting<List<String>> SETTING_HTTP_BIND_HOST =
    listSetting("http.bind_host", SETTING_HTTP_HOST, Function.identity(), Property.NodeScope);

public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", -1, -1, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
public static final Setting<PortsRange> SETTING_HTTP_PORT =
    new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, Property.NodeScope);
public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT =
    Setting.intSetting("http.publish_port", -1, -1, Property.NodeScope);
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED =
    Setting.boolSetting("http.detailed_errors.enabled", true, Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH =
    Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE =
    Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE =
    Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH =
    Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), Property.NodeScope);
// don't reset cookies by default, since I don't think we really need to
// note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES =
    Setting.boolSetting("http.reset_cookies", false, Property.NodeScope);

private HttpTransportSettings() {
}
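`SETTING_HTTP_PUBLISH_HOST` and `SETTING_HTTP_BIND_HOST` pass `SETTING_HTTP_HOST` as their default, so both fall back to `http.host` when unset; the migration also swaps the ad-hoc `s -> s` lambdas for `Function.identity()`. The fallback idiom, sketched with hypothetical `example.*` keys:

```java
import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.listSetting;

import java.util.List;
import java.util.function.Function;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public final class FallbackSettingExample {
    public static final Setting<List<String>> EXAMPLE_HOST =
        listSetting("example.host", emptyList(), Function.identity(), Property.NodeScope);

    // When example.publish_host is unset it resolves to whatever
    // example.host resolves to (which may itself be the empty default).
    public static final Setting<List<String>> EXAMPLE_PUBLISH_HOST =
        listSetting("example.publish_host", EXAMPLE_HOST, Function.identity(), Property.NodeScope);
}
```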
@@ -29,6 +29,7 @@ import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@@ -118,33 +119,32 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
}

public static Setting<ByteSizeValue> SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY =
    Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
    Setting.byteSizeSetting("http.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1),
        Property.NodeScope);
public static Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS =
    Setting.intSetting("http.netty.max_composite_buffer_components", -1, false, Setting.Scope.CLUSTER);
    Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope);

public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count",
    (s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
    (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), false, Setting.Scope.CLUSTER);
    (s) -> Setting.parseInt(s, 1, "http.netty.worker_count"), Property.NodeScope);

public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY = boolSetting("http.tcp_no_delay", NetworkService.TcpSettings
    .TCP_NO_DELAY, false,
    Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE = boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings
    .TCP_KEEP_ALIVE, false,
    Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_TCP_BLOCKING_SERVER = boolSetting("http.tcp.blocking_server", NetworkService
    .TcpSettings.TCP_BLOCKING_SERVER,
    false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_TCP_REUSE_ADDRESS = boolSetting("http.tcp.reuse_address", NetworkService
    .TcpSettings.TCP_REUSE_ADDRESS,
    false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY =
    boolSetting("http.tcp_no_delay", NetworkService.TcpSettings.TCP_NO_DELAY, Property.NodeScope);
public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE =
    boolSetting("http.tcp.keep_alive", NetworkService.TcpSettings.TCP_KEEP_ALIVE, Property.NodeScope);
public static final Setting<Boolean> SETTING_HTTP_TCP_BLOCKING_SERVER =
    boolSetting("http.tcp.blocking_server", NetworkService.TcpSettings.TCP_BLOCKING_SERVER, Property.NodeScope);
public static final Setting<Boolean> SETTING_HTTP_TCP_REUSE_ADDRESS =
    boolSetting("http.tcp.reuse_address", NetworkService.TcpSettings.TCP_REUSE_ADDRESS, Property.NodeScope);

public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp.send_buffer_size",
    NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("http.tcp" +
    ".receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
    "transport.netty.receive_predictor_size",
public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_SEND_BUFFER_SIZE =
    Setting.byteSizeSetting("http.tcp.send_buffer_size", NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
        Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE =
    Setting.byteSizeSetting("http.tcp.receive_buffer_size", NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
        Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE =
    Setting.byteSizeSetting("transport.netty.receive_predictor_size",
        settings -> {
            long defaultReceiverPredictor = 512 * 1024;
            if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@@ -154,13 +154,11 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
                defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
            }
            return new ByteSizeValue(defaultReceiverPredictor).toString();
        }, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("http.netty" +
    ".receive_predictor_min",
    SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("http.netty" +
    ".receive_predictor_max",
    SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
        }, Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN =
    byteSizeSetting("http.netty.receive_predictor_min", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX =
    byteSizeSetting("http.netty.receive_predictor_max", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope);

protected final NetworkService networkService;
@@ -22,6 +22,7 @@ package org.elasticsearch.index;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.analysis.AnalysisRegistry;
@@ -65,13 +66,16 @@ import java.util.function.Function;
 */
public final class IndexModule {

public static final Setting<String> INDEX_STORE_TYPE_SETTING = new Setting<>("index.store.type", "", Function.identity(), false, Setting.Scope.INDEX);
public static final Setting<String> INDEX_STORE_TYPE_SETTING =
    new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope);
public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";
public static final String INDEX_QUERY_CACHE = "index";
public static final String NONE_QUERY_CACHE = "none";
public static final Setting<String> INDEX_QUERY_CACHE_TYPE_SETTING = new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), false, Setting.Scope.INDEX);
public static final Setting<String> INDEX_QUERY_CACHE_TYPE_SETTING =
    new Setting<>("index.queries.cache.type", INDEX_QUERY_CACHE, Function.identity(), Property.IndexScope);
// for test purposes only
public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING = Setting.boolSetting("index.queries.cache.everything", false, false, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_QUERY_CACHE_EVERYTHING_SETTING =
    Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope);
private final IndexSettings indexSettings;
private final IndexStoreConfig indexStoreConfig;
private final AnalysisRegistry analysisRegistry;
@@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -36,7 +37,6 @@ import org.elasticsearch.index.translog.Translog;

import java.util.Locale;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
@@ -50,15 +50,26 @@ import java.util.function.Predicate;
 */
public final class IndexSettings {

public static final Setting<String> DEFAULT_FIELD_SETTING = new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), false, Setting.Scope.INDEX);
public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING = Setting.boolSetting("index.query_string.lenient", false, false, Setting.Scope.INDEX);
public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD = Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD = Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> ALLOW_UNMAPPED = Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, false, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING = Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100), false, Setting.Scope.INDEX);
public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING = new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(), (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting("index.warmer.enabled", true, true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_TTL_DISABLE_PURGE_SETTING = Setting.boolSetting("index.ttl.disable_purge", false, true, Setting.Scope.INDEX);
public static final Setting<String> DEFAULT_FIELD_SETTING =
    new Setting<>("index.query.default_field", AllFieldMapper.NAME, Function.identity(), Property.IndexScope);
public static final Setting<Boolean> QUERY_STRING_LENIENT_SETTING =
    Setting.boolSetting("index.query_string.lenient", false, Property.IndexScope);
public static final Setting<Boolean> QUERY_STRING_ANALYZE_WILDCARD =
    Setting.boolSetting("indices.query.query_string.analyze_wildcard", false, Property.NodeScope);
public static final Setting<Boolean> QUERY_STRING_ALLOW_LEADING_WILDCARD =
    Setting.boolSetting("indices.query.query_string.allowLeadingWildcard", true, Property.NodeScope);
public static final Setting<Boolean> ALLOW_UNMAPPED =
    Setting.boolSetting("index.query.parse.allow_unmapped_fields", true, Property.IndexScope);
public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING =
    Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100),
        Property.IndexScope);
public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING =
    new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(),
        (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope);
public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING =
    Setting.boolSetting("index.warmer.enabled", true, Property.Dynamic, Property.IndexScope);
public static final Setting<Boolean> INDEX_TTL_DISABLE_PURGE_SETTING =
    Setting.boolSetting("index.ttl.disable_purge", false, Property.Dynamic, Property.IndexScope);
public static final Setting<String> INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> {
    switch(s) {
        case "false":
@@ -69,7 +80,7 @@ public final class IndexSettings {
        default:
            throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s);
    }
}, false, Setting.Scope.INDEX);
}, Property.IndexScope);

/**
 * Index setting describing the maximum value of from + size on a query.
@@ -79,10 +90,15 @@ public final class IndexSettings {
 * safely. 1,000,000 is probably way to high for any cluster to set
 * safely.
 */
public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING = Setting.intSetting("index.max_result_window", 10000, 1, true, Setting.Scope.INDEX);
public static final Setting<Integer> MAX_RESULT_WINDOW_SETTING =
    Setting.intSetting("index.max_result_window", 10000, 1, Property.Dynamic, Property.IndexScope);
public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX);
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING =
    Setting.timeSetting("index.refresh_interval", DEFAULT_REFRESH_INTERVAL, new TimeValue(-1, TimeUnit.MILLISECONDS),
        Property.Dynamic, Property.IndexScope);
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING =
    Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic,
        Property.IndexScope);

/**
@@ -90,7 +106,9 @@ public final class IndexSettings {
 * This setting is realtime updateable
 */
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING = Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING =
    Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic,
        Property.IndexScope);

private final Index index;
private final Version version;
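Index-level settings differ from node-level ones only in the scope property: `Property.IndexScope` instead of `Property.NodeScope`, with `Property.Dynamic` again marking the ones that can be updated on a live index (the old trailing `true`). A sketch with a made-up `index.example.window` key, modeled on `MAX_RESULT_WINDOW_SETTING`:

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public final class IndexSettingExample {
    // intSetting(key, default, min, properties...): a minimum of 1 is enforced
    // at parse time; Dynamic + IndexScope make it a live-updatable index setting.
    public static final Setting<Integer> EXAMPLE_WINDOW_SETTING =
        Setting.intSetting("index.example.window", 10000, 1, Property.Dynamic, Property.IndexScope);
}
```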
@@ -19,14 +19,10 @@

package org.elasticsearch.index;

import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.ObjectSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.engine.Engine;
@@ -56,14 +52,13 @@ public final class IndexWarmer extends AbstractComponent {

public static final Setting<MappedFieldType.Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading",
    MappedFieldType.Loading.LAZY.toString(), (s) -> MappedFieldType.Loading.parse(s, MappedFieldType.Loading.LAZY),
    false, Setting.Scope.INDEX);
    Property.IndexScope);
private final List<Listener> listeners;

IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) {
    super(settings);
    ArrayList<Listener> list = new ArrayList<>();
    final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
    list.add(new NormsWarmer(executor));
    list.add(new FieldDataWarmer(executor));
    for (Listener listener : listeners) {
        list.add(listener);
@@ -137,64 +132,6 @@ public final class IndexWarmer extends AbstractComponent {
    TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher);
}

private static class NormsWarmer implements IndexWarmer.Listener {
    private final Executor executor;
    public NormsWarmer(Executor executor) {
        this.executor = executor;
    }
    @Override
    public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
        final MappedFieldType.Loading defaultLoading = indexShard.indexSettings().getValue(INDEX_NORMS_LOADING_SETTING);
        final MapperService mapperService = indexShard.mapperService();
        final ObjectSet<String> warmUp = new ObjectHashSet<>();
        for (DocumentMapper docMapper : mapperService.docMappers(false)) {
            for (FieldMapper fieldMapper : docMapper.mappers()) {
                final String indexName = fieldMapper.fieldType().name();
                MappedFieldType.Loading normsLoading = fieldMapper.fieldType().normsLoading();
                if (normsLoading == null) {
                    normsLoading = defaultLoading;
                }
                if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE && !fieldMapper.fieldType().omitNorms()
                    && normsLoading == MappedFieldType.Loading.EAGER) {
                    warmUp.add(indexName);
                }
            }
        }

        final CountDownLatch latch = new CountDownLatch(1);
        // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task
        executor.execute(() -> {
            try {
                for (ObjectCursor<String> stringObjectCursor : warmUp) {
                    final String indexName = stringObjectCursor.value;
                    final long start = System.nanoTime();
                    for (final LeafReaderContext ctx : searcher.reader().leaves()) {
                        final NumericDocValues values = ctx.reader().getNormValues(indexName);
                        if (values != null) {
                            values.get(0);
                        }
                    }
                    if (indexShard.warmerService().logger().isTraceEnabled()) {
                        indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName,
                            TimeValue.timeValueNanos(System.nanoTime() - start));
                    }
                }
            } catch (Throwable t) {
                indexShard.warmerService().logger().warn("failed to warm-up norms", t);
            } finally {
                latch.countDown();
            }
        });

        return () -> latch.await();
    }

    @Override
    public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) {
        return TerminationHandle.NO_WAIT;
    }
}

private static class FieldDataWarmer implements IndexWarmer.Listener {

    private final Executor executor;
@@ -24,6 +24,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.engine.Engine;
@@ -54,12 +55,23 @@ public final class IndexingSlowLog implements IndexingOperationListener {
private final ESLogger indexLogger;

private static final String INDEX_INDEXING_SLOWLOG_PREFIX = "index.indexing.slowlog";
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING = Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING = Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, true, Setting.Scope.INDEX);
public static final Setting<SlowLogLevel> INDEX_INDEXING_SLOWLOG_LEVEL_SETTING = new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING =
    Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.warn", TimeValue.timeValueNanos(-1),
        TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING =
    Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.info", TimeValue.timeValueNanos(-1),
        TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING =
    Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.debug", TimeValue.timeValueNanos(-1),
        TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
public static final Setting<TimeValue> INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING =
    Setting.timeSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".threshold.index.trace", TimeValue.timeValueNanos(-1),
        TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
public static final Setting<Boolean> INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING =
    Setting.boolSetting(INDEX_INDEXING_SLOWLOG_PREFIX +".reformat", true, Property.Dynamic, Property.IndexScope);
public static final Setting<SlowLogLevel> INDEX_INDEXING_SLOWLOG_LEVEL_SETTING =
    new Setting<>(INDEX_INDEXING_SLOWLOG_PREFIX +".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic,
        Property.IndexScope);
/**
 * Reads how much of the source to log. The user can specify any value they
 * like and numbers are interpreted the maximum number of characters to log
@@ -72,7 +84,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
    } catch (NumberFormatException e) {
        return Booleans.parseBoolean(value, true) ? Integer.MAX_VALUE : 0;
    }
}, true, Setting.Scope.INDEX);
}, Property.Dynamic, Property.IndexScope);

IndexingSlowLog(IndexSettings indexSettings) {
    this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings());
@@ -25,6 +25,7 @@ import org.apache.lucene.index.TieredMergePolicy;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@@ -126,15 +127,31 @@ public final class MergePolicyConfig {
public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB);
public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d;
public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d;
public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING = new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, true, Setting.Scope.INDEX);
public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING =
    new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio,
        Property.Dynamic, Property.IndexScope);

public static final Setting<Double> INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, true, Setting.Scope.INDEX);
public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT, true, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2, true, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING = Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2, true, Setting.Scope.INDEX);
public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT, true, Setting.Scope.INDEX);
public static final Setting<Double> INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING = Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d, true, Setting.Scope.INDEX);
public static final Setting<Double> INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING = Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d, true, Setting.Scope.INDEX);
public static final Setting<Double> INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING =
    Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d,
        Property.Dynamic, Property.IndexScope);
public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING =
    Setting.byteSizeSetting("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT,
        Property.Dynamic, Property.IndexScope);
public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING =
    Setting.intSetting("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE, 2,
        Property.Dynamic, Property.IndexScope);
public static final Setting<Integer> INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING =
    Setting.intSetting("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT, 2,
        Property.Dynamic, Property.IndexScope);
public static final Setting<ByteSizeValue> INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING =
    Setting.byteSizeSetting("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT,
        Property.Dynamic, Property.IndexScope);
public static final Setting<Double> INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING =
    Setting.doubleSetting("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER, 2.0d,
        Property.Dynamic, Property.IndexScope);
public static final Setting<Double> INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING =
    Setting.doubleSetting("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT, 0.0d,
        Property.Dynamic, Property.IndexScope);
public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin
@@ -21,6 +21,7 @@ package org.elasticsearch.index;

import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.util.concurrent.EsExecutors;

/**
@@ -51,9 +52,17 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
 */
public final class MergeSchedulerConfig {

    public static final Setting<Integer> MAX_THREAD_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_thread_count", (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), true, Setting.Scope.INDEX);
    public static final Setting<Integer> MAX_MERGE_COUNT_SETTING = new Setting<>("index.merge.scheduler.max_merge_count", (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5), (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), true, Setting.Scope.INDEX);
    public static final Setting<Boolean> AUTO_THROTTLE_SETTING = Setting.boolSetting("index.merge.scheduler.auto_throttle", true, true, Setting.Scope.INDEX);
    public static final Setting<Integer> MAX_THREAD_COUNT_SETTING =
        new Setting<>("index.merge.scheduler.max_thread_count",
            (s) -> Integer.toString(Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(s) / 2))),
            (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_thread_count"), Property.Dynamic,
            Property.IndexScope);
    public static final Setting<Integer> MAX_MERGE_COUNT_SETTING =
        new Setting<>("index.merge.scheduler.max_merge_count",
            (s) -> Integer.toString(MAX_THREAD_COUNT_SETTING.get(s) + 5),
            (s) -> Setting.parseInt(s, 1, "index.merge.scheduler.max_merge_count"), Property.Dynamic, Property.IndexScope);
    public static final Setting<Boolean> AUTO_THROTTLE_SETTING =
        Setting.boolSetting("index.merge.scheduler.auto_throttle", true, Property.Dynamic, Property.IndexScope);

    private volatile boolean autoThrottle;
    private volatile int maxThreadCount;

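Note how MAX_MERGE_COUNT_SETTING derives its default from another setting: the default-value function receives the node Settings and computes max_thread_count + 5. A hedged sketch of the same pattern; both keys ("pool.size" and "queue.size") are hypothetical:

// Sketch only: a setting whose default is computed from another setting.
public static final Setting<Integer> POOL_SIZE =
    Setting.intSetting("pool.size", 4, 1, Property.Dynamic, Property.IndexScope);
public static final Setting<Integer> QUEUE_SIZE =
    new Setting<>("queue.size",
        (s) -> Integer.toString(POOL_SIZE.get(s) + 5),  // default derived from pool.size
        (s) -> Setting.parseInt(s, 1, "queue.size"),    // parser with a lower bound of 1
        Property.Dynamic, Property.IndexScope);
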
@@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.internal.SearchContext;

@@ -50,16 +51,35 @@ public final class SearchSlowLog {
    private final ESLogger fetchLogger;

    private static final String INDEX_SEARCH_SLOWLOG_PREFIX = "index.search.slowlog";
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING = Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1), TimeValue.timeValueMillis(-1), true, Setting.Scope.INDEX);
    public static final Setting<Boolean> INDEX_SEARCH_SLOWLOG_REFORMAT = Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, true, Setting.Scope.INDEX);
    public static final Setting<SlowLogLevel> INDEX_SEARCH_SLOWLOG_LEVEL = new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, true, Setting.Scope.INDEX);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.warn", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.info", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.debug", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.query.trace", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.warn", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.info", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.debug", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<TimeValue> INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING =
        Setting.timeSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".threshold.fetch.trace", TimeValue.timeValueNanos(-1),
            TimeValue.timeValueMillis(-1), Property.Dynamic, Property.IndexScope);
    public static final Setting<Boolean> INDEX_SEARCH_SLOWLOG_REFORMAT =
        Setting.boolSetting(INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat", true, Property.Dynamic, Property.IndexScope);
    public static final Setting<SlowLogLevel> INDEX_SEARCH_SLOWLOG_LEVEL =
        new Setting<>(INDEX_SEARCH_SLOWLOG_PREFIX + ".level", SlowLogLevel.TRACE.name(), SlowLogLevel::parse, Property.Dynamic,
            Property.IndexScope);

    public SearchSlowLog(IndexSettings indexSettings) {

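All of these thresholds default to -1, which disables the corresponding slowlog level. A sketch of how a threshold would be consumed; the `settings` variable is assumed to hold the relevant index settings:

// TimeValue.timeValueNanos(-1) above means "disabled by default";
// any non-negative value enables logging at that level.
TimeValue warnThreshold = SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.get(settings);
if (warnThreshold.nanos() >= 0) {
    // queries slower than warnThreshold would be logged at WARN
}
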
@@ -93,7 +93,7 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper {
    public String toString() {
        return "analyzer name[" + name + "], analyzer [" + analyzer + "]";
    }

    /** It is an error if this is ever used, it means we screwed up! */
    static final ReuseStrategy ERROR_STRATEGY = new Analyzer.ReuseStrategy() {
        @Override

@@ -56,4 +56,4 @@ public class NumericDoubleAnalyzer extends NumericAnalyzer<NumericDoubleTokenize
    protected NumericDoubleTokenizer createNumericTokenizer(char[] buffer) throws IOException {
        return new NumericDoubleTokenizer(precisionStep, buffer);
    }
}
}

@@ -56,4 +56,4 @@ public class NumericFloatAnalyzer extends NumericAnalyzer<NumericFloatTokenizer>
    protected NumericFloatTokenizer createNumericTokenizer(char[] buffer) throws IOException {
        return new NumericFloatTokenizer(precisionStep, buffer);
    }
}
}

@@ -56,4 +56,4 @@ public class NumericLongAnalyzer extends NumericAnalyzer<NumericLongTokenizer> {
    protected NumericLongTokenizer createNumericTokenizer(char[] buffer) throws IOException {
        return new NumericLongTokenizer(precisionStep, buffer);
    }
}
}

@@ -38,6 +38,7 @@ import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;

@@ -70,7 +71,8 @@ import java.util.concurrent.Executor;
 */
public final class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener<Object, Cache<Query, BitsetFilterCache.Value>>, Closeable {

    public static final Setting<Boolean> INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, false, Setting.Scope.INDEX);
    public static final Setting<Boolean> INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING =
        Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, Property.IndexScope);

    private final boolean loadRandomAccessFiltersEagerly;
    private final Cache<Object, Cache<Query, Value>> loadedFilters;

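This setting keeps only Property.IndexScope: unlike the dynamic merge-policy settings earlier in the diff, it can only be set when the index is created. A sketch of the distinction; both keys are hypothetical:

// Set at index creation only; later updates are rejected.
static final Setting<Boolean> EAGER_SETTING =
    Setting.boolSetting("example.eager", true, Property.IndexScope);
// Also updatable later through the index update-settings API.
static final Setting<Boolean> THROTTLED_SETTING =
    Setting.boolSetting("example.throttled", false, Property.Dynamic, Property.IndexScope);
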
@@ -26,6 +26,7 @@ import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.similarities.Similarity;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@@ -39,8 +40,6 @@ import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Set;

/*
 * Holds all the configuration that is used to create an {@link Engine}.
 * Once {@link Engine} has been created with this object, changes to this

@@ -83,7 +82,7 @@ public final class EngineConfig {
        }
        return s;
    }
    }, false, Setting.Scope.INDEX);
    }, Property.IndexScope);

    /** if set to true the engine will start even if the translog id in the commit point can not be found */
    public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog";

@@ -24,6 +24,7 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData;

@@ -67,7 +68,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
        default:
            throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]");
        }
    }, false, Setting.Scope.INDEX);
    }, Property.IndexScope);

    private static final IndexFieldData.Builder MISSING_DOC_VALUES_BUILDER = (indexProperties, fieldType, cache, breakerService, mapperService1) -> {
        throw new IllegalStateException("Can't load fielddata on [" + fieldType.name()

@@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.analysis.NamedAnalyzer;

@@ -49,8 +50,10 @@ import java.util.Map;
import java.util.stream.StreamSupport;

public abstract class FieldMapper extends Mapper implements Cloneable {
    public static final Setting<Boolean> IGNORE_MALFORMED_SETTING = Setting.boolSetting("index.mapping.ignore_malformed", false, false, Setting.Scope.INDEX);
    public static final Setting<Boolean> COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", false, false, Setting.Scope.INDEX);
    public static final Setting<Boolean> IGNORE_MALFORMED_SETTING =
        Setting.boolSetting("index.mapping.ignore_malformed", false, Property.IndexScope);
    public static final Setting<Boolean> COERCE_SETTING =
        Setting.boolSetting("index.mapping.coerce", false, Property.IndexScope);
    public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> {

        protected final MappedFieldType fieldType;

@@ -200,11 +203,6 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
            return builder;
        }

        public T normsLoading(MappedFieldType.Loading normsLoading) {
            this.fieldType.setNormsLoading(normsLoading);
            return builder;
        }

        public T fieldDataSettings(Settings settings) {
            this.fieldDataSettings = settings;
            return builder;

@@ -240,6 +238,9 @@ public abstract class FieldMapper extends Mapper implements Cloneable {

        protected void setupFieldType(BuilderContext context) {
            fieldType.setName(buildFullName(context));
            if (context.indexCreatedVersion().before(Version.V_5_0_0)) {
                fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f);
            }
            if (fieldType.indexAnalyzer() == null && fieldType.tokenized() == false && fieldType.indexOptions() != IndexOptions.NONE) {
                fieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER);
                fieldType.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER);

@@ -416,15 +417,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
        if (includeDefaults || fieldType().storeTermVectors() != defaultFieldType.storeTermVectors()) {
            builder.field("term_vector", termVectorOptionsToString(fieldType()));
        }
        if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms() || fieldType().normsLoading() != null) {
            builder.startObject("norms");
            if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) {
                builder.field("enabled", !fieldType().omitNorms());
            }
            if (fieldType().normsLoading() != null) {
                builder.field(MappedFieldType.Loading.KEY, fieldType().normsLoading());
            }
            builder.endObject();
        if (includeDefaults || fieldType().omitNorms() != defaultFieldType.omitNorms()) {
            builder.field("norms", fieldType().omitNorms() == false);
        }
        if (indexed && (includeDefaults || fieldType().indexOptions() != defaultFieldType.indexOptions())) {
            builder.field("index_options", indexOptionToString(fieldType().indexOptions()));

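The net effect of this serialization change on generated mappings, sketched with illustrative values:

// Mapping output before (object form, removed above):
//   "norms": { "enabled": false, "loading": "lazy" }
// Mapping output after (boolean form, mirroring omitNorms()):
//   "norms": false
builder.field("norms", fieldType().omitNorms() == false); // fragment restating the new branch above
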
@@ -103,7 +103,6 @@ public abstract class MappedFieldType extends FieldType {
    private NamedAnalyzer searchAnalyzer;
    private NamedAnalyzer searchQuoteAnalyzer;
    private SimilarityProvider similarity;
    private Loading normsLoading;
    private FieldDataType fieldDataType;
    private Object nullValue;
    private String nullValueAsString; // for sending null value to _all field

@@ -117,7 +116,6 @@ public abstract class MappedFieldType extends FieldType {
        this.searchAnalyzer = ref.searchAnalyzer();
        this.searchQuoteAnalyzer = ref.searchQuoteAnalyzer();
        this.similarity = ref.similarity();
        this.normsLoading = ref.normsLoading();
        this.fieldDataType = ref.fieldDataType();
        this.nullValue = ref.nullValue();
        this.nullValueAsString = ref.nullValueAsString();

@@ -158,7 +156,6 @@ public abstract class MappedFieldType extends FieldType {
            Objects.equals(indexAnalyzer, fieldType.indexAnalyzer) &&
            Objects.equals(searchAnalyzer, fieldType.searchAnalyzer) &&
            Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) &&
            Objects.equals(normsLoading, fieldType.normsLoading) &&
            Objects.equals(fieldDataType, fieldType.fieldDataType) &&
            Objects.equals(nullValue, fieldType.nullValue) &&
            Objects.equals(nullValueAsString, fieldType.nullValueAsString);

@@ -167,7 +164,7 @@ public abstract class MappedFieldType extends FieldType {
    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), name, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer,
            similarity == null ? null : similarity.name(), normsLoading, fieldDataType, nullValue, nullValueAsString);
            similarity == null ? null : similarity.name(), fieldDataType, nullValue, nullValueAsString);
    }

    // norelease: we need to override freeze() and add safety checks that all settings are actually set

@@ -205,7 +202,7 @@ public abstract class MappedFieldType extends FieldType {
            conflicts.add("mapper [" + name() + "] has different [doc_values] values");
        }
        if (omitNorms() && !other.omitNorms()) {
            conflicts.add("mapper [" + name() + "] has different [omit_norms] values, cannot change from disable to enabled");
            conflicts.add("mapper [" + name() + "] has different [norms] values, cannot change from disable to enabled");
        }
        if (storeTermVectors() != other.storeTermVectors()) {
            conflicts.add("mapper [" + name() + "] has different [store_term_vector] values");

@@ -242,9 +239,6 @@ public abstract class MappedFieldType extends FieldType {
        if (boost() != other.boost()) {
            conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types.");
        }
        if (normsLoading() != other.normsLoading()) {
            conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [norms.loading] across all types.");
        }
        if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) {
            conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types.");
        }

@@ -304,15 +298,6 @@ public abstract class MappedFieldType extends FieldType {
        this.docValues = hasDocValues;
    }

    public Loading normsLoading() {
        return normsLoading;
    }

    public void setNormsLoading(Loading normsLoading) {
        checkIfFrozen();
        this.normsLoading = normsLoading;
    }

    public NamedAnalyzer indexAnalyzer() {
        return indexAnalyzer;
    }

@@ -27,6 +27,7 @@ import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisService;

@@ -81,9 +82,11 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
    }

    public static final String DEFAULT_MAPPING = "_default_";
    public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, true, Setting.Scope.INDEX);
    public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING =
        Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope);
    public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
    public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX);
    public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING =
        Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, Property.IndexScope);
    private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(
            "_uid", "_id", "_type", "_all", "_parent", "_routing", "_index",
            "_size", "_timestamp", "_ttl"

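Since index.mapping.nested_fields.limit is now declared Property.Dynamic, it can be raised on a live index, not just at creation. A sketch of overriding it in the settings passed at index creation (builder usage only; request plumbing omitted, and the builder factory method may differ slightly on this exact commit):

Settings indexSettings = Settings.builder()
    // raise the nested-object-fields cap from the default of 50
    .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 100)
    .build();
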
@@ -92,14 +92,6 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
            return super.indexOptions(indexOptions);
        }

        @Override
        protected void setupFieldType(BuilderContext context) {
            if (!omitNormsSet && fieldType.boost() != 1.0f) {
                fieldType.setOmitNorms(false);
            }
            super.setupFieldType(context);
        }

        @Override
        public KeywordFieldMapper build(BuilderContext context) {
            setupFieldType(context);

@@ -128,6 +120,9 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap
                } else if (propName.equals("ignore_above")) {
                    builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
                    iterator.remove();
                } else if (propName.equals("norms")) {
                    builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode) == false);
                    iterator.remove();
                } else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
                    iterator.remove();
                }

@@ -31,8 +31,10 @@ import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.IndexableFieldType;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -52,7 +54,9 @@ import java.util.List;
 *
 */
public abstract class NumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
    private static final Setting<Boolean> COERCE_SETTING = Setting.boolSetting("index.mapping.coerce", true, false, Setting.Scope.INDEX); // this is private since it has a different default
    // this is private since it has a different default
    private static final Setting<Boolean> COERCE_SETTING =
        Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope);

    public static class Defaults {

@@ -113,7 +117,6 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM

        protected void setupFieldType(BuilderContext context) {
            super.setupFieldType(context);
            fieldType.setOmitNorms(fieldType.omitNorms() && fieldType.boost() == 1.0f);
            int precisionStep = fieldType.numericPrecisionStep();
            if (precisionStep <= 0 || precisionStep >= maxPrecisionStep()) {
                fieldType.setNumericPrecisionStep(Integer.MAX_VALUE);

@@ -157,13 +157,30 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
                    fieldName);
                final Object index = node.remove("index");
                final boolean keyword = index != null && "analyzed".equals(index) == false;
                // upgrade the index setting
                node.put("index", "no".equals(index) == false);
                {
                    // upgrade the index setting
                    node.put("index", "no".equals(index) == false);
                }
                {
                    // upgrade norms settings
                    Object norms = node.remove("norms");
                    if (norms instanceof Map) {
                        norms = ((Map<?,?>) norms).get("enabled");
                    }
                    if (norms != null) {
                        node.put("norms", TypeParsers.nodeBooleanValue("norms", norms, parserContext));
                    }
                    Object omitNorms = node.remove("omit_norms");
                    if (omitNorms != null) {
                        node.put("norms", TypeParsers.nodeBooleanValue("omit_norms", omitNorms, parserContext) == false);
                    }
                }
                if (keyword) {
                    return new KeywordFieldMapper.TypeParser().parse(fieldName, node, parserContext);
                } else {
                    return new TextFieldMapper.TypeParser().parse(fieldName, node, parserContext);
                }

            }
            throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
                + "or [keyword] field instead for field [" + fieldName + "]");

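The two upgrade blocks above implement a simple normalization rule: norms: {enabled: b} collapses to norms: b, and omit_norms: b becomes norms: !b. A standalone sketch of that rule over a raw mapping node; the helper class is hypothetical, and plain Boolean values are assumed in place of the parser-context-aware conversion used above:

import java.util.Map;

final class NormsUpgrade {
    // Hypothetical helper mirroring the upgrade logic in StringFieldMapper.TypeParser.
    static void upgrade(Map<String, Object> node) {
        Object norms = node.remove("norms");
        if (norms instanceof Map) {
            norms = ((Map<?, ?>) norms).get("enabled"); // old object form
        }
        if (norms != null) {
            node.put("norms", norms);
        }
        Object omitNorms = node.remove("omit_norms");
        if (omitNorms != null) {
            node.put("norms", Boolean.TRUE.equals(omitNorms) == false); // inverted meaning
        }
    }
}
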
@@ -71,7 +71,7 @@ public class TypeParsers {
    private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeParsers.class));
    private static final Set<String> BOOLEAN_STRINGS = new HashSet<>(Arrays.asList("true", "false"));

    private static boolean nodeBooleanValue(String name, Object node, Mapper.TypeParser.ParserContext parserContext) {
    public static boolean nodeBooleanValue(String name, Object node, Mapper.TypeParser.ParserContext parserContext) {
        // Hook onto ParseFieldMatcher so that parsing becomes strict when setting index.query.parse.strict
        if (parserContext.parseFieldMatcher().isStrict()) {
            return XContentMapValues.nodeBooleanValue(node);

@@ -99,9 +99,6 @@ public class TypeParsers {
            } else if (propName.equals("coerce")) {
                builder.coerce(nodeBooleanValue("coerce", propNode, parserContext));
                iterator.remove();
            } else if (propName.equals("omit_norms")) {
                builder.omitNorms(nodeBooleanValue("omit_norms", propNode, parserContext));
                iterator.remove();
            } else if (propName.equals("similarity")) {
                SimilarityProvider similarityProvider = resolveSimilarity(parserContext, name, propNode.toString());
                builder.similarity(similarityProvider);

@@ -187,6 +184,37 @@ public class TypeParsers {
        }
    }

    public static boolean parseNorms(FieldMapper.Builder builder, String propName, Object propNode, Mapper.TypeParser.ParserContext parserContext) {
        if (propName.equals("norms")) {
            if (propNode instanceof Map) {
                final Map<String, Object> properties = nodeMapValue(propNode, "norms");
                for (Iterator<Entry<String, Object>> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) {
                    Entry<String, Object> entry2 = propsIterator.next();
                    final String propName2 = Strings.toUnderscoreCase(entry2.getKey());
                    final Object propNode2 = entry2.getValue();
                    if (propName2.equals("enabled")) {
                        builder.omitNorms(!lenientNodeBooleanValue(propNode2));
                        propsIterator.remove();
                    } else if (propName2.equals(Loading.KEY)) {
                        // ignore for bw compat
                        propsIterator.remove();
                    }
                }
                DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated());
                DEPRECATION_LOGGER.deprecated("The [norms{enabled:true/false}] way of specifying norms is deprecated, please use [norms:true/false] instead");
            } else {
                builder.omitNorms(nodeBooleanValue("norms", propNode, parserContext) == false);
            }
            return true;
        } else if (propName.equals("omit_norms")) {
            builder.omitNorms(nodeBooleanValue("norms", propNode, parserContext));
            DEPRECATION_LOGGER.deprecated("[omit_norms] is deprecated, please use [norms] instead with the opposite boolean value");
            return true;
        } else {
            return false;
        }
    }

    /**
     * Parse text field attributes. In addition to {@link #parseField common attributes}
     * this will parse analysis and term-vectors related settings.

@@ -194,6 +222,14 @@ public class TypeParsers {
    public static void parseTextField(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
        parseField(builder, name, fieldNode, parserContext);
        parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext);
        for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) {
            Map.Entry<String, Object> entry = iterator.next();
            final String propName = Strings.toUnderscoreCase(entry.getKey());
            final Object propNode = entry.getValue();
            if (parseNorms(builder, propName, propNode, parserContext)) {
                iterator.remove();
            }
        }
    }

    /**

@@ -217,24 +253,8 @@ public class TypeParsers {
            } else if (propName.equals("boost")) {
                builder.boost(nodeFloatValue(propNode));
                iterator.remove();
            } else if (propName.equals("omit_norms")) {
                builder.omitNorms(nodeBooleanValue("omit_norms", propNode, parserContext));
                iterator.remove();
            } else if (propName.equals("norms")) {
                final Map<String, Object> properties = nodeMapValue(propNode, "norms");
                for (Iterator<Entry<String, Object>> propsIterator = properties.entrySet().iterator(); propsIterator.hasNext();) {
                    Entry<String, Object> entry2 = propsIterator.next();
                    final String propName2 = Strings.toUnderscoreCase(entry2.getKey());
                    final Object propNode2 = entry2.getValue();
                    if (propName2.equals("enabled")) {
                        builder.omitNorms(!lenientNodeBooleanValue(propNode2));
                        propsIterator.remove();
                    } else if (propName2.equals(Loading.KEY)) {
                        builder.normsLoading(Loading.parse(nodeStringValue(propNode2, null), null));
                        propsIterator.remove();
                    }
                }
                DocumentMapperParser.checkNoRemainingFields(propName, properties, parserContext.indexVersionCreated());
            } else if (parserContext.indexVersionCreated().before(Version.V_5_0_0)
                    && parseNorms(builder, propName, propNode, parserContext)) {
                iterator.remove();
            } else if (propName.equals("index_options")) {
                builder.indexOptions(nodeIndexOptionValue(propNode));

@@ -305,7 +305,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
            builder.field("store_term_vector_payloads", fieldType().storeTermVectorPayloads());
        }
        if (includeDefaults || fieldType().omitNorms() != Defaults.FIELD_TYPE.omitNorms()) {
            builder.field("omit_norms", fieldType().omitNorms());
            builder.field("norms", !fieldType().omitNorms());
        }

        doXContentAnalyzers(builder, includeDefaults);

@@ -31,13 +31,12 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;

@@ -61,7 +60,8 @@ import java.util.concurrent.TimeUnit;
 */
public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable {

    public final static Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING = Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, false, Setting.Scope.INDEX);
    public final static Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING =
        Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Property.IndexScope);

    private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
    private final QueryShardContext queryShardContext;

@@ -45,7 +45,6 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.QueryParsers;

import java.io.IOException;
import java.util.List;

public class MatchQuery {

@@ -336,10 +335,10 @@ public class MatchQuery {
            return prefixQuery;
        } else if (query instanceof MultiPhraseQuery) {
            MultiPhraseQuery pq = (MultiPhraseQuery)query;
            List<Term[]> terms = pq.getTermArrays();
            Term[][] terms = pq.getTermArrays();
            int[] positions = pq.getPositions();
            for (int i = 0; i < terms.size(); i++) {
                prefixQuery.add(terms.get(i), positions[i]);
            for (int i = 0; i < terms.length; i++) {
                prefixQuery.add(terms[i], positions[i]);
            }
            return prefixQuery;
        } else if (query instanceof TermQuery) {

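This hunk tracks a Lucene API change: MultiPhraseQuery#getTermArrays() now returns Term[][] rather than List<Term[]>. A fragment-level sketch of walking the new shape; `consume` is a hypothetical callback:

// terms[i] holds the alternative terms at the phrase position positions[i]
Term[][] terms = pq.getTermArrays();
int[] positions = pq.getPositions();
for (int i = 0; i < terms.length; i++) {
    consume(positions[i], terms[i]); // hypothetical callback
}
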
@@ -36,7 +36,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;

@@ -62,7 +62,7 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
        default:
            throw new IllegalArgumentException("unrecognized [index.store.fs.fs_lock] \"" + s + "\": must be native or simple");
        }
    }, false, Setting.Scope.INDEX);
    }, Property.IndexScope);
    private final CounterMetric rateLimitingTimeInNanos = new CounterMetric();
    private final ShardPath path;

@@ -21,6 +21,7 @@ package org.elasticsearch.index.store;

import org.apache.lucene.store.StoreRateLimiting;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;

@@ -29,8 +30,12 @@ import org.elasticsearch.index.shard.ShardPath;
 *
 */
public class IndexStore extends AbstractIndexComponent {
    public static final Setting<IndexRateLimitingType> INDEX_STORE_THROTTLE_TYPE_SETTING = new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString, true, Setting.Scope.INDEX);
    public static final Setting<ByteSizeValue> INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.INDEX);
    public static final Setting<IndexRateLimitingType> INDEX_STORE_THROTTLE_TYPE_SETTING =
        new Setting<>("index.store.throttle.type", "none", IndexRateLimitingType::fromString,
            Property.Dynamic, Property.IndexScope);
    public static final Setting<ByteSizeValue> INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING =
        Setting.byteSizeSetting("index.store.throttle.max_bytes_per_sec", new ByteSizeValue(0),
            Property.Dynamic, Property.IndexScope);

    protected final IndexStoreConfig indexStoreConfig;
    private final StoreRateLimiting rateLimiting = new StoreRateLimiting();

@@ -22,6 +22,7 @@ import org.apache.lucene.store.StoreRateLimiting;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;

@@ -36,11 +37,15 @@ public class IndexStoreConfig {
    /**
     * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}.
     */
    public static final Setting<StoreRateLimiting.Type> INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(), StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER);
    public static final Setting<StoreRateLimiting.Type> INDICES_STORE_THROTTLE_TYPE_SETTING =
        new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(), StoreRateLimiting.Type::fromString,
            Property.Dynamic, Property.NodeScope);
    /**
     * Configures the node / cluster level throttle intensity. The default is <tt>10240 MB</tt>
     */
    public static final Setting<ByteSizeValue> INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER);
    public static final Setting<ByteSizeValue> INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING =
        Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0),
            Property.Dynamic, Property.NodeScope);
    private volatile StoreRateLimiting.Type rateLimitingType;
    private volatile ByteSizeValue rateLimitingThrottle;
    private final StoreRateLimiting rateLimiting = new StoreRateLimiting();

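Property.NodeScope takes over from Setting.Scope.CLUSTER for node-level settings here. Since both throttle settings stay dynamic, runtime updates are typically wired through ClusterSettings; a hedged sketch (the registration point and the handler lambda are assumptions, not shown in this diff):

// Assumed wiring: react to live updates of the node-level throttle.
clusterSettings.addSettingsUpdateConsumer(
    IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
    newLimit -> rateLimiting.setMaxRate(newLimit)); // handler body is illustrative
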
Some files were not shown because too many files have changed in this diff.