Logging: Configure the node name when we have it (#32983)

Change the logging infrastructure to handle when the node name isn't
available in `elasticsearch.yml`. In that case the node name is not
available until long after logging is configured. The biggest change is
that the node name in the log pattern is no longer fixed at pattern build time.
Instead it is read from a `SetOnce` on every print. If it is unset it is
printed as `unknown` so we have something that fits in the pattern.
On normal startup we don't log anything until the node name is available
so we never see the `unknown`s.
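For illustration only, and not necessarily the exact pattern shipped in log4j2.properties, an appender pattern that uses the converter could look like the line below; if anything did log before the node name was registered, that line would render with `unknown`, and later lines would carry the real name (sample values):

    appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n

    [2018-09-07T14:31:23,123][INFO ][o.e.n.Node               ] [unknown] initializing ...
    [2018-09-07T14:31:24,456][INFO ][o.e.n.Node               ] [yP2Mb7x] node name derived from node ID [yP2Mb7xTS92kyCKsPpDtMg]; set [node.name] to override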
Nik Everett authored on 2018-09-07 14:31:23 -04:00; committed by GitHub
parent 8d61457d9c
commit 190ea9a6de
21 changed files with 363 additions and 79 deletions

View File

@ -337,7 +337,13 @@ class ClusterFormationTasks {
if (node.nodeVersion.major >= 7) {
esConfig['indices.breaker.total.use_real_memory'] = false
}
esConfig.putAll(node.config.settings)
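// a null value removes the setting from the generated config instead of writing it, so the node falls back to its default (for example a node name derived from the node id)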
for (Map.Entry<String, Object> setting : node.config.settings) {
if (setting.value == null) {
esConfig.remove(setting.key)
} else {
esConfig.put(setting.key, setting.value)
}
}
Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
writeConfig.doFirst {

View File

@ -1,2 +1,23 @@
// This file is intentionally blank. All configuration of the
// distribution is done in the parent project.
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
integTestRunner {
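// pass the node's log file location to the test JVM so NodeNameInLogsIT can open and read it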
systemProperty 'tests.logfile',
"${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log"
}

View File

@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.unconfigurednodename;
import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
import java.io.IOException;
import java.io.BufferedReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.AccessController;
import java.security.PrivilegedAction;
public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
@Override
protected BufferedReader openReader(Path logFile) throws IOException {
return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
try {
return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
}
}

View File

@ -0,0 +1,4 @@
grant {
// Needed to read the log file
permission java.io.FilePermission "${tests.logfile}", "read";
};

View File

@ -357,7 +357,7 @@ public class EvilLoggerTests extends ESTestCase {
}
}
public void testNoNodeNameWarning() throws IOException, UserException {
public void testNoNodeNameInPatternWarning() throws IOException, UserException {
setupLogging("no_node_name");
final String path =
@ -368,7 +368,7 @@ public class EvilLoggerTests extends ESTestCase {
assertThat(events.size(), equalTo(2));
final String location = "org.elasticsearch.common.logging.LogConfigurator";
// the first message is a warning for unsupported configuration files
assertLogLine(events.get(0), Level.WARN, location, "\\[null\\] Some logging configurations have %marker but don't "
assertLogLine(events.get(0), Level.WARN, location, "\\[unknown\\] Some logging configurations have %marker but don't "
+ "have %node_name. We will automatically add %node_name to the pattern to ease the migration for users "
+ "who customize log4j2.properties but will stop this behavior in 7.0. You should manually replace "
+ "`%node_name` with `\\[%node_name\\]%marker ` in these locations:");

View File

@ -52,7 +52,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
IOException ioException = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
});
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith(path.toString()));
}
@ -72,7 +72,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
IOException ioException = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
});
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory"));
}
@ -97,7 +97,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
IOException ioException = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
});
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory"));
}

View File

@ -0,0 +1,30 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
integTestCluster {
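// null strips node.name from the generated elasticsearch.yml so the node must derive its name from the node id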
setting 'node.name', null
}
integTestRunner {
systemProperty 'tests.logfile',
"${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log"
}

View File

@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.unconfigured_node_name;
import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
import java.io.IOException;
import java.io.BufferedReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.AccessController;
import java.security.PrivilegedAction;
public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
@Override
protected BufferedReader openReader(Path logFile) throws IOException {
return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
try {
return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
}
}

View File

@ -0,0 +1,4 @@
grant {
// Needed to read the log file
permission java.io.FilePermission "${tests.logfile}", "read";
};

View File

@ -37,7 +37,6 @@ import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.logging.NodeNamePatternConverter;
import org.elasticsearch.common.network.IfConfig;
import org.elasticsearch.common.settings.KeyStoreWrapper;
import org.elasticsearch.common.settings.SecureSettings;
@ -217,6 +216,11 @@ final class Bootstrap {
final BoundTransportAddress boundTransportAddress, List<BootstrapCheck> checks) throws NodeValidationException {
BootstrapChecks.check(context, boundTransportAddress, checks);
}
@Override
protected void registerDerivedNodeNameWithLogger(String nodeName) {
LogConfigurator.setNodeName(nodeName);
}
};
}
@ -289,9 +293,9 @@ final class Bootstrap {
final SecureSettings keystore = loadSecureSettings(initialEnv);
final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile());
String nodeName = Node.NODE_NAME_SETTING.get(environment.settings());
NodeNamePatternConverter.setNodeName(nodeName);
if (Node.NODE_NAME_SETTING.exists(environment.settings())) {
LogConfigurator.setNodeName(Node.NODE_NAME_SETTING.get(environment.settings()));
}
try {
LogConfigurator.configure(environment);
} catch (IOException e) {

View File

@ -134,6 +134,15 @@ public class LogConfigurator {
PluginManager.addPackage(LogConfigurator.class.getPackage().getName());
}
/**
* Sets the node name. This is called before logging is configured if the
* node name is set in elasticsearch.yml. Otherwise it is called as soon
* as the node id is available.
*/
public static void setNodeName(String nodeName) {
NodeNamePatternConverter.setNodeName(nodeName);
}
private static void checkErrorListener() {
assert errorListenerIsRegistered() : "expected error listener to be registered";
if (error.get()) {
@ -158,8 +167,8 @@ public class LogConfigurator {
final LoggerContext context = (LoggerContext) LogManager.getContext(false);
final Set<String> locationsWithDeprecatedPatterns = Collections.synchronizedSet(new HashSet<>());
final List<AbstractConfiguration> configurations = new ArrayList<>();
/*
* Subclass the properties configurator to hack the new pattern in
* place so users don't have to change log4j2.properties in
@ -170,7 +179,6 @@ public class LogConfigurator {
* Everything in this subclass that isn't marked as a hack is copied
* from log4j2's source.
*/
Set<String> locationsWithDeprecatedPatterns = Collections.synchronizedSet(new HashSet<>());
final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory() {
@Override
public PropertiesConfiguration getConfiguration(final LoggerContext loggerContext, final ConfigurationSource source) {

View File

@ -30,20 +30,22 @@ import org.apache.lucene.util.SetOnce;
/**
* Converts {@code %node_name} in log4j patterns into the current node name.
* We *could* use a system property lookup instead but this is very explicit
* and fails fast if we try to use the logger without initializing the node
* name. As a bonus it ought to be ever so slightly faster because it doesn't
* have to look up the system property every time.
* We can't use a system property for this because the node name system
* property is only set if the node name is explicitly defined in
* elasticsearch.yml.
*/
@Plugin(category = PatternConverter.CATEGORY, name = "NodeNamePatternConverter")
@ConverterKeys({"node_name"})
public class NodeNamePatternConverter extends LogEventPatternConverter {
public final class NodeNamePatternConverter extends LogEventPatternConverter {
/**
* The name of this node.
*/
private static final SetOnce<String> NODE_NAME = new SetOnce<>();
/**
* Set the name of this node.
*/
public static void setNodeName(String nodeName) {
static void setNodeName(String nodeName) {
NODE_NAME.set(nodeName);
}
@ -55,18 +57,21 @@ public class NodeNamePatternConverter extends LogEventPatternConverter {
throw new IllegalArgumentException("no options supported but options provided: "
+ Arrays.toString(options));
}
return new NodeNamePatternConverter(NODE_NAME.get());
return new NodeNamePatternConverter();
}
private final String nodeName;
private NodeNamePatternConverter(String nodeName) {
private NodeNamePatternConverter() {
super("NodeName", "node_name");
this.nodeName = nodeName;
}
@Override
public void format(LogEvent event, StringBuilder toAppendTo) {
toAppendTo.append(nodeName);
/*
* We're not thrilled about this volatile read on every line logged but
* the alternatives are slightly terrifying and/or don't work with the
* security manager.
*/
String nodeName = NODE_NAME.get();
toAppendTo.append(nodeName == null ? "unknown" : nodeName);
}
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.env;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
@ -37,7 +38,6 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -53,7 +53,6 @@ import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.Node;
import java.io.Closeable;
import java.io.IOException;
@ -76,6 +75,7 @@ import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import static java.util.Collections.unmodifiableSet;
@ -83,9 +83,6 @@ import static java.util.Collections.unmodifiableSet;
* A component that holds all data paths for a single node.
*/
public final class NodeEnvironment implements Closeable {
private final Logger logger;
public static class NodePath {
/* ${data.paths}/nodes/{node.id} */
public final Path path;
@ -139,6 +136,7 @@ public final class NodeEnvironment implements Closeable {
}
private final Logger logger = LogManager.getLogger(NodeEnvironment.class);
private final NodePath[] nodePaths;
private final Path sharedDataPath;
private final Lock[] locks;
@ -173,24 +171,27 @@ public final class NodeEnvironment implements Closeable {
public static final String INDICES_FOLDER = "indices";
public static final String NODE_LOCK_FILENAME = "node.lock";
public NodeEnvironment(Settings settings, Environment environment) throws IOException {
/**
* Setup the environment.
* @param settings settings from elasticsearch.yml
* @param nodeIdConsumer called as soon as the node id is available so it can
* be used to set the node name in log messages if it wasn't loaded from
* elasticsearch.yml
*/
public NodeEnvironment(Settings settings, Environment environment, Consumer<String> nodeIdConsumer) throws IOException {
if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
nodePaths = null;
sharedDataPath = null;
locks = null;
nodeLockId = -1;
nodeMetaData = new NodeMetaData(generateNodeId(settings));
logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
nodeIdConsumer.accept(nodeMetaData.nodeId());
return;
}
final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
final Lock[] locks = new Lock[nodePaths.length];
boolean success = false;
// trace logger to debug issues before the default node name is derived from the node id
Logger startupTraceLogger = Loggers.getLogger(getClass(), settings);
try {
sharedDataPath = environment.sharedDataFile();
int nodeLockId = -1;
@ -203,13 +204,13 @@ public final class NodeEnvironment implements Closeable {
Files.createDirectories(dir);
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
startupTraceLogger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
try {
locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
nodePaths[dirIndex] = new NodePath(dir);
nodeLockId = possibleLockId;
} catch (LockObtainFailedException ex) {
startupTraceLogger.trace(
logger.trace(
new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), ex);
// release all the ones that were obtained up until now
releaseAndNullLocks(locks);
@ -217,7 +218,7 @@ public final class NodeEnvironment implements Closeable {
}
} catch (IOException e) {
startupTraceLogger.trace(() -> new ParameterizedMessage(
logger.trace(() -> new ParameterizedMessage(
"failed to obtain node lock on {}", dir.toAbsolutePath()), e);
lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
// release all the ones that were obtained up until now
@ -242,8 +243,8 @@ public final class NodeEnvironment implements Closeable {
maxLocalStorageNodes);
throw new IllegalStateException(message, lastException);
}
this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths);
nodeIdConsumer.accept(nodeMetaData.nodeId());
this.nodeLockId = nodeLockId;
this.locks = locks;

View File

@ -183,7 +183,7 @@ import static java.util.stream.Collectors.toList;
* A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used
* in order to use a {@link Client} to perform actions/operations against the cluster.
*/
public class Node implements Closeable {
public abstract class Node implements Closeable {
public static final Setting<Boolean> WRITE_PORTS_FILE_SETTING =
@ -229,17 +229,6 @@ public class Node implements Closeable {
}
}, Setting.Property.NodeScope);
/**
* Adds a default node name to the given setting, if it doesn't already exist
* @return the given setting if node name is already set, or a new copy with a default node name set.
*/
public static final Settings addNodeNameIfNeeded(Settings settings, final String nodeId) {
if (NODE_NAME_SETTING.exists(settings)) {
return settings;
}
return Settings.builder().put(settings).put(NODE_NAME_SETTING.getKey(), nodeId.substring(0, 7)).build();
}
private static final String CLIENT_TYPE = "node";
private final Lifecycle lifecycle = new Lifecycle();
@ -291,24 +280,34 @@ public class Node implements Closeable {
Settings tmpSettings = Settings.builder().put(environment.settings())
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build();
// create the node environment as soon as possible, to recover the node id and enable logging
/*
* Create the node environment as soon as possible so we can
* recover the node id which we might have to use to derive the
* node name. And it is important to get *that* as soon as possible
* so that log lines can contain it.
*/
boolean nodeNameExplicitlyDefined = NODE_NAME_SETTING.exists(tmpSettings);
try {
nodeEnvironment = new NodeEnvironment(tmpSettings, environment);
Consumer<String> nodeIdConsumer = nodeNameExplicitlyDefined ?
nodeId -> {} : nodeId -> registerDerivedNodeNameWithLogger(nodeIdToNodeName(nodeId));
nodeEnvironment = new NodeEnvironment(tmpSettings, environment, nodeIdConsumer);
resourcesToClose.add(nodeEnvironment);
} catch (IOException ex) {
throw new IllegalStateException("Failed to create node environment", ex);
}
final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings);
final String nodeId = nodeEnvironment.nodeId();
tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId);
// this must be captured after the node name is possibly added to the settings
final String nodeName = NODE_NAME_SETTING.get(tmpSettings);
if (hadPredefinedNodeName == false) {
logger.info("node name derived from node ID [{}]; set [{}] to override", nodeId, NODE_NAME_SETTING.getKey());
if (nodeNameExplicitlyDefined) {
logger.info("node name [{}], node ID [{}]",
NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId());
} else {
logger.info("node name [{}], node ID [{}]", nodeName, nodeId);
tmpSettings = Settings.builder()
.put(tmpSettings)
.put(NODE_NAME_SETTING.getKey(), nodeIdToNodeName(nodeEnvironment.nodeId()))
.build();
logger.info("node name derived from node ID [{}]; set [{}] to override",
nodeEnvironment.nodeId(), NODE_NAME_SETTING.getKey());
}
final JvmInfo jvmInfo = JvmInfo.jvmInfo();
logger.info(
"version[{}], pid[{}], build[{}/{}/{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]",
@ -1009,6 +1008,18 @@ public class Node implements Closeable {
return networkModule.getHttpServerTransportSupplier().get();
}
/**
* If the node name was derived from the node id this is called with the
* node name as soon as it is available so that we can register the
* node name with the logger. If the node name is defined in elasticsearch.yml
* this is never called.
*/
protected abstract void registerDerivedNodeNameWithLogger(String nodeName);
private String nodeIdToNodeName(String nodeId) {
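// the derived node name is the first seven characters of the node id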
return nodeId.substring(0, 7);
}
private static class LocalNodeFactory implements Function<BoundTransportAddress, DiscoveryNode> {
private final SetOnce<DiscoveryNode> localNode = new SetOnce<>();
private final String persistentNodeId;

View File

@ -79,13 +79,13 @@ public class NodeEnvironmentTests extends ESTestCase {
List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);
// Reuse the same location and attempt to lock again
IllegalStateException ex =
expectThrows(IllegalStateException.class, () -> new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)));
IllegalStateException ex = expectThrows(IllegalStateException.class, () ->
new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {}));
assertThat(ex.getMessage(), containsString("failed to obtain node lock"));
// Close the environment that holds the lock and make sure we can get the lock after release
env.close();
env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
assertThat(env.nodeDataPaths(), arrayWithSize(dataPaths.size()));
for (int i = 0; i < dataPaths.size(); i++) {
@ -120,7 +120,7 @@ public class NodeEnvironmentTests extends ESTestCase {
final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build());
final NodeEnvironment first = newNodeEnvironment(settings);
List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);
NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
assertEquals(first.nodeDataPaths().length, dataPaths.size());
assertEquals(second.nodeDataPaths().length, dataPaths.size());
for (int i = 0; i < dataPaths.size(); i++) {
@ -477,7 +477,7 @@ public class NodeEnvironmentTests extends ESTestCase {
@Override
public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
Settings build = buildEnvSettings(settings);
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
}
public Settings buildEnvSettings(Settings settings) {
@ -492,7 +492,7 @@ public class NodeEnvironmentTests extends ESTestCase {
.put(settings)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build();
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
}
public NodeEnvironment newNodeEnvironment(String[] dataPaths, String sharedDataPath, Settings settings) throws IOException {
@ -501,6 +501,6 @@ public class NodeEnvironmentTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataPath)
.putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build();
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
}
}

View File

@ -133,7 +133,7 @@ public class IndexModuleTests extends ESTestCase {
bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService);
scriptService = new ScriptService(settings, Collections.emptyMap(), Collections.emptyMap());
clusterService = ClusterServiceUtils.createClusterService(threadPool);
nodeEnvironment = new NodeEnvironment(settings, environment);
nodeEnvironment = new NodeEnvironment(settings, environment, nodeId -> {});
mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
}

View File

@ -178,7 +178,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
// Make sure all our mocking above actually worked:
NodePath[] nodePaths = nodeEnv.nodePaths();
@ -233,7 +233,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
// Make sure all our mocking above actually worked:
NodePath[] nodePaths = nodeEnv.nodePaths();
@ -290,7 +290,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
aFileStore.usableSpace = 100000;
bFileStore.usableSpace = 1000;
@ -315,7 +315,7 @@ public class NewPathForShardTests extends ESTestCase {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), path)
.putList(Environment.PATH_DATA_SETTING.getKey(), paths).build();
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings));
NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings), nodeId -> {});
// Make sure all our mocking above actually worked:
NodePath[] nodePaths = nodeEnv.nodePaths();

View File

@ -0,0 +1,96 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.logging;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.test.rest.ESRestTestCase;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import static org.hamcrest.Matchers.containsString;
/**
* Tests that extend this class verify that the node name appears in the first
* few log lines on startup. Note that this won't pass for clusters that don't
* have the node name defined in elasticsearch.yml <strong>and</strong> start with
* DEBUG or TRACE level logging. Those nodes log a few lines before they
* resolve the node name.
*/
public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase {
/**
* Number of lines in the log file to check for the node name. We don't
* just check the entire log file because it could be quite long and
* exceptions don't include the node name.
*/
private static final int LINES_TO_CHECK = 10;
/**
* Open the log file. This is delegated to subclasses because the test
* framework doesn't have permission to read from the log file but
* subclasses can grant themselves that permission.
*/
protected abstract BufferedReader openReader(Path logFile) throws IOException;
public void testNodeNameIsOnAllLinesOfLog() throws IOException {
BufferedReader logReader = openReader(getLogFile());
try {
String line = logReader.readLine();
assertNotNull("no logs at all?!", line);
Matcher m = Pattern.compile("\\] \\[([^\\]]+)\\] ").matcher(line);
if (false == m.find()) {
fail("Didn't see the node name in [" + line + "]");
}
String nodeName = m.group(1);
assertNotEquals("unknown", nodeName);
int lineNumber = 1;
while (true) {
if (lineNumber >= LINES_TO_CHECK) {
break;
}
line = logReader.readLine();
if (line == null) {
break; // eof
}
lineNumber++;
assertThat(line, containsString("] [" + nodeName + "] "));
}
} finally {
logReader.close();
}
}
@SuppressForbidden(reason = "PathUtils doesn't have permission to read this file")
private Path getLogFile() {
String logFileString = System.getProperty("tests.logfile");
if (null == logFileString) {
fail("tests.logfile must be set to run this test. It is automatically "
+ "set by gradle. If you must set it yourself then it should be the absolute path to the "
+ "log file.");
}
return Paths.get(logFileString);
}
}
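To make the check above concrete: the `\] \[([^\]]+)\] ` pattern captures the node name from the bracketed field that follows the logger name. Against a line like the following (sample values), group(1) would be `yP2Mb7x`, and every later line that is checked must contain `] [yP2Mb7x] `:

    [2018-09-07T14:31:25,789][INFO ][o.e.n.Node               ] [yP2Mb7x] started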

View File

@ -175,5 +175,8 @@ public class MockNode extends Node {
}
}
@Override
protected void registerDerivedNodeNameWithLogger(String nodeName) {
// Nothing to do because test uses the thread name
}
}

View File

@ -934,7 +934,7 @@ public abstract class ESTestCase extends LuceneTestCase {
.put(settings)
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
.putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()).build();
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
return new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
}
/** Return consistent index settings for the provided index version. */

View File

@ -94,7 +94,12 @@ public class WatcherScheduleEngineBenchmark {
// First clean everything and index the watcher (but not via put alert api!)
try (Node node = new Node(Settings.builder().put(SETTINGS).put("node.data", false).build()).start()) {
try (Node node = new Node(Settings.builder().put(SETTINGS).put("node.data", false).build()) {
@Override
protected void registerDerivedNodeNameWithLogger(String nodeName) {
// Nothing to do because test uses the thread name
}
}.start()) {
try (Client client = node.client()) {
ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get();
if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) {