Introduce elasticsearch-core jar (#28191)
This is related to #27933. It introduces a jar named elasticsearch-core in the lib directory. This commit moves the JarHell class from server to elasticsearch-core. Additionally, PathUtils and parts of the Loggers infrastructure are moved, as JarHell depends on them.
This commit is contained in:
parent
5973c2bf31
commit
3895add2ca
|
@ -183,6 +183,7 @@ subprojects {
|
|||
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
|
||||
"org.elasticsearch:elasticsearch:${version}": ':server',
|
||||
"org.elasticsearch:elasticsearch-cli:${version}": ':server:cli',
|
||||
"org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core',
|
||||
"org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio',
|
||||
"org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest',
|
||||
"org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer',
|
||||
|
|
|
@ -72,6 +72,7 @@ forbiddenApisTest {
|
|||
}
|
||||
|
||||
// JarHell is part of es server, which we don't want to pull in
|
||||
// TODO: Not anymore. Now in elasticsearch-core
|
||||
jarHell.enabled=false
|
||||
|
||||
namingConventions {
|
||||
|
|
|
@ -75,6 +75,7 @@ dependencyLicenses {
|
|||
}
|
||||
|
||||
// JarHell is part of es server, which we don't want to pull in
|
||||
// TODO: Not anymore. Now in elasticsearch-core
|
||||
jarHell.enabled=false
|
||||
|
||||
namingConventions {
|
||||
|
|
|
@ -49,6 +49,7 @@ forbiddenApisTest {
|
|||
}
|
||||
|
||||
// JarHell is part of es server, which we don't want to pull in
|
||||
// TODO: Not anymore. Now in elasticsearch-core
|
||||
jarHell.enabled=false
|
||||
|
||||
// TODO: should we have licenses for our test deps?
|
||||
|
|
|
@ -0,0 +1,81 @@
|
|||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
apply plugin: 'nebula.optional-base'
|
||||
apply plugin: 'nebula.maven-base-publish'
|
||||
apply plugin: 'nebula.maven-scm'
|
||||
|
||||
archivesBaseName = 'elasticsearch-core'
|
||||
|
||||
publishing {
|
||||
publications {
|
||||
nebula {
|
||||
artifactId = archivesBaseName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dependencies {
|
||||
compile "org.apache.logging.log4j:log4j-api:${versions.log4j}"
|
||||
|
||||
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
|
||||
testCompile "junit:junit:${versions.junit}"
|
||||
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
|
||||
|
||||
if (isEclipse == false || project.path == ":libs:elasticsearch-core-tests") {
|
||||
testCompile("org.elasticsearch.test:framework:${version}") {
|
||||
exclude group: 'org.elasticsearch', module: 'elasticsearch-core'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
forbiddenApisMain {
|
||||
// elasticsearch-core does not depend on server
|
||||
// TODO: Need to decide how we want to handle for forbidden signatures with the changes to core
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
}
|
||||
|
||||
if (isEclipse) {
|
||||
// in eclipse the project is under a fake root, we need to change around the source sets
|
||||
sourceSets {
|
||||
if (project.path == ":libs:elasticsearch-core") {
|
||||
main.java.srcDirs = ['java']
|
||||
main.resources.srcDirs = ['resources']
|
||||
} else {
|
||||
test.java.srcDirs = ['java']
|
||||
test.resources.srcDirs = ['resources']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
thirdPartyAudit.excludes = [
|
||||
// from log4j
|
||||
'org/osgi/framework/AdaptPermission',
|
||||
'org/osgi/framework/AdminPermission',
|
||||
'org/osgi/framework/Bundle',
|
||||
'org/osgi/framework/BundleActivator',
|
||||
'org/osgi/framework/BundleContext',
|
||||
'org/osgi/framework/BundleEvent',
|
||||
'org/osgi/framework/SynchronousBundleListener',
|
||||
'org/osgi/framework/wiring/BundleWire',
|
||||
'org/osgi/framework/wiring/BundleWiring'
|
||||
]
|
|
@ -0,0 +1 @@
|
|||
7a2999229464e7a324aa503c0a52ec0f05efe7bd
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 1999-2005 The Apache Software Foundation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,5 @@
|
|||
Apache log4j
|
||||
Copyright 2007 The Apache Software Foundation
|
||||
|
||||
This product includes software developed at
|
||||
The Apache Software Foundation (http://www.apache.org/).
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.bootstrap;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
|
@ -120,7 +119,8 @@ public class JarHell {
|
|||
// }
|
||||
// Instead we just throw an exception, and keep it clean.
|
||||
if (element.isEmpty()) {
|
||||
throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous version?) classpath='" + classPath + "'");
|
||||
throw new IllegalStateException("Classpath should not contain empty elements! (outdated shell script from a previous" +
|
||||
" version?) classpath='" + classPath + "'");
|
||||
}
|
||||
// we should be able to just Paths.get() each element, but unfortunately this is not the
|
||||
// whole story on how classpath parsing works: if you want to know, start at sun.misc.Launcher,
|
||||
|
@ -215,21 +215,13 @@ public class JarHell {
|
|||
}
|
||||
|
||||
/** inspect manifest for sure incompatibilities */
|
||||
static void checkManifest(Manifest manifest, Path jar) {
|
||||
private static void checkManifest(Manifest manifest, Path jar) {
|
||||
// give a nice error if jar requires a newer java version
|
||||
String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK");
|
||||
if (targetVersion != null) {
|
||||
checkVersionFormat(targetVersion);
|
||||
checkJavaVersion(jar.toString(), targetVersion);
|
||||
}
|
||||
|
||||
// give a nice error if jar is compiled against different es version
|
||||
String systemESVersion = Version.CURRENT.toString();
|
||||
String targetESVersion = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version");
|
||||
if (targetESVersion != null && targetESVersion.equals(systemESVersion) == false) {
|
||||
throw new IllegalStateException(jar + " requires Elasticsearch " + targetESVersion
|
||||
+ ", your system: " + systemESVersion);
|
||||
}
|
||||
}
|
||||
|
||||
public static void checkVersionFormat(String targetVersion) {
|
||||
|
@ -237,7 +229,8 @@ public class JarHell {
|
|||
throw new IllegalStateException(
|
||||
String.format(
|
||||
Locale.ROOT,
|
||||
"version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was %s",
|
||||
"version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have " +
|
||||
"leading zeros but was %s",
|
||||
targetVersion
|
||||
)
|
||||
);
|
||||
|
@ -263,7 +256,7 @@ public class JarHell {
|
|||
}
|
||||
}
|
||||
|
||||
static void checkClass(Map<String,Path> clazzes, String clazz, Path jarpath) {
|
||||
private static void checkClass(Map<String, Path> clazzes, String clazz, Path jarpath) {
|
||||
Path previous = clazzes.put(clazz, jarpath);
|
||||
if (previous != null) {
|
||||
if (previous.equals(jarpath)) {
|
|
@ -26,6 +26,7 @@ import java.util.Objects;
|
|||
import java.util.stream.Collectors;
|
||||
|
||||
public class JavaVersion implements Comparable<JavaVersion> {
|
||||
|
||||
private final List<Integer> version;
|
||||
|
||||
public List<Integer> getVersion() {
|
|
@ -19,12 +19,9 @@
|
|||
|
||||
package org.elasticsearch.common.logging;
|
||||
|
||||
import org.apache.logging.log4j.Level;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.spi.ExtendedLogger;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
||||
/**
|
||||
* Factory to get {@link Logger}s
|
||||
|
@ -35,12 +32,6 @@ public final class ESLoggerFactory {
|
|||
|
||||
}
|
||||
|
||||
public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
|
||||
new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope);
|
||||
public static final Setting.AffixSetting<Level> LOG_LEVEL_SETTING =
|
||||
Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Property.Dynamic,
|
||||
Property.NodeScope));
|
||||
|
||||
public static Logger getLogger(String prefix, String name) {
|
||||
return getLogger(prefix, LogManager.getLogger(name));
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.logging;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
public class Loggers {
|
||||
|
||||
public static final String SPACE = " ";
|
||||
|
||||
public static Logger getLogger(Logger parentLogger, String s) {
|
||||
assert parentLogger instanceof PrefixLogger;
|
||||
return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
|
||||
}
|
||||
|
||||
public static Logger getLogger(String s) {
|
||||
return ESLoggerFactory.getLogger(s);
|
||||
}
|
||||
|
||||
public static Logger getLogger(Class<?> clazz) {
|
||||
return ESLoggerFactory.getLogger(clazz);
|
||||
}
|
||||
|
||||
public static Logger getLogger(Class<?> clazz, String... prefixes) {
|
||||
return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
|
||||
}
|
||||
|
||||
public static Logger getLogger(String name, String... prefixes) {
|
||||
return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
|
||||
}
|
||||
|
||||
private static String formatPrefix(String... prefixes) {
|
||||
String prefix = null;
|
||||
if (prefixes != null && prefixes.length > 0) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (String prefixX : prefixes) {
|
||||
if (prefixX != null) {
|
||||
if (prefixX.equals(SPACE)) {
|
||||
sb.append(" ");
|
||||
} else {
|
||||
sb.append("[").append(prefixX).append("]");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (sb.length() > 0) {
|
||||
sb.append(" ");
|
||||
prefix = sb.toString();
|
||||
}
|
||||
}
|
||||
return prefix;
|
||||
}
|
||||
}
|
|
@ -32,7 +32,7 @@ import java.util.WeakHashMap;
|
|||
* A logger that prefixes all messages with a fixed prefix specified during construction. The prefix mechanism uses the marker construct, so
|
||||
* for the prefixes to appear, the logging layout pattern must include the marker in its pattern.
|
||||
*/
|
||||
class PrefixLogger extends ExtendedLoggerWrapper {
|
||||
public class PrefixLogger extends ExtendedLoggerWrapper {
|
||||
|
||||
/*
|
||||
* We can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds a permanent reference to the marker;
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.bootstrap;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -164,7 +163,8 @@ public class JarHellTests extends ESTestCase {
|
|||
JarHell.checkJarHell(jars);
|
||||
fail("did not get expected exception");
|
||||
} catch (IllegalStateException e) {
|
||||
assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was bogus"));
|
||||
assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated " +
|
||||
"by \".\"'s and may have leading zeros but was bogus"));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -178,33 +178,6 @@ public class JarHellTests extends ESTestCase {
|
|||
JarHell.checkJarHell(jars);
|
||||
}
|
||||
|
||||
/** make sure if a plugin is compiled against the same ES version, it works */
|
||||
public void testGoodESVersionInJar() throws Exception {
|
||||
Path dir = createTempDir();
|
||||
Manifest manifest = new Manifest();
|
||||
Attributes attributes = manifest.getMainAttributes();
|
||||
attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0");
|
||||
attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), Version.CURRENT.toString());
|
||||
Set<URL> jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class"));
|
||||
JarHell.checkJarHell(jars);
|
||||
}
|
||||
|
||||
/** make sure if a plugin is compiled against a different ES version, it fails */
|
||||
public void testBadESVersionInJar() throws Exception {
|
||||
Path dir = createTempDir();
|
||||
Manifest manifest = new Manifest();
|
||||
Attributes attributes = manifest.getMainAttributes();
|
||||
attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0");
|
||||
attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), "1.0-bogus");
|
||||
Set<URL> jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class"));
|
||||
try {
|
||||
JarHell.checkJarHell(jars);
|
||||
fail("did not get expected exception");
|
||||
} catch (IllegalStateException e) {
|
||||
assertTrue(e.getMessage().contains("requires Elasticsearch 1.0-bogus"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testValidVersions() {
|
||||
String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80"};
|
||||
for (String version : versions) {
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.transport.netty4;
|
|||
import org.apache.logging.log4j.Level;
|
||||
import org.elasticsearch.ESNetty4IntegTestCase;
|
||||
import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequest;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.MockLogAppender;
|
||||
|
@ -36,12 +37,12 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase {
|
|||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
appender = new MockLogAppender();
|
||||
Loggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
|
||||
ServerLoggers.addAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
|
||||
appender.start();
|
||||
}
|
||||
|
||||
public void tearDown() throws Exception {
|
||||
Loggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
|
||||
ServerLoggers.removeAppender(Loggers.getLogger(ESLoggingHandler.class), appender);
|
||||
appender.stop();
|
||||
super.tearDown();
|
||||
}
|
||||
|
|
|
@ -138,7 +138,7 @@ public class EvilLoggerConfigurationTests extends ESTestCase {
|
|||
assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(Level.DEBUG));
|
||||
|
||||
final Level level = randomFrom(Level.TRACE, Level.DEBUG, Level.INFO, Level.WARN, Level.ERROR);
|
||||
Loggers.setLevel(ESLoggerFactory.getLogger("x"), level);
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getLogger("x"), level);
|
||||
|
||||
assertThat(ESLoggerFactory.getLogger("x").getLevel(), equalTo(level));
|
||||
assertThat(ESLoggerFactory.getLogger("x.y").getLevel(), equalTo(level));
|
||||
|
|
|
@ -285,12 +285,12 @@ public class EvilLoggerTests extends ESTestCase {
|
|||
|
||||
final Logger hasConsoleAppender = ESLoggerFactory.getLogger("has_console_appender");
|
||||
|
||||
final Appender testLoggerConsoleAppender = Loggers.findAppender(hasConsoleAppender, ConsoleAppender.class);
|
||||
final Appender testLoggerConsoleAppender = ServerLoggers.findAppender(hasConsoleAppender, ConsoleAppender.class);
|
||||
assertNotNull(testLoggerConsoleAppender);
|
||||
assertThat(testLoggerConsoleAppender.getName(), equalTo("console"));
|
||||
final Logger hasCountingNoOpAppender = ESLoggerFactory.getLogger("has_counting_no_op_appender");
|
||||
assertNull(Loggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class));
|
||||
final Appender countingNoOpAppender = Loggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class);
|
||||
assertNull(ServerLoggers.findAppender(hasCountingNoOpAppender, ConsoleAppender.class));
|
||||
final Appender countingNoOpAppender = ServerLoggers.findAppender(hasCountingNoOpAppender, CountingNoOpAppender.class);
|
||||
assertThat(countingNoOpAppender.getName(), equalTo("counting_no_op"));
|
||||
}
|
||||
|
||||
|
|
|
@ -38,6 +38,8 @@ archivesBaseName = 'elasticsearch'
|
|||
|
||||
dependencies {
|
||||
|
||||
compile "org.elasticsearch:elasticsearch-core:${version}"
|
||||
|
||||
compileOnly project(':libs:plugin-classloader')
|
||||
testRuntime project(':libs:plugin-classloader')
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger;
|
|||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
|
@ -102,7 +102,7 @@ public class Retry {
|
|||
this.backoff = backoffPolicy.iterator();
|
||||
this.consumer = consumer;
|
||||
this.listener = listener;
|
||||
this.logger = Loggers.getLogger(getClass(), settings);
|
||||
this.logger = ServerLoggers.getLogger(getClass(), settings);
|
||||
this.scheduler = scheduler;
|
||||
// in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
|
||||
this.startTimestampNanos = System.nanoTime();
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.elasticsearch.common.SuppressForbidden;
|
|||
import org.elasticsearch.common.inject.CreationException;
|
||||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.logging.LogConfigurator;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.network.IfConfig;
|
||||
import org.elasticsearch.common.settings.KeyStoreWrapper;
|
||||
|
@ -300,9 +301,9 @@ final class Bootstrap {
|
|||
try {
|
||||
if (closeStandardStreams) {
|
||||
final Logger rootLogger = ESLoggerFactory.getRootLogger();
|
||||
final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
|
||||
final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class);
|
||||
if (maybeConsoleAppender != null) {
|
||||
Loggers.removeAppender(rootLogger, maybeConsoleAppender);
|
||||
ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender);
|
||||
}
|
||||
closeSystOut();
|
||||
}
|
||||
|
@ -333,9 +334,9 @@ final class Bootstrap {
|
|||
} catch (NodeValidationException | RuntimeException e) {
|
||||
// disable console logging, so user does not see the exception twice (jvm will show it already)
|
||||
final Logger rootLogger = ESLoggerFactory.getRootLogger();
|
||||
final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
|
||||
final Appender maybeConsoleAppender = ServerLoggers.findAppender(rootLogger, ConsoleAppender.class);
|
||||
if (foreground && maybeConsoleAppender != null) {
|
||||
Loggers.removeAppender(rootLogger, maybeConsoleAppender);
|
||||
ServerLoggers.removeAppender(rootLogger, maybeConsoleAppender);
|
||||
}
|
||||
Logger logger = Loggers.getLogger(Bootstrap.class);
|
||||
if (INSTANCE.node != null) {
|
||||
|
@ -368,7 +369,7 @@ final class Bootstrap {
|
|||
}
|
||||
// re-enable it if appropriate, so they can see any logging during the shutdown process
|
||||
if (foreground && maybeConsoleAppender != null) {
|
||||
Loggers.addAppender(rootLogger, maybeConsoleAppender);
|
||||
ServerLoggers.addAppender(rootLogger, maybeConsoleAppender);
|
||||
}
|
||||
|
||||
throw e;
|
||||
|
|
|
@ -23,7 +23,7 @@ import org.apache.logging.log4j.LogManager;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.node.Node;
|
||||
|
||||
|
@ -34,7 +34,7 @@ public abstract class AbstractComponent {
|
|||
protected final Settings settings;
|
||||
|
||||
public AbstractComponent(Settings settings) {
|
||||
this.logger = Loggers.getLogger(getClass(), settings);
|
||||
this.logger = ServerLoggers.getLogger(getClass(), settings);
|
||||
this.deprecationLogger = new DeprecationLogger(logger);
|
||||
this.settings = settings;
|
||||
}
|
||||
|
|
|
@ -177,15 +177,15 @@ public class LogConfigurator {
|
|||
* @param settings the settings from which logger levels will be extracted
|
||||
*/
|
||||
private static void configureLoggerLevels(final Settings settings) {
|
||||
if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
|
||||
final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings);
|
||||
Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
|
||||
if (ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
|
||||
final Level level = ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings);
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level);
|
||||
}
|
||||
ESLoggerFactory.LOG_LEVEL_SETTING.getAllConcreteSettings(settings)
|
||||
ServerLoggers.LOG_LEVEL_SETTING.getAllConcreteSettings(settings)
|
||||
// do not set a log level for a logger named level (from the default log setting)
|
||||
.filter(s -> s.getKey().equals(ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> {
|
||||
.filter(s -> s.getKey().equals(ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.getKey()) == false).forEach(s -> {
|
||||
final Level level = s.get(settings);
|
||||
Loggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level);
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getLogger(s.getKey().substring("logger.".length())), level);
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -27,28 +27,29 @@ import org.apache.logging.log4j.core.LoggerContext;
|
|||
import org.apache.logging.log4j.core.config.Configuration;
|
||||
import org.apache.logging.log4j.core.config.Configurator;
|
||||
import org.apache.logging.log4j.core.config.LoggerConfig;
|
||||
import org.apache.logging.log4j.message.MessageFactory;
|
||||
import org.elasticsearch.common.Classes;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.node.Node;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static java.util.Arrays.asList;
|
||||
import static javax.security.auth.login.Configuration.getConfiguration;
|
||||
import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
|
||||
|
||||
/**
|
||||
* A set of utilities around Logging.
|
||||
*/
|
||||
public class Loggers {
|
||||
public class ServerLoggers {
|
||||
|
||||
public static final String SPACE = " ";
|
||||
public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
|
||||
new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Setting.Property.NodeScope);
|
||||
public static final Setting.AffixSetting<Level> LOG_LEVEL_SETTING =
|
||||
Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic,
|
||||
Setting.Property.NodeScope));
|
||||
|
||||
public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
|
||||
return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
|
||||
|
@ -64,17 +65,17 @@ public class Loggers {
|
|||
}
|
||||
|
||||
public static Logger getLogger(Class<?> clazz, Settings settings, Index index, String... prefixes) {
|
||||
return getLogger(clazz, settings, asArrayList(SPACE, index.getName(), prefixes).toArray(new String[0]));
|
||||
return getLogger(clazz, settings, asArrayList(Loggers.SPACE, index.getName(), prefixes).toArray(new String[0]));
|
||||
}
|
||||
|
||||
public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
|
||||
final List<String> prefixesList = prefixesList(settings, prefixes);
|
||||
return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
|
||||
return Loggers.getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
|
||||
}
|
||||
|
||||
public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
|
||||
final List<String> prefixesList = prefixesList(settings, prefixes);
|
||||
return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
|
||||
return Loggers.getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
|
||||
}
|
||||
|
||||
private static List<String> prefixesList(Settings settings, String... prefixes) {
|
||||
|
@ -88,48 +89,6 @@ public class Loggers {
|
|||
return prefixesList;
|
||||
}
|
||||
|
||||
public static Logger getLogger(Logger parentLogger, String s) {
|
||||
assert parentLogger instanceof PrefixLogger;
|
||||
return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
|
||||
}
|
||||
|
||||
public static Logger getLogger(String s) {
|
||||
return ESLoggerFactory.getLogger(s);
|
||||
}
|
||||
|
||||
public static Logger getLogger(Class<?> clazz) {
|
||||
return ESLoggerFactory.getLogger(clazz);
|
||||
}
|
||||
|
||||
public static Logger getLogger(Class<?> clazz, String... prefixes) {
|
||||
return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
|
||||
}
|
||||
|
||||
public static Logger getLogger(String name, String... prefixes) {
|
||||
return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
|
||||
}
|
||||
|
||||
private static String formatPrefix(String... prefixes) {
|
||||
String prefix = null;
|
||||
if (prefixes != null && prefixes.length > 0) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (String prefixX : prefixes) {
|
||||
if (prefixX != null) {
|
||||
if (prefixX.equals(SPACE)) {
|
||||
sb.append(" ");
|
||||
} else {
|
||||
sb.append("[").append(prefixX).append("]");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (sb.length() > 0) {
|
||||
sb.append(" ");
|
||||
prefix = sb.toString();
|
||||
}
|
||||
}
|
||||
return prefix;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the level of the logger. If the new level is null, the logger will inherit it's level from its nearest ancestor with a non-null
|
||||
* level.
|
|
@ -46,7 +46,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
|
|||
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.network.NetworkModule;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
@ -111,7 +111,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
}
|
||||
|
||||
private static final class LoggingSettingUpdater implements SettingUpdater<Settings> {
|
||||
final Predicate<String> loggerPredicate = ESLoggerFactory.LOG_LEVEL_SETTING::match;
|
||||
final Predicate<String> loggerPredicate = ServerLoggers.LOG_LEVEL_SETTING::match;
|
||||
private final Settings settings;
|
||||
|
||||
LoggingSettingUpdater(Settings settings) {
|
||||
|
@ -129,10 +129,10 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
builder.put(current.filter(loggerPredicate));
|
||||
for (String key : previous.keySet()) {
|
||||
if (loggerPredicate.test(key) && builder.keys().contains(key) == false) {
|
||||
if (ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) {
|
||||
if (ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).exists(settings) == false) {
|
||||
builder.putNull(key);
|
||||
} else {
|
||||
builder.put(key, ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString());
|
||||
builder.put(key, ServerLoggers.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings).toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -150,12 +150,12 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
if ("_root".equals(component)) {
|
||||
final String rootLevel = value.get(key);
|
||||
if (rootLevel == null) {
|
||||
Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), ServerLoggers.LOG_DEFAULT_LEVEL_SETTING.get(settings));
|
||||
} else {
|
||||
Loggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel);
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), rootLevel);
|
||||
}
|
||||
} else {
|
||||
Loggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key));
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getLogger(component), value.get(key));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -379,8 +379,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
|
||||
EsExecutors.PROCESSORS_SETTING,
|
||||
ThreadContext.DEFAULT_HEADERS_SETTING,
|
||||
ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING,
|
||||
ESLoggerFactory.LOG_LEVEL_SETTING,
|
||||
ServerLoggers.LOG_DEFAULT_LEVEL_SETTING,
|
||||
ServerLoggers.LOG_LEVEL_SETTING,
|
||||
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
|
||||
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
|
||||
OsService.REFRESH_INTERVAL_SETTING,
|
||||
|
|
|
@ -22,7 +22,7 @@ package org.elasticsearch.common.settings;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.inject.Binder;
|
||||
import org.elasticsearch.common.inject.Module;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
@ -35,7 +35,6 @@ import java.util.HashSet;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
|
@ -58,7 +57,7 @@ public class SettingsModule implements Module {
|
|||
}
|
||||
|
||||
public SettingsModule(Settings settings, List<Setting<?>> additionalSettings, List<String> settingsFilter) {
|
||||
logger = Loggers.getLogger(getClass(), settings);
|
||||
logger = ServerLoggers.getLogger(getClass(), settings);
|
||||
this.settings = settings;
|
||||
for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
|
||||
registerSetting(setting);
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
|
|||
import org.elasticsearch.cluster.service.ClusterApplier;
|
||||
import org.elasticsearch.cluster.service.MasterService;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
|
@ -109,7 +109,7 @@ public class DiscoveryModule {
|
|||
if (discoverySupplier == null) {
|
||||
throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
|
||||
}
|
||||
Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType);
|
||||
ServerLoggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType);
|
||||
discovery = Objects.requireNonNull(discoverySupplier.get());
|
||||
}
|
||||
|
||||
|
|
|
@ -38,7 +38,7 @@ import org.elasticsearch.common.Randomness;
|
|||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -182,7 +182,7 @@ public final class NodeEnvironment implements Closeable {
|
|||
locks = null;
|
||||
nodeLockId = -1;
|
||||
nodeMetaData = new NodeMetaData(generateNodeId(settings));
|
||||
logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
|
||||
logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
|
||||
return;
|
||||
}
|
||||
final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
|
||||
|
@ -190,7 +190,7 @@ public final class NodeEnvironment implements Closeable {
|
|||
boolean success = false;
|
||||
|
||||
// trace logger to debug issues before the default node name is derived from the node id
|
||||
Logger startupTraceLogger = Loggers.getLogger(getClass(), settings);
|
||||
Logger startupTraceLogger = ServerLoggers.getLogger(getClass(), settings);
|
||||
|
||||
try {
|
||||
sharedDataPath = environment.sharedDataFile();
|
||||
|
@ -244,7 +244,7 @@ public final class NodeEnvironment implements Closeable {
|
|||
throw new IllegalStateException(message, lastException);
|
||||
}
|
||||
this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
|
||||
this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
|
||||
this.logger = ServerLoggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
|
||||
|
||||
this.nodeLockId = nodeLockId;
|
||||
this.locks = locks;
|
||||
|
|
|
@ -21,7 +21,7 @@ package org.elasticsearch.index;
|
|||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
|
||||
public abstract class AbstractIndexComponent implements IndexComponent {
|
||||
|
||||
|
@ -33,7 +33,7 @@ public abstract class AbstractIndexComponent implements IndexComponent {
|
|||
* Constructs a new index component, with the index name and its settings.
|
||||
*/
|
||||
protected AbstractIndexComponent(IndexSettings indexSettings) {
|
||||
this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
|
||||
this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
|
||||
this.deprecationLogger = new DeprecationLogger(logger);
|
||||
this.indexSettings = indexSettings;
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
|
|||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.shard.IndexEventListener;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
|
@ -52,7 +52,7 @@ final class CompositeIndexEventListener implements IndexEventListener {
|
|||
}
|
||||
}
|
||||
this.listeners = Collections.unmodifiableList(new ArrayList<>(listeners));
|
||||
this.logger = Loggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
|
||||
this.logger = ServerLoggers.getLogger(getClass(), indexSettings.getSettings(), indexSettings.getIndex());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger;
|
|||
import org.apache.lucene.index.MergePolicy;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.IndexScopedSettings;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
@ -374,7 +374,7 @@ public final class IndexSettings {
|
|||
this.settings = Settings.builder().put(nodeSettings).put(indexMetaData.getSettings()).build();
|
||||
this.index = indexMetaData.getIndex();
|
||||
version = Version.indexCreated(settings);
|
||||
logger = Loggers.getLogger(getClass(), settings, index);
|
||||
logger = ServerLoggers.getLogger(getClass(), settings, index);
|
||||
nodeName = Node.NODE_NAME_SETTING.get(settings);
|
||||
this.indexMetaData = indexMetaData;
|
||||
numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
|
||||
|
|
|
@ -22,7 +22,7 @@ package org.elasticsearch.index;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
@ -87,7 +87,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
|
|||
}, Property.Dynamic, Property.IndexScope);
|
||||
|
||||
IndexingSlowLog(IndexSettings indexSettings) {
|
||||
this.indexLogger = Loggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings());
|
||||
this.indexLogger = ServerLoggers.getLogger(INDEX_INDEXING_SLOWLOG_PREFIX + ".index", indexSettings.getSettings());
|
||||
this.index = indexSettings.getIndex();
|
||||
|
||||
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, this::setReformat);
|
||||
|
@ -117,7 +117,7 @@ public final class IndexingSlowLog implements IndexingOperationListener {
|
|||
|
||||
private void setLevel(SlowLogLevel level) {
|
||||
this.level = level;
|
||||
Loggers.setLevel(this.indexLogger, level.name());
|
||||
ServerLoggers.setLevel(this.indexLogger, level.name());
|
||||
}
|
||||
|
||||
private void setWarnThreshold(TimeValue warnThreshold) {
|
||||
|
|
|
@ -21,7 +21,7 @@ package org.elasticsearch.index;
|
|||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
@ -81,8 +81,8 @@ public final class SearchSlowLog implements SearchOperationListener {
|
|||
|
||||
public SearchSlowLog(IndexSettings indexSettings) {
|
||||
|
||||
this.queryLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings());
|
||||
this.fetchLogger = Loggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings());
|
||||
this.queryLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query", indexSettings.getSettings());
|
||||
this.fetchLogger = ServerLoggers.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch", indexSettings.getSettings());
|
||||
|
||||
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold);
|
||||
this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos();
|
||||
|
@ -108,8 +108,8 @@ public final class SearchSlowLog implements SearchOperationListener {
|
|||
|
||||
private void setLevel(SlowLogLevel level) {
|
||||
this.level = level;
|
||||
Loggers.setLevel(queryLogger, level.name());
|
||||
Loggers.setLevel(fetchLogger, level.name());
|
||||
ServerLoggers.setLevel(queryLogger, level.name());
|
||||
ServerLoggers.setLevel(fetchLogger, level.name());
|
||||
}
|
||||
@Override
|
||||
public void onQueryPhase(SearchContext context, long tookInNanos) {
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.apache.lucene.index.IndexWriter;
|
|||
import org.apache.lucene.index.MergePolicy;
|
||||
import org.apache.lucene.index.MergeScheduler;
|
||||
import org.apache.lucene.index.OneMergeHelper;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.metrics.CounterMetric;
|
||||
import org.elasticsearch.common.metrics.MeanMetric;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
|
|||
this.config = indexSettings.getMergeSchedulerConfig();
|
||||
this.shardId = shardId;
|
||||
this.indexSettings = indexSettings.getSettings();
|
||||
this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId);
|
||||
this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings, shardId);
|
||||
refreshConfig();
|
||||
}
|
||||
|
||||
|
|
|
@ -51,7 +51,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver;
|
||||
|
@ -130,7 +130,7 @@ public abstract class Engine implements Closeable {
|
|||
this.shardId = engineConfig.getShardId();
|
||||
this.allocationId = engineConfig.getAllocationId();
|
||||
this.store = engineConfig.getStore();
|
||||
this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
|
||||
this.logger = ServerLoggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
|
||||
engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId());
|
||||
this.eventListener = engineConfig.getEventListener();
|
||||
}
|
||||
|
|
|
@ -19,10 +19,8 @@
|
|||
|
||||
package org.elasticsearch.index.fielddata.plain;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.SortedSetDocValues;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldData;
|
||||
|
|
|
@ -120,7 +120,7 @@ public class RandomScoreFunctionBuilder extends ScoreFunctionBuilder<RandomScore
|
|||
/**
|
||||
* Set the field to be used for random number generation. This parameter is compulsory
|
||||
* when a {@link #seed(int) seed} is set and ignored otherwise. Note that documents that
|
||||
* have the same value for a field will get the same score.
|
||||
* have the same value for a field will get the same score.
|
||||
*/
|
||||
public RandomScoreFunctionBuilder setField(String field) {
|
||||
this.field = field;
|
||||
|
|
|
@ -21,7 +21,7 @@ package org.elasticsearch.index.shard;
|
|||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
|
||||
public abstract class AbstractIndexShardComponent implements IndexShardComponent {
|
||||
|
@ -34,7 +34,7 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent
|
|||
protected AbstractIndexShardComponent(ShardId shardId, IndexSettings indexSettings) {
|
||||
this.shardId = shardId;
|
||||
this.indexSettings = indexSettings;
|
||||
this.logger = Loggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId);
|
||||
this.logger = ServerLoggers.getLogger(getClass(), this.indexSettings.getSettings(), shardId);
|
||||
this.deprecationLogger = new DeprecationLogger(logger);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.index.similarity;
|
|||
|
||||
import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
|
|
@ -58,7 +58,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
|
||||
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
||||
|
@ -159,7 +159,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException {
|
||||
super(shardId, indexSettings);
|
||||
final Settings settings = indexSettings.getSettings();
|
||||
this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId));
|
||||
this.directory = new StoreDirectory(directoryService.newDirectory(), ServerLoggers.getLogger("index.store.deletes", settings, shardId));
|
||||
this.shardLock = shardLock;
|
||||
this.onClose = onClose;
|
||||
final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING);
|
||||
|
|
|
@ -40,7 +40,7 @@ import org.elasticsearch.common.StopWatch;
|
|||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
|
@ -120,7 +120,7 @@ public class RecoverySourceHandler {
|
|||
this.recoveryTarget = recoveryTarget;
|
||||
this.request = request;
|
||||
this.shardId = this.request.shardId().id();
|
||||
this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName());
|
||||
this.logger = ServerLoggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName());
|
||||
this.chunkSizeInBytes = fileChunkSizeInBytes;
|
||||
this.response = new RecoveryResponse();
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ import org.elasticsearch.ExceptionsHelper;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.util.CancellableThreads;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
|
||||
|
@ -117,7 +117,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
|||
this.cancellableThreads = new CancellableThreads();
|
||||
this.recoveryId = idGenerator.incrementAndGet();
|
||||
this.listener = listener;
|
||||
this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
|
||||
this.logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
|
||||
this.indexShard = indexShard;
|
||||
this.sourceNode = sourceNode;
|
||||
this.shardId = indexShard.shardId();
|
||||
|
|
|
@ -67,6 +67,7 @@ import org.elasticsearch.common.inject.util.Providers;
|
|||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.network.NetworkAddress;
|
||||
import org.elasticsearch.common.network.NetworkModule;
|
||||
|
@ -143,7 +144,6 @@ import java.io.BufferedWriter;
|
|||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.io.UncheckedIOException;
|
||||
import java.net.Inet6Address;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.nio.charset.Charset;
|
||||
|
@ -267,7 +267,7 @@ public class Node implements Closeable {
|
|||
throw new IllegalStateException("Failed to create node environment", ex);
|
||||
}
|
||||
final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings);
|
||||
Logger logger = Loggers.getLogger(Node.class, tmpSettings);
|
||||
Logger logger = ServerLoggers.getLogger(Node.class, tmpSettings);
|
||||
final String nodeId = nodeEnvironment.nodeId();
|
||||
tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId);
|
||||
// this must be captured after the node name is possibly added to the settings
|
||||
|
|
|
@ -259,8 +259,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
|
|||
|
||||
final long maxOrd = getMaxOrd(valuesSource, context.searcher());
|
||||
assert maxOrd != -1;
|
||||
final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs());
|
||||
|
||||
final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs());
|
||||
|
||||
if (factories == AggregatorFactories.EMPTY &&
|
||||
includeExclude == null &&
|
||||
Aggregator.descendsFromBucketAggregator(parent) == false &&
|
||||
|
|
|
@ -26,7 +26,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
|
|||
import org.apache.lucene.util.Constants;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.MockLogAppender;
|
||||
|
||||
|
@ -83,11 +83,11 @@ public class MaxMapCountCheckTests extends ESTestCase {
|
|||
"I/O exception while trying to read [{}]",
|
||||
new Object[] { procSysVmMaxMapCountPath },
|
||||
e -> ioException == e));
|
||||
Loggers.addAppender(logger, appender);
|
||||
ServerLoggers.addAppender(logger, appender);
|
||||
assertThat(check.getMaxMapCount(logger), equalTo(-1L));
|
||||
appender.assertAllExpectationsMatched();
|
||||
verify(reader).close();
|
||||
Loggers.removeAppender(logger, appender);
|
||||
ServerLoggers.removeAppender(logger, appender);
|
||||
appender.stop();
|
||||
}
|
||||
|
||||
|
@ -105,11 +105,11 @@ public class MaxMapCountCheckTests extends ESTestCase {
|
|||
"unable to parse vm.max_map_count [{}]",
|
||||
new Object[] { "eof" },
|
||||
e -> e instanceof NumberFormatException && e.getMessage().equals("For input string: \"eof\"")));
|
||||
Loggers.addAppender(logger, appender);
|
||||
ServerLoggers.addAppender(logger, appender);
|
||||
assertThat(check.getMaxMapCount(logger), equalTo(-1L));
|
||||
appender.assertAllExpectationsMatched();
|
||||
verify(reader).close();
|
||||
Loggers.removeAppender(logger, appender);
|
||||
ServerLoggers.removeAppender(logger, appender);
|
||||
appender.stop();
|
||||
}
|
||||
|
||||
|
|
|
@ -44,6 +44,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDeci
|
|||
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
@ -342,7 +343,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
|
|||
new MockLogAppender.UnseenEventExpectation("no completed message logged on dry run",
|
||||
TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*")
|
||||
);
|
||||
Loggers.addAppender(actionLogger, dryRunMockLog);
|
||||
ServerLoggers.addAppender(actionLogger, dryRunMockLog);
|
||||
|
||||
AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
|
||||
ClusterRerouteResponse dryRunResponse = client().admin().cluster().prepareReroute()
|
||||
|
@ -357,7 +358,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
|
|||
|
||||
dryRunMockLog.assertAllExpectationsMatched();
|
||||
dryRunMockLog.stop();
|
||||
Loggers.removeAppender(actionLogger, dryRunMockLog);
|
||||
ServerLoggers.removeAppender(actionLogger, dryRunMockLog);
|
||||
|
||||
MockLogAppender allocateMockLog = new MockLogAppender();
|
||||
allocateMockLog.start();
|
||||
|
@ -369,7 +370,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
|
|||
new MockLogAppender.UnseenEventExpectation("no message for second allocate empty primary",
|
||||
TransportClusterRerouteAction.class.getName(), Level.INFO, "allocated an empty primary*" + nodeName2 + "*")
|
||||
);
|
||||
Loggers.addAppender(actionLogger, allocateMockLog);
|
||||
ServerLoggers.addAppender(actionLogger, allocateMockLog);
|
||||
|
||||
AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
|
||||
AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true);
|
||||
|
@ -385,7 +386,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
|
|||
|
||||
allocateMockLog.assertAllExpectationsMatched();
|
||||
allocateMockLog.stop();
|
||||
Loggers.removeAppender(actionLogger, allocateMockLog);
|
||||
ServerLoggers.removeAppender(actionLogger, allocateMockLog);
|
||||
}
|
||||
|
||||
public void testClusterRerouteWithBlocks() throws Exception {
|
||||
|
|
|
@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger;
|
|||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
|
@ -63,7 +63,7 @@ public class TemplateUpgradeServiceIT extends ESIntegTestCase {
|
|||
protected final Settings settings;
|
||||
|
||||
public TestPlugin(Settings settings) {
|
||||
this.logger = Loggers.getLogger(getClass(), settings);
|
||||
this.logger = ServerLoggers.getLogger(getClass(), settings);
|
||||
this.settings = settings;
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterInfo;
|
||||
import org.elasticsearch.cluster.ClusterInfoService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ESAllocationTestCase;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
|
||||
|
@ -41,24 +40,19 @@ import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllo
|
|||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.indices.cluster.AbstractIndicesClusterStateServiceTestCase;
|
||||
import org.elasticsearch.indices.cluster.ClusterStateChanges;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
|
||||
|
|
|
@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterInfo;
|
||||
import org.elasticsearch.cluster.ClusterInfoService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ESAllocationTestCase;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.cluster.service;
|
|||
import org.apache.logging.log4j.Level;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
|
@ -31,6 +30,7 @@ import org.elasticsearch.cluster.NodeConnectionsService;
|
|||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -130,7 +130,7 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
"*failed to execute cluster state applier in [2s]*"));
|
||||
|
||||
Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
|
||||
Loggers.addAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.addAppender(clusterLogger, mockAppender);
|
||||
try {
|
||||
final CountDownLatch latch = new CountDownLatch(3);
|
||||
clusterApplierService.currentTimeOverride = System.nanoTime();
|
||||
|
@ -180,7 +180,7 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
});
|
||||
latch.await();
|
||||
} finally {
|
||||
Loggers.removeAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(clusterLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
}
|
||||
mockAppender.assertAllExpectationsMatched();
|
||||
|
@ -210,7 +210,7 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
"*cluster state applier task [test3] took [34s] above the warn threshold of *"));
|
||||
|
||||
Logger clusterLogger = Loggers.getLogger("org.elasticsearch.cluster.service");
|
||||
Loggers.addAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.addAppender(clusterLogger, mockAppender);
|
||||
try {
|
||||
final CountDownLatch latch = new CountDownLatch(4);
|
||||
final CountDownLatch processedFirstTask = new CountDownLatch(1);
|
||||
|
@ -276,7 +276,7 @@ public class ClusterApplierServiceTests extends ESTestCase {
|
|||
});
|
||||
latch.await();
|
||||
} finally {
|
||||
Loggers.removeAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(clusterLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
}
|
||||
mockAppender.assertAllExpectationsMatched();
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
@ -231,7 +232,7 @@ public class MasterServiceTests extends ESTestCase {
|
|||
"*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)"));
|
||||
|
||||
Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName());
|
||||
Loggers.addAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.addAppender(clusterLogger, mockAppender);
|
||||
try {
|
||||
final CountDownLatch latch = new CountDownLatch(4);
|
||||
masterService.currentTimeOverride = System.nanoTime();
|
||||
|
@ -306,7 +307,7 @@ public class MasterServiceTests extends ESTestCase {
|
|||
});
|
||||
latch.await();
|
||||
} finally {
|
||||
Loggers.removeAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(clusterLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
}
|
||||
mockAppender.assertAllExpectationsMatched();
|
||||
|
@ -578,7 +579,7 @@ public class MasterServiceTests extends ESTestCase {
|
|||
"*cluster state update task [test4] took [34s] above the warn threshold of *"));
|
||||
|
||||
Logger clusterLogger = Loggers.getLogger(masterService.getClass().getPackage().getName());
|
||||
Loggers.addAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.addAppender(clusterLogger, mockAppender);
|
||||
try {
|
||||
final CountDownLatch latch = new CountDownLatch(5);
|
||||
final CountDownLatch processedFirstTask = new CountDownLatch(1);
|
||||
|
@ -674,7 +675,7 @@ public class MasterServiceTests extends ESTestCase {
|
|||
});
|
||||
latch.await();
|
||||
} finally {
|
||||
Loggers.removeAppender(clusterLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(clusterLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
}
|
||||
mockAppender.assertAllExpectationsMatched();
|
||||
|
|
|
@ -24,7 +24,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
|||
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
|
||||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.index.IndexModule;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -751,8 +751,8 @@ public class ScopedSettingsTests extends ESTestCase {
|
|||
settings.applySettings(Settings.builder().build());
|
||||
assertEquals(property, ESLoggerFactory.getLogger("test").getLevel());
|
||||
} finally {
|
||||
Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
|
||||
Loggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel);
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level);
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getLogger("test"), testLevel);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -767,7 +767,7 @@ public class ScopedSettingsTests extends ESTestCase {
|
|||
settings.applySettings(Settings.builder().build()); // here we fall back to 'logger.level' which is our default.
|
||||
assertEquals(Level.ERROR, ESLoggerFactory.getRootLogger().getLevel());
|
||||
} finally {
|
||||
Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
|
||||
ServerLoggers.setLevel(ESLoggerFactory.getRootLogger(), level);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -41,7 +41,6 @@ import org.elasticsearch.common.unit.TimeValue;
|
|||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.discovery.zen.ElectMasterService;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.indices.IndexClosedException;
|
||||
|
@ -52,8 +51,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope;
|
|||
import org.elasticsearch.test.InternalTestCluster.RestartCallback;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.apache.logging.log4j.core.appender.AbstractAppender;
|
|||
import org.apache.logging.log4j.core.filter.RegexFilter;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -71,8 +72,8 @@ public class MergeSchedulerSettingsTests extends ESTestCase {
|
|||
MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
|
||||
mockAppender.start();
|
||||
final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
|
||||
Loggers.addAppender(settingsLogger, mockAppender);
|
||||
Loggers.setLevel(settingsLogger, Level.TRACE);
|
||||
ServerLoggers.addAppender(settingsLogger, mockAppender);
|
||||
ServerLoggers.setLevel(settingsLogger, Level.TRACE);
|
||||
try {
|
||||
Settings.Builder builder = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
|
@ -91,9 +92,9 @@ public class MergeSchedulerSettingsTests extends ESTestCase {
|
|||
assertTrue(mockAppender.sawUpdateAutoThrottle);
|
||||
assertEquals(settings.getMergeSchedulerConfig().isAutoThrottle(), false);
|
||||
} finally {
|
||||
Loggers.removeAppender(settingsLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(settingsLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
Loggers.setLevel(settingsLogger, (Level) null);
|
||||
ServerLoggers.setLevel(settingsLogger, (Level) null);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -102,8 +103,8 @@ public class MergeSchedulerSettingsTests extends ESTestCase {
|
|||
MockAppender mockAppender = new MockAppender("testUpdateAutoThrottleSettings");
|
||||
mockAppender.start();
|
||||
final Logger settingsLogger = Loggers.getLogger("org.elasticsearch.common.settings.IndexScopedSettings");
|
||||
Loggers.addAppender(settingsLogger, mockAppender);
|
||||
Loggers.setLevel(settingsLogger, Level.TRACE);
|
||||
ServerLoggers.addAppender(settingsLogger, mockAppender);
|
||||
ServerLoggers.setLevel(settingsLogger, Level.TRACE);
|
||||
try {
|
||||
Settings.Builder builder = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
|
@ -123,9 +124,9 @@ public class MergeSchedulerSettingsTests extends ESTestCase {
|
|||
// Make sure we log the change:
|
||||
assertTrue(mockAppender.sawUpdateMaxThreadCount);
|
||||
} finally {
|
||||
Loggers.removeAppender(settingsLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(settingsLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
Loggers.setLevel(settingsLogger, (Level) null);
|
||||
ServerLoggers.setLevel(settingsLogger, (Level) null);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -78,6 +78,7 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||
|
@ -1924,8 +1925,8 @@ public class InternalEngineTests extends EngineTestCase {
|
|||
|
||||
Logger rootLogger = LogManager.getRootLogger();
|
||||
Level savedLevel = rootLogger.getLevel();
|
||||
Loggers.addAppender(rootLogger, mockAppender);
|
||||
Loggers.setLevel(rootLogger, Level.DEBUG);
|
||||
ServerLoggers.addAppender(rootLogger, mockAppender);
|
||||
ServerLoggers.setLevel(rootLogger, Level.DEBUG);
|
||||
rootLogger = LogManager.getRootLogger();
|
||||
|
||||
try {
|
||||
|
@ -1936,15 +1937,15 @@ public class InternalEngineTests extends EngineTestCase {
|
|||
assertFalse(mockAppender.sawIndexWriterMessage);
|
||||
|
||||
// Again, with TRACE, which should log IndexWriter output:
|
||||
Loggers.setLevel(rootLogger, Level.TRACE);
|
||||
ServerLoggers.setLevel(rootLogger, Level.TRACE);
|
||||
engine.index(indexForDoc(doc));
|
||||
engine.flush();
|
||||
assertTrue(mockAppender.sawIndexWriterMessage);
|
||||
|
||||
} finally {
|
||||
Loggers.removeAppender(rootLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(rootLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
Loggers.setLevel(rootLogger, savedLevel);
|
||||
ServerLoggers.setLevel(rootLogger, savedLevel);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2214,8 +2215,8 @@ public class InternalEngineTests extends EngineTestCase {
|
|||
|
||||
final Logger iwIFDLogger = Loggers.getLogger("org.elasticsearch.index.engine.Engine.IFD");
|
||||
|
||||
Loggers.addAppender(iwIFDLogger, mockAppender);
|
||||
Loggers.setLevel(iwIFDLogger, Level.DEBUG);
|
||||
ServerLoggers.addAppender(iwIFDLogger, mockAppender);
|
||||
ServerLoggers.setLevel(iwIFDLogger, Level.DEBUG);
|
||||
|
||||
try {
|
||||
// First, with DEBUG, which should NOT log IndexWriter output:
|
||||
|
@ -2226,16 +2227,16 @@ public class InternalEngineTests extends EngineTestCase {
|
|||
assertFalse(mockAppender.sawIndexWriterIFDMessage);
|
||||
|
||||
// Again, with TRACE, which should only log IndexWriter IFD output:
|
||||
Loggers.setLevel(iwIFDLogger, Level.TRACE);
|
||||
ServerLoggers.setLevel(iwIFDLogger, Level.TRACE);
|
||||
engine.index(indexForDoc(doc));
|
||||
engine.flush();
|
||||
assertFalse(mockAppender.sawIndexWriterMessage);
|
||||
assertTrue(mockAppender.sawIndexWriterIFDMessage);
|
||||
|
||||
} finally {
|
||||
Loggers.removeAppender(iwIFDLogger, mockAppender);
|
||||
ServerLoggers.removeAppender(iwIFDLogger, mockAppender);
|
||||
mockAppender.stop();
|
||||
Loggers.setLevel(iwIFDLogger, (Level) null);
|
||||
ServerLoggers.setLevel(iwIFDLogger, (Level) null);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -28,6 +28,7 @@ List projects = [
|
|||
'test:fixtures:krb5kdc-fixture',
|
||||
'test:fixtures:old-elasticsearch',
|
||||
'test:logger-usage',
|
||||
'libs:elasticsearch-core',
|
||||
'libs:elasticsearch-nio',
|
||||
'modules:aggs-matrix-stats',
|
||||
'modules:analysis-common',
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
import org.apache.lucene.util.TimeUnits;
|
||||
import org.elasticsearch.bootstrap.BootstrapForTesting;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
|
||||
|
||||
/**
|
||||
|
|
|
@ -35,7 +35,6 @@ import org.elasticsearch.repositories.RepositoryMissingException;
|
|||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.apache.lucene.search.IndexSearcher;
|
|||
import org.apache.lucene.search.QueryCache;
|
||||
import org.apache.lucene.search.QueryCachingPolicy;
|
||||
import org.apache.lucene.search.ReferenceManager;
|
||||
import org.apache.lucene.search.SearcherManager;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.test.junit.listeners;
|
|||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.junit.runner.Description;
|
||||
|
@ -106,7 +107,7 @@ public class LoggingListener extends RunListener {
|
|||
}
|
||||
for (final Map.Entry<String, String> entry : map.entrySet()) {
|
||||
final Logger logger = resolveLogger(entry.getKey());
|
||||
Loggers.setLevel(logger, entry.getValue());
|
||||
ServerLoggers.setLevel(logger, entry.getValue());
|
||||
}
|
||||
return existing;
|
||||
}
|
||||
|
@ -145,7 +146,7 @@ public class LoggingListener extends RunListener {
|
|||
private Map<String, String> reset(final Map<String, String> map) {
|
||||
for (final Map.Entry<String, String> previousLogger : map.entrySet()) {
|
||||
final Logger logger = resolveLogger(previousLogger.getKey());
|
||||
Loggers.setLevel(logger, previousLogger.getValue());
|
||||
ServerLoggers.setLevel(logger, previousLogger.getValue());
|
||||
}
|
||||
|
||||
return Collections.emptyMap();
|
||||
|
|
|
@ -22,7 +22,6 @@ package org.elasticsearch.test.rest.yaml;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.xcontent.ToXContent.Params;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@ package org.elasticsearch.test.store;
|
|||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.logging.ServerLoggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -95,7 +95,7 @@ public class MockFSIndexStore extends IndexStore {
|
|||
if (indexShard != null) {
|
||||
Boolean remove = shardSet.remove(indexShard);
|
||||
if (remove == Boolean.TRUE) {
|
||||
Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
|
||||
Logger logger = ServerLoggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
|
||||
MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId());
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue