From 4b219d15d2a552a58484e4b18a38a7ac827292ed Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 1 Sep 2016 13:05:22 -0400 Subject: [PATCH 1/5] Add CONSOLE to a few snippets in reference docs This allows them to be run in Console and adds them to the list of docs that are automatically tested as part of the build. Relates to #18160 --- docs/reference/indices/analyze.asciidoc | 1 + docs/reference/indices/clearcache.asciidoc | 11 +++--- docs/reference/indices/delete-index.asciidoc | 4 ++- docs/reference/indices/forcemerge.asciidoc | 10 ++++-- .../indices/get-field-mapping.asciidoc | 35 +++++++++++++------ docs/reference/indices/get-index.asciidoc | 12 ++++--- docs/reference/indices/get-mapping.asciidoc | 16 ++++++--- docs/reference/indices/get-settings.asciidoc | 13 ++++--- docs/reference/indices/open-close.asciidoc | 6 ++-- docs/reference/indices/recovery.asciidoc | 18 ++++++---- 10 files changed, 87 insertions(+), 39 deletions(-) diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index ee8b856ef41..5516d898813 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -181,4 +181,5 @@ The request returns the following result: } } -------------------------------------------------- +// TESTRESPONSE <1> Output only "keyword" attribute, since specify "attributes" in the request. diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 8ebb9e3488a..6a7240dc958 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -6,8 +6,10 @@ associated with one or more indices. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear' +POST /twitter/_cache/clear -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The API, by default, will clear all caches. Specific caches can be cleaned explicitly by setting `query`, `fielddata` or `request`. @@ -24,8 +26,9 @@ call, or even on `_all` the indices. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear' +POST /kimchy,elasticsearch/_cache/clear -$ curl -XPOST 'http://localhost:9200/_cache/clear' +POST /_cache/clear -------------------------------------------------- - +// CONSOLE +// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] diff --git a/docs/reference/indices/delete-index.asciidoc b/docs/reference/indices/delete-index.asciidoc index 5c652accfb9..bc057e155d0 100644 --- a/docs/reference/indices/delete-index.asciidoc +++ b/docs/reference/indices/delete-index.asciidoc @@ -5,8 +5,10 @@ The delete index API allows to delete an existing index. [source,js] -------------------------------------------------- -$ curl -XDELETE 'http://localhost:9200/twitter/' +DELETE /twitter -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The above example deletes an index called `twitter`. Specifying an index, alias or wildcard expression is required. diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc index a33b7fdfe2c..26baf214176 100644 --- a/docs/reference/indices/forcemerge.asciidoc +++ b/docs/reference/indices/forcemerge.asciidoc @@ -12,8 +12,10 @@ block until the previous force merge is complete. 
[source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/twitter/_forcemerge' +POST /twitter/_forcemerge -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] [[forcemerge-parameters]] @@ -45,7 +47,9 @@ even on `_all` the indices. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_forcemerge' +POST /kimchy,elasticsearch/_forcemerge -$ curl -XPOST 'http://localhost:9200/_forcemerge' +POST /_forcemerge -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] diff --git a/docs/reference/indices/get-field-mapping.asciidoc b/docs/reference/indices/get-field-mapping.asciidoc index 39667dc0874..224e74605f4 100644 --- a/docs/reference/indices/get-field-mapping.asciidoc +++ b/docs/reference/indices/get-field-mapping.asciidoc @@ -9,8 +9,10 @@ The following returns the mapping of the field `text` only: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter/_mapping/tweet/field/text' +GET /twitter/_mapping/tweet/field/message -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] For which the response is (assuming `text` is a default string field): @@ -18,18 +20,28 @@ For which the response is (assuming `text` is a default string field): -------------------------------------------------- { "twitter": { - "tweet": { - "text": { - "full_name": "text", - "mapping": { - "text": { "type": "text" } + "mappings": { + "tweet": { + "message": { + "full_name": "message", + "mapping": { + "message": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } } } } } } -------------------------------------------------- - +// TESTRESPONSE [float] @@ -44,12 +56,15 @@ following are some examples: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter,kimchy/_mapping/field/message' +GET /twitter,kimchy/_mapping/field/message -curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book/field/message,user.id' +GET /_all/_mapping/tweet,book/field/message,user.id -curl -XGET 'http://localhost:9200/_all/_mapping/tw*/field/*.id' +GET /_all/_mapping/tw*/field/*.id -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/^/PUT kimchy\nPUT book\n/] [float] === Specifying fields diff --git a/docs/reference/indices/get-index.asciidoc b/docs/reference/indices/get-index.asciidoc index b82bee05630..772318c71d8 100644 --- a/docs/reference/indices/get-index.asciidoc +++ b/docs/reference/indices/get-index.asciidoc @@ -1,12 +1,14 @@ [[indices-get-index]] == Get Index -The get index API allows to retrieve information about one or more indexes. +The get index API allows to retrieve information about one or more indexes. [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/' +GET /twitter -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The above example gets the information for an index called `twitter`. Specifying an index, alias or wildcard expression is required. @@ -17,13 +19,15 @@ all indices by using `_all` or `*` as index. 
[float] === Filtering index information -The information returned by the get API can be filtered to include only specific features +The information returned by the get API can be filtered to include only specific features by specifying a comma delimited list of features in the URL: [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/_settings,_mappings' +GET twitter/_settings,_mappings -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] The above command will only return the settings and mappings for the index called `twitter`. diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc index 317a708f13a..c3580917d9a 100644 --- a/docs/reference/indices/get-mapping.asciidoc +++ b/docs/reference/indices/get-mapping.asciidoc @@ -6,8 +6,10 @@ index/type. [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter/_mapping/tweet' +GET /twitter/_mapping/tweet -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] === Multiple Indices and Types @@ -21,17 +23,21 @@ following are some examples: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/_mapping/twitter,kimchy' +GET /_mapping/tweet,kimchy -curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book' +GET /_all/_mapping/tweet,book -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] If you want to get mappings of all indices and types then the following two examples are equivalent: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/_all/_mapping' +GET /_all/_mapping -curl -XGET 'http://localhost:9200/_mapping' +GET /_mapping -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] diff --git a/docs/reference/indices/get-settings.asciidoc b/docs/reference/indices/get-settings.asciidoc index 4689c448b56..60d7a75a861 100644 --- a/docs/reference/indices/get-settings.asciidoc +++ b/docs/reference/indices/get-settings.asciidoc @@ -5,8 +5,10 @@ The get settings API allows to retrieve settings of index/indices: [source,js] -------------------------------------------------- -$ curl -XGET 'http://localhost:9200/twitter/_settings' +GET /twitter/_settings -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] === Multiple Indices and Types @@ -20,12 +22,15 @@ Wildcard expressions are also supported. 
The following are some examples: [source,js] -------------------------------------------------- -curl -XGET 'http://localhost:9200/twitter,kimchy/_settings' +GET /twitter,kimchy/_settings -curl -XGET 'http://localhost:9200/_all/_settings' +GET /_all/_settings -curl -XGET 'http://localhost:9200/2013-*/_settings' +GET /log_2013_*/_settings -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] +// TEST[s/^/PUT kimchy\nPUT log_2013_01_01\n/] [float] === Filtering settings by name diff --git a/docs/reference/indices/open-close.asciidoc b/docs/reference/indices/open-close.asciidoc index afdab7bedaf..59f36112b4e 100644 --- a/docs/reference/indices/open-close.asciidoc +++ b/docs/reference/indices/open-close.asciidoc @@ -12,10 +12,12 @@ example: [source,js] -------------------------------------------------- -curl -XPOST 'localhost:9200/my_index/_close' +POST /my_index/_close -curl -XPOST 'localhost:9200/my_index/_open' +POST /my_index/_open -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] It is possible to open and close multiple indices. An error will be thrown if the request explicitly refers to a missing index. This behaviour can be diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index c4aabac3ac3..448c423d0b6 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -8,15 +8,19 @@ For example, the following command would show recovery information for the indic [source,js] -------------------------------------------------- -curl -XGET http://localhost:9200/index1,index2/_recovery +GET index1,index2/_recovery?human -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT index1\nPUT index2\n/] To see cluster-wide recovery status simply leave out the index names. [source,js] -------------------------------------------------- -curl -XGET http://localhost:9200/_recovery?pretty&human +GET /_recovery?human -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT index1\n{"settings": {"index.number_of_shards": 1}}\n/] Response: [source,js] @@ -30,7 +34,7 @@ Response: "primary" : true, "start_time" : "2014-02-24T12:15:59.716", "start_time_in_millis": 1393244159716, - "total_time" : "2.9m" + "total_time" : "2.9m", "total_time_in_millis" : 175576, "source" : { "repository" : "my_repository", @@ -45,7 +49,7 @@ Response: }, "index" : { "size" : { - "total" : "75.4mb" + "total" : "75.4mb", "total_in_bytes" : 79063092, "reused" : "0b", "reused_in_bytes" : 0, @@ -68,7 +72,7 @@ Response: "percent" : "100.0%", "total_on_start" : 0, "total_time" : "0s", - "total_time_in_millis" : 0 + "total_time_in_millis" : 0, }, "start" : { "check_index_time" : "0s", @@ -80,6 +84,7 @@ Response: } } -------------------------------------------------- +// We should really assert that this is up to date but that is hard! The above response shows a single index recovering a single shard. In this case, the source of the recovery is a snapshot repository and the target of the recovery is the node with name "my_es_node". @@ -90,7 +95,7 @@ In some cases a higher level of detail may be preferable. 
Setting "detailed=true [source,js] -------------------------------------------------- -curl -XGET http://localhost:9200/_recovery?pretty&human&detailed=true +GET _recovery?human&detailed=true -------------------------------------------------- Response: @@ -170,6 +175,7 @@ Response: } } -------------------------------------------------- +// We should really assert that this is up to date but that is hard! This response shows a detailed listing (truncated for brevity) of the actual files recovered and their sizes. From 359e76f7e74fef2e7c4d5771a38103dc565ced6a Mon Sep 17 00:00:00 2001 From: Florian Hopf Date: Thu, 1 Sep 2016 09:10:12 +0200 Subject: [PATCH 2/5] Fixed wording --- docs/reference/docs/reindex.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index cdb2b8b0ffb..a53ddc34dac 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -424,7 +424,7 @@ supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`, Sending the `refresh` url parameter will cause all indexes to which the request wrote to be refreshed. This is different than the Index API's `refresh` -parameter which causes just the shard that received the new data to be indexed. +parameter which causes just the shard that received the new data to be refreshed. If the request contains `wait_for_completion=false` then Elasticsearch will perform some preflight checks, launch the request, and then return a `task` From 1e80adbfbef444980af2d9c9e0088e4f52fc90d8 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 1 Sep 2016 14:00:47 -0400 Subject: [PATCH 3/5] Configure test logging with Log4j 2 This commit configures test logging for Log4j 2. The default logger configuration uses the console appender but at the error level, so most tests are missing logging. Instead, this commit provides a configuration for tests which is picked up from the classpath by Log4j 2 when it initializes. However, this now means that we can no longer initialize Log4j with a bare-bones configuration when tests run as doing so will prevent Log4j 2 from attempting to configure logging via the classpath. Consequently, we move this needed initialization (as commented, to avoid a message about a status logger not being configured when we are preparing to configure Log4j from properties files in the config directory) to only run when we are explicitly configuring Log4j from properties files. 
Relates #20284 --- .../common/logging/LogConfigurator.java | 18 ++----- .../elasticsearch/common/logging/Loggers.java | 5 -- .../bootstrap/BootstrapForTesting.java | 2 - .../src/main/resources/log4j.properties | 9 ---- .../src/main/resources/log4j2-test.properties | 9 ++++ .../test/test/LoggingListenerTests.java | 48 +++++++++---------- 6 files changed, 38 insertions(+), 53 deletions(-) delete mode 100644 test/framework/src/main/resources/log4j.properties create mode 100644 test/framework/src/main/resources/log4j2-test.properties diff --git a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index 1b58b686100..3b7ec0deb34 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -50,24 +50,16 @@ import java.util.Set; public class LogConfigurator { - static { - // we initialize the status logger immediately otherwise Log4j will complain when we try to get the context - final ConfigurationBuilder builder = ConfigurationBuilderFactory.newConfigurationBuilder(); - builder.setStatusLevel(Level.ERROR); - Configurator.initialize(builder.build()); - } - - /** - * for triggering class initialization - */ - public static void init() { - } - public static void configure(final Environment environment, final boolean resolveConfig) throws IOException { final Settings settings = environment.settings(); setLogConfigurationSystemProperty(environment, settings); + // we initialize the status logger immediately otherwise Log4j will complain when we try to get the context + final ConfigurationBuilder builder = ConfigurationBuilderFactory.newConfigurationBuilder(); + builder.setStatusLevel(Level.ERROR); + Configurator.initialize(builder.build()); + final LoggerContext context = (LoggerContext) LogManager.getContext(false); if (resolveConfig) { diff --git a/core/src/main/java/org/elasticsearch/common/logging/Loggers.java b/core/src/main/java/org/elasticsearch/common/logging/Loggers.java index fb96639fb1f..ddca741389c 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/core/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -43,11 +43,6 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList; */ public class Loggers { - static { - // ensure that the status logger is configured before we touch any loggers - LogConfigurator.init(); - } - private static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch."); public static final String SPACE = " "; diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 4315c9ea2d6..96921c3c90d 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -65,8 +65,6 @@ public class BootstrapForTesting { // without making things complex??? 
static { - LogConfigurator.init(); - // make sure java.io.tmpdir exists always (in case code uses it in a static initializer) Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"), "please set ${java.io.tmpdir} in pom.xml")); diff --git a/test/framework/src/main/resources/log4j.properties b/test/framework/src/main/resources/log4j.properties deleted file mode 100644 index 87d4560f72f..00000000000 --- a/test/framework/src/main/resources/log4j.properties +++ /dev/null @@ -1,9 +0,0 @@ -tests.es.logger.level=INFO -log4j.rootLogger=${tests.es.logger.level}, out - -log4j.logger.org.apache.http=INFO, out -log4j.additivity.org.apache.http=false - -log4j.appender.out=org.apache.log4j.ConsoleAppender -log4j.appender.out.layout=org.apache.log4j.PatternLayout -log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n diff --git a/test/framework/src/main/resources/log4j2-test.properties b/test/framework/src/main/resources/log4j2-test.properties new file mode 100644 index 00000000000..9cfe3e326aa --- /dev/null +++ b/test/framework/src/main/resources/log4j2-test.properties @@ -0,0 +1,9 @@ +status = error + +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java index 1fcb1e51038..2d428202741 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java @@ -50,27 +50,27 @@ public class LoggingListenerTests extends ESTestCase { Logger xyzLogger = Loggers.getLogger("xyz"); Logger abcLogger = Loggers.getLogger("abc"); - assertEquals(Level.ERROR, abcLogger.getLevel()); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertEquals(Level.INFO, abcLogger.getLevel()); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunStarted(suiteDescription); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); Method method = TestClass.class.getMethod("annotatedTestMethod"); TestLogging annotation = method.getAnnotation(TestLogging.class); Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation); loggingListener.testStarted(testDescription); assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testFinished(testDescription); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunFinished(new Result()); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), 
equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); } public void testCustomLevelPerClass() throws Exception { @@ -81,24 +81,24 @@ public class LoggingListenerTests extends ESTestCase { Logger abcLogger = Loggers.getLogger("abc"); Logger xyzLogger = Loggers.getLogger("xyz"); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunStarted(suiteDescription); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test"); loggingListener.testStarted(testDescription); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testFinished(testDescription); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testRunFinished(new Result()); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); } public void testCustomLevelPerClassAndPerMethod() throws Exception { @@ -109,10 +109,10 @@ public class LoggingListenerTests extends ESTestCase { Logger abcLogger = Loggers.getLogger("abc"); Logger xyzLogger = Loggers.getLogger("xyz"); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); loggingListener.testRunStarted(suiteDescription); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Method method = TestClass.class.getMethod("annotatedTestMethod"); @@ -123,7 +123,7 @@ public class LoggingListenerTests extends ESTestCase { assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testFinished(testDescription); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); Method method2 = TestClass.class.getMethod("annotatedTestMethod2"); @@ -134,12 +134,12 @@ public class LoggingListenerTests extends ESTestCase { assertThat(abcLogger.getLevel(), equalTo(Level.TRACE)); loggingListener.testFinished(testDescription2); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); assertThat(abcLogger.getLevel(), equalTo(Level.WARN)); loggingListener.testRunFinished(new Result()); - assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR)); - assertThat(abcLogger.getLevel(), equalTo(Level.ERROR)); + assertThat(xyzLogger.getLevel(), equalTo(Level.INFO)); + assertThat(abcLogger.getLevel(), equalTo(Level.INFO)); } /** From 5fe4cb6adcde0236cd835b3417d34e339092717d Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 1 Sep 2016 14:25:04 -0400 Subject: [PATCH 4/5] Size limit deprecation logs This commit 
configures the deprecation logs to be size-limited to 1 GB, and compress these logs when they roll. The default configuration will preserve up to four rolled logs. Relates #20287 --- distribution/src/main/resources/config/log4j2.properties | 9 +++++---- docs/reference/setup/configuration.asciidoc | 4 ++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/distribution/src/main/resources/config/log4j2.properties b/distribution/src/main/resources/config/log4j2.properties index 06d8200b528..2cfe038cc84 100644 --- a/distribution/src/main/resources/config/log4j2.properties +++ b/distribution/src/main/resources/config/log4j2.properties @@ -29,11 +29,12 @@ appender.deprecation_rolling.name = deprecation_rolling appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log appender.deprecation_rolling.layout.type = PatternLayout appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n -appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%d{yyyy-MM-dd}.log +appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz appender.deprecation_rolling.policies.type = Policies -appender.deprecation_rolling.policies.time.type = TimeBasedTriggeringPolicy -appender.deprecation_rolling.policies.time.interval = 1 -appender.deprecation_rolling.policies.time.modulate = true +appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.deprecation_rolling.policies.size.size = 1GB +appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling.strategy.max = 4 logger.deprecation.name = deprecation logger.deprecation.level = warn diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 6f9811b52de..518fb24a8bd 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -174,5 +174,9 @@ This will create a daily rolling deprecation log file in your log directory. Check this file regularly, especially when you intend to upgrade to a new major version. +The default logging configuration has set the roll policy for the deprecation +logs to roll and compress after 1 GB, and to preserve a maximum of five log +files (four rolled logs, and the active log). + You can disable it in the `config/log4j2.properties` file by setting the deprecation log level to `info`. 
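For readers who want to see the net effect rather than the fragmented diff, the deprecation appender in `config/log4j2.properties` ends up looking roughly like the sketch below after this patch. The appender `type` line sits above the hunk shown here and is assumed; treat the block as an illustration, not a tested configuration.

[source,properties]
--------------------------------------------------
# assumed from context above the hunk: the deprecation log uses a rolling file appender
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
# rolled logs are numbered and compressed rather than dated
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
# roll once the active log reaches 1 GB ...
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
# ... and keep at most four rolled logs in addition to the active one
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4

logger.deprecation.name = deprecation
logger.deprecation.level = warn
--------------------------------------------------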
From c8b984aee9cad0d6e652412905b116f08b737c56 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 1 Sep 2016 17:08:18 -0400 Subject: [PATCH 5/5] Add CONSOLE to more docs Relates to #18160 --- docs/build.gradle | 27 ++++++++------- docs/reference/indices/refresh.asciidoc | 10 ++++-- .../reference/indices/rollover-index.asciidoc | 34 +++++++++++++------ 3 files changed, 45 insertions(+), 26 deletions(-) diff --git a/docs/build.gradle b/docs/build.gradle index caf7cfea01e..d930dfb5b60 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -65,18 +65,18 @@ buildRestTests.docs = fileTree(projectDir) { Closure setupTwitter = { String name, int count -> buildRestTests.setups[name] = ''' - do: - indices.create: - index: twitter - body: - settings: - number_of_shards: 1 - number_of_replicas: 1 + indices.create: + index: twitter + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 - do: - bulk: - index: twitter - type: tweet - refresh: true - body: |''' + bulk: + index: twitter + type: tweet + refresh: true + body: |''' for (int i = 0; i < count; i++) { String user, text if (i == 0) { @@ -87,12 +87,13 @@ Closure setupTwitter = { String name, int count -> text = "some message with the number $i" } buildRestTests.setups[name] += """ - {"index":{"_id": "$i"}} - {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}""" + {"index":{"_id": "$i"}} + {"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}""" } } setupTwitter('twitter', 5) setupTwitter('big_twitter', 120) +setupTwitter('huge_twitter', 1200) buildRestTests.setups['host'] = ''' # Fetch the http host. We use the host of the master because we know there will always be a master. diff --git a/docs/reference/indices/refresh.asciidoc b/docs/reference/indices/refresh.asciidoc index bbc1f20f409..1e27ace3625 100644 --- a/docs/reference/indices/refresh.asciidoc +++ b/docs/reference/indices/refresh.asciidoc @@ -9,8 +9,10 @@ refresh is scheduled periodically. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/twitter/_refresh' +POST /twitter/_refresh -------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] [float] === Multi Index @@ -20,7 +22,9 @@ call, or even on `_all` the indices. [source,js] -------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_refresh' +POST /kimchy,elasticsearch/_refresh -$ curl -XPOST 'http://localhost:9200/_refresh' +POST /_refresh -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index b12d93bb4b8..5e3f744ebaf 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -19,7 +19,9 @@ PUT /logs-000001 <1> } } -POST logs_write/_rollover <2> +# Add > 1000 documents to logs-000001 + +POST /logs_write/_rollover <2> { "conditions": { "max_age": "7d", @@ -28,6 +30,8 @@ POST logs_write/_rollover <2> } -------------------------------------------------- // CONSOLE +// TEST[setup:huge_twitter] +// TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/] <1> Creates an index called `logs-0000001` with the alias `logs_write`. 
<2> If the index pointed to by `logs_write` was created 7 or more days ago, or contains 1,000 or more documents, then the `logs-0002` index is created @@ -38,6 +42,8 @@ The above request might return the following response: [source,js] -------------------------------------------------- { + "acknowledged": true, + "shards_acknowledged": true, "old_index": "logs-000001", "new_index": "logs-000002", "rolled_over": true, <1> @@ -48,6 +54,7 @@ The above request might return the following response: } } -------------------------------------------------- +// TESTRESPONSE <1> Whether the index was rolled over. <2> Whether the rollover was dry run. <3> The result of each condition. @@ -65,9 +72,16 @@ the new index as follows: [source,js] -------------------------------------------------- -POST my_alias/_rollover/my_new_index_name -{...} +POST /my_alias/_rollover/my_new_index_name +{ + "conditions": { + "max_age": "7d", + "max_docs": 1000 + } +} -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/] [float] === Defining the new index @@ -75,7 +89,7 @@ POST my_alias/_rollover/my_new_index_name The settings, mappings, and aliases for the new index are taken from any matching <>. Additionally, you can specify `settings`, `mappings`, and `aliases` in the body of the request, just like the -<> API. Values specified in the request +<> API. Values specified in the request override any values set in matching index templates. For example, the following `rollover` request overrides the `index.number_of_shards` setting: @@ -88,14 +102,14 @@ PUT /logs-000001 } } -POST logs_write/_rollover +POST /logs_write/_rollover { "conditions" : { "max_age": "7d", "max_docs": 1000 }, - "settings": { - "index.number_of_shards": 2 + "settings": { + "index.number_of_shards": 2 } } -------------------------------------------------- @@ -116,7 +130,7 @@ PUT /logs-000001 } } -POST logs_write/_rollover?dry_run +POST /logs_write/_rollover?dry_run { "conditions" : { "max_age": "7d", @@ -129,6 +143,6 @@ POST logs_write/_rollover?dry_run [float] === Wait For Active Shards -Because the rollover operation creates a new index to rollover to, the -<> setting on +Because the rollover operation creates a new index to rollover to, the +<> setting on index creation applies to the rollover action as well.
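As an illustration of that last point, a rollover request that asks for two active shard copies before returning might look like the sketch below. The `wait_for_active_shards` parameter is assumed here to be accepted on the rollover endpoint in the same way as on index creation, and the snippet is not wired into the automated doc tests.

[source,js]
--------------------------------------------------
POST /logs_write/_rollover?wait_for_active_shards=2
{
  "conditions": {
    "max_age": "7d",
    "max_docs": 1000
  }
}
--------------------------------------------------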