Merge branch 'master' into deprecate

Jack Conradson 2016-09-01 14:53:49 -07:00
commit 7c552f8127
22 changed files with 180 additions and 123 deletions

View File

@@ -50,24 +50,16 @@ import java.util.Set;
public class LogConfigurator {
static {
// we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
final ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
builder.setStatusLevel(Level.ERROR);
Configurator.initialize(builder.build());
}
/**
* for triggering class initialization
*/
public static void init() {
}
public static void configure(final Environment environment, final boolean resolveConfig) throws IOException {
final Settings settings = environment.settings();
setLogConfigurationSystemProperty(environment, settings);
// we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
final ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
builder.setStatusLevel(Level.ERROR);
Configurator.initialize(builder.build());
final LoggerContext context = (LoggerContext) LogManager.getContext(false);
if (resolveConfig) {

View File

@@ -43,11 +43,6 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
*/
public class Loggers {
static {
// ensure that the status logger is configured before we touch any loggers
LogConfigurator.init();
}
private static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
public static final String SPACE = " ";

View File

@@ -29,11 +29,12 @@ appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%d{yyyy-MM-dd}.log
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.deprecation_rolling.policies.time.interval = 1
appender.deprecation_rolling.policies.time.modulate = true
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
logger.deprecation.name = deprecation
logger.deprecation.level = warn

View File

@@ -93,6 +93,7 @@ Closure setupTwitter = { String name, int count ->
}
setupTwitter('twitter', 5)
setupTwitter('big_twitter', 120)
setupTwitter('huge_twitter', 1200)
buildRestTests.setups['host'] = '''
# Fetch the http host. We use the host of the master because we know there will always be a master.

View File

@@ -424,7 +424,7 @@ supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`,
Sending the `refresh` url parameter will cause all indexes to which the request
wrote to be refreshed. This is different than the Index API's `refresh`
parameter which causes just the shard that received the new data to be indexed.
parameter which causes just the shard that received the new data to be refreshed.
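For example, a minimal sketch (index names are illustrative) that reindexes `twitter` into `new_twitter` and refreshes every index the request wrote to:

[source,js]
--------------------------------------------------
POST /_reindex?refresh
{
  "source": { "index": "twitter" },
  "dest": { "index": "new_twitter" }
}
--------------------------------------------------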
If the request contains `wait_for_completion=false` then Elasticsearch will
perform some preflight checks, launch the request, and then return a `task`

View File

@@ -181,4 +181,5 @@ The request returns the following result:
}
}
--------------------------------------------------
// TESTRESPONSE
<1> Output only the "keyword" attribute, since "attributes" was specified in the request.

View File

@@ -6,8 +6,10 @@ associated with one or more indices.
[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear'
POST /twitter/_cache/clear
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
The API, by default, will clear all caches. Specific caches can be cleared
explicitly by setting `query`, `fielddata` or `request`.
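For instance, a minimal sketch that clears only the query cache and leaves the other caches untouched:

[source,js]
--------------------------------------------------
POST /twitter/_cache/clear?query=true
--------------------------------------------------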
@@ -24,8 +26,9 @@ call, or even on `_all` the indices.
[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear'
POST /kimchy,elasticsearch/_cache/clear
$ curl -XPOST 'http://localhost:9200/_cache/clear'
POST /_cache/clear
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]

View File

@@ -5,8 +5,10 @@ The delete index API allows to delete an existing index.
[source,js]
--------------------------------------------------
$ curl -XDELETE 'http://localhost:9200/twitter/'
DELETE /twitter
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
The above example deletes an index called `twitter`. Specifying an index,
alias or wildcard expression is required.
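For example, a hypothetical sketch that uses a wildcard expression to delete every index whose name starts with `logs-`:

[source,js]
--------------------------------------------------
DELETE /logs-*
--------------------------------------------------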

View File

@@ -12,8 +12,10 @@ block until the previous force merge is complete.
[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/twitter/_forcemerge'
POST /twitter/_forcemerge
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
[float]
[[forcemerge-parameters]]
@@ -45,7 +47,9 @@ even on `_all` the indices.
[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_forcemerge'
POST /kimchy,elasticsearch/_forcemerge
$ curl -XPOST 'http://localhost:9200/_forcemerge'
POST /_forcemerge
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]

View File

@@ -9,8 +9,10 @@ The following returns the mapping of the field `text` only:
[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/twitter/_mapping/tweet/field/text'
GET /twitter/_mapping/tweet/field/message
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
For which the response is (assuming `message` is a default string field):
@@ -18,18 +20,28 @@ For which the response is (assuming `message` is a default string field):
--------------------------------------------------
{
"twitter": {
"mappings": {
"tweet": {
"text": {
"full_name": "text",
"message": {
"full_name": "message",
"mapping": {
"text": { "type": "text" }
"message": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
}
}
}
}
}
--------------------------------------------------
// TESTRESPONSE
[float]
@@ -44,12 +56,15 @@ following are some examples:
[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/twitter,kimchy/_mapping/field/message'
GET /twitter,kimchy/_mapping/field/message
curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book/field/message,user.id'
GET /_all/_mapping/tweet,book/field/message,user.id
curl -XGET 'http://localhost:9200/_all/_mapping/tw*/field/*.id'
GET /_all/_mapping/tw*/field/*.id
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
// TEST[s/^/PUT kimchy\nPUT book\n/]
[float]
=== Specifying fields

View File

@@ -5,8 +5,10 @@ The get index API allows to retrieve information about one or more indexes.
[source,js]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/twitter/'
GET /twitter
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
The above example gets the information for an index called `twitter`. Specifying an index,
alias or wildcard expression is required.
@@ -22,8 +24,10 @@ by specifying a comma delimited list of features in the URL:
[source,js]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/twitter/_settings,_mappings'
GET twitter/_settings,_mappings
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
The above command will only return the settings and mappings for the index called `twitter`.
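The `_aliases` feature can be requested the same way; for example, a minimal sketch:

[source,js]
--------------------------------------------------
GET /twitter/_settings,_aliases
--------------------------------------------------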

View File

@@ -6,8 +6,10 @@ index/type.
[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/twitter/_mapping/tweet'
GET /twitter/_mapping/tweet
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
[float]
=== Multiple Indices and Types
@@ -21,17 +23,21 @@ following are some examples:
[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/_mapping/twitter,kimchy'
GET /_mapping/tweet,kimchy
curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book'
GET /_all/_mapping/tweet,book
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
If you want to get mappings of all indices and types then the following
two examples are equivalent:
[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/_all/_mapping'
GET /_all/_mapping
curl -XGET 'http://localhost:9200/_mapping'
GET /_mapping
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]

View File

@@ -5,8 +5,10 @@ The get settings API allows to retrieve settings of index/indices:
[source,js]
--------------------------------------------------
$ curl -XGET 'http://localhost:9200/twitter/_settings'
GET /twitter/_settings
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
[float]
=== Multiple Indices and Types
@@ -20,12 +22,15 @@ Wildcard expressions are also supported. The following are some examples:
[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/twitter,kimchy/_settings'
GET /twitter,kimchy/_settings
curl -XGET 'http://localhost:9200/_all/_settings'
GET /_all/_settings
curl -XGET 'http://localhost:9200/2013-*/_settings'
GET /log_2013_*/_settings
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
// TEST[s/^/PUT kimchy\nPUT log_2013_01_01\n/]
[float]
=== Filtering settings by name

View File

@@ -12,10 +12,12 @@ example:
[source,js]
--------------------------------------------------
curl -XPOST 'localhost:9200/my_index/_close'
POST /my_index/_close
curl -XPOST 'localhost:9200/my_index/_open'
POST /my_index/_open
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_index\n/]
It is possible to open and close multiple indices. An error will be thrown
if the request explicitly refers to a missing index. This behaviour can be

View File

@@ -8,15 +8,19 @@ For example, the following command would show recovery information for the indic
[source,js]
--------------------------------------------------
curl -XGET http://localhost:9200/index1,index2/_recovery
GET index1,index2/_recovery?human
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT index1\nPUT index2\n/]
To see cluster-wide recovery status simply leave out the index names.
[source,js]
--------------------------------------------------
curl -XGET http://localhost:9200/_recovery?pretty&human
GET /_recovery?human
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT index1\n{"settings": {"index.number_of_shards": 1}}\n/]
Response:
[source,js]
@@ -30,7 +34,7 @@ Response:
"primary" : true,
"start_time" : "2014-02-24T12:15:59.716",
"start_time_in_millis": 1393244159716,
"total_time" : "2.9m"
"total_time" : "2.9m",
"total_time_in_millis" : 175576,
"source" : {
"repository" : "my_repository",
@@ -45,7 +49,7 @@ Response:
},
"index" : {
"size" : {
"total" : "75.4mb"
"total" : "75.4mb",
"total_in_bytes" : 79063092,
"reused" : "0b",
"reused_in_bytes" : 0,
@@ -68,7 +72,7 @@ Response:
"percent" : "100.0%",
"total_on_start" : 0,
"total_time" : "0s",
"total_time_in_millis" : 0
"total_time_in_millis" : 0,
},
"start" : {
"check_index_time" : "0s",
@@ -80,6 +84,7 @@
}
}
--------------------------------------------------
// We should really assert that this is up to date but that is hard!
The above response shows a single index recovering a single shard. In this case, the source of the recovery is a snapshot repository
and the target of the recovery is the node with name "my_es_node".
@@ -90,7 +95,7 @@ In some cases a higher level of detail may be preferable. Setting "detailed=true
[source,js]
--------------------------------------------------
curl -XGET http://localhost:9200/_recovery?pretty&human&detailed=true
GET _recovery?human&detailed=true
--------------------------------------------------
Response:
@@ -170,6 +175,7 @@ Response:
}
}
--------------------------------------------------
// We should really assert that this is up to date but that is hard!
This response shows a detailed listing (truncated for brevity) of the actual files recovered and their sizes.

View File

@@ -9,8 +9,10 @@ refresh is scheduled periodically.
[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/twitter/_refresh'
POST /twitter/_refresh
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
[float]
=== Multi Index
@@ -20,7 +22,9 @@ call, or even on `_all` the indices.
[source,js]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_refresh'
POST /kimchy,elasticsearch/_refresh
$ curl -XPOST 'http://localhost:9200/_refresh'
POST /_refresh
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]

View File

@@ -19,7 +19,9 @@ PUT /logs-000001 <1>
}
}
POST logs_write/_rollover <2>
# Add > 1000 documents to logs-000001
POST /logs_write/_rollover <2>
{
"conditions": {
"max_age": "7d",
@@ -28,6 +30,8 @@ POST logs_write/_rollover <2>
}
--------------------------------------------------
// CONSOLE
// TEST[setup:huge_twitter]
// TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/]
<1> Creates an index called `logs-000001` with the alias `logs_write`.
<2> If the index pointed to by `logs_write` was created 7 or more days ago, or
contains 1,000 or more documents, then the `logs-000002` index is created
@@ -38,6 +42,8 @@ The above request might return the following response:
[source,js]
--------------------------------------------------
{
"acknowledged": true,
"shards_acknowledged": true,
"old_index": "logs-000001",
"new_index": "logs-000002",
"rolled_over": true, <1>
@@ -48,6 +54,7 @@ The above request might return the following response:
}
}
--------------------------------------------------
// TESTRESPONSE
<1> Whether the index was rolled over.
<2> Whether the rollover was a dry run.
<3> The result of each condition.
@@ -65,9 +72,16 @@ the new index as follows:
[source,js]
--------------------------------------------------
POST my_alias/_rollover/my_new_index_name
{...}
POST /my_alias/_rollover/my_new_index_name
{
"conditions": {
"max_age": "7d",
"max_docs": 1000
}
}
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/]
[float]
=== Defining the new index
@@ -88,7 +102,7 @@ PUT /logs-000001
}
}
POST logs_write/_rollover
POST /logs_write/_rollover
{
"conditions" : {
"max_age": "7d",
@@ -116,7 +130,7 @@ PUT /logs-000001
}
}
POST logs_write/_rollover?dry_run
POST /logs_write/_rollover?dry_run
{
"conditions" : {
"max_age": "7d",

View File

@@ -174,5 +174,9 @@ This will create a daily rolling deprecation log file in your log directory.
Check this file regularly, especially when you intend to upgrade to a new
major version.
The default logging configuration rolls and compresses the deprecation logs
after 1 GB, and preserves a maximum of five log files (four rolled logs, and
the active log).
You can disable deprecation logging in the `config/log4j2.properties` file by
setting the deprecation log level to `error`; deprecation messages are logged
at the `warn` level, so a level of `info` would still include them.
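For example, a minimal sketch mirroring the `logger.deprecation` entries from the default configuration shown earlier in this diff:

[source,properties]
--------------------------------------------------
logger.deprecation.name = deprecation
logger.deprecation.level = error
--------------------------------------------------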

View File

@@ -65,8 +65,6 @@ public class BootstrapForTesting {
// without making things complex???
static {
LogConfigurator.init();
// make sure java.io.tmpdir exists always (in case code uses it in a static initializer)
Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"),
"please set ${java.io.tmpdir} in pom.xml"));

View File

@@ -1,9 +0,0 @@
tests.es.logger.level=INFO
log4j.rootLogger=${tests.es.logger.level}, out
log4j.logger.org.apache.http=INFO, out
log4j.additivity.org.apache.http=false
log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n

View File

@@ -0,0 +1,9 @@
status = error
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
rootLogger.level = info
rootLogger.appenderRef.console.ref = console

View File

@@ -50,27 +50,27 @@ public class LoggingListenerTests extends ESTestCase {
Logger xyzLogger = Loggers.getLogger("xyz");
Logger abcLogger = Loggers.getLogger("abc");
assertEquals(Level.ERROR, abcLogger.getLevel());
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertEquals(Level.INFO, abcLogger.getLevel());
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
loggingListener.testRunStarted(suiteDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
Method method = TestClass.class.getMethod("annotatedTestMethod");
TestLogging annotation = method.getAnnotation(TestLogging.class);
Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation);
loggingListener.testStarted(testDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
loggingListener.testFinished(testDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
loggingListener.testRunFinished(new Result());
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
}
public void testCustomLevelPerClass() throws Exception {
@@ -81,24 +81,24 @@ public class LoggingListenerTests extends ESTestCase {
Logger abcLogger = Loggers.getLogger("abc");
Logger xyzLogger = Loggers.getLogger("xyz");
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
loggingListener.testRunStarted(suiteDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test");
loggingListener.testStarted(testDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testFinished(testDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testRunFinished(new Result());
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
}
public void testCustomLevelPerClassAndPerMethod() throws Exception {
@@ -109,10 +109,10 @@ public class LoggingListenerTests extends ESTestCase {
Logger abcLogger = Loggers.getLogger("abc");
Logger xyzLogger = Loggers.getLogger("xyz");
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
loggingListener.testRunStarted(suiteDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
Method method = TestClass.class.getMethod("annotatedTestMethod");
@@ -123,7 +123,7 @@
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testFinished(testDescription);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
Method method2 = TestClass.class.getMethod("annotatedTestMethod2");
@@ -134,12 +134,12 @@
assertThat(abcLogger.getLevel(), equalTo(Level.TRACE));
loggingListener.testFinished(testDescription2);
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.WARN));
loggingListener.testRunFinished(new Result());
assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
}
/**