Merge branch 'master' into feature/rank-eval
commit 11116db337
@@ -212,7 +212,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
 indices = NodeIndicesStats.readIndicesStats(in);
 }
 if (in.readBoolean()) {
-os = OsStats.readOsStats(in);
+os = new OsStats(in);
 }
 if (in.readBoolean()) {
 process = ProcessStats.readProcessStats(in);

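The switch from `OsStats.readOsStats(in)` to `new OsStats(in)` is the pattern that recurs throughout this merge: stats classes move from `Streamable` (a mutable object filled in by `readFrom`) to `Writeable` (final fields assigned in a constructor that takes a `StreamInput`). A minimal sketch of the pattern on a hypothetical two-field class, not a class from this commit:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Hypothetical example of the Writeable pattern used in this merge:
// the object is immutable, unlike Streamable's mutable readFrom(StreamInput).
public class SampleStats implements Writeable {

    private final long timestamp;
    private final String name;

    public SampleStats(long timestamp, String name) {
        this.timestamp = timestamp;
        this.name = name;
    }

    // Deserializing constructor: reads fields in the same order writeTo writes them.
    public SampleStats(StreamInput in) throws IOException {
        this.timestamp = in.readVLong();
        this.name = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(timestamp);
        out.writeString(name);
    }
}
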
@@ -87,7 +87,7 @@ public class ClusterStatsNodes implements ToXContent {
 }
 }
 this.counts = new Counts(nodeInfos);
-this.os = new OsStats(nodeInfos);
+this.os = new OsStats(nodeInfos, nodeStats);
 this.process = new ProcessStats(nodeStats);
 this.jvm = new JvmStats(nodeInfos, nodeStats);
 this.networkTypes = new NetworkTypes(nodeInfos);

@@ -226,11 +226,12 @@ public class ClusterStatsNodes implements ToXContent {
 final int availableProcessors;
 final int allocatedProcessors;
 final ObjectIntHashMap<String> names;
+final org.elasticsearch.monitor.os.OsStats.Mem mem;

 /**
 * Build the stats from information about each node.
 */
-private OsStats(List<NodeInfo> nodeInfos) {
+private OsStats(List<NodeInfo> nodeInfos, List<NodeStats> nodeStatsList) {
 this.names = new ObjectIntHashMap<>();
 int availableProcessors = 0;
 int allocatedProcessors = 0;

@@ -244,6 +245,22 @@ public class ClusterStatsNodes implements ToXContent {
 }
 this.availableProcessors = availableProcessors;
 this.allocatedProcessors = allocatedProcessors;
+
+long totalMemory = 0;
+long freeMemory = 0;
+for (NodeStats nodeStats : nodeStatsList) {
+if (nodeStats.getOs() != null) {
+long total = nodeStats.getOs().getMem().getTotal().bytes();
+if (total > 0) {
+totalMemory += total;
+}
+long free = nodeStats.getOs().getMem().getFree().bytes();
+if (free > 0) {
+freeMemory += free;
+}
+}
+}
+this.mem = new org.elasticsearch.monitor.os.OsStats.Mem(totalMemory, freeMemory);
 }

 public int getAvailableProcessors() {

@@ -254,6 +271,10 @@ public class ClusterStatsNodes implements ToXContent {
 return allocatedProcessors;
 }

+public org.elasticsearch.monitor.os.OsStats.Mem getMem() {
+return mem;
+}
+
 static final class Fields {
 static final String AVAILABLE_PROCESSORS = "available_processors";
 static final String ALLOCATED_PROCESSORS = "allocated_processors";

@@ -274,6 +295,7 @@ public class ClusterStatsNodes implements ToXContent {
 builder.endObject();
 }
 builder.endArray();
+mem.toXContent(builder, params);
 return builder;
 }
 }

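The aggregation loop added to `ClusterStatsNodes.OsStats` only adds positive samples, because the OS probes report `-1` when a metric is unavailable on a node. A standalone sketch of that guard (a hypothetical helper, under the same `-1`-sentinel assumption):

// Hypothetical helper mirroring the guard above: probes report -1 for
// metrics they cannot read, so only positive samples contribute to the sum.
static long sumAvailable(long[] samples) {
    long sum = 0;
    for (long sample : samples) {
        if (sample > 0) {
            sum += sample;
        }
    }
    return sum;
}
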
@@ -92,7 +92,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
 @Override
 protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
 NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
-NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
+NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
 List<ShardStats> shardsStats = new ArrayList<>();
 for (IndexService indexService : indicesService) {
 for (IndexShard indexShard : indexService) {

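The only change here is the second argument flipping from `false` to `true`. Based on the surrounding OsStats work, that position is assumed to be the `os` flag of `NodeService.stats(...)`; the annotation below is a hypothetical reading of the positional booleans, not a signature confirmed by this diff:

NodeStats nodeStats = nodeService.stats(
        CommonStatsFlags.NONE,
        true,   // os: now requested, so cluster stats can aggregate per-node memory
        true,   // process
        true,   // jvm
        false,  // threadPool
        true,   // fs
        false, false, false, false, false, false); // remaining flags stay off
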
@@ -50,24 +50,16 @@ import java.util.Set;

 public class LogConfigurator {

-static {
-// we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
-final ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
-builder.setStatusLevel(Level.ERROR);
-Configurator.initialize(builder.build());
-}
-
-/**
- * for triggering class initialization
- */
-public static void init() {
-}
-
 public static void configure(final Environment environment, final boolean resolveConfig) throws IOException {
 final Settings settings = environment.settings();

 setLogConfigurationSystemProperty(environment, settings);

+// we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
+final ConfigurationBuilder<BuiltConfiguration> builder = ConfigurationBuilderFactory.newConfigurationBuilder();
+builder.setStatusLevel(Level.ERROR);
+Configurator.initialize(builder.build());
+
 final LoggerContext context = (LoggerContext) LogManager.getContext(false);

 if (resolveConfig) {

@@ -43,11 +43,6 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
 */
 public class Loggers {

-static {
-// ensure that the status logger is configured before we touch any loggers
-LogConfigurator.init();
-}
-
 private static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");

 public static final String SPACE = " ";

@@ -300,16 +300,7 @@ public abstract class AbstractXContentParser implements XContentParser {
 } else if (token == XContentParser.Token.VALUE_STRING) {
 return parser.text();
 } else if (token == XContentParser.Token.VALUE_NUMBER) {
-XContentParser.NumberType numberType = parser.numberType();
-if (numberType == XContentParser.NumberType.INT) {
-return parser.intValue();
-} else if (numberType == XContentParser.NumberType.LONG) {
-return parser.longValue();
-} else if (numberType == XContentParser.NumberType.FLOAT) {
-return parser.floatValue();
-} else if (numberType == XContentParser.NumberType.DOUBLE) {
-return parser.doubleValue();
-}
+return parser.numberValue();
 } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
 return parser.booleanValue();
 } else if (token == XContentParser.Token.START_OBJECT) {

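Collapsing the per-type branches into a single `parser.numberValue()` is what lets values outside the primitive ranges (for example a `BigInteger` larger than `Long.MAX_VALUE`) survive `parser.map()`; the `doTestBigInteger` helper added further down exercises exactly that. A small sketch of the effect, written in the style of those tests and assuming the same imports they add:

// Sketch only: mirrors the doTestBigInteger helper added below.
public void testBigIntegerSurvivesMap() throws Exception {
    // 2^63 does not fit in a long, so numberValue() hands back a BigInteger.
    byte[] json = "{\"bigint\": 9223372036854775808}".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    XContentParser parser = XContentType.JSON.xContent().createParser(json);
    java.util.Map<String, Object> map = parser.map();
    assertEquals(new java.math.BigInteger("9223372036854775808"), map.get("bigint"));
}
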
@@ -173,23 +173,10 @@ public class OsProbe {
 }

 public OsStats osStats() {
-OsStats stats = new OsStats();
-stats.timestamp = System.currentTimeMillis();
-stats.cpu = new OsStats.Cpu();
-stats.cpu.percent = getSystemCpuPercent();
-stats.cpu.loadAverage = getSystemLoadAverage();
-
-OsStats.Mem mem = new OsStats.Mem();
-mem.total = getTotalPhysicalMemorySize();
-mem.free = getFreePhysicalMemorySize();
-stats.mem = mem;
-
-OsStats.Swap swap = new OsStats.Swap();
-swap.total = getTotalSwapSpaceSize();
-swap.free = getFreeSwapSpaceSize();
-stats.swap = swap;
-
-return stats;
+OsStats.Cpu cpu = new OsStats.Cpu(getSystemCpuPercent(), getSystemLoadAverage());
+OsStats.Mem mem = new OsStats.Mem(getTotalPhysicalMemorySize(), getFreePhysicalMemorySize());
+OsStats.Swap swap = new OsStats.Swap(getTotalSwapSpaceSize(), getFreeSwapSpaceSize());
+return new OsStats(System.currentTimeMillis(), cpu, mem, swap);
 }

 /**

@@ -21,28 +21,42 @@ package org.elasticsearch.monitor.os;

 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;

 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Objects;

 /**
 *
 */
-public class OsStats implements Streamable, ToXContent {
+public class OsStats implements Writeable, ToXContent {

-long timestamp;
+private final long timestamp;
+private final Cpu cpu;
+private final Mem mem;
+private final Swap swap;

-Cpu cpu = null;
+public OsStats(long timestamp, Cpu cpu, Mem mem, Swap swap) {
+this.timestamp = timestamp;
+this.cpu = Objects.requireNonNull(cpu, "cpu must not be null");
+this.mem = Objects.requireNonNull(mem, "mem must not be null");
+this.swap = Objects.requireNonNull(swap, "swap must not be null");
+}

-Mem mem = null;
+public OsStats(StreamInput in) throws IOException {
+this.timestamp = in.readVLong();
+this.cpu = new Cpu(in);
+this.mem = new Mem(in);
+this.swap = new Swap(in);
+}

-Swap swap = null;
-
-OsStats() {
+@Override
+public void writeTo(StreamOutput out) throws IOException {
+out.writeVLong(timestamp);
+cpu.writeTo(out);
+mem.writeTo(out);
+swap.writeTo(out);
 }

 public long getTimestamp() {

@@ -65,9 +79,9 @@ public class OsStats implements Streamable, ToXContent {
 static final String CPU = "cpu";
 static final String PERCENT = "percent";
 static final String LOAD_AVERAGE = "load_average";
-static final String LOAD_AVERAGE_1M = new String("1m");
-static final String LOAD_AVERAGE_5M = new String("5m");
-static final String LOAD_AVERAGE_15M = new String("15m");
+static final String LOAD_AVERAGE_1M = "1m";
+static final String LOAD_AVERAGE_5M = "5m";
+static final String LOAD_AVERAGE_15M = "15m";

 static final String MEM = "mem";
 static final String SWAP = "swap";

@@ -86,105 +100,29 @@ public class OsStats implements Streamable, ToXContent {
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
 builder.startObject(Fields.OS);
 builder.field(Fields.TIMESTAMP, getTimestamp());
-if (cpu != null) {
-builder.startObject(Fields.CPU);
-builder.field(Fields.PERCENT, cpu.getPercent());
-if (cpu.getLoadAverage() != null && Arrays.stream(cpu.getLoadAverage()).anyMatch(load -> load != -1)) {
-builder.startObject(Fields.LOAD_AVERAGE);
-if (cpu.getLoadAverage()[0] != -1) {
-builder.field(Fields.LOAD_AVERAGE_1M, cpu.getLoadAverage()[0]);
-}
-if (cpu.getLoadAverage()[1] != -1) {
-builder.field(Fields.LOAD_AVERAGE_5M, cpu.getLoadAverage()[1]);
-}
-if (cpu.getLoadAverage()[2] != -1) {
-builder.field(Fields.LOAD_AVERAGE_15M, cpu.getLoadAverage()[2]);
-}
-builder.endObject();
-}
-builder.endObject();
-}
-
-if (mem != null) {
-builder.startObject(Fields.MEM);
-builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, mem.getTotal());
-builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, mem.getFree());
-builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, mem.getUsed());
-
-builder.field(Fields.FREE_PERCENT, mem.getFreePercent());
-builder.field(Fields.USED_PERCENT, mem.getUsedPercent());
-
-builder.endObject();
-}
-
-if (swap != null) {
-builder.startObject(Fields.SWAP);
-builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, swap.getTotal());
-builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, swap.getFree());
-builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, swap.getUsed());
-builder.endObject();
-}
-
+cpu.toXContent(builder, params);
+mem.toXContent(builder, params);
+swap.toXContent(builder, params);
 builder.endObject();
 return builder;
 }

-public static OsStats readOsStats(StreamInput in) throws IOException {
-OsStats stats = new OsStats();
-stats.readFrom(in);
-return stats;
-}
+public static class Cpu implements Writeable, ToXContent {

-@Override
-public void readFrom(StreamInput in) throws IOException {
-timestamp = in.readVLong();
-cpu = in.readOptionalStreamable(Cpu::new);
-if (in.readBoolean()) {
-mem = Mem.readMem(in);
-}
-if (in.readBoolean()) {
-swap = Swap.readSwap(in);
-}
-}
+private final short percent;
+private final double[] loadAverage;

-@Override
-public void writeTo(StreamOutput out) throws IOException {
-out.writeVLong(timestamp);
-out.writeOptionalStreamable(cpu);
-if (mem == null) {
-out.writeBoolean(false);
-} else {
-out.writeBoolean(true);
-mem.writeTo(out);
-}
-if (swap == null) {
-out.writeBoolean(false);
-} else {
-out.writeBoolean(true);
-swap.writeTo(out);
-}
-}
-
-public static class Cpu implements Streamable {
-
-short percent = -1;
-double[] loadAverage = null;
-
-Cpu() {}
-
-public static Cpu readCpu(StreamInput in) throws IOException {
-Cpu cpu = new Cpu();
-cpu.readFrom(in);
-return cpu;
+public Cpu(short systemCpuPercent, double[] systemLoadAverage) {
+this.percent = systemCpuPercent;
+this.loadAverage = systemLoadAverage;
 }

-@Override
-public void readFrom(StreamInput in) throws IOException {
-percent = in.readShort();
+public Cpu(StreamInput in) throws IOException {
+this.percent = in.readShort();
 if (in.readBoolean()) {
-loadAverage = in.readDoubleArray();
+this.loadAverage = in.readDoubleArray();
 } else {
-loadAverage = null;
+this.loadAverage = null;
 }
 }

@@ -206,12 +144,49 @@ public class OsStats implements Streamable, ToXContent {
 public double[] getLoadAverage() {
 return loadAverage;
 }

+@Override
+public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+builder.startObject(Fields.CPU);
+builder.field(Fields.PERCENT, getPercent());
+if (getLoadAverage() != null && Arrays.stream(getLoadAverage()).anyMatch(load -> load != -1)) {
+builder.startObject(Fields.LOAD_AVERAGE);
+if (getLoadAverage()[0] != -1) {
+builder.field(Fields.LOAD_AVERAGE_1M, getLoadAverage()[0]);
+}
+if (getLoadAverage()[1] != -1) {
+builder.field(Fields.LOAD_AVERAGE_5M, getLoadAverage()[1]);
+}
+if (getLoadAverage()[2] != -1) {
+builder.field(Fields.LOAD_AVERAGE_15M, getLoadAverage()[2]);
+}
+builder.endObject();
+}
+builder.endObject();
+return builder;
+}
 }

-public static class Swap implements Streamable {
+public static class Swap implements Writeable, ToXContent {

-long total = -1;
-long free = -1;
+private final long total;
+private final long free;

+public Swap(long total, long free) {
+this.total = total;
+this.free = free;
+}
+
+public Swap(StreamInput in) throws IOException {
+this.total = in.readLong();
+this.free = in.readLong();
+}
+
+@Override
+public void writeTo(StreamOutput out) throws IOException {
+out.writeLong(total);
+out.writeLong(free);
+}
+
 public ByteSizeValue getFree() {
 return new ByteSizeValue(free);

@@ -225,40 +200,30 @@ public class OsStats implements Streamable, ToXContent {
 return new ByteSizeValue(total);
 }

-public static Swap readSwap(StreamInput in) throws IOException {
-Swap swap = new Swap();
-swap.readFrom(in);
-return swap;
-}
-
-@Override
-public void readFrom(StreamInput in) throws IOException {
-total = in.readLong();
-free = in.readLong();
-}
-
 @Override
-public void writeTo(StreamOutput out) throws IOException {
-out.writeLong(total);
-out.writeLong(free);
+public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+builder.startObject(Fields.SWAP);
+builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal());
+builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, getFree());
+builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, getUsed());
+builder.endObject();
+return builder;
 }
 }

-public static class Mem implements Streamable {
+public static class Mem implements Writeable, ToXContent {

-long total = -1;
-long free = -1;
+private final long total;
+private final long free;

-public static Mem readMem(StreamInput in) throws IOException {
-Mem mem = new Mem();
-mem.readFrom(in);
-return mem;
+public Mem(long total, long free) {
+this.total = total;
+this.free = free;
 }

-@Override
-public void readFrom(StreamInput in) throws IOException {
-total = in.readLong();
-free = in.readLong();
+public Mem(StreamInput in) throws IOException {
+this.total = in.readLong();
+this.free = in.readLong();
 }

 @Override

@@ -276,7 +241,7 @@ public class OsStats implements Streamable, ToXContent {
 }

 public short getUsedPercent() {
-return calculatePercentage(getUsed().bytes(), getTotal().bytes());
+return calculatePercentage(getUsed().bytes(), total);
 }

 public ByteSizeValue getFree() {

@@ -284,11 +249,23 @@ public class OsStats implements Streamable, ToXContent {
 }

 public short getFreePercent() {
-return calculatePercentage(getFree().bytes(), getTotal().bytes());
+return calculatePercentage(free, total);
 }

+@Override
+public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+builder.startObject(Fields.MEM);
+builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal());
+builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, getFree());
+builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, getUsed());
+builder.field(Fields.FREE_PERCENT, getFreePercent());
+builder.field(Fields.USED_PERCENT, getUsedPercent());
+builder.endObject();
+return builder;
+}
 }

-private static short calculatePercentage(long used, long max) {
+public static short calculatePercentage(long used, long max) {
 return max <= 0 ? 0 : (short) (Math.round((100d * used) / max));
 }
 }

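`calculatePercentage` also changes from `private` to `public` so the integration test below can reuse it. A quick worked check of the rounding, using the memory numbers from the cluster-stats docs sample later in this diff:

// max <= 0 ? 0 : (short) Math.round((100d * used) / max)
OsStats.calculatePercentage(0, 0);                        // 0: guards against divide-by-zero
OsStats.calculatePercentage(81960960L, 17179869184L);     // 0: about 0.48% free rounds down
OsStats.calculatePercentage(17097908224L, 17179869184L);  // 100: about 99.52% used rounds up
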
@@ -21,12 +21,15 @@ package org.elasticsearch.action.admin.cluster.stats;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.monitor.os.OsStats;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

@@ -181,6 +184,20 @@ public class ClusterStatsIT extends ESIntegTestCase {
 assertThat(msg, response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
 assertThat(msg, response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));

+NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats().setOs(true).get();
+long total = 0;
+long free = 0;
+long used = 0;
+for (NodeStats nodeStats : nodesStatsResponse.getNodes()) {
+total += nodeStats.getOs().getMem().getTotal().bytes();
+free += nodeStats.getOs().getMem().getFree().bytes();
+used += nodeStats.getOs().getMem().getUsed().bytes();
+}
+assertEquals(msg, free, response.nodesStats.getOs().getMem().getFree().bytes());
+assertEquals(msg, total, response.nodesStats.getOs().getMem().getTotal().bytes());
+assertEquals(msg, used, response.nodesStats.getOs().getMem().getUsed().bytes());
+assertEquals(msg, OsStats.calculatePercentage(used, total), response.nodesStats.getOs().getMem().getUsedPercent());
+assertEquals(msg, OsStats.calculatePercentage(free, total), response.nodesStats.getOs().getMem().getFreePercent());
 }

 public void testAllocatedProcessors() throws Exception {

@@ -19,13 +19,19 @@

 package org.elasticsearch.common.xcontent;

+import com.fasterxml.jackson.core.JsonGenerator;
+
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.test.ESTestCase;

 import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Map;

 public abstract class BaseXContentTestCase extends ESTestCase {

@@ -156,4 +162,24 @@ public abstract class BaseXContentTestCase extends ESTestCase {
 assertNull(parser.nextToken());

 }

+protected void doTestBigInteger(JsonGenerator generator, ByteArrayOutputStream os) throws Exception {
+// Big integers cannot be handled explicitly, but if some values happen to be big ints,
+// we can still call parser.map() and get the bigint value so that eg. source filtering
+// keeps working
+BigInteger bigInteger = BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE);
+generator.writeStartObject();
+generator.writeFieldName("foo");
+generator.writeString("bar");
+generator.writeFieldName("bigint");
+generator.writeNumber(bigInteger);
+generator.writeEndObject();
+generator.flush();
+byte[] serialized = os.toByteArray();
+
+XContentParser parser = xcontentType().xContent().createParser(serialized);
+Map<String, Object> map = parser.map();
+assertEquals("bar", map.get("foo"));
+assertEquals(bigInteger, map.get("bigint"));
+}
 }

@@ -19,9 +19,14 @@

 package org.elasticsearch.common.xcontent.cbor;

+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
+
 import org.elasticsearch.common.xcontent.BaseXContentTestCase;
 import org.elasticsearch.common.xcontent.XContentType;

+import java.io.ByteArrayOutputStream;
+
 public class CborXContentTests extends BaseXContentTestCase {

 @Override

@@ -29,4 +34,9 @@ public class CborXContentTests extends BaseXContentTestCase {
 return XContentType.CBOR;
 }

+public void testBigInteger() throws Exception {
+ByteArrayOutputStream os = new ByteArrayOutputStream();
+JsonGenerator generator = new CBORFactory().createGenerator(os);
+doTestBigInteger(generator, os);
+}
 }

@@ -19,9 +19,14 @@

 package org.elasticsearch.common.xcontent.json;

+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+
 import org.elasticsearch.common.xcontent.BaseXContentTestCase;
 import org.elasticsearch.common.xcontent.XContentType;

+import java.io.ByteArrayOutputStream;
+
 public class JsonXContentTests extends BaseXContentTestCase {

 @Override

@@ -29,4 +34,9 @@ public class JsonXContentTests extends BaseXContentTestCase {
 return XContentType.JSON;
 }

+public void testBigInteger() throws Exception {
+ByteArrayOutputStream os = new ByteArrayOutputStream();
+JsonGenerator generator = new JsonFactory().createGenerator(os);
+doTestBigInteger(generator, os);
+}
 }

@@ -19,9 +19,14 @@

 package org.elasticsearch.common.xcontent.smile;

+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.smile.SmileFactory;
+
 import org.elasticsearch.common.xcontent.BaseXContentTestCase;
 import org.elasticsearch.common.xcontent.XContentType;

+import java.io.ByteArrayOutputStream;
+
 public class SmileXContentTests extends BaseXContentTestCase {

 @Override

@@ -29,4 +34,9 @@ public class SmileXContentTests extends BaseXContentTestCase {
 return XContentType.SMILE;
 }

+public void testBigInteger() throws Exception {
+ByteArrayOutputStream os = new ByteArrayOutputStream();
+JsonGenerator generator = new SmileFactory().createGenerator(os);
+doTestBigInteger(generator, os);
+}
 }

@@ -19,9 +19,14 @@

 package org.elasticsearch.common.xcontent.yaml;

+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+
 import org.elasticsearch.common.xcontent.BaseXContentTestCase;
 import org.elasticsearch.common.xcontent.XContentType;

+import java.io.ByteArrayOutputStream;
+
 public class YamlXContentTests extends BaseXContentTestCase {

 @Override

@@ -29,4 +34,9 @@ public class YamlXContentTests extends BaseXContentTestCase {
 return XContentType.YAML;
 }

+public void testBigInteger() throws Exception {
+ByteArrayOutputStream os = new ByteArrayOutputStream();
+JsonGenerator generator = new YAMLFactory().createGenerator(os);
+doTestBigInteger(generator, os);
+}
 }

@@ -126,7 +126,6 @@ import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.IntFunction;

 import static java.util.Collections.emptyMap;
 import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;

@@ -172,10 +171,10 @@ public class InternalEngineTests extends ESTestCase {
 codecName = "default";
 }
 defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder()
-.put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
+.put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us
 .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName)
 .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
-.put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD,
+.put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(),
 between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY)))
 .build()); // TODO randomize more settings
 threadPool = new TestThreadPool(getClass().getName());

@@ -49,7 +49,7 @@ public class OsProbeTests extends ESTestCase {
 assertNotNull(stats);
 assertThat(stats.getTimestamp(), greaterThan(0L));
 assertThat(stats.getCpu().getPercent(), anyOf(equalTo((short) -1), is(both(greaterThanOrEqualTo((short) 0)).and(lessThanOrEqualTo((short) 100)))));
-double[] loadAverage = stats.getCpu().loadAverage;
+double[] loadAverage = stats.getCpu().getLoadAverage();
 if (loadAverage != null) {
 assertThat(loadAverage.length, equalTo(3));
 }

@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor.os;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+public class OsStatsTests extends ESTestCase {
+
+public void testSerialization() throws IOException {
+int numLoadAverages = randomIntBetween(1, 5);
+double loadAverages[] = new double[numLoadAverages];
+for (int i = 0; i < loadAverages.length; i++) {
+loadAverages[i] = randomDouble();
+}
+OsStats.Cpu cpu = new OsStats.Cpu(randomShort(), loadAverages);
+OsStats.Mem mem = new OsStats.Mem(randomLong(), randomLong());
+OsStats.Swap swap = new OsStats.Swap(randomLong(), randomLong());
+OsStats osStats = new OsStats(System.currentTimeMillis(), cpu, mem, swap);
+
+try (BytesStreamOutput out = new BytesStreamOutput()) {
+osStats.writeTo(out);
+try (StreamInput in = out.bytes().streamInput()) {
+OsStats deserializedOsStats = new OsStats(in);
+assertEquals(osStats.getTimestamp(), deserializedOsStats.getTimestamp());
+assertEquals(osStats.getCpu().getPercent(), deserializedOsStats.getCpu().getPercent());
+assertArrayEquals(osStats.getCpu().getLoadAverage(), deserializedOsStats.getCpu().getLoadAverage(), 0);
+assertEquals(osStats.getMem().getFree(), deserializedOsStats.getMem().getFree());
+assertEquals(osStats.getMem().getTotal(), deserializedOsStats.getMem().getTotal());
+assertEquals(osStats.getSwap().getFree(), deserializedOsStats.getSwap().getFree());
+assertEquals(osStats.getSwap().getTotal(), deserializedOsStats.getSwap().getTotal());
+}
+}
+}
+}

@@ -29,11 +29,12 @@ appender.deprecation_rolling.name = deprecation_rolling
 appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
 appender.deprecation_rolling.layout.type = PatternLayout
 appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
-appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%d{yyyy-MM-dd}.log
+appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
 appender.deprecation_rolling.policies.type = Policies
-appender.deprecation_rolling.policies.time.type = TimeBasedTriggeringPolicy
-appender.deprecation_rolling.policies.time.interval = 1
-appender.deprecation_rolling.policies.time.modulate = true
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4

 logger.deprecation.name = deprecation
 logger.deprecation.level = warn

@@ -65,18 +65,18 @@ buildRestTests.docs = fileTree(projectDir) {
 Closure setupTwitter = { String name, int count ->
 buildRestTests.setups[name] = '''
 - do:
-indices.create:
-index: twitter
-body:
-settings:
-number_of_shards: 1
-number_of_replicas: 1
+indices.create:
+index: twitter
+body:
+settings:
+number_of_shards: 1
+number_of_replicas: 1
 - do:
-bulk:
-index: twitter
-type: tweet
-refresh: true
-body: |'''
+bulk:
+index: twitter
+type: tweet
+refresh: true
+body: |'''
 for (int i = 0; i < count; i++) {
 String user, text
 if (i == 0) {

@@ -87,12 +87,13 @@ Closure setupTwitter = { String name, int count ->
 text = "some message with the number $i"
 }
 buildRestTests.setups[name] += """
-{"index":{"_id": "$i"}}
-{"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
+{"index":{"_id": "$i"}}
+{"user": "$user", "message": "$text", "date": "2009-11-15T14:12:12", "likes": $i}"""
 }
 }
 setupTwitter('twitter', 5)
 setupTwitter('big_twitter', 120)
+setupTwitter('huge_twitter', 1200)

 buildRestTests.setups['host'] = '''
 # Fetch the http host. We use the host of the master because we know there will always be a master.

@@ -1,6 +1,8 @@
 [[lang-javascript]]
 === JavaScript Language Plugin

+deprecated[5.0.0,JavaScript will be replaced by the new scripting language <<modules-scripting-painless, `Painless`>>]
+
 The JavaScript language plugin enables the use of JavaScript in Elasticsearch
 scripts, via Mozilla's
 https://developer.mozilla.org/en-US/docs/Mozilla/Projects/Rhino[Rhino JavaScript] engine.

@@ -1,6 +1,8 @@
 [[lang-python]]
 === Python Language Plugin

+deprecated[5.0.0,Python will be replaced by the new scripting language <<modules-scripting-painless, `Painless`>>]
+
 The Python language plugin enables the use of Python in Elasticsearch
 scripts, via the http://www.jython.org/[Jython] Java implementation of Python.

@@ -116,7 +116,17 @@ Will return, for example:
 "name": "Mac OS X",
 "count": 1
 }
-]
+],
+"mem" : {
+"total" : "16gb",
+"total_in_bytes" : 17179869184,
+"free" : "78.1mb",
+"free_in_bytes" : 81960960,
+"used" : "15.9gb",
+"used_in_bytes" : 17097908224,
+"free_percent" : 0,
+"used_percent" : 100
+}
 },
 "process": {
 "cpu": {

@@ -424,7 +424,7 @@ supports `refresh`, `wait_for_completion`, `wait_for_active_shards`, `timeout`,

 Sending the `refresh` url parameter will cause all indexes to which the request
 wrote to be refreshed. This is different than the Index API's `refresh`
-parameter which causes just the shard that received the new data to be indexed.
+parameter which causes just the shard that received the new data to be refreshed.

 If the request contains `wait_for_completion=false` then Elasticsearch will
 perform some preflight checks, launch the request, and then return a `task`

@@ -181,4 +181,5 @@ The request returns the following result:
 }
 }
 --------------------------------------------------
+// TESTRESPONSE
 <1> Output only the "keyword" attribute, since "attributes" was specified in the request.

@@ -6,8 +6,10 @@ associated with one or more indices.

 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear'
+POST /twitter/_cache/clear
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 The API, by default, will clear all caches. Specific caches can be cleaned
 explicitly by setting `query`, `fielddata` or `request`.

@@ -24,8 +26,9 @@ call, or even on `_all` the indices.

 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear'
+POST /kimchy,elasticsearch/_cache/clear

-$ curl -XPOST 'http://localhost:9200/_cache/clear'
+POST /_cache/clear
 --------------------------------------------------
-
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]

@@ -5,8 +5,10 @@ The delete index API allows to delete an existing index.

 [source,js]
 --------------------------------------------------
-$ curl -XDELETE 'http://localhost:9200/twitter/'
+DELETE /twitter
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 The above example deletes an index called `twitter`. Specifying an index,
 alias or wildcard expression is required.

@@ -12,8 +12,10 @@ block until the previous force merge is complete.

 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_forcemerge'
+POST /twitter/_forcemerge
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 [float]
 [[forcemerge-parameters]]

@@ -45,7 +47,9 @@ even on `_all` the indices.

 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_forcemerge'
+POST /kimchy,elasticsearch/_forcemerge

-$ curl -XPOST 'http://localhost:9200/_forcemerge'
+POST /_forcemerge
 --------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]

@@ -9,8 +9,10 @@ The following returns the mapping of the field `text` only:

 [source,js]
 --------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/_mapping/tweet/field/text'
+GET /twitter/_mapping/tweet/field/message
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 For which the response is (assuming `text` is a default string field):

@@ -18,18 +20,28 @@ For which the response is (assuming `text` is a default string field):
 --------------------------------------------------
 {
 "twitter": {
-"tweet": {
-"text": {
-"full_name": "text",
-"mapping": {
-"text": { "type": "text" }
+"mappings": {
+"tweet": {
+"message": {
+"full_name": "message",
+"mapping": {
+"message": {
+"type": "text",
+"fields": {
+"keyword": {
+"type": "keyword",
+"ignore_above": 256
+}
+}
+}
+}
+}
+}
 }
 }
 }
 }
 }
 --------------------------------------------------
+
+// TESTRESPONSE

 [float]

@@ -44,12 +56,15 @@ following are some examples:

 [source,js]
 --------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter,kimchy/_mapping/field/message'
+GET /twitter,kimchy/_mapping/field/message

-curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book/field/message,user.id'
+GET /_all/_mapping/tweet,book/field/message,user.id

-curl -XGET 'http://localhost:9200/_all/_mapping/tw*/field/*.id'
+GET /_all/_mapping/tw*/field/*.id
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+// TEST[s/^/PUT kimchy\nPUT book\n/]

 [float]
 === Specifying fields

@@ -1,12 +1,14 @@
 [[indices-get-index]]
 == Get Index

-The get index API allows to retrieve information about one or more indexes.
+The get index API allows to retrieve information about one or more indexes.

 [source,js]
 --------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/'
+GET /twitter
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 The above example gets the information for an index called `twitter`. Specifying an index,
 alias or wildcard expression is required.

@@ -17,13 +19,15 @@ all indices by using `_all` or `*` as index.
 [float]
 === Filtering index information

-The information returned by the get API can be filtered to include only specific features
+The information returned by the get API can be filtered to include only specific features
 by specifying a comma delimited list of features in the URL:

 [source,js]
 --------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/_settings,_mappings'
+GET twitter/_settings,_mappings
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 The above command will only return the settings and mappings for the index called `twitter`.

@@ -6,8 +6,10 @@ index/type.

 [source,js]
 --------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter/_mapping/tweet'
+GET /twitter/_mapping/tweet
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 [float]
 === Multiple Indices and Types

@@ -21,17 +23,21 @@ following are some examples:

 [source,js]
 --------------------------------------------------
-curl -XGET 'http://localhost:9200/_mapping/twitter,kimchy'
+GET /_mapping/tweet,kimchy

-curl -XGET 'http://localhost:9200/_all/_mapping/tweet,book'
+GET /_all/_mapping/tweet,book
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 If you want to get mappings of all indices and types then the following
 two examples are equivalent:

 [source,js]
 --------------------------------------------------
-curl -XGET 'http://localhost:9200/_all/_mapping'
+GET /_all/_mapping

-curl -XGET 'http://localhost:9200/_mapping'
+GET /_mapping
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

@@ -5,8 +5,10 @@ The get settings API allows to retrieve settings of index/indices:

 [source,js]
 --------------------------------------------------
-$ curl -XGET 'http://localhost:9200/twitter/_settings'
+GET /twitter/_settings
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 [float]
 === Multiple Indices and Types

@@ -20,12 +22,15 @@ Wildcard expressions are also supported. The following are some examples:

 [source,js]
 --------------------------------------------------
-curl -XGET 'http://localhost:9200/twitter,kimchy/_settings'
+GET /twitter,kimchy/_settings

-curl -XGET 'http://localhost:9200/_all/_settings'
+GET /_all/_settings

-curl -XGET 'http://localhost:9200/2013-*/_settings'
+GET /log_2013_*/_settings
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+// TEST[s/^/PUT kimchy\nPUT log_2013_01_01\n/]

 [float]
 === Filtering settings by name

@@ -12,10 +12,12 @@ example:

 [source,js]
 --------------------------------------------------
-curl -XPOST 'localhost:9200/my_index/_close'
+POST /my_index/_close

-curl -XPOST 'localhost:9200/my_index/_open'
+POST /my_index/_open
 --------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_index\n/]

 It is possible to open and close multiple indices. An error will be thrown
 if the request explicitly refers to a missing index. This behaviour can be

@@ -8,15 +8,19 @@ For example, the following command would show recovery information for the indic

 [source,js]
 --------------------------------------------------
-curl -XGET http://localhost:9200/index1,index2/_recovery
+GET index1,index2/_recovery?human
 --------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT index1\nPUT index2\n/]

 To see cluster-wide recovery status simply leave out the index names.

 [source,js]
 --------------------------------------------------
-curl -XGET http://localhost:9200/_recovery?pretty&human
+GET /_recovery?human
 --------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT index1\n{"settings": {"index.number_of_shards": 1}}\n/]

 Response:
 [source,js]

@@ -30,7 +34,7 @@ Response:
 "primary" : true,
 "start_time" : "2014-02-24T12:15:59.716",
 "start_time_in_millis": 1393244159716,
-"total_time" : "2.9m"
+"total_time" : "2.9m",
 "total_time_in_millis" : 175576,
 "source" : {
 "repository" : "my_repository",

@@ -45,7 +49,7 @@ Response:
 },
 "index" : {
 "size" : {
-"total" : "75.4mb"
+"total" : "75.4mb",
 "total_in_bytes" : 79063092,
 "reused" : "0b",
 "reused_in_bytes" : 0,

@@ -68,7 +72,7 @@ Response:
 "percent" : "100.0%",
 "total_on_start" : 0,
 "total_time" : "0s",
-"total_time_in_millis" : 0
+"total_time_in_millis" : 0,
 },
 "start" : {
 "check_index_time" : "0s",

@@ -80,6 +84,7 @@ Response:
 }
 }
 --------------------------------------------------
+// We should really assert that this is up to date but that is hard!

 The above response shows a single index recovering a single shard. In this case, the source of the recovery is a snapshot repository
 and the target of the recovery is the node with name "my_es_node".

@@ -90,7 +95,7 @@ In some cases a higher level of detail may be preferable. Setting "detailed=true

 [source,js]
 --------------------------------------------------
-curl -XGET http://localhost:9200/_recovery?pretty&human&detailed=true
+GET _recovery?human&detailed=true
 --------------------------------------------------

 Response:

@@ -170,6 +175,7 @@ Response:
 }
 }
 --------------------------------------------------
+// We should really assert that this is up to date but that is hard!

 This response shows a detailed listing (truncated for brevity) of the actual files recovered and their sizes.

@@ -9,8 +9,10 @@ refresh is scheduled periodically.

 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/twitter/_refresh'
+POST /twitter/_refresh
 --------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]

 [float]
 === Multi Index

@@ -20,7 +22,9 @@ call, or even on `_all` the indices.

 [source,js]
 --------------------------------------------------
-$ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_refresh'
+POST /kimchy,elasticsearch/_refresh

-$ curl -XPOST 'http://localhost:9200/_refresh'
+POST /_refresh
 --------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/]

@@ -19,7 +19,9 @@ PUT /logs-000001 <1>
 }
 }

-POST logs_write/_rollover <2>
+# Add > 1000 documents to logs-000001
+
+POST /logs_write/_rollover <2>
 {
 "conditions": {
 "max_age": "7d",

@@ -28,6 +30,8 @@ POST logs_write/_rollover <2>
 }
 --------------------------------------------------
 // CONSOLE
+// TEST[setup:huge_twitter]
+// TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/]
 <1> Creates an index called `logs-000001` with the alias `logs_write`.
 <2> If the index pointed to by `logs_write` was created 7 or more days ago, or
 contains 1,000 or more documents, then the `logs-000002` index is created

@@ -38,6 +42,8 @@ The above request might return the following response:
 [source,js]
 --------------------------------------------------
 {
+"acknowledged": true,
+"shards_acknowledged": true,
 "old_index": "logs-000001",
 "new_index": "logs-000002",
 "rolled_over": true, <1>

@@ -48,6 +54,7 @@ The above request might return the following response:
 }
 }
 --------------------------------------------------
+// TESTRESPONSE
 <1> Whether the index was rolled over.
 <2> Whether the rollover was dry run.
 <3> The result of each condition.

@@ -65,9 +72,16 @@ the new index as follows:

 [source,js]
 --------------------------------------------------
-POST my_alias/_rollover/my_new_index_name
-{...}
+POST /my_alias/_rollover/my_new_index_name
+{
+"conditions": {
+"max_age": "7d",
+"max_docs": 1000
+}
+}
 --------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/]

 [float]
 === Defining the new index

@@ -75,7 +89,7 @@ POST my_alias/_rollover/my_new_index_name
 The settings, mappings, and aliases for the new index are taken from any
 matching <<indices-templates,index templates>>. Additionally, you can specify
 `settings`, `mappings`, and `aliases` in the body of the request, just like the
-<<indices-create-index,create index>> API. Values specified in the request
+<<indices-create-index,create index>> API. Values specified in the request
 override any values set in matching index templates. For example, the following
 `rollover` request overrides the `index.number_of_shards` setting:

@@ -88,14 +102,14 @@ PUT /logs-000001
 }
 }

-POST logs_write/_rollover
+POST /logs_write/_rollover
 {
 "conditions" : {
 "max_age": "7d",
 "max_docs": 1000
 },
-"settings": {
-"index.number_of_shards": 2
+"settings": {
+"index.number_of_shards": 2
 }
 }
 --------------------------------------------------

@@ -116,7 +130,7 @@ PUT /logs-000001
 }
 }

-POST logs_write/_rollover?dry_run
+POST /logs_write/_rollover?dry_run
 {
 "conditions" : {
 "max_age": "7d",

@@ -129,6 +143,6 @@ POST logs_write/_rollover?dry_run
 [float]
 === Wait For Active Shards

-Because the rollover operation creates a new index to rollover to, the
-<<create-index-wait-for-active-shards,wait for active shards>> setting on
+Because the rollover operation creates a new index to rollover to, the
+<<create-index-wait-for-active-shards,wait for active shards>> setting on
 index creation applies to the rollover action as well.

@@ -349,7 +349,9 @@ The `setQuery(BytesReference)` method have been removed in favor of using `setQu
 ==== ClusterStatsResponse

 Removed the `getMemoryAvailable` method from `OsStats`, which could be previously accessed calling
-`clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`.
+`clusterStatsResponse.getNodesStats().getOs().getMemoryAvailable()`. It is now replaced with
+`clusterStatsResponse.getNodesStats().getOs().getMem()` which exposes `getTotal()`, `getFree()`,
+`getUsed()`, `getFreePercent()` and `getUsedPercent()`.

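A sketch of the replacement call path described above, assuming a connected `Client` named `client` (the method names are the ones listed in this note and in the `ClusterStatsNodes` change earlier in the diff):

ClusterStatsResponse response = client.admin().cluster().prepareClusterStats().get();
org.elasticsearch.monitor.os.OsStats.Mem mem = response.getNodesStats().getOs().getMem();
ByteSizeValue total = mem.getTotal();      // replaces the removed getMemoryAvailable()
short usedPercent = mem.getUsedPercent();
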
==== setRefresh(boolean) has been removed

@@ -29,9 +29,9 @@ document exists in an index. The old endpoint will keep working until 6.0.

 ==== Removed `mem` section from `/_cluster/stats` response

-The `mem` section contained only one value, the total memory available
-throughout all nodes in the cluster. The section was removed as it didn't
-prove useful.
+The `mem` section contained only the `total` value, which was actually the
+memory available throughout all nodes in the cluster. The section now contains
+`total`, `free`, `used`, `used_percent` and `free_percent`.

 ==== Revised node roles aggregate returned by `/_cluster/stats`

@@ -5,12 +5,7 @@ The scripting module enables you to use scripts to evaluate custom
 expressions. For example, you could use a script to return "script fields"
 as part of a search request or evaluate a custom score for a query.

-TIP: Elasticsearch now has a built-in scripting language called _Painless_
-that provides a more secure alternative for implementing
-scripts for Elasticsearch. We encourage you to try it out --
-for more information, see <<modules-scripting-painless, Painless Scripting Language>>.
-
-The default scripting language is http://groovy-lang.org/[groovy].
+The default scripting language is <<modules-scripting-painless, `Painless`>>.
 Additional `lang` plugins enable you to run scripts written in other languages.
 Everywhere a script can be used, you can include a `lang` parameter
 to specify the language of the script.

@@ -1,6 +1,8 @@
 [[modules-scripting-groovy]]
 === Groovy Scripting Language

+deprecated[5.0.0,Groovy will be replaced by the new scripting language <<modules-scripting-painless, `Painless`>>]
+
 Groovy is the default scripting language available in Elasticsearch. Although
 limited by the <<java-security-manager,Java Security Manager>>, it is not a
 sandboxed language and only `file` scripts may be used by default.

@@ -12,7 +12,7 @@ the same pattern:
 "params": { ... } <3>
 }
 -------------------------------------
-<1> The language the script is written in, which defaults to `groovy`.
+<1> The language the script is written in, which defaults to `painless`.
 <2> The script itself which may be specified as `inline`, `id`, or `file`.
 <3> Any named parameters that should be passed into the script.

@@ -59,6 +59,6 @@ for appears.

 For better solutions for _search-as-you-type_ see the
 <<search-suggesters-completion,completion suggester>> and
-{guide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type].
+{defguide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type].

 ===================================================

@@ -174,5 +174,9 @@ This will create a daily rolling deprecation log file in your log directory.
 Check this file regularly, especially when you intend to upgrade to a new
 major version.

+The default logging configuration has set the roll policy for the deprecation
+logs to roll and compress after 1 GB, and to preserve a maximum of five log
+files (four rolled logs, and the active log).
+
 You can disable it in the `config/log4j2.properties` file by setting the deprecation
 log level to `info`.

@@ -96,6 +96,9 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri

    public GroovyScriptEngineService(Settings settings) {
        super(settings);

        deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead");

        // Creates the classloader here in order to isolate Groovy-land code
        final SecurityManager sm = System.getSecurityManager();
        if (sm != null) {

@@ -182,6 +185,8 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri

    @Override
    public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> vars) {
        deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead");

        try {
            Map<String, Object> allVars = new HashMap<>();
            if (vars != null) {

@@ -195,6 +200,8 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri

    @Override
    public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
        deprecationLogger.deprecated("[groovy] scripts are deprecated, use [painless] scripts instead");

        return new SearchScript() {

            @Override

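The `deprecationLogger` used in these hunks is not declared in the diff; since the service extends `AbstractComponent`, it most likely comes from that base class. Declared standalone, it would look roughly like this (hedged sketch, constructor argument assumed):

-------------------------------------
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;

// Hedged sketch: a DeprecationLogger wrapping the class's regular logger.
// The JavaScript and Python engine services below follow the same pattern.
private static final DeprecationLogger deprecationLogger =
        new DeprecationLogger(Loggers.getLogger(GroovyScriptEngineService.class));
-------------------------------------
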
@@ -25,6 +25,7 @@ import org.elasticsearch.SpecialPermission;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ClassPermission;
import org.elasticsearch.script.CompiledScript;

@@ -138,6 +139,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements
    public JavaScriptScriptEngineService(Settings settings) {
        super(settings);

        deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");

        Context ctx = Context.enter();
        try {
            globalScope = ctx.initStandardObjects(null, true);

@@ -173,6 +176,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements

    @Override
    public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map<String, Object> vars) {
        deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");

        Context ctx = Context.enter();
        try {
            Scriptable scope = ctx.newObject(globalScope);

@@ -192,6 +197,8 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements

    @Override
    public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
        deprecationLogger.deprecated("[javascript] scripts are deprecated, use [painless] scripts instead");

        Context ctx = Context.enter();
        try {
            final Scriptable scope = ctx.newObject(globalScope);

@@ -62,6 +62,8 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri
    public PythonScriptEngineService(Settings settings) {
        super(settings);

        deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead.");

        // classloader created here
        final SecurityManager sm = System.getSecurityManager();
        if (sm != null) {

@@ -118,11 +120,15 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri

    @Override
    public ExecutableScript executable(CompiledScript compiledScript, Map<String, Object> vars) {
        deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead");

        return new PythonExecutableScript((PyCode) compiledScript.compiled(), vars);
    }

    @Override
    public SearchScript search(final CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
        deprecationLogger.deprecated("[python] scripts are deprecated, use [painless] scripts instead");

        return new SearchScript() {
            @Override
            public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {

@@ -19,6 +19,11 @@
  - gte: { nodes.count.ingest: 0}
  - gte: { nodes.count.coordinating_only: 0}
  - is_true: nodes.os
  - is_true: nodes.os.mem.total_in_bytes
  - is_true: nodes.os.mem.free_in_bytes
  - is_true: nodes.os.mem.used_in_bytes
  - is_true: nodes.os.mem.free_percent
  - is_true: nodes.os.mem.used_percent
  - is_true: nodes.process
  - is_true: nodes.jvm
  - is_true: nodes.fs

@@ -1,11 +1,22 @@
---
setup:
  - do:
      indices.create:
        index: test
        body:
          mappings:
            test:
              properties:
                bigint:
                  type: keyword

  - do:
      index:
        index: test_1
        type: test
        id: 1
        body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 }
        body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1, "bigint": 72057594037927936 }
  - do:
      indices.refresh: {}

@@ -90,6 +101,17 @@ setup:
  - match: { hits.hits.0._source.include.field1: v1 }
  - is_false: hits.hits.0._source.include.field2

---
"_source include on bigint":
  - do:
      search:
        body:
          _source:
            includes: bigint
          query: { match_all: {} }
  - match: { hits.hits.0._source.bigint: 72057594037927936 }
  - is_false: hits.hits.0._source.include.field2

---
"fields in body":
  - do:

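The test value 72057594037927936 is 2^56: beyond 2^53 a `double` can no longer represent every `long`, so if `_source` filtering re-parsed the stored JSON number through a double, a value like this could silently change. A small plain-Java sketch of the failure mode the test guards against:

-------------------------------------
// 2^53 is the last power of two up to which every long survives a
// round-trip through double; 2^56 + 1 does not.
long bigint = 72057594037927936L;          // 2^56, itself exactly representable
long offByOne = bigint + 1;                // 2^56 + 1 is not

System.out.println((long) (double) bigint == bigint);       // true
System.out.println((long) (double) offByOne == offByOne);   // false: rounds back to 2^56
-------------------------------------
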
@@ -65,8 +65,6 @@ public class BootstrapForTesting {
    // without making things complex???

    static {
        LogConfigurator.init();

        // make sure java.io.tmpdir exists always (in case code uses it in a static initializer)
        Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"),
                "please set ${java.io.tmpdir} in pom.xml"));

@@ -75,8 +75,10 @@ public class ClientYamlTestExecutionContext {
            response = e.getRestTestResponse();
            throw e;
        } finally {
            // if we hit a bad exception the response is null
            Object responseBody = response != null ? response.getBody() : null;
            // we always stash the last response body
            stash.stashValue("body", response.getBody());
            stash.stashValue("body", responseBody);
        }
    }

@@ -1,9 +0,0 @@
tests.es.logger.level=INFO
log4j.rootLogger=${tests.es.logger.level}, out

log4j.logger.org.apache.http=INFO, out
log4j.additivity.org.apache.http=false

log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n

@@ -0,0 +1,9 @@
status = error

appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console

@@ -50,27 +50,27 @@ public class LoggingListenerTests extends ESTestCase {
        Logger xyzLogger = Loggers.getLogger("xyz");
        Logger abcLogger = Loggers.getLogger("abc");

        assertEquals(Level.ERROR, abcLogger.getLevel());
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertEquals(Level.INFO, abcLogger.getLevel());
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
        loggingListener.testRunStarted(suiteDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));

        Method method = TestClass.class.getMethod("annotatedTestMethod");
        TestLogging annotation = method.getAnnotation(TestLogging.class);
        Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation);
        loggingListener.testStarted(testDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.TRACE));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));

        loggingListener.testFinished(testDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));

        loggingListener.testRunFinished(new Result());
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
    }

    public void testCustomLevelPerClass() throws Exception {

@@ -81,24 +81,24 @@ public class LoggingListenerTests extends ESTestCase {
        Logger abcLogger = Loggers.getLogger("abc");
        Logger xyzLogger = Loggers.getLogger("xyz");

        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
        loggingListener.testRunStarted(suiteDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

        Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test");
        loggingListener.testStarted(testDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

        loggingListener.testFinished(testDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

        loggingListener.testRunFinished(new Result());
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
    }

    public void testCustomLevelPerClassAndPerMethod() throws Exception {

@@ -109,10 +109,10 @@ public class LoggingListenerTests extends ESTestCase {
        Logger abcLogger = Loggers.getLogger("abc");
        Logger xyzLogger = Loggers.getLogger("xyz");

        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
        loggingListener.testRunStarted(suiteDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

        Method method = TestClass.class.getMethod("annotatedTestMethod");

@@ -123,7 +123,7 @@ public class LoggingListenerTests extends ESTestCase {
        assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

        loggingListener.testFinished(testDescription);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

        Method method2 = TestClass.class.getMethod("annotatedTestMethod2");

@@ -134,12 +134,12 @@ public class LoggingListenerTests extends ESTestCase {
        assertThat(abcLogger.getLevel(), equalTo(Level.TRACE));

        loggingListener.testFinished(testDescription2);
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.WARN));

        loggingListener.testRunFinished(new Result());
        assertThat(xyzLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(abcLogger.getLevel(), equalTo(Level.ERROR));
        assertThat(xyzLogger.getLevel(), equalTo(Level.INFO));
        assertThat(abcLogger.getLevel(), equalTo(Level.INFO));
    }

    /**

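These assertions revolve around the `@TestLogging` annotation that `LoggingListener` reads to switch logger levels around individual tests (and restore them afterwards, now to the INFO default rather than ERROR). Inferred from the assertions above, the annotated helper methods would look roughly like this (hedged sketch; the "logger:LEVEL" string syntax is assumed):

-------------------------------------
// Hypothetical mirror of the TestClass referenced by the tests above.
public static class TestClass {

    @TestLogging("xyz:TRACE")   // xyz goes to TRACE during annotatedTestMethod
    public void annotatedTestMethod() { }

    @TestLogging("abc:TRACE")   // abc goes to TRACE during annotatedTestMethod2
    public void annotatedTestMethod2() { }
}
-------------------------------------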