Merge branch 'master' into feature/rank-eval

Commit: 8f71c29c16
@@ -460,7 +460,7 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveriesCollection.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryFailedException.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySettings.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySource.java" checks="LineLength" />
+  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]PeerRecoverySourceService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryState.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]StartRecoveryRequest.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]IndicesStore.java" checks="LineLength" />
@@ -722,7 +722,6 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]settings[/\\]ClusterSettingsIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]structure[/\\]RoutingIteratorTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]codecs[/\\]CodecTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]BooleansTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreContainerTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreTests.java" checks="LineLength" />
@@ -1,34 +1,53 @@
-Steps to execute the benchmark:
+### Steps to execute the benchmark
 
-1. Start Elasticsearch on the target host (ideally *not* on the same machine)
-2. Create an empty index with the mapping you want to benchmark
-3. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.
-4. Delete the index
-5. Repeat steps 2 - 4 for multiple iterations. The first iterations are intended as warmup for Elasticsearch itself. Always start the same benchmark in step 3!
-6. After the benchmark: Shutdown Elasticsearch and delete the data directory
+1. Build `client-benchmark-noop-api-plugin` with `gradle :client:client-benchmark-noop-api-plugin:assemble`
+2. Install it on the target host with `bin/elasticsearch-plugin install file:///full/path/to/client-benchmark-noop-api-plugin.zip`
+3. Start Elasticsearch on the target host (ideally *not* on the same machine)
+4. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.
+
+Repeat all steps above for the other benchmark candidate.
 
-Example benchmark:
+### Example benchmark
 
-* Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress
-* Use the mapping file https://github.com/elastic/rally-tracks/blob/master/geonames/mappings.json to create the index
+In general, you should define a few GC-related settings `-Xms8192M -Xmx8192M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails` and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.
 
-Example command line parameter list:
+#### Bulk indexing
+
+Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress them.
+
+Example command line parameters:
 
 ```
-rest 192.168.2.2 /home/your_user_name/.rally/benchmarks/data/geonames/documents.json geonames type 8647880 5000 "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }"
+rest bulk 192.168.2.2 ./documents.json geonames type 8647880 5000
 ```
 
 The parameters are in order:
 
 * Client type: Use either "rest" or "transport"
+* Benchmark type: Use either "bulk" or "search"
 * Benchmark target host IP (the host where Elasticsearch is running)
 * full path to the file that should be bulk indexed
 * name of the index
 * name of the (sole) type in the index
 * number of documents in the file
 * bulk size
-* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
-
-You should also define a few GC-related settings `-Xms4096M -Xmx4096M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails` and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.
+
+#### Search
+
+Example command line parameters:
+
+```
+rest search 192.168.2.2 geonames "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }" 500,1000,1100,1200
+```
+
+The parameters are in order:
+
+* Client type: Use either "rest" or "transport"
+* Benchmark type: Use either "bulk" or "search"
+* Benchmark target host IP (the host where Elasticsearch is running)
+* name of the index
+* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
+* A comma-separated list of target throughput rates
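Putting the pieces together, a full bulk run with the suggested GC flags could be launched roughly as follows. This is a sketch: the uberjar path is an assumption, so substitute whatever artifact `gradle :client:benchmark:shadowJar` actually produces in your checkout.

```
java -Xms8192M -Xmx8192M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails \
  -jar client/benchmark/build/libs/client-benchmark-all.jar \
  rest bulk 192.168.2.2 ./documents.json geonames type 8647880 5000
```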
@@ -50,6 +50,8 @@ dependencies {
     compile 'org.apache.commons:commons-math3:3.2'
 
     compile("org.elasticsearch.client:rest:${version}")
+    // bottleneck should be the client, not Elasticsearch
+    compile project(path: ':client:client-benchmark-noop-api-plugin')
     // for transport client
     compile("org.elasticsearch:elasticsearch:${version}")
     compile("org.elasticsearch.client:transport:${version}")
@@ -27,7 +27,11 @@ import org.elasticsearch.common.SuppressForbidden;
 import java.io.Closeable;
 import java.lang.management.GarbageCollectorMXBean;
 import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.stream.Collectors;
 
 public abstract class AbstractBenchmark<T extends Closeable> {
     private static final int SEARCH_BENCHMARK_ITERATIONS = 10_000;
@@ -40,52 +44,111 @@ public abstract class AbstractBenchmark<T extends Closeable> {
 
     @SuppressForbidden(reason = "system out is ok for a command line tool")
     public final void run(String[] args) throws Exception {
-        if (args.length < 6) {
-            System.err.println(
-                "usage: benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize [search request body]");
+        if (args.length < 1) {
+            System.err.println("usage: [search|bulk]");
             System.exit(1);
         }
-        String benchmarkTargetHost = args[0];
-        String indexFilePath = args[1];
-        String indexName = args[2];
-        String typeName = args[3];
-        int totalDocs = Integer.valueOf(args[4]);
-        int bulkSize = Integer.valueOf(args[5]);
+        switch (args[0]) {
+            case "search":
+                runSearchBenchmark(args);
+                break;
+            case "bulk":
+                runBulkIndexBenchmark(args);
+                break;
+            default:
+                System.err.println("Unknown benchmark type [" + args[0] + "]");
+                System.exit(1);
+        }
+    }
+
+    @SuppressForbidden(reason = "system out is ok for a command line tool")
+    private void runBulkIndexBenchmark(String[] args) throws Exception {
+        if (args.length != 7) {
+            System.err.println(
+                "usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize");
+            System.exit(1);
+        }
+        String benchmarkTargetHost = args[1];
+        String indexFilePath = args[2];
+        String indexName = args[3];
+        String typeName = args[4];
+        int totalDocs = Integer.valueOf(args[5]);
+        int bulkSize = Integer.valueOf(args[6]);
 
         int totalIterationCount = (int) Math.floor(totalDocs / bulkSize);
         // consider 40% of all iterations as warmup iterations
         int warmupIterations = (int) (0.4d * totalIterationCount);
         int iterations = totalIterationCount - warmupIterations;
-        String searchBody = (args.length == 7) ? args[6] : null;
 
         T client = client(benchmarkTargetHost);
 
         BenchmarkRunner benchmark = new BenchmarkRunner(warmupIterations, iterations,
             new BulkBenchmarkTask(
-                bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations + iterations, bulkSize));
+                bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations, iterations, bulkSize));
 
         try {
-            benchmark.run();
-            if (searchBody != null) {
-                for (int run = 1; run <= 5; run++) {
-                    System.out.println("=============");
-                    System.out.println(" Trial run " + run);
-                    System.out.println("=============");
-
-                    for (int throughput = 100; throughput <= 100_000; throughput *= 10) {
-                        //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
-                        runGc();
-                        BenchmarkRunner searchBenchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
-                            new SearchBenchmarkTask(
-                                searchRequestExecutor(client, indexName), searchBody, 2 * SEARCH_BENCHMARK_ITERATIONS, throughput));
-                        System.out.printf("Target throughput = %d ops / s%n", throughput);
-                        searchBenchmark.run();
-                    }
-                }
-            }
+            runTrials(() -> {
+                runGc();
+                benchmark.run();
+            });
         } finally {
             client.close();
         }
     }
 
+    @SuppressForbidden(reason = "system out is ok for a command line tool")
+    private void runSearchBenchmark(String[] args) throws Exception {
+        if (args.length != 5) {
+            System.err.println(
+                "usage: 'search' benchmarkTargetHostIp indexName searchRequestBody throughputRates");
+            System.exit(1);
+        }
+        String benchmarkTargetHost = args[1];
+        String indexName = args[2];
+        String searchBody = args[3];
+        List<Integer> throughputRates = Arrays.asList(args[4].split(",")).stream().map(Integer::valueOf).collect(Collectors.toList());
+
+        T client = client(benchmarkTargetHost);
+
+        try {
+            runTrials(() -> {
+                for (int throughput : throughputRates) {
+                    //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
+                    runGc();
+                    BenchmarkRunner benchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
+                        new SearchBenchmarkTask(
+                            searchRequestExecutor(client, indexName), searchBody, SEARCH_BENCHMARK_ITERATIONS,
+                            SEARCH_BENCHMARK_ITERATIONS, throughput));
+                    System.out.printf("Target throughput = %d ops / s%n", throughput);
+                    benchmark.run();
+                }
+            });
+        } finally {
+            client.close();
+        }
+    }
+
+    @SuppressForbidden(reason = "system out is ok for a command line tool")
+    private void runTrials(Runnable runner) {
+        int totalWarmupTrialRuns = 1;
+        for (int run = 1; run <= totalWarmupTrialRuns; run++) {
+            System.out.println("======================");
+            System.out.println(" Warmup trial run " + run + "/" + totalWarmupTrialRuns);
+            System.out.println("======================");
+            runner.run();
+        }
+
+        int totalTrialRuns = 5;
+        for (int run = 1; run <= totalTrialRuns; run++) {
+            System.out.println("================");
+            System.out.println(" Trial run " + run + "/" + totalTrialRuns);
+            System.out.println("================");
+
+            runner.run();
+        }
+    }
+
     /**
@@ -37,7 +37,7 @@ public class BenchmarkMain {
                 benchmark = new RestClientBenchmark();
                 break;
             default:
-                System.err.println("Unknown benchmark type [" + type + "]");
+                System.err.println("Unknown client type [" + type + "]");
                 System.exit(1);
         }
         benchmark.run(Arrays.copyOfRange(args, 1, args.length));
@@ -40,8 +40,8 @@ public final class BenchmarkRunner {
     }
 
     @SuppressForbidden(reason = "system out is ok for a command line tool")
-    public void run() throws Exception {
-        SampleRecorder recorder = new SampleRecorder(warmupIterations, iterations);
+    public void run() {
+        SampleRecorder recorder = new SampleRecorder(iterations);
         System.out.printf("Running %s with %d warmup iterations and %d iterations.%n",
             task.getClass().getSimpleName(), warmupIterations, iterations);
 
@@ -52,6 +52,8 @@ public final class BenchmarkRunner {
         } catch (InterruptedException ex) {
             Thread.currentThread().interrupt();
             return;
+        } catch (Exception ex) {
+            throw new RuntimeException(ex);
         }
 
         List<Sample> samples = recorder.getSamples();
@@ -62,17 +64,24 @@ public final class BenchmarkRunner {
         }
 
         for (Metrics metrics : summaryMetrics) {
             System.out.printf(Locale.ROOT, "Operation: %s%n", metrics.operation);
-            String stats = String.format(Locale.ROOT,
-                "Throughput = %f ops/s, p90 = %f ms, p95 = %f ms, p99 = %f ms, p99.9 = %f ms, p99.99 = %f ms",
-                metrics.throughput,
-                metrics.serviceTimeP90, metrics.serviceTimeP95,
-                metrics.serviceTimeP99, metrics.serviceTimeP999,
-                metrics.serviceTimeP9999);
-            System.out.println(repeat(stats.length(), '-'));
-            System.out.println(stats);
+            String throughput = String.format(Locale.ROOT, "Throughput [ops/s]: %f", metrics.throughput);
+            String serviceTimes = String.format(Locale.ROOT,
+                "Service time [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
+                metrics.serviceTimeP50, metrics.serviceTimeP90, metrics.serviceTimeP95,
+                metrics.serviceTimeP99, metrics.serviceTimeP999, metrics.serviceTimeP9999);
+            String latencies = String.format(Locale.ROOT,
+                "Latency [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
+                metrics.latencyP50, metrics.latencyP90, metrics.latencyP95,
+                metrics.latencyP99, metrics.latencyP999, metrics.latencyP9999);
+
+            int lineLength = Math.max(serviceTimes.length(), latencies.length());
+
+            System.out.println(repeat(lineLength, '-'));
+            System.out.println(throughput);
+            System.out.println(serviceTimes);
+            System.out.println(latencies);
             System.out.printf("success count = %d, error count = %d%n", metrics.successCount, metrics.errorCount);
-            System.out.println(repeat(stats.length(), '-'));
+            System.out.println(repeat(lineLength, '-'));
         }
     }
 
@@ -23,23 +23,38 @@ public final class Metrics {
     public final long successCount;
     public final long errorCount;
     public final double throughput;
+    public final double serviceTimeP50;
     public final double serviceTimeP90;
     public final double serviceTimeP95;
     public final double serviceTimeP99;
     public final double serviceTimeP999;
     public final double serviceTimeP9999;
+    public final double latencyP50;
+    public final double latencyP90;
+    public final double latencyP95;
+    public final double latencyP99;
+    public final double latencyP999;
+    public final double latencyP9999;
 
     public Metrics(String operation, long successCount, long errorCount, double throughput,
-                   double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
-                   double serviceTimeP999, double serviceTimeP9999) {
+                   double serviceTimeP50, double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
+                   double serviceTimeP999, double serviceTimeP9999, double latencyP50, double latencyP90,
+                   double latencyP95, double latencyP99, double latencyP999, double latencyP9999) {
         this.operation = operation;
         this.successCount = successCount;
         this.errorCount = errorCount;
         this.throughput = throughput;
+        this.serviceTimeP50 = serviceTimeP50;
         this.serviceTimeP90 = serviceTimeP90;
         this.serviceTimeP95 = serviceTimeP95;
         this.serviceTimeP99 = serviceTimeP99;
         this.serviceTimeP999 = serviceTimeP999;
         this.serviceTimeP9999 = serviceTimeP9999;
+        this.latencyP50 = latencyP50;
+        this.latencyP90 = latencyP90;
+        this.latencyP95 = latencyP95;
+        this.latencyP99 = latencyP99;
+        this.latencyP999 = latencyP999;
+        this.latencyP9999 = latencyP9999;
     }
 }
@@ -50,13 +50,16 @@ public final class MetricsCalculator {
         for (Map.Entry<String, List<Sample>> operationAndMetrics : samplesPerOperation.entrySet()) {
             List<Sample> samples = operationAndMetrics.getValue();
             double[] serviceTimes = new double[samples.size()];
+            double[] latencies = new double[samples.size()];
             int it = 0;
             long firstStart = Long.MAX_VALUE;
             long latestEnd = Long.MIN_VALUE;
             for (Sample sample : samples) {
                 firstStart = Math.min(sample.getStartTimestamp(), firstStart);
                 latestEnd = Math.max(sample.getStopTimestamp(), latestEnd);
-                serviceTimes[it++] = sample.getServiceTime();
+                serviceTimes[it] = sample.getServiceTime();
+                latencies[it] = sample.getLatency();
+                it++;
             }
 
             metrics.add(new Metrics(operationAndMetrics.getKey(),
@@ -65,11 +68,18 @@ public final class MetricsCalculator {
                 // throughput calculation is based on the total (Wall clock) time it took to generate all samples
                 calculateThroughput(samples.size(), latestEnd - firstStart),
                 // convert ns -> ms without losing precision
+                StatUtils.percentile(serviceTimes, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
-                StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
+                StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
         }
         return metrics;
     }
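The `// convert ns -> ms without losing precision` comment is worth a concrete illustration: dividing by `TimeUnit.MILLISECONDS.toNanos(1L)` (1,000,000) in floating point keeps the sub-millisecond digits that an integer conversion would drop. A minimal, self-contained sketch with a made-up sample value:

```java
import java.util.concurrent.TimeUnit;

public class NanosToMillisDemo {
    public static void main(String[] args) {
        long serviceTimeNanos = 1_234_567L; // hypothetical sample: roughly 1.23 ms

        // floating point division preserves sub-millisecond precision
        double millis = (double) serviceTimeNanos / TimeUnit.MILLISECONDS.toNanos(1L);
        System.out.println(millis); // 1.234567

        // integer conversion truncates to whole milliseconds
        System.out.println(TimeUnit.NANOSECONDS.toMillis(serviceTimeNanos)); // 1
    }
}
```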
@@ -20,12 +20,14 @@ package org.elasticsearch.client.benchmark.metrics;
 
 public final class Sample {
     private final String operation;
+    private final long expectedStartTimestamp;
     private final long startTimestamp;
     private final long stopTimestamp;
     private final boolean success;
 
-    public Sample(String operation, long startTimestamp, long stopTimestamp, boolean success) {
+    public Sample(String operation, long expectedStartTimestamp, long startTimestamp, long stopTimestamp, boolean success) {
         this.operation = operation;
+        this.expectedStartTimestamp = expectedStartTimestamp;
         this.startTimestamp = startTimestamp;
         this.stopTimestamp = stopTimestamp;
         this.success = success;

@@ -48,7 +50,10 @@ public final class Sample {
     }
 
     public long getServiceTime() {
         // this is *not* latency, we're not including wait time in the queue (on purpose)
         return stopTimestamp - startTimestamp;
     }
+
+    public long getLatency() {
+        return stopTimestamp - expectedStartTimestamp;
+    }
 }
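The two getters encode the coordinated-omission distinction: service time is measured from the actual request start, latency from the scheduled start, so any queueing delay is charged to latency. A small illustration with made-up timestamps, assumed to run in the same package as `Sample`:

```java
public class SampleDemo {
    public static void main(String[] args) {
        long expectedStart = 0;  // when the operation was scheduled (ns)
        long start = 5_000_000;  // when it was actually issued: 5 ms late
        long stop = 7_000_000;   // when the response arrived

        Sample sample = new Sample("search", expectedStart, start, stop, true);
        System.out.println(sample.getServiceTime()); // 2_000_000 ns = 2 ms
        System.out.println(sample.getLatency());     // 7_000_000 ns = 7 ms, includes wait time
    }
}
```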
@@ -28,21 +28,14 @@ import java.util.List;
  * This class is NOT threadsafe.
  */
 public final class SampleRecorder {
-    private final int warmupIterations;
     private final List<Sample> samples;
-    private int currentIteration;
 
-    public SampleRecorder(int warmupIterations, int iterations) {
-        this.warmupIterations = warmupIterations;
+    public SampleRecorder(int iterations) {
         this.samples = new ArrayList<>(iterations);
     }
 
     public void addSample(Sample sample) {
-        currentIteration++;
-        // only add samples after warmup
-        if (currentIteration > warmupIterations) {
-            samples.add(sample);
-        }
+        samples.add(sample);
     }
 
     public List<Sample> getSamples() {
@@ -43,15 +43,18 @@ import java.util.concurrent.TimeUnit;
 public class BulkBenchmarkTask implements BenchmarkTask {
     private final BulkRequestExecutor requestExecutor;
     private final String indexFilePath;
-    private final int totalIterations;
+    private final int warmupIterations;
+    private final int measurementIterations;
     private final int bulkSize;
     private LoadGenerator generator;
     private ExecutorService executorService;
 
-    public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int totalIterations, int bulkSize) {
+    public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int warmupIterations, int measurementIterations,
+                             int bulkSize) {
         this.requestExecutor = requestExecutor;
         this.indexFilePath = indexFilePath;
-        this.totalIterations = totalIterations;
+        this.warmupIterations = warmupIterations;
+        this.measurementIterations = measurementIterations;
         this.bulkSize = bulkSize;
     }
 

@@ -60,7 +63,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
     public void setUp(SampleRecorder sampleRecorder) {
         BlockingQueue<List<String>> bulkQueue = new ArrayBlockingQueue<>(256);
 
-        BulkIndexer runner = new BulkIndexer(bulkQueue, totalIterations, sampleRecorder, requestExecutor);
+        BulkIndexer runner = new BulkIndexer(bulkQueue, warmupIterations, measurementIterations, sampleRecorder, requestExecutor);
 
         executorService = Executors.newSingleThreadExecutor((r) -> new Thread(r, "bulk-index-runner"));
         executorService.submit(runner);

@@ -135,21 +138,23 @@ public class BulkBenchmarkTask implements BenchmarkTask {
         private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
 
         private final BlockingQueue<List<String>> bulkData;
-        private final int totalIterations;
+        private final int warmupIterations;
+        private final int measurementIterations;
         private final BulkRequestExecutor bulkRequestExecutor;
         private final SampleRecorder sampleRecorder;
 
-        public BulkIndexer(BlockingQueue<List<String>> bulkData, int totalIterations, SampleRecorder sampleRecorder,
-                           BulkRequestExecutor bulkRequestExecutor) {
+        public BulkIndexer(BlockingQueue<List<String>> bulkData, int warmupIterations, int measurementIterations,
+                           SampleRecorder sampleRecorder, BulkRequestExecutor bulkRequestExecutor) {
             this.bulkData = bulkData;
-            this.totalIterations = totalIterations;
+            this.warmupIterations = warmupIterations;
+            this.measurementIterations = measurementIterations;
             this.bulkRequestExecutor = bulkRequestExecutor;
             this.sampleRecorder = sampleRecorder;
         }
 
         @Override
         public void run() {
-            for (int iteration = 0; iteration < totalIterations; iteration++) {
+            for (int iteration = 0; iteration < warmupIterations + measurementIterations; iteration++) {
                 boolean success = false;
                 List<String> currentBulk;
                 try {

@@ -158,8 +163,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
                     Thread.currentThread().interrupt();
                     return;
                 }
-                // Yes, this approach is prone to coordinated omission *but* we have to consider that we want to benchmark a closed system
-                // with backpressure here instead of an open system. So this is actually correct in this case.
+                //measure only service time, latency is not that interesting for a throughput benchmark
                 long start = System.nanoTime();
                 try {
                     success = bulkRequestExecutor.bulkIndex(currentBulk);

@@ -167,7 +171,9 @@ public class BulkBenchmarkTask implements BenchmarkTask {
                     logger.warn("Error while executing bulk request", ex);
                 }
                 long stop = System.nanoTime();
-                sampleRecorder.addSample(new Sample("bulk", start, stop, success));
+                if (iteration >= warmupIterations) {
+                    sampleRecorder.addSample(new Sample("bulk", start, start, stop, success));
+                }
             }
         }
     }
@@ -25,20 +25,20 @@ import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
 import java.util.concurrent.TimeUnit;
 
 public class SearchBenchmarkTask implements BenchmarkTask {
-    private static final long MICROS_PER_SEC = TimeUnit.SECONDS.toMicros(1L);
-    private static final long NANOS_PER_MICRO = TimeUnit.MICROSECONDS.toNanos(1L);
-
     private final SearchRequestExecutor searchRequestExecutor;
     private final String searchRequestBody;
-    private final int iterations;
+    private final int warmupIterations;
+    private final int measurementIterations;
     private final int targetThroughput;
 
     private SampleRecorder sampleRecorder;
 
-    public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int iterations, int targetThroughput) {
+    public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int warmupIterations,
+                               int measurementIterations, int targetThroughput) {
         this.searchRequestExecutor = searchRequestExecutor;
         this.searchRequestBody = body;
-        this.iterations = iterations;
+        this.warmupIterations = warmupIterations;
+        this.measurementIterations = measurementIterations;
         this.targetThroughput = targetThroughput;
     }
 

@@ -49,28 +49,25 @@ public class SearchBenchmarkTask implements BenchmarkTask {
 
     @Override
     public void run() throws Exception {
-        for (int iteration = 0; iteration < this.iterations; iteration++) {
-            final long start = System.nanoTime();
-            boolean success = searchRequestExecutor.search(searchRequestBody);
-            final long stop = System.nanoTime();
-            sampleRecorder.addSample(new Sample("search", start, stop, success));
-
-            int waitTime = (int) Math.floor(MICROS_PER_SEC / targetThroughput - (stop - start) / NANOS_PER_MICRO);
-            if (waitTime > 0) {
-                waitMicros(waitTime);
-            }
-        }
+        runIterations(warmupIterations, false);
+        runIterations(measurementIterations, true);
     }
 
-    private void waitMicros(int waitTime) throws InterruptedException {
-        // Thread.sleep() time is not very accurate (it's most of the time around 1 - 2 ms off)
-        // we busy spin all the time to avoid introducing additional measurement artifacts (noticed 100% skew on 99.9th percentile)
-        // this approach is not suitable for low throughput rates (in the second range) though
-        if (waitTime > 0) {
-            long end = System.nanoTime() + 1000L * waitTime;
-            while (end > System.nanoTime()) {
+    private void runIterations(int iterations, boolean addSample) {
+        long interval = TimeUnit.SECONDS.toNanos(1L) / targetThroughput;
+
+        long totalStart = System.nanoTime();
+        for (int iteration = 0; iteration < iterations; iteration++) {
+            long expectedStart = totalStart + iteration * interval;
+            while (System.nanoTime() < expectedStart) {
                 // busy spin
             }
+            long start = System.nanoTime();
+            boolean success = searchRequestExecutor.search(searchRequestBody);
+            long stop = System.nanoTime();
+            if (addSample) {
+                sampleRecorder.addSample(new Sample("search", expectedStart, start, stop, success));
+            }
         }
     }
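The pacing arithmetic in `runIterations` is easiest to see with concrete numbers: at a target of 500 ops/s the interval is 1 s / 500 = 2,000,000 ns, and iteration `i` is due at `totalStart + i * interval`. A late response does not shift the schedule; the delay simply shows up in the next sample's latency, which is the coordinated-omission-aware part of this change. A tiny sketch of just that calculation:

```java
import java.util.concurrent.TimeUnit;

public class PacingDemo {
    public static void main(String[] args) {
        int targetThroughput = 500; // ops/s, from the README example "500,1000,1100,1200"
        long interval = TimeUnit.SECONDS.toNanos(1L) / targetThroughput;
        System.out.println(interval); // 2_000_000 ns: one request every 2 ms

        long totalStart = 0; // hypothetical start timestamp
        for (int iteration = 0; iteration < 3; iteration++) {
            // the schedule is fixed up front; a slow response delays the *actual* start,
            // and the difference is recorded as latency rather than slowing the schedule
            System.out.println("iteration " + iteration + " due at " + (totalStart + iteration * interval) + " ns");
        }
    }
}
```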
@@ -19,14 +19,20 @@
 package org.elasticsearch.client.benchmark.rest;
 
 import org.apache.http.HttpEntity;
+import org.apache.http.HttpHeaders;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpStatus;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
+import org.apache.http.message.BasicHeader;
 import org.apache.http.nio.entity.NStringEntity;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientBuilder;
 import org.elasticsearch.client.benchmark.AbstractBenchmark;
 import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
 import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;

@@ -45,7 +51,12 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
 
     @Override
     protected RestClient client(String benchmarkTargetHost) {
-        return RestClient.builder(new HttpHost(benchmarkTargetHost, 9200)).build();
+        return RestClient
+            .builder(new HttpHost(benchmarkTargetHost, 9200))
+            .setHttpClientConfigCallback(b -> b.setDefaultHeaders(
+                Collections.singleton(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "gzip"))))
+            .setRequestConfigCallback(b -> b.setContentCompressionEnabled(true))
+            .build();
     }
 
     @Override

@@ -77,7 +88,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
         }
         HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON);
         try {
-            Response response = client.performRequest("POST", "/geonames/type/_bulk", Collections.emptyMap(), entity);
+            Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity);
             return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
         } catch (Exception e) {
             throw new ElasticsearchException(e);

@@ -91,7 +102,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
 
         private RestSearchRequestExecutor(RestClient client, String indexName) {
             this.client = client;
-            this.endpoint = "/" + indexName + "/_search";
+            this.endpoint = "/" + indexName + "/_noop_search";
         }
 
         @Override
@@ -19,7 +19,6 @@
 package org.elasticsearch.client.benchmark.transport;
 
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchResponse;

@@ -30,6 +29,11 @@ import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugin.noop.NoopPlugin;
+import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
+import org.elasticsearch.plugin.noop.action.bulk.NoopBulkRequestBuilder;
+import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
+import org.elasticsearch.plugin.noop.action.search.NoopSearchRequestBuilder;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.transport.client.PreBuiltTransportClient;
 

@@ -46,7 +50,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
 
     @Override
     protected TransportClient client(String benchmarkTargetHost) throws Exception {
-        TransportClient client = new PreBuiltTransportClient(Settings.EMPTY);
+        TransportClient client = new PreBuiltTransportClient(Settings.EMPTY, NoopPlugin.class);
         client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(benchmarkTargetHost), 9300));
         return client;
     }

@@ -74,7 +78,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
 
         @Override
         public boolean bulkIndex(List<String> bulkData) {
-            BulkRequestBuilder builder = client.prepareBulk();
+            NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client);
             for (String bulkItem : bulkData) {
                 builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8)));
             }

@@ -103,8 +107,11 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
         @Override
         public boolean search(String source) {
             final SearchResponse response;
+            NoopSearchRequestBuilder builder = NoopSearchAction.INSTANCE.newRequestBuilder(client);
             try {
-                response = client.prepareSearch(indexName).setQuery(QueryBuilders.wrapperQuery(source)).execute().get();
+                builder.setIndices(indexName);
+                builder.setQuery(QueryBuilders.wrapperQuery(source));
+                response = client.execute(NoopSearchAction.INSTANCE, builder.request()).get();
                 return response.status() == RestStatus.OK;
             } catch (InterruptedException e) {
                 Thread.currentThread().interrupt();
@@ -0,0 +1,23 @@
### Purpose

This plugin provides empty REST and transport endpoints for bulk indexing and search. It is used to avoid accidental server-side bottlenecks in client-side benchmarking.

### Build Instructions

Build the plugin with `gradle :client:client-benchmark-noop-api-plugin:assemble` from the Elasticsearch root project directory.

### Installation Instructions

After the binary has been built, install it with `bin/elasticsearch-plugin install file:///full/path/to/noop-plugin.zip`.

### Usage

The plugin provides two REST endpoints:

* `/_noop_bulk` and all variations that the bulk endpoint provides (except that all no op endpoints are called `_noop_bulk` instead of `_bulk`)
* `/_noop_search` and all variations that the search endpoint provides (except that all no op endpoints are called `_noop_search` instead of `_search`)

The corresponding transport actions are:

* `org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction`
* `org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction`
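For a quick smoke test of the REST side, the Java `RestClient` used elsewhere in this change can call the noop endpoint directly. A minimal sketch; host, index, and type names are placeholders taken from the benchmark examples:

```java
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.util.Collections;

public class NoopSmokeTest {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("192.168.2.2", 9200)).build()) {
            // a single-document bulk body; the noop endpoint parses it but never indexes anything
            String bulkBody = "{ \"index\": {} }\n{ \"name\": \"Sankt Georgen\" }\n";
            Response response = client.performRequest("POST", "/geonames/type/_noop_bulk",
                Collections.emptyMap(), new NStringEntity(bulkBody, ContentType.APPLICATION_JSON));
            System.out.println(response.getStatusLine());
        }
    }
}
```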
@@ -0,0 +1,36 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

group = 'org.elasticsearch.plugin'

apply plugin: 'elasticsearch.esplugin'
apply plugin: 'com.bmuschko.nexus'

esplugin {
    name 'client-benchmark-noop-api'
    description 'Stubbed out Elasticsearch actions that can be used for client-side benchmarking'
    classname 'org.elasticsearch.plugin.noop.NoopPlugin'
}

compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"

// no unit tests
test.enabled = false
integTest.enabled = false
@@ -0,0 +1,49 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop;

import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.RestNoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.RestNoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestHandler;

import java.util.Arrays;
import java.util.List;

public class NoopPlugin extends Plugin implements ActionPlugin {
    @Override
    public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
        return Arrays.asList(
            new ActionHandler<>(NoopBulkAction.INSTANCE, TransportNoopBulkAction.class),
            new ActionHandler<>(NoopSearchAction.INSTANCE, TransportNoopSearchAction.class)
        );
    }

    @Override
    public List<Class<? extends RestHandler>> getRestHandlers() {
        return Arrays.asList(RestNoopBulkAction.class, RestNoopSearchAction.class);
    }
}
@@ -0,0 +1,44 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class NoopBulkAction extends Action<BulkRequest, BulkResponse, NoopBulkRequestBuilder> {
    public static final String NAME = "mock:data/write/bulk";

    public static final NoopBulkAction INSTANCE = new NoopBulkAction();

    private NoopBulkAction() {
        super(NAME);
    }

    @Override
    public NoopBulkRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new NoopBulkRequestBuilder(client, this);
    }

    @Override
    public BulkResponse newResponse() {
        return new BulkResponse(null, 0);
    }
}
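As with any `Action` of this era, the `INSTANCE` singleton plus `newRequestBuilder()` is the whole client-facing surface. `TransportClientBenchmark` in this same change exercises it roughly like the sketch below; the index and type names are placeholders, and the client is assumed to be a `TransportClient` built with `NoopPlugin` installed:

```java
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.NoopBulkRequestBuilder;

import java.nio.charset.StandardCharsets;

public class NoopBulkDemo {
    static boolean noopBulk(ElasticsearchClient client) {
        NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client);
        builder.add(new IndexRequest("geonames", "type")
            .source("{ \"name\": \"Sankt Georgen\" }".getBytes(StandardCharsets.UTF_8)));
        // served entirely by TransportNoopBulkAction; nothing is actually indexed
        BulkResponse response = builder.get();
        return response.hasFailures() == false;
    }
}
```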
@@ -0,0 +1,153 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, NoopBulkRequestBuilder>
    implements WriteRequestBuilder<NoopBulkRequestBuilder> {

    public NoopBulkRequestBuilder(ElasticsearchClient client, NoopBulkAction action) {
        super(client, action, new BulkRequest());
    }

    /**
     * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
     * (for example, if no id is provided, one will be generated, or usage of the create flag).
     */
    public NoopBulkRequestBuilder add(IndexRequest request) {
        super.request.add(request);
        return this;
    }

    /**
     * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
     * (for example, if no id is provided, one will be generated, or usage of the create flag).
     */
    public NoopBulkRequestBuilder add(IndexRequestBuilder request) {
        super.request.add(request.request());
        return this;
    }

    /**
     * Adds a {@link DeleteRequest} to the list of actions to execute.
     */
    public NoopBulkRequestBuilder add(DeleteRequest request) {
        super.request.add(request);
        return this;
    }

    /**
     * Adds a {@link DeleteRequest} to the list of actions to execute.
     */
    public NoopBulkRequestBuilder add(DeleteRequestBuilder request) {
        super.request.add(request.request());
        return this;
    }

    /**
     * Adds an {@link UpdateRequest} to the list of actions to execute.
     */
    public NoopBulkRequestBuilder add(UpdateRequest request) {
        super.request.add(request);
        return this;
    }

    /**
     * Adds an {@link UpdateRequest} to the list of actions to execute.
     */
    public NoopBulkRequestBuilder add(UpdateRequestBuilder request) {
        super.request.add(request.request());
        return this;
    }

    /**
     * Adds framed data in binary format
     */
    public NoopBulkRequestBuilder add(byte[] data, int from, int length) throws Exception {
        request.add(data, from, length, null, null);
        return this;
    }

    /**
     * Adds framed data in binary format
     */
    public NoopBulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType)
        throws Exception {
        request.add(data, from, length, defaultIndex, defaultType);
        return this;
    }

    /**
     * Sets the number of shard copies that must be active before proceeding with the write.
     * See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
     */
    public NoopBulkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
        request.waitForActiveShards(waitForActiveShards);
        return this;
    }

    /**
     * A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
     * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
     * to get the ActiveShardCount.
     */
    public NoopBulkRequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
        return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
    }

    /**
     * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
     */
    public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
        request.timeout(timeout);
        return this;
    }

    /**
     * A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
     */
    public final NoopBulkRequestBuilder setTimeout(String timeout) {
        request.timeout(timeout);
        return this;
    }

    /**
     * The number of actions currently in the bulk.
     */
    public int numberOfActions() {
        return request.numberOfActions();
    }
}
@@ -0,0 +1,117 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;

import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
import static org.elasticsearch.rest.RestStatus.OK;

public class RestNoopBulkAction extends BaseRestHandler {
    @Inject
    public RestNoopBulkAction(Settings settings, RestController controller) {
        super(settings);

        controller.registerHandler(POST, "/_noop_bulk", this);
        controller.registerHandler(PUT, "/_noop_bulk", this);
        controller.registerHandler(POST, "/{index}/_noop_bulk", this);
        controller.registerHandler(PUT, "/{index}/_noop_bulk", this);
        controller.registerHandler(POST, "/{index}/{type}/_noop_bulk", this);
        controller.registerHandler(PUT, "/{index}/{type}/_noop_bulk", this);
    }

    @Override
    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
        BulkRequest bulkRequest = Requests.bulkRequest();
        String defaultIndex = request.param("index");
        String defaultType = request.param("type");
        String defaultRouting = request.param("routing");
        String fieldsParam = request.param("fields");
        String defaultPipeline = request.param("pipeline");
        String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;

        String waitForActiveShards = request.param("wait_for_active_shards");
        if (waitForActiveShards != null) {
            bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
        }
        bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
        bulkRequest.setRefreshPolicy(request.param("refresh"));
        bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, true);

        // short circuit the call to the transport layer
        BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);
        listener.onResponse(bulkRequest);
    }

    private static class BulkRestBuilderListener extends RestBuilderListener<BulkRequest> {
        private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update",
            new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));

        private final RestRequest request;

        public BulkRestBuilderListener(RestChannel channel, RestRequest request) {
            super(channel);
            this.request = request;
        }

        @Override
        public RestResponse buildResponse(BulkRequest bulkRequest, XContentBuilder builder) throws Exception {
            builder.startObject();
            builder.field(Fields.TOOK, 0);
            builder.field(Fields.ERRORS, false);
            builder.startArray(Fields.ITEMS);
            for (int idx = 0; idx < bulkRequest.numberOfActions(); idx++) {
                builder.startObject();
                ITEM_RESPONSE.toXContent(builder, request);
                builder.endObject();
            }
            builder.endArray();
            builder.endObject();
            return new BytesRestResponse(OK, builder);
        }
    }

    static final class Fields {
        static final String ITEMS = "items";
        static final String ERRORS = "errors";
        static final String TOOK = "took";
    }
}
@ -0,0 +1,56 @@

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
    private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update",
        new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));

    @Inject
    public TransportNoopBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
                                   ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, NoopBulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
    }

    @Override
    protected void doExecute(BulkRequest request, ActionListener<BulkResponse> listener) {
        final int itemCount = request.subRequests().size();
        // simulate at least a realistic amount of data that gets serialized
        BulkItemResponse[] bulkItemResponses = new BulkItemResponse[itemCount];
        for (int idx = 0; idx < itemCount; idx++) {
            bulkItemResponses[idx] = ITEM_RESPONSE;
        }
        listener.onResponse(new BulkResponse(bulkItemResponses, 0));
    }
}
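
The action above fabricates a fixed `BulkResponse` without touching any index, which is what makes it useful as a client-side serialization benchmark target. A minimal caller sketch follows; note that `NoopBulkAction.INSTANCE` is an assumption by analogy with `NoopSearchAction.INSTANCE` further down, since this diff only shows `NoopBulkAction.NAME`:

```java
// Hypothetical usage, given some ElasticsearchClient `client`.
// NoopBulkAction.INSTANCE is assumed; only NoopBulkAction.NAME appears in this diff.
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("test", "doc", "1").source("{\"field\":1}"));
client.execute(NoopBulkAction.INSTANCE, bulkRequest, new ActionListener<BulkResponse>() {
    @Override
    public void onResponse(BulkResponse response) {
        // one canned "update" item per sub-request; nothing was actually indexed
        assert response.getItems().length == bulkRequest.numberOfActions();
    }

    @Override
    public void onFailure(Exception e) {
        // only transport-level failures are possible; the action itself cannot fail
    }
});
```
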
@ -0,0 +1,43 @@

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class NoopSearchAction extends Action<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
    public static final NoopSearchAction INSTANCE = new NoopSearchAction();
    public static final String NAME = "mock:data/read/search";

    public NoopSearchAction() {
        super(NAME);
    }

    @Override
    public NoopSearchRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new NoopSearchRequestBuilder(client, this);
    }

    @Override
    public SearchResponse newResponse() {
        return new SearchResponse();
    }
}
@ -0,0 +1,496 @@

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.SuggestBuilder;

import java.util.Arrays;
import java.util.List;

public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {

    public NoopSearchRequestBuilder(ElasticsearchClient client, NoopSearchAction action) {
        super(client, action, new SearchRequest());
    }

    /**
     * Sets the indices the search will be executed on.
     */
    public NoopSearchRequestBuilder setIndices(String... indices) {
        request.indices(indices);
        return this;
    }

    /**
     * The document types to execute the search against. Defaults to executing against
     * all types.
     */
    public NoopSearchRequestBuilder setTypes(String... types) {
        request.types(types);
        return this;
    }

    /**
     * The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
     */
    public NoopSearchRequestBuilder setSearchType(SearchType searchType) {
        request.searchType(searchType);
        return this;
    }

    /**
     * A string representation of the search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
     * Can be one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
     * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
     */
    public NoopSearchRequestBuilder setSearchType(String searchType) {
        request.searchType(searchType);
        return this;
    }

    /**
     * If set, will enable scrolling of the search request.
     */
    public NoopSearchRequestBuilder setScroll(Scroll scroll) {
        request.scroll(scroll);
        return this;
    }

    /**
     * If set, will enable scrolling of the search request for the specified timeout.
     */
    public NoopSearchRequestBuilder setScroll(TimeValue keepAlive) {
        request.scroll(keepAlive);
        return this;
    }

    /**
     * If set, will enable scrolling of the search request for the specified timeout.
     */
    public NoopSearchRequestBuilder setScroll(String keepAlive) {
        request.scroll(keepAlive);
        return this;
    }

    /**
     * An optional timeout to control how long search is allowed to take.
     */
    public NoopSearchRequestBuilder setTimeout(TimeValue timeout) {
        sourceBuilder().timeout(timeout);
        return this;
    }

    /**
     * An optional document count, upon collecting which the search
     * query will terminate early.
     */
    public NoopSearchRequestBuilder setTerminateAfter(int terminateAfter) {
        sourceBuilder().terminateAfter(terminateAfter);
        return this;
    }

    /**
     * A comma separated list of routing values to control the shards the search will be executed on.
     */
    public NoopSearchRequestBuilder setRouting(String routing) {
        request.routing(routing);
        return this;
    }

    /**
     * The routing values to control the shards that the search will be executed on.
     */
    public NoopSearchRequestBuilder setRouting(String... routing) {
        request.routing(routing);
        return this;
    }

    /**
     * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
     * <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
     * a custom value, which guarantees that the same order will be used across different requests.
     */
    public NoopSearchRequestBuilder setPreference(String preference) {
        request.preference(preference);
        return this;
    }

    /**
     * Specifies what type of requested indices to ignore and how to handle wildcard indices expressions.
     * <p>
     * For example indices that don't exist.
     */
    public NoopSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
        request().indicesOptions(indicesOptions);
        return this;
    }

    /**
     * Constructs a new search source builder with a search query.
     *
     * @see org.elasticsearch.index.query.QueryBuilders
     */
    public NoopSearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
        sourceBuilder().query(queryBuilder);
        return this;
    }

    /**
     * Sets a filter that will be executed after the query has been executed and only affects the search hits
     * (not aggregations). This filter is always executed as the last filtering mechanism.
     */
    public NoopSearchRequestBuilder setPostFilter(QueryBuilder postFilter) {
        sourceBuilder().postFilter(postFilter);
        return this;
    }

    /**
     * Sets the minimum score below which docs will be filtered out.
     */
    public NoopSearchRequestBuilder setMinScore(float minScore) {
        sourceBuilder().minScore(minScore);
        return this;
    }

    /**
     * From index to start the search from. Defaults to <tt>0</tt>.
     */
    public NoopSearchRequestBuilder setFrom(int from) {
        sourceBuilder().from(from);
        return this;
    }

    /**
     * The number of search hits to return. Defaults to <tt>10</tt>.
     */
    public NoopSearchRequestBuilder setSize(int size) {
        sourceBuilder().size(size);
        return this;
    }

    /**
     * Should each {@link org.elasticsearch.search.SearchHit} be returned with an
     * explanation of the hit (ranking).
     */
    public NoopSearchRequestBuilder setExplain(boolean explain) {
        sourceBuilder().explain(explain);
        return this;
    }

    /**
     * Should each {@link org.elasticsearch.search.SearchHit} be returned with its
     * version.
     */
    public NoopSearchRequestBuilder setVersion(boolean version) {
        sourceBuilder().version(version);
        return this;
    }

    /**
     * Sets the boost a specific index will receive when the query is executed against it.
     *
     * @param index      The index to apply the boost against
     * @param indexBoost The boost to apply to the index
     */
    public NoopSearchRequestBuilder addIndexBoost(String index, float indexBoost) {
        sourceBuilder().indexBoost(index, indexBoost);
        return this;
    }

    /**
     * The stats groups this request will be aggregated under.
     */
    public NoopSearchRequestBuilder setStats(String... statsGroups) {
        sourceBuilder().stats(Arrays.asList(statsGroups));
        return this;
    }

    /**
     * The stats groups this request will be aggregated under.
     */
    public NoopSearchRequestBuilder setStats(List<String> statsGroups) {
        sourceBuilder().stats(statsGroups);
        return this;
    }

    /**
     * Indicates whether the response should contain the stored _source for every hit
     */
    public NoopSearchRequestBuilder setFetchSource(boolean fetch) {
        sourceBuilder().fetchSource(fetch);
        return this;
    }

    /**
     * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
     * elements.
     *
     * @param include An optional include (optionally wildcarded) pattern to filter the returned _source
     * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
     */
    public NoopSearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
        sourceBuilder().fetchSource(include, exclude);
        return this;
    }

    /**
     * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
     * elements.
     *
     * @param includes An optional list of include (optionally wildcarded) patterns to filter the returned _source
     * @param excludes An optional list of exclude (optionally wildcarded) patterns to filter the returned _source
     */
    public NoopSearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
        sourceBuilder().fetchSource(includes, excludes);
        return this;
    }

    /**
     * Adds a docvalue based field to load and return. The field does not have to be stored,
     * but it's recommended to use non-analyzed or numeric fields.
     *
     * @param name The field to get from the docvalue
     */
    public NoopSearchRequestBuilder addDocValueField(String name) {
        sourceBuilder().docValueField(name);
        return this;
    }

    /**
     * Adds a stored field to load and return (note, it must be stored) as part of the search request.
     * If none are specified, the source of the document will be returned.
     */
    public NoopSearchRequestBuilder addStoredField(String field) {
        sourceBuilder().storedField(field);
        return this;
    }

    /**
     * Adds a script based field to load and return. The field does not have to be stored,
     * but it's recommended to use non-analyzed or numeric fields.
     *
     * @param name   The name that will represent this value in the return hit
     * @param script The script to use
     */
    public NoopSearchRequestBuilder addScriptField(String name, Script script) {
        sourceBuilder().scriptField(name, script);
        return this;
    }

    /**
     * Adds a sort against the given field name and the sort ordering.
     *
     * @param field The name of the field
     * @param order The sort ordering
     */
    public NoopSearchRequestBuilder addSort(String field, SortOrder order) {
        sourceBuilder().sort(field, order);
        return this;
    }

    /**
     * Adds a generic sort builder.
     *
     * @see org.elasticsearch.search.sort.SortBuilders
     */
    public NoopSearchRequestBuilder addSort(SortBuilder sort) {
        sourceBuilder().sort(sort);
        return this;
    }

    /**
     * Set the sort values that indicates which docs this request should "search after".
     */
    public NoopSearchRequestBuilder searchAfter(Object[] values) {
        sourceBuilder().searchAfter(values);
        return this;
    }

    public NoopSearchRequestBuilder slice(SliceBuilder builder) {
        sourceBuilder().slice(builder);
        return this;
    }

    /**
     * Applies when sorting, and controls if scores will be tracked as well. Defaults to
     * <tt>false</tt>.
     */
    public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
        sourceBuilder().trackScores(trackScores);
        return this;
    }

    /**
     * Sets the fields to load and return as part of the search request. If none
     * are specified, the source of the document will be returned.
     */
    public NoopSearchRequestBuilder storedFields(String... fields) {
        sourceBuilder().storedFields(Arrays.asList(fields));
        return this;
    }

    /**
     * Adds an aggregation to the search operation.
     */
    public NoopSearchRequestBuilder addAggregation(AggregationBuilder aggregation) {
        sourceBuilder().aggregation(aggregation);
        return this;
    }

    /**
     * Adds an aggregation to the search operation.
     */
    public NoopSearchRequestBuilder addAggregation(PipelineAggregationBuilder aggregation) {
        sourceBuilder().aggregation(aggregation);
        return this;
    }

    public NoopSearchRequestBuilder highlighter(HighlightBuilder highlightBuilder) {
        sourceBuilder().highlighter(highlightBuilder);
        return this;
    }

    /**
     * Delegates to {@link org.elasticsearch.search.builder.SearchSourceBuilder#suggest(SuggestBuilder)}
     */
    public NoopSearchRequestBuilder suggest(SuggestBuilder suggestBuilder) {
        sourceBuilder().suggest(suggestBuilder);
        return this;
    }

    /**
     * Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
     * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
     *
     * @param rescorer rescorer configuration
     * @return this for chaining
     */
    public NoopSearchRequestBuilder setRescorer(RescoreBuilder<?> rescorer) {
        sourceBuilder().clearRescorers();
        return addRescorer(rescorer);
    }

    /**
     * Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
     * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
     *
     * @param rescorer rescorer configuration
     * @param window   rescore window
     * @return this for chaining
     */
    public NoopSearchRequestBuilder setRescorer(RescoreBuilder rescorer, int window) {
        sourceBuilder().clearRescorers();
        return addRescorer(rescorer.windowSize(window));
    }

    /**
     * Adds a new rescorer.
     *
     * @param rescorer rescorer configuration
     * @return this for chaining
     */
    public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer) {
        sourceBuilder().addRescorer(rescorer);
        return this;
    }

    /**
     * Adds a new rescorer.
     *
     * @param rescorer rescorer configuration
     * @param window   rescore window
     * @return this for chaining
     */
    public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer, int window) {
        sourceBuilder().addRescorer(rescorer.windowSize(window));
        return this;
    }

    /**
     * Clears all rescorers from the builder.
     *
     * @return this for chaining
     */
    public NoopSearchRequestBuilder clearRescorers() {
        sourceBuilder().clearRescorers();
        return this;
    }

    /**
     * Sets the source of the request as a SearchSourceBuilder.
     */
    public NoopSearchRequestBuilder setSource(SearchSourceBuilder source) {
        request.source(source);
        return this;
    }

    /**
     * Sets if this request should use the request cache or not, assuming that it can (for
     * example, if "now" is used, it will never be cached). By default (not set, or null)
     * it will fall back to the index level setting that controls whether the request cache is enabled.
     */
    public NoopSearchRequestBuilder setRequestCache(Boolean requestCache) {
        request.requestCache(requestCache);
        return this;
    }

    /**
     * Should the query be profiled. Defaults to <code>false</code>
     */
    public NoopSearchRequestBuilder setProfile(boolean profile) {
        sourceBuilder().profile(profile);
        return this;
    }

    @Override
    public String toString() {
        if (request.source() != null) {
            return request.source().toString();
        }
        return new SearchSourceBuilder().toString();
    }

    private SearchSourceBuilder sourceBuilder() {
        if (request.source() == null) {
            request.source(new SearchSourceBuilder());
        }
        return request.source();
    }
}
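
Since every setter above merely delegates into the wrapped `SearchRequest`/`SearchSourceBuilder`, the builder is used exactly like the regular search request builder. A minimal sketch, assuming an `ElasticsearchClient client` and the `execute()`/`actionGet()` helpers inherited from `ActionRequestBuilder`:

```java
// Build and run a no-op search; the query is serialized but never executed against an index.
SearchResponse response = new NoopSearchRequestBuilder(client, NoopSearchAction.INSTANCE)
    .setIndices("index")
    .setQuery(QueryBuilders.matchAllQuery())   // see org.elasticsearch.index.query.QueryBuilders
    .setSize(10)
    .execute()
    .actionGet();
```
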
@ -0,0 +1,54 @@

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestStatusToXContentListener;

import java.io.IOException;

import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;

public class RestNoopSearchAction extends BaseRestHandler {

    @Inject
    public RestNoopSearchAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(GET, "/_noop_search", this);
        controller.registerHandler(POST, "/_noop_search", this);
        controller.registerHandler(GET, "/{index}/_noop_search", this);
        controller.registerHandler(POST, "/{index}/_noop_search", this);
        controller.registerHandler(GET, "/{index}/{type}/_noop_search", this);
        controller.registerHandler(POST, "/{index}/{type}/_noop_search", this);
    }

    @Override
    public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException {
        SearchRequest searchRequest = new SearchRequest();
        client.execute(NoopSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel));
    }
}
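
With the plugin installed, the handler above is reachable over plain HTTP on any of the six registered routes. A sketch using the low-level `RestClient` touched later in this commit; the host, port, and builder call are the usual client setup and are assumed here:

```java
// The request body and all parameters are ignored; the handler always runs a noop search.
try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
    Response response = restClient.performRequest("GET", "/_noop_search");
    System.out.println(response.getRequestLine() + " -> " + response.getStatusLine());
}
```
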
@ -0,0 +1,58 @@

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Collections;

public class TransportNoopSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
    @Inject
    public TransportNoopSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters
            actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, NoopSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
            SearchRequest::new);
    }

    @Override
    protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
        listener.onResponse(new SearchResponse(new InternalSearchResponse(
            new InternalSearchHits(
                new InternalSearchHit[0], 0L, 0.0f),
            new InternalAggregations(Collections.emptyList()),
            new Suggest(Collections.emptyList()),
            new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0]));
    }
}
@ -32,7 +32,7 @@ import java.util.Objects;
 * Holds an elasticsearch response. It wraps the {@link HttpResponse} returned and associates it with
 * its corresponding {@link RequestLine} and {@link HttpHost}.
 */
public final class Response {
public class Response {

    private final RequestLine requestLine;
    private final HttpHost host;
@ -33,7 +33,7 @@ public final class ResponseException extends IOException {

    private Response response;

    ResponseException(Response response) throws IOException {
    public ResponseException(Response response) throws IOException {
        super(buildMessage(response));
        this.response = response;
    }
@ -65,19 +65,23 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Client that connects to an elasticsearch cluster through http.
 * Client that connects to an Elasticsearch cluster through HTTP.
 * <p>
 * Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults.
 * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
 * by calling {@link #setHosts(HttpHost...)}.
 * <p>
 * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When
 * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
 * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
 * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that
 * deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
 *
 * <p>
 * Requests can be either synchronous or asynchronous. The asynchronous variants all end with {@code Async}.
 * <p>
 * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format.
 */
public final class RestClient implements Closeable {
public class RestClient implements Closeable {

    private static final Log logger = LogFactory.getLog(RestClient.class);
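
The hunks that follow rename every listener-based overload from `performRequest` to `performRequestAsync`, so the blocking and non-blocking halves of the API no longer share a name. A usage sketch of both halves under the renamed API (client construction omitted):

```java
// synchronous: blocks until a response arrives or all retries are exhausted
Response response = restClient.performRequest("GET", "/");

// asynchronous: returns immediately; the listener fires on the client's internal threads
restClient.performRequestAsync("GET", "/", new ResponseListener() {
    @Override
    public void onSuccess(Response response) {
        // handle the response
    }

    @Override
    public void onFailure(Exception exception) {
        // round-robin retries across hosts have already been attempted at this point
    }
});
```
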
@ -124,41 +128,41 @@ public final class RestClient implements Closeable {
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
     * and request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
        return performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
        return performRequest(method, endpoint, params, (HttpEntity)null, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, Header...)}
     * which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
     * will be used to consume the response body.
@ -168,10 +172,10 @@ public final class RestClient implements Closeable {
     * @param params the query_string parameters
     * @param entity the body of the request, null if not applicable
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, Header... headers) throws IOException {
@ -180,7 +184,7 @@ public final class RestClient implements Closeable {
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Blocks until the request is completed and returns
     * Sends a request to the Elasticsearch cluster that the client points to. Blocks until the request is completed and returns
     * its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts
     * are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times
     * they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead
@ -193,37 +197,37 @@ public final class RestClient implements Closeable {
     * @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response
     *                         body gets streamed from a non-blocking HTTP connection on the client side.
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                   Header... headers) throws IOException {
        SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
        performRequest(method, endpoint, params, entity, responseConsumer, listener, headers);
        performRequestAsync(method, endpoint, params, entity, responseConsumer, listener, headers);
        return listener.get();
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
     * {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, ResponseListener responseListener, Header... headers) {
        performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
    public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) {
        performRequestAsync(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
     * {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
@ -231,15 +235,15 @@ public final class RestClient implements Closeable {
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, Map<String, String> params,
                               ResponseListener responseListener, Header... headers) {
        performRequest(method, endpoint, params, null, responseListener, headers);
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    ResponseListener responseListener, Header... headers) {
        performRequestAsync(method, endpoint, params, null, responseListener, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure.
     * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
     * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
     * which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
     * will be used to consume the response body.
     *
@ -250,14 +254,14 @@ public final class RestClient implements Closeable {
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, Map<String, String> params,
                               HttpEntity entity, ResponseListener responseListener, Header... headers) {
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, ResponseListener responseListener, Header... headers) {
        HttpAsyncResponseConsumer<HttpResponse> responseConsumer = new HeapBufferedAsyncResponseConsumer();
        performRequest(method, endpoint, params, entity, responseConsumer, responseListener, headers);
        performRequestAsync(method, endpoint, params, entity, responseConsumer, responseListener, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. The request is executed asynchronously
     * Sends a request to the Elasticsearch cluster that the client points to. The request is executed asynchronously
     * and the provided {@link ResponseListener} gets notified upon request completion or failure.
     * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
     * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
@ -273,20 +277,20 @@ public final class RestClient implements Closeable {
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, Map<String, String> params,
                               HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                               ResponseListener responseListener, Header... headers) {
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                    ResponseListener responseListener, Header... headers) {
        URI uri = buildUri(endpoint, params);
        HttpRequestBase request = createHttpRequest(method, uri, entity);
        setHeaders(request, headers);
        FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
        long startTime = System.nanoTime();
        performRequest(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
        performRequestAsync(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
    }

    private void performRequest(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
                                final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                final FailureTrackingResponseListener listener) {
    private void performRequestAsync(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
                                     final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                     final FailureTrackingResponseListener listener) {
        final HttpHost host = hosts.next();
        //we stream the request body if the entity allows for it
        HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request);
@ -340,7 +344,7 @@ public final class RestClient implements Closeable {
            } else {
                listener.trackFailure(exception);
                request.reset();
                performRequest(startTime, hosts, request, responseConsumer, listener);
                performRequestAsync(startTime, hosts, request, responseConsumer, listener);
            }
        } else {
            listener.onDefinitiveFailure(exception);
@ -226,7 +226,7 @@ public class RestClientIntegTests extends RestClientTestCase {
        for (int i = 0; i < numRequests; i++) {
            final String method = RestClientTestUtil.randomHttpMethod(getRandom());
            final int statusCode = randomStatusCode(getRandom());
            restClient.performRequest(method, "/" + statusCode, new ResponseListener() {
            restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    responses.add(new TestResponse(method, statusCode, response));
@ -42,7 +42,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 * {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the
 * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
 */
public final class Sniffer implements Closeable {
public class Sniffer implements Closeable {

    private static final Log logger = LogFactory.getLog(Sniffer.class);
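
The Javadoc above prescribes wiring that cannot be done in one expression, because the failure listener must exist before the client, while the sniffer needs the client. A sketch of the order, using the two methods named in the Javadoc; the `RestClient.builder` and `Sniffer.builder` calls are the standard builders and are assumed here:

```java
SniffOnFailureListener sniffOnFailureListener = new SniffOnFailureListener();
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
    .setFailureListener(sniffOnFailureListener)  // listener first; it is needed to build the client
    .build();
Sniffer sniffer = Sniffer.builder(restClient).build();
sniffOnFailureListener.setSniffer(sniffer);      // lazily completed, as the Javadoc requires
```
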
@ -101,7 +101,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
    public ElasticsearchException(StreamInput in) throws IOException {
        super(in.readOptionalString(), in.readException());
        readStackTrace(this, in);
        headers.putAll(in.readMapOfLists());
        headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
    }

    /**
@ -196,7 +196,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
        out.writeOptionalString(this.getMessage());
        out.writeException(this.getCause());
        writeStackTraces(this, out);
        out.writeMapOfLists(headers);
        out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
    }

    public static ElasticsearchException readException(StreamInput input, int id) throws IOException {
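
Both hunks move the header map through the generic map-of-lists serialization, with the key and value readers/writers now passed explicitly. A round-trip sketch of the new signatures; `BytesStreamOutput` and `bytes().streamInput()` are the usual stream-test helpers and are assumed here:

```java
// write side, as in ElasticsearchException#writeTo above
BytesStreamOutput out = new BytesStreamOutput();
Map<String, List<String>> headers =
    Collections.singletonMap("es.header", Collections.singletonList("value"));
out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);

// read side, as in the StreamInput constructor above
StreamInput in = out.bytes().streamInput();
Map<String, List<String>> roundTripped =
    in.readMapOfLists(StreamInput::readString, StreamInput::readString);
assert roundTripped.equals(headers);
```
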
@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -168,31 +169,35 @@ public class TransportClusterAllocationExplainAction
        if (node.getId().equals(assignedNodeId)) {
            finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
            finalExplanation = "the shard is already assigned to this node";
        } else if (hasPendingAsyncFetch &&
                shard.primary() == false &&
                shard.unassigned() &&
                shard.allocatedPostIndexCreate(indexMetaData) &&
                nodeDecision.type() != Decision.Type.YES) {
        } else if (shard.unassigned() && shard.primary() == false &&
                shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && nodeDecision.type() != Decision.Type.YES) {
            finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
                " decision and the shard's state is still being fetched";
                " decision";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (hasPendingAsyncFetch &&
                shard.unassigned() &&
                shard.allocatedPostIndexCreate(indexMetaData)) {
        } else if (shard.unassigned() && shard.primary() == false &&
                shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && hasPendingAsyncFetch) {
            finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
        } else if (shard.primary() && shard.unassigned() &&
                (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE ||
                    shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT)
                && hasPendingAsyncFetch) {
            finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
            finalExplanation = "the copy of the shard is stale, allocation ids do not match";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) {
            finalExplanation = "there is no copy of the shard available";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
            finalExplanation = "the copy of the shard is corrupt";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
            finalExplanation = "the copy of the shard cannot be read";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else {
@ -258,7 +263,7 @@ public class TransportClusterAllocationExplainAction
            Float weight = weights.get(node);
            IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
            NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
                storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()),
                storeStatus, shard.currentNodeId(), indexMetaData.inSyncAllocationIds(shard.getId()),
                allocation.hasPendingAsyncFetch());
            explanations.put(node, nodeExplanation);
        }
@ -21,11 +21,12 @@ package org.elasticsearch.action.admin.cluster.stats;

import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
@ -39,11 +40,13 @@ import org.elasticsearch.plugins.PluginInfo;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

public class ClusterStatsNodes implements ToXContent {
@ -54,6 +57,7 @@ public class ClusterStatsNodes implements ToXContent {
    private final JvmStats jvm;
    private final FsInfo.Path fs;
    private final Set<PluginInfo> plugins;
    private final NetworkTypes networkTypes;

    ClusterStatsNodes(List<ClusterStatsNodeResponse> nodeResponses) {
        this.versions = new HashSet<>();
@ -86,6 +90,7 @@ public class ClusterStatsNodes implements ToXContent {
        this.os = new OsStats(nodeInfos);
        this.process = new ProcessStats(nodeStats);
        this.jvm = new JvmStats(nodeInfos, nodeStats);
        this.networkTypes = new NetworkTypes(nodeInfos);
    }

    public Counts getCounts() {
@ -124,6 +129,7 @@ public class ClusterStatsNodes implements ToXContent {
        static final String JVM = "jvm";
        static final String FS = "fs";
        static final String PLUGINS = "plugins";
        static final String NETWORK_TYPES = "network_types";
    }

    @Override
@ -158,6 +164,10 @@ public class ClusterStatsNodes implements ToXContent {
            pluginInfo.toXContent(builder, params);
        }
        builder.endArray();

        builder.startObject(Fields.NETWORK_TYPES);
        networkTypes.toXContent(builder, params);
        builder.endObject();
        return builder;
    }
@ -506,4 +516,43 @@ public class ClusterStatsNodes implements ToXContent {
            return vmVersion.hashCode();
        }
    }

    static class NetworkTypes implements ToXContent {

        private final Map<String, AtomicInteger> transportTypes;
        private final Map<String, AtomicInteger> httpTypes;

        private NetworkTypes(final List<NodeInfo> nodeInfos) {
            final Map<String, AtomicInteger> transportTypes = new HashMap<>();
            final Map<String, AtomicInteger> httpTypes = new HashMap<>();
            for (final NodeInfo nodeInfo : nodeInfos) {
                final Settings settings = nodeInfo.getSettings();
                final String transportType =
                    settings.get(NetworkModule.TRANSPORT_TYPE_KEY, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
                final String httpType =
                    settings.get(NetworkModule.HTTP_TYPE_KEY, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
                transportTypes.computeIfAbsent(transportType, k -> new AtomicInteger()).incrementAndGet();
                httpTypes.computeIfAbsent(httpType, k -> new AtomicInteger()).incrementAndGet();
            }
            this.transportTypes = Collections.unmodifiableMap(transportTypes);
            this.httpTypes = Collections.unmodifiableMap(httpTypes);
        }

        @Override
        public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
            builder.startObject("transport_types");
            for (final Map.Entry<String, AtomicInteger> entry : transportTypes.entrySet()) {
                builder.field(entry.getKey(), entry.getValue().get());
            }
            builder.endObject();
            builder.startObject("http_types");
            for (final Map.Entry<String, AtomicInteger> entry : httpTypes.entrySet()) {
                builder.field(entry.getKey(), entry.getValue().get());
            }
            builder.endObject();
            return builder;
        }

    }

}
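
The `computeIfAbsent` plus `AtomicInteger` pattern in the constructor above is a compact mutable counter. A self-contained sketch of the same idiom, with illustrative values:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class TypeCounting {
    public static void main(String[] args) {
        Map<String, AtomicInteger> counts = new HashMap<>();
        for (String type : new String[] {"netty4", "netty4", "local"}) {
            // create the counter on first sight of a key, then increment it
            counts.computeIfAbsent(type, k -> new AtomicInteger()).incrementAndGet();
        }
        System.out.println(counts); // e.g. {netty4=2, local=1}
    }
}
```
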
@ -91,7 +91,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta

    @Override
    protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false, false);
        NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
        List<ShardStats> shardsStats = new ArrayList<>();
        for (IndexService indexService : indicesService) {
@ -94,14 +94,13 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
        logger.trace("using cluster state version [{}] to determine shards", state.version());
        // collect relevant shard ids of the requested indices for fetching store infos
        for (String index : concreteIndices) {
            IndexMetaData indexMetaData = state.metaData().index(index);
            IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
            if (indexShardRoutingTables == null) {
                continue;
            }
            for (IndexShardRoutingTable routing : indexShardRoutingTables) {
                final int shardId = routing.shardId().id();
                ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing, indexMetaData);
                ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing);
                if (request.shardStatuses().contains(shardHealth.getStatus())) {
                    shardIdsToFetch.add(routing.shardId());
                }
@ -188,7 +188,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
                    "Dynamic mappings are not available on the node that holds the primary yet");
            }
        }
        final boolean created = indexShard.index(operation);
        indexShard.index(operation);

        // update the version on request so it will happen on the replicas
        final long version = operation.version();
@ -197,7 +197,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind

        assert request.versionType().validateVersionForWrites(request.version());

        IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), created);
        IndexResponse response = new IndexResponse(shardId, request.type(), request.id(), request.version(), operation.isCreated());
        return new WriteResult<>(response, operation.getTranslogLocation());
    }
}
@ -25,11 +25,14 @@ import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@ -40,10 +43,13 @@ import java.util.ArrayList;
|
|||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class ReplicationOperation<
|
||||
Request extends ReplicationRequest<Request>,
|
||||
|
@ -65,7 +71,7 @@ public class ReplicationOperation<
|
|||
* operations and the primary finishes.</li>
|
||||
* </ul>
|
||||
*/
|
||||
private final AtomicInteger pendingShards = new AtomicInteger();
|
||||
private final AtomicInteger pendingActions = new AtomicInteger();
|
||||
private final AtomicInteger successfulShards = new AtomicInteger();
|
||||
private final boolean executeOnReplicas;
|
||||
private final Primary<Request, ReplicaRequest, PrimaryResultT> primary;
|
||||
|
@ -102,7 +108,7 @@ public class ReplicationOperation<
|
|||
}
|
||||
|
||||
totalShards.incrementAndGet();
|
||||
pendingShards.incrementAndGet();
|
||||
pendingActions.incrementAndGet();
|
||||
primaryResult = primary.perform(request);
|
||||
final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
|
||||
assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
|
||||
|
@ -110,19 +116,45 @@ public class ReplicationOperation<
|
|||
logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
|
||||
}
|
||||
|
||||
performOnReplicas(primaryId, replicaRequest);
|
||||
// we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
|
||||
// we have to make sure that every operation indexed into the primary after recovery start will also be replicated
|
||||
// to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
|
||||
ClusterState clusterState = clusterStateSupplier.get();
|
||||
final List<ShardRouting> shards = getShards(primaryId, clusterState);
|
||||
Set<String> inSyncAllocationIds = getInSyncAllocationIds(primaryId, clusterState);
|
||||
|
||||
markUnavailableShardsAsStale(replicaRequest, inSyncAllocationIds, shards);
|
||||
|
||||
performOnReplicas(replicaRequest, shards);
|
||||
|
||||
successfulShards.incrementAndGet();
|
||||
decPendingAndFinishIfNeeded();
|
||||
}
|
||||
|
||||
private void performOnReplicas(ShardId primaryId, ReplicaRequest replicaRequest) {
|
||||
// we have to get a new state after successfully indexing into the primary in order to honour recovery semantics.
|
||||
// we have to make sure that every operation indexed into the primary after recovery start will also be replicated
|
||||
// to the recovery target. If we use an old cluster state, we may miss a relocation that has started since then.
|
||||
// If the index gets deleted after primary operation, we skip replication
|
||||
final List<ShardRouting> shards = getShards(primaryId, clusterStateSupplier.get());
|
||||
private void markUnavailableShardsAsStale(ReplicaRequest replicaRequest, Set<String> inSyncAllocationIds, List<ShardRouting> shards) {
|
||||
if (inSyncAllocationIds.isEmpty() == false && shards.isEmpty() == false) {
|
||||
Set<String> availableAllocationIds = shards.stream()
|
||||
.map(ShardRouting::allocationId)
|
||||
.filter(Objects::nonNull)
|
||||
.map(AllocationId::getId)
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
// if inSyncAllocationIds contains allocation ids of shards that don't exist in RoutingTable, mark copies as stale
|
||||
for (String allocationId : Sets.difference(inSyncAllocationIds, availableAllocationIds)) {
|
||||
// mark copy as stale
|
||||
pendingActions.incrementAndGet();
|
||||
replicasProxy.markShardCopyAsStale(replicaRequest.shardId(), allocationId, replicaRequest.primaryTerm(),
|
||||
ReplicationOperation.this::decPendingAndFinishIfNeeded,
|
||||
ReplicationOperation.this::onPrimaryDemoted,
|
||||
throwable -> decPendingAndFinishIfNeeded()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void performOnReplicas(ReplicaRequest replicaRequest, List<ShardRouting> shards) {
|
||||
final String localNodeId = primary.routingEntry().currentNodeId();
|
||||
// If the index gets deleted after primary operation, we skip replication
|
||||
for (final ShardRouting shard : shards) {
|
||||
if (executeOnReplicas == false || shard.unassigned()) {
|
||||
if (shard.primary() == false) {
|
||||
|
@ -147,7 +179,7 @@ public class ReplicationOperation<
|
|||
}
|
||||
|
||||
totalShards.incrementAndGet();
|
||||
pendingShards.incrementAndGet();
|
||||
pendingActions.incrementAndGet();
|
||||
replicasProxy.performOn(shard, replicaRequest, new ActionListener<TransportResponse.Empty>() {
|
||||
@Override
|
||||
public void onResponse(TransportResponse.Empty empty) {
|
||||
|
@ -222,6 +254,14 @@ public class ReplicationOperation<
|
|||
}
|
||||
}
|
||||
|
||||
protected Set<String> getInSyncAllocationIds(ShardId shardId, ClusterState clusterState) {
|
||||
IndexMetaData indexMetaData = clusterState.metaData().index(shardId.getIndex());
|
||||
if (indexMetaData != null) {
|
||||
return indexMetaData.inSyncAllocationIds(shardId.id());
|
||||
}
|
||||
return Collections.emptySet();
|
||||
}
|
||||
|
||||
protected List<ShardRouting> getShards(ShardId shardId, ClusterState state) {
|
||||
// can be null if the index is deleted / closed on us..
|
||||
final IndexShardRoutingTable shardRoutingTable = state.getRoutingTable().shardRoutingTableOrNull(shardId);
|
||||
|
@ -230,8 +270,8 @@ public class ReplicationOperation<
|
|||
}
|
||||
|
||||
private void decPendingAndFinishIfNeeded() {
|
||||
assert pendingShards.get() > 0;
|
||||
if (pendingShards.decrementAndGet() == 0) {
|
||||
assert pendingActions.get() > 0;
|
||||
if (pendingActions.decrementAndGet() == 0) {
|
||||
finish();
|
||||
}
|
||||
}
|
||||
|
@ -337,6 +377,20 @@ public class ReplicationOperation<
|
|||
*/
|
||||
void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
|
||||
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure);
|
||||
|
||||
/**
|
||||
* Marks shard copy as stale, removing its allocation id from the set of in-sync allocation ids.
|
||||
*
|
||||
* @param shardId shard id
|
||||
* @param allocationId allocation id to remove from the set of in-sync allocation ids
|
||||
* @param primaryTerm the primary term of the primary shard when requesting the failure
|
||||
* @param onSuccess a callback to call when the allocation id has been successfully removed from the in-sync set.
|
||||
* @param onPrimaryDemoted a callback to call when the request failed because the current primary was already demoted
|
||||
* by the master.
|
||||
* @param onIgnoredFailure a callback to call when the request failed, but the failure can be safely ignored.
|
||||
*/
|
||||
void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
|
||||
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure);
|
||||
}
|
||||
|
||||
public static class RetryOnPrimaryException extends ElasticsearchException {
|
||||
|
|
|
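The hunk above pulls the stale-copy bookkeeping out of `performOnReplicas`: any allocation id that is still in the in-sync set but has no live routing entry is reported via `markShardCopyAsStale`, and each such report bumps `pendingActions` so the operation only finishes once the master has acknowledged it. A minimal, self-contained sketch of that set arithmetic (the `ShardCopy` record and the sample ids are hypothetical stand-ins, not part of the commit):

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

public class StaleCopySketch {
    // Hypothetical stand-in for ShardRouting: only the allocation id matters here.
    record ShardCopy(String allocationId) {}

    /** Returns the in-sync allocation ids that have no live routing entry. */
    static Set<String> staleAllocationIds(Set<String> inSyncAllocationIds, List<ShardCopy> shards) {
        Set<String> available = shards.stream()
            .map(ShardCopy::allocationId)
            .filter(Objects::nonNull)
            .collect(Collectors.toSet());
        Set<String> stale = new HashSet<>(inSyncAllocationIds); // plays the role of Sets.difference in the patch
        stale.removeAll(available);
        return stale;
    }

    public static void main(String[] args) {
        Set<String> inSync = new HashSet<>(Arrays.asList("a1", "a2", "a3"));
        List<ShardCopy> live = Arrays.asList(new ShardCopy("a1"), new ShardCopy("a3"));
        System.out.println(staleAllocationIds(inSync, live)); // [a2] would be marked stale
    }
}
```
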
@@ -867,29 +867,39 @@ public abstract class TransportReplicationAction<

@Override
public void failShard(ShardRouting replica, long primaryTerm, String message, Exception exception,
Runnable onSuccess, Consumer<Exception> onFailure, Consumer<Exception> onIgnoredFailure) {
shardStateAction.remoteShardFailed(
replica, primaryTerm, message, exception,
new ShardStateAction.Listener() {
@Override
public void onSuccess() {
onSuccess.run();
}
Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, message, exception,
createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
}

@Override
public void onFailure(Exception shardFailedError) {
if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
onFailure.accept(shardFailedError);
} else {
// these can occur if the node is shutting down and are okay
// any other exception here is not expected and merits investigation
assert shardFailedError instanceof TransportException ||
shardFailedError instanceof NodeClosedException : shardFailedError;
onIgnoredFailure.accept(shardFailedError);
}
@Override
public void markShardCopyAsStale(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, "mark copy as stale", null,
createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
}

private ShardStateAction.Listener createListener(final Runnable onSuccess, final Consumer<Exception> onPrimaryDemoted,
final Consumer<Exception> onIgnoredFailure) {
return new ShardStateAction.Listener() {
@Override
public void onSuccess() {
onSuccess.run();
}

@Override
public void onFailure(Exception shardFailedError) {
if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
onPrimaryDemoted.accept(shardFailedError);
} else {
// these can occur if the node is shutting down and are okay
// any other exception here is not expected and merits investigation
assert shardFailedError instanceof TransportException ||
shardFailedError instanceof NodeClosedException : shardFailedError;
onIgnoredFailure.accept(shardFailedError);
}
}
);
};
}
}

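Both `failShard` and the new `markShardCopyAsStale` now funnel through a single `createListener` helper that maps `NoLongerPrimaryShardException` to the demotion callback and treats transport or node-closed errors as ignorable. A hedged sketch of that dispatch under those assumptions (the exception types here are simplified local stand-ins for the real Elasticsearch classes):

```java
import java.util.function.Consumer;

public class ListenerDispatchSketch {
    // Hypothetical stand-ins for the real exception types.
    static class NoLongerPrimaryShardException extends RuntimeException {}
    static class TransportException extends RuntimeException {}

    static void onFailure(Exception e, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
        if (e instanceof NoLongerPrimaryShardException) {
            // the master no longer considers this node the primary: surface as a demotion
            onPrimaryDemoted.accept(e);
        } else {
            // e.g. the node is shutting down; safe to ignore, anything else merits investigation
            onIgnoredFailure.accept(e);
        }
    }

    public static void main(String[] args) {
        onFailure(new NoLongerPrimaryShardException(),
            e -> System.out.println("demoted: " + e.getClass().getSimpleName()),
            e -> System.out.println("ignored: " + e.getClass().getSimpleName()));
    }
}
```
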
@@ -39,6 +39,9 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

/**

@@ -128,29 +131,38 @@ public abstract class TransportWriteAction<
* We call this before replication because this might wait for a refresh and that can take a while. This way we wait for the
* refresh in parallel on the primary and on the replica.
*/
postWriteActions(indexShard, request, location, this, logger);
new AsyncAfterWriteAction(indexShard, request, location, this, logger).run();
}

@Override
public synchronized void respond(ActionListener<Response> listener) {
this.listener = listener;
respondIfPossible();
respondIfPossible(null);
}

/**
* Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}.
*/
protected void respondIfPossible() {
protected void respondIfPossible(Exception ex) {
if (finishedAsyncActions && listener != null) {
super.respond(listener);
if (ex == null) {
super.respond(listener);
} else {
listener.onFailure(ex);
}
}
}

public synchronized void onFailure(Exception exception) {
finishedAsyncActions = true;
respondIfPossible(exception);
}

@Override
public synchronized void respondAfterAsyncAction(boolean forcedRefresh) {
public synchronized void onSuccess(boolean forcedRefresh) {
finalResponse.setForcedRefresh(forcedRefresh);
finishedAsyncActions = true;
respondIfPossible();
respondIfPossible(null);
}
}

@@ -162,68 +174,144 @@ public abstract class TransportWriteAction<
private ActionListener<TransportResponse.Empty> listener;

public WriteReplicaResult(IndexShard indexShard, ReplicatedWriteRequest<?> request, Translog.Location location) {
postWriteActions(indexShard, request, location, this, logger);
new AsyncAfterWriteAction(indexShard, request, location, this, logger).run();
}

@Override
public void respond(ActionListener<TransportResponse.Empty> listener) {
this.listener = listener;
respondIfPossible();
respondIfPossible(null);
}

/**
* Respond if the refresh has occurred and the listener is ready. Always called while synchronized on {@code this}.
*/
protected void respondIfPossible() {
protected void respondIfPossible(Exception ex) {
if (finishedAsyncActions && listener != null) {
super.respond(listener);
if (ex == null) {
super.respond(listener);
} else {
listener.onFailure(ex);
}
}
}

@Override
public synchronized void respondAfterAsyncAction(boolean forcedRefresh) {
public void onFailure(Exception ex) {
finishedAsyncActions = true;
respondIfPossible();
respondIfPossible(ex);
}

@Override
public synchronized void onSuccess(boolean forcedRefresh) {
finishedAsyncActions = true;
respondIfPossible(null);
}
}

/**
* callback used by {@link AsyncAfterWriteAction} to notify that all post
* process actions have been executed
*/
private interface RespondingWriteResult {
void respondAfterAsyncAction(boolean forcedRefresh);
/**
* Called on successful processing of all post write actions
* @param forcedRefresh <code>true</code> iff this write has caused a refresh
*/
void onSuccess(boolean forcedRefresh);

/**
* Called on failure if a post action failed.
*/
void onFailure(Exception ex);
}

static void postWriteActions(final IndexShard indexShard,
final WriteRequest<?> request,
@Nullable final Translog.Location location,
final RespondingWriteResult respond,
final ESLogger logger) {
boolean pendingOps = false;
boolean immediateRefresh = false;
switch (request.getRefreshPolicy()) {
case IMMEDIATE:
indexShard.refresh("refresh_flag_index");
immediateRefresh = true;
break;
case WAIT_UNTIL:
if (location != null) {
pendingOps = true;
indexShard.addRefreshListener(location, forcedRefresh -> {
if (forcedRefresh) {
logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
}
respond.respondAfterAsyncAction(forcedRefresh);
});
/**
* This class encapsulates post write actions like async waits for
* translog syncs or waiting for a refresh to happen making the write operation
* visible.
*/
static final class AsyncAfterWriteAction {
private final Location location;
private final boolean waitUntilRefresh;
private final boolean sync;
private final AtomicInteger pendingOps = new AtomicInteger(1);
private final AtomicBoolean refreshed = new AtomicBoolean(false);
private final AtomicReference<Exception> syncFailure = new AtomicReference<>(null);
private final RespondingWriteResult respond;
private final IndexShard indexShard;
private final WriteRequest<?> request;
private final ESLogger logger;

AsyncAfterWriteAction(final IndexShard indexShard,
final WriteRequest<?> request,
@Nullable final Translog.Location location,
final RespondingWriteResult respond,
final ESLogger logger) {
this.indexShard = indexShard;
this.request = request;
boolean waitUntilRefresh = false;
switch (request.getRefreshPolicy()) {
case IMMEDIATE:
indexShard.refresh("refresh_flag_index");
refreshed.set(true);
break;
case WAIT_UNTIL:
if (location != null) {
waitUntilRefresh = true;
pendingOps.incrementAndGet();
}
break;
case NONE:
break;
default:
throw new IllegalArgumentException("unknown refresh policy: " + request.getRefreshPolicy());
}
this.waitUntilRefresh = waitUntilRefresh;
this.respond = respond;
this.location = location;
if ((sync = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null)) {
pendingOps.incrementAndGet();
}
this.logger = logger;
assert pendingOps.get() >= 0 && pendingOps.get() <= 3 : "pendingOpts was: " + pendingOps.get();
}

/** calls the response listener if all pending operations have returned otherwise it just decrements the pending opts counter.*/
private void maybeFinish() {
final int numPending = pendingOps.decrementAndGet();
if (numPending == 0) {
if (syncFailure.get() != null) {
respond.onFailure(syncFailure.get());
} else {
respond.onSuccess(refreshed.get());
}
break;
case NONE:
break;
}
assert numPending >= 0 && numPending <= 2: "numPending must either 2, 1 or 0 but was " + numPending ;
}
boolean fsyncTranslog = indexShard.getTranslogDurability() == Translog.Durability.REQUEST && location != null;
if (fsyncTranslog) {
indexShard.sync(location);
}
indexShard.maybeFlush();
if (pendingOps == false) {
respond.respondAfterAsyncAction(immediateRefresh);

void run() {
// we either respond immediately ie. if we we don't fsync per request or wait for refresh
// OR we got an pass async operations on and wait for them to return to respond.
indexShard.maybeFlush();
maybeFinish(); // decrement the pendingOpts by one, if there is nothing else to do we just respond with success.
if (waitUntilRefresh) {
assert pendingOps.get() > 0;
indexShard.addRefreshListener(location, forcedRefresh -> {
if (forcedRefresh) {
logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request);
}
refreshed.set(forcedRefresh);
maybeFinish();
});
}
if (sync) {
assert pendingOps.get() > 0;
indexShard.sync(location, (ex) -> {
syncFailure.set(ex);
maybeFinish();
});
}
}
}
}

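The rewritten post-write path replaces the old ad-hoc booleans with a small countdown: `pendingOps` starts at 1 for `run()` itself, gains one slot per pending async action (refresh wait, translog fsync), and the final decrement to zero delivers either success or the recorded sync failure exactly once. A minimal sketch of that pattern under those assumptions (the callbacks are simulated inline and all names are illustrative):

```java
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class CountdownSketch {
    private final AtomicInteger pendingOps = new AtomicInteger(1); // 1 slot for run() itself
    private final AtomicReference<Exception> failure = new AtomicReference<>(null);

    /** The caller that brings the count to zero delivers the final result exactly once. */
    private void maybeFinish() {
        if (pendingOps.decrementAndGet() == 0) {
            if (failure.get() != null) {
                System.out.println("onFailure: " + failure.get().getMessage());
            } else {
                System.out.println("onSuccess");
            }
        }
    }

    void run(boolean waitForRefresh, boolean fsync) {
        if (waitForRefresh) pendingOps.incrementAndGet();
        if (fsync) pendingOps.incrementAndGet();
        maybeFinish(); // releases run()'s own slot; finishes immediately if nothing is pending
        if (waitForRefresh) maybeFinish(); // stands in for the refresh listener firing
        if (fsync) maybeFinish();          // stands in for the translog sync callback firing
    }

    public static void main(String[] args) {
        new CountdownSketch().run(true, true); // prints onSuccess once, after both callbacks
    }
}
```
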
@@ -292,7 +292,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
sb.append(TAB).append(TAB).append(shard).append(": ");
sb.append("p_term [").append(indexMetaData.primaryTerm(shard)).append("], ");
sb.append("a_ids ").append(indexMetaData.activeAllocationIds(shard)).append("\n");
sb.append("a_ids ").append(indexMetaData.inSyncAllocationIds(shard)).append("\n");
}
}
sb.append(blocks().prettyPrint());

@@ -501,8 +501,8 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
}
builder.endObject();

builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
builder.startObject(IndexMetaData.KEY_IN_SYNC_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getInSyncAllocationIds()) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);

@@ -127,29 +127,32 @@ public class ShardStateAction extends AbstractComponent {
}

/**
* Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node.
* Send a shard failed request to the master node to update the cluster state with the failure of a shard on another node. This means
* that the shard should be failed because a write made it into the primary but was not replicated to this shard copy. If the shard
* does not exist anymore but still has an entry in the in-sync set, remove its allocation id from the in-sync set.
*
* @param shardRouting the shard to fail
* @param primaryTerm the primary term associated with the primary shard that is failing the shard.
* @param shardId shard id of the shard to fail
* @param allocationId allocation id of the shard to fail
* @param primaryTerm the primary term associated with the primary shard that is failing the shard. Must be strictly positive.
* @param message the reason for the failure
* @param failure the underlying cause of the failure
* @param listener callback upon completion of the request
*/
public void remoteShardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
assert primaryTerm > 0L : "primary term should be strictly positive";
shardFailed(shardRouting, primaryTerm, message, failure, listener);
shardFailed(shardId, allocationId, primaryTerm, message, failure, listener);
}

/**
* Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
*/
public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) {
shardFailed(shardRouting, 0L, message, failure, listener);
shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener);
}

private void shardFailed(final ShardRouting shardRouting, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), primaryTerm, message, failure);
ShardEntry shardEntry = new ShardEntry(shardId, allocationId, primaryTerm, message, failure);
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener);
}

@@ -248,16 +251,23 @@ public class ShardStateAction extends AbstractComponent {
BatchResult.Builder<ShardEntry> batchResultBuilder = BatchResult.builder();
List<ShardEntry> tasksToBeApplied = new ArrayList<>();
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>();
Set<ShardRouting> seenShardRoutings = new HashSet<>(); // to prevent duplicates
List<FailedRerouteAllocation.StaleShard> staleShardsToBeApplied = new ArrayList<>();

for (ShardEntry task : tasks) {
IndexMetaData indexMetaData = currentState.metaData().index(task.shardId.getIndex());
if (indexMetaData == null) {
// tasks that correspond to non-existent shards are marked as successful
// tasks that correspond to non-existent indices are marked as successful
logger.debug("{} ignoring shard failed task [{}] (unknown index {})", task.shardId, task, task.shardId.getIndex());
batchResultBuilder.success(task);
} else {
// non-local requests
// The primary term is 0 if the shard failed itself. It is > 0 if a write was done on a primary but was failed to be
// replicated to the shard copy with the provided allocation id. In case where the shard failed itself, it's ok to just
// remove the corresponding routing entry from the routing table. In case where a write could not be replicated,
// however, it is important to ensure that the shard copy with the missing write is considered as stale from that point
// on, which is implemented by removing the allocation id of the shard copy from the in-sync allocations set.
// We check here that the primary to which the write happened was not already failed in an earlier cluster state update.
// This prevents situations where a new primary has already been selected and replication failures from an old stale
// primary unnecessarily fail currently active shards.
if (task.primaryTerm > 0) {
long currentPrimaryTerm = indexMetaData.primaryTerm(task.shardId.id());
if (currentPrimaryTerm != task.primaryTerm) {

@@ -274,28 +284,32 @@ public class ShardStateAction extends AbstractComponent {

ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId);
if (matched == null) {
// tasks that correspond to non-existent shards are marked as successful
logger.debug("{} ignoring shard failed task [{}] (shard does not exist anymore)", task.shardId, task);
batchResultBuilder.success(task);
} else {
// remove duplicate actions as allocation service expects a clean list without duplicates
if (seenShardRoutings.contains(matched)) {
logger.trace("{} ignoring shard failed task [{}] (already scheduled to fail {})", task.shardId, task, matched);
Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(task.shardId.id());
// mark shard copies without routing entries that are in in-sync allocations set only as stale if the reason why
// they were failed is because a write made it into the primary but not to this copy (which corresponds to
// the check "primaryTerm > 0").
if (task.primaryTerm > 0 && inSyncAllocationIds.contains(task.allocationId)) {
logger.debug("{} marking shard {} as stale (shard failed task: [{}])", task.shardId, task.allocationId, task);
tasksToBeApplied.add(task);
staleShardsToBeApplied.add(new FailedRerouteAllocation.StaleShard(task.shardId, task.allocationId));
} else {
logger.debug("{} failing shard {} (shard failed task: [{}])", task.shardId, matched, task);
tasksToBeApplied.add(task);
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(matched, task.message, task.failure));
seenShardRoutings.add(matched);
// tasks that correspond to non-existent shards are marked as successful
logger.debug("{} ignoring shard failed task [{}] (shard does not exist anymore)", task.shardId, task);
batchResultBuilder.success(task);
}
} else {
// failing a shard also possibly marks it as stale (see IndexMetaDataUpdater)
logger.debug("{} failing shard {} (shard failed task: [{}])", task.shardId, matched, task);
tasksToBeApplied.add(task);
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(matched, task.message, task.failure));
}
}
}
assert tasksToBeApplied.size() >= shardRoutingsToBeApplied.size();
assert tasksToBeApplied.size() == shardRoutingsToBeApplied.size() + staleShardsToBeApplied.size();

ClusterState maybeUpdatedState = currentState;
try {
RoutingAllocation.Result result = applyFailedShards(currentState, shardRoutingsToBeApplied);
RoutingAllocation.Result result = applyFailedShards(currentState, shardRoutingsToBeApplied, staleShardsToBeApplied);
if (result.changed()) {
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
}

@@ -311,8 +325,9 @@ public class ShardStateAction extends AbstractComponent {
}

// visible for testing
RoutingAllocation.Result applyFailedShards(ClusterState currentState, List<FailedRerouteAllocation.FailedShard> failedShards) {
return allocationService.applyFailedShards(currentState, failedShards);
RoutingAllocation.Result applyFailedShards(ClusterState currentState, List<FailedRerouteAllocation.FailedShard> failedShards,
List<FailedRerouteAllocation.StaleShard> staleShards) {
return allocationService.applyFailedShards(currentState, failedShards, staleShards);
}

@Override

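The batched shard-failed executor above now splits incoming tasks three ways: unknown index means the task trivially succeeds, a matching routing entry means the shard is failed, and a missing routing entry whose allocation id is still in the in-sync set with `primaryTerm > 0` means the copy is marked stale. A hedged sketch of just that classification, with the cluster-state types reduced to booleans and sets for illustration:

```java
import java.util.Set;

public class ShardFailedClassifierSketch {
    enum Decision { IGNORE_UNKNOWN_INDEX, IGNORE_NONEXISTENT_SHARD, MARK_STALE, FAIL_SHARD }

    static Decision classify(boolean indexExists, boolean routingEntryExists,
                             long taskPrimaryTerm, Set<String> inSyncIds, String allocationId) {
        if (!indexExists) {
            return Decision.IGNORE_UNKNOWN_INDEX;
        }
        if (!routingEntryExists) {
            // only a replication failure (primaryTerm > 0) for an in-sync copy demands staleness
            return (taskPrimaryTerm > 0 && inSyncIds.contains(allocationId))
                ? Decision.MARK_STALE : Decision.IGNORE_NONEXISTENT_SHARD;
        }
        return Decision.FAIL_SHARD;
    }

    public static void main(String[] args) {
        System.out.println(classify(true, false, 2L, Set.of("a2"), "a2")); // MARK_STALE
        System.out.println(classify(true, true, 0L, Set.of(), "a1"));      // FAIL_SHARD
    }
}
```
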
@@ -54,7 +54,7 @@ public final class ClusterIndexHealth implements Iterable<ClusterShardHealth>, W

for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
int shardId = shardRoutingTable.shardId().id();
shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable, indexMetaData));
shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable));
}

// update the index status

@@ -19,12 +19,11 @@

package org.elasticsearch.cluster.health;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.UnassignedInfo.Reason;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

@@ -41,7 +40,7 @@ public final class ClusterShardHealth implements Writeable {
private final int unassignedShards;
private final boolean primaryActive;

public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable, final IndexMetaData indexMetaData) {
public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable) {
this.shardId = shardId;
int computeActiveShards = 0;
int computeRelocatingShards = 0;

@@ -69,7 +68,7 @@ public final class ClusterShardHealth implements Writeable {
computeStatus = ClusterHealthStatus.YELLOW;
}
} else {
computeStatus = getInactivePrimaryHealth(primaryRouting, indexMetaData);
computeStatus = getInactivePrimaryHealth(primaryRouting);
}
this.status = computeStatus;
this.activeShards = computeActiveShards;

@@ -131,28 +130,25 @@ public final class ClusterShardHealth implements Writeable {
/**
* Checks if an inactive primary shard should cause the cluster health to go RED.
*
* Normally, an inactive primary shard in an index should cause the cluster health to be RED. However,
* there are exceptions where a health status of RED is inappropriate, namely in these scenarios:
* 1. Index Creation. When an index is first created, the primary shards are in the initializing state, so
* there is a small window where the cluster health is RED due to the primaries not being activated yet.
* However, this leads to a false sense that the cluster is in an unhealthy state, when in reality, its
* simply a case of needing to wait for the primaries to initialize.
* 2. When a cluster is in the recovery state, and the shard never had any allocation ids assigned to it,
* which indicates the index was created and before allocation of the primary occurred for this shard,
* a cluster restart happened.
*
* Here, we check for these scenarios and set the cluster health to YELLOW if any are applicable.
* An inactive primary shard in an index should cause the cluster health to be RED to make it visible that some of the existing data is
* unavailable. In case of index creation, snapshot restore or index shrinking, which are unexceptional events in the cluster lifecycle,
* cluster health should not turn RED for the time where primaries are still in the initializing state but go to YELLOW instead.
* However, in case of exceptional events, for example when the primary shard cannot be assigned to a node or initialization fails at
* some point, cluster health should still turn RED.
*
* NB: this method should *not* be called on active shards nor on non-primary shards.
*/
public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting, final IndexMetaData indexMetaData) {
public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting) {
assert shardRouting.primary() : "cannot invoke on a replica shard: " + shardRouting;
assert shardRouting.active() == false : "cannot invoke on an active shard: " + shardRouting;
assert shardRouting.unassignedInfo() != null : "cannot invoke on a shard with no UnassignedInfo: " + shardRouting;
assert shardRouting.recoverySource() != null : "cannot invoke on a shard that has no recovery source" + shardRouting;
final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
if (unassignedInfo.getLastAllocationStatus() != AllocationStatus.DECIDERS_NO
&& shardRouting.allocatedPostIndexCreate(indexMetaData) == false
&& (unassignedInfo.getReason() == Reason.INDEX_CREATED || unassignedInfo.getReason() == Reason.CLUSTER_RECOVERED)) {
RecoverySource.Type recoveryType = shardRouting.recoverySource().getType();
if (unassignedInfo.getLastAllocationStatus() != AllocationStatus.DECIDERS_NO && unassignedInfo.getNumFailedAllocations() == 0
&& (recoveryType == RecoverySource.Type.EMPTY_STORE
|| recoveryType == RecoverySource.Type.LOCAL_SHARDS
|| recoveryType == RecoverySource.Type.SNAPSHOT)) {
return ClusterHealthStatus.YELLOW;
} else {
return ClusterHealthStatus.RED;

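The health computation above stops consulting `IndexMetaData` and instead asks the shard's own `RecoverySource`: an inactive primary that is merely initializing from an empty store, local shards, or a snapshot stays YELLOW, while anything else (including a shard the deciders refused or one that already failed an allocation) turns RED. A small sketch of that decision table; the enum values mirror the ones named in the patch, the rest is illustrative:

```java
public class InactivePrimaryHealthSketch {
    enum RecoveryType { EMPTY_STORE, EXISTING_STORE, LOCAL_SHARDS, SNAPSHOT, PEER }
    enum Health { YELLOW, RED }

    static Health inactivePrimaryHealth(boolean decidersSaidNo, int numFailedAllocations, RecoveryType type) {
        boolean benignRecovery = type == RecoveryType.EMPTY_STORE
            || type == RecoveryType.LOCAL_SHARDS
            || type == RecoveryType.SNAPSHOT;
        // exceptional events (deciders refused, or an allocation already failed) always go RED
        return (!decidersSaidNo && numFailedAllocations == 0 && benignRecovery) ? Health.YELLOW : Health.RED;
    }

    public static void main(String[] args) {
        System.out.println(inactivePrimaryHealth(false, 0, RecoveryType.SNAPSHOT));       // YELLOW
        System.out.println(inactivePrimaryHealth(false, 1, RecoveryType.EMPTY_STORE));    // RED
        System.out.println(inactivePrimaryHealth(false, 0, RecoveryType.EXISTING_STORE)); // RED
    }
}
```
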
@@ -232,7 +232,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1).numberOfReplicas(0).build();

public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
public static final String KEY_IN_SYNC_ALLOCATIONS = "in_sync_allocations";
static final String KEY_VERSION = "version";
static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards";
static final String KEY_SETTINGS = "settings";

@@ -262,7 +262,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

private final ImmutableOpenMap<String, Custom> customs;

private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
private final ImmutableOpenIntMap<Set<String>> inSyncAllocationIds;

private final transient int totalNumberOfShards;

@@ -279,7 +279,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

private IndexMetaData(Index index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion,
int routingNumShards, ActiveShardCount waitForActiveShards) {

@@ -296,7 +296,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.mappings = mappings;
this.customs = customs;
this.aliases = aliases;
this.activeAllocationIds = activeAllocationIds;
this.inSyncAllocationIds = inSyncAllocationIds;
this.requireFilters = requireFilters;
this.includeFilters = includeFilters;
this.excludeFilters = excludeFilters;

@@ -340,7 +340,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
* a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary.
*
* Note: since we increment the term every time a shard is assigned, the term for any operational shard (i.e., a shard
* that can be indexed into) is larger than 0. See {@link IndexMetaDataUpdater#applyChanges(MetaData)}.
* that can be indexed into) is larger than 0. See {@link IndexMetaDataUpdater#applyChanges}.
**/
public long primaryTerm(int shardId) {
return this.primaryTerms[shardId];

@@ -447,13 +447,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return (T) customs.get(type);
}

public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
return activeAllocationIds;
public ImmutableOpenIntMap<Set<String>> getInSyncAllocationIds() {
return inSyncAllocationIds;
}

public Set<String> activeAllocationIds(int shardId) {
public Set<String> inSyncAllocationIds(int shardId) {
assert shardId >= 0 && shardId < numberOfShards;
return activeAllocationIds.get(shardId);
return inSyncAllocationIds.get(shardId);
}

@Nullable

@@ -518,7 +518,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
if (Arrays.equals(primaryTerms, that.primaryTerms) == false) {
return false;
}
if (!activeAllocationIds.equals(that.activeAllocationIds)) {
if (!inSyncAllocationIds.equals(that.inSyncAllocationIds)) {
return false;
}
return true;

@@ -536,7 +536,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
result = 31 * result + Long.hashCode(routingFactor);
result = 31 * result + Long.hashCode(routingNumShards);
result = 31 * result + Arrays.hashCode(primaryTerms);
result = 31 * result + activeAllocationIds.hashCode();
result = 31 * result + inSyncAllocationIds.hashCode();
return result;
}

@@ -573,7 +573,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
private final Diff<ImmutableOpenMap<String, Custom>> customs;
private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;
private final Diff<ImmutableOpenIntMap<Set<String>>> inSyncAllocationIds;

public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
index = after.index.getName();

@@ -585,7 +585,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds,
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
}

@@ -610,7 +610,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
DiffableUtils.StringSetValueSerializer.getInstance());
}

@@ -625,7 +625,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
mappings.writeTo(out);
aliases.writeTo(out);
customs.writeTo(out);
activeAllocationIds.writeTo(out);
inSyncAllocationIds.writeTo(out);
}

@Override

@@ -639,7 +639,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.mappings.putAll(mappings.apply(part.mappings));
builder.aliases.putAll(aliases.apply(part.aliases));
builder.customs.putAll(customs.apply(part.customs));
builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds));
return builder.build();
}
}

@@ -668,11 +668,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
int activeAllocationIdsSize = in.readVInt();
for (int i = 0; i < activeAllocationIdsSize; i++) {
int inSyncAllocationIdsSize = in.readVInt();
for (int i = 0; i < inSyncAllocationIdsSize; i++) {
int key = in.readVInt();
Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
builder.putActiveAllocationIds(key, allocationIds);
builder.putInSyncAllocationIds(key, allocationIds);
}
return builder.build();
}

@@ -698,8 +698,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
out.writeString(cursor.key);
cursor.value.writeTo(out);
}
out.writeVInt(activeAllocationIds.size());
for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
out.writeVInt(inSyncAllocationIds.size());
for (IntObjectCursor<Set<String>> cursor : inSyncAllocationIds) {
out.writeVInt(cursor.key);
DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
}

@@ -723,7 +723,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
private final ImmutableOpenMap.Builder<String, Custom> customs;
private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;
private final ImmutableOpenIntMap.Builder<Set<String>> inSyncAllocationIds;
private Integer routingNumShards;

public Builder(String index) {

@@ -731,7 +731,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.mappings = ImmutableOpenMap.builder();
this.aliases = ImmutableOpenMap.builder();
this.customs = ImmutableOpenMap.builder();
this.activeAllocationIds = ImmutableOpenIntMap.builder();
this.inSyncAllocationIds = ImmutableOpenIntMap.builder();
}

public Builder(IndexMetaData indexMetaData) {

@@ -744,7 +744,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
this.routingNumShards = indexMetaData.routingNumShards;
this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds);
}

public String index() {

@@ -854,13 +854,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this;
}

public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
activeAllocationIds.put(shardId, new HashSet(allocationIds));
return this;
public Set<String> getInSyncAllocationIds(int shardId) {
return inSyncAllocationIds.get(shardId);
}

public Set<String> getActiveAllocationIds(int shardId) {
return activeAllocationIds.get(shardId);
public Builder putInSyncAllocationIds(int shardId, Set<String> allocationIds) {
inSyncAllocationIds.put(shardId, new HashSet(allocationIds));
return this;
}

public long version() {

@@ -938,13 +938,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
}

// fill missing slots in activeAllocationIds with empty set if needed and make all entries immutable
ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds = ImmutableOpenIntMap.builder();
// fill missing slots in inSyncAllocationIds with empty set if needed and make all entries immutable
ImmutableOpenIntMap.Builder<Set<String>> filledInSyncAllocationIds = ImmutableOpenIntMap.builder();
for (int i = 0; i < numberOfShards; i++) {
if (activeAllocationIds.containsKey(i)) {
filledActiveAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
if (inSyncAllocationIds.containsKey(i)) {
filledInSyncAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(inSyncAllocationIds.get(i))));
} else {
filledActiveAllocationIds.put(i, Collections.emptySet());
filledInSyncAllocationIds.put(i, Collections.emptySet());
}
}
final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();

@@ -1005,7 +1005,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild

final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion, getRoutingNumShards(), waitForActiveShards);
}

@@ -1056,8 +1056,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
builder.endArray();

builder.startObject(KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
builder.startObject(KEY_IN_SYNC_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.inSyncAllocationIds) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);

@@ -1108,7 +1108,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
}
} else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
} else if (KEY_IN_SYNC_ALLOCATIONS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();

@@ -1120,7 +1120,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
allocationIds.add(parser.text());
}
}
builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
builder.putInSyncAllocationIds(Integer.valueOf(shardId), allocationIds);
} else {
throw new IllegalArgumentException("Unexpected token: " + token);
}

@ -23,9 +23,15 @@ import com.carrotsearch.hppc.IntSet;
|
|||
import com.carrotsearch.hppc.cursors.IntCursor;
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.AbstractDiffable;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource.LocalShardsRecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Randomness;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -129,10 +135,10 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
|
|||
"from the routing table");
|
||||
}
|
||||
if (shardRouting.active() &&
|
||||
indexMetaData.activeAllocationIds(shardRouting.id()).contains(shardRouting.allocationId().getId()) == false) {
|
||||
throw new IllegalStateException("active shard routing " + shardRouting + " has no corresponding entry in the " +
|
||||
"in-sync allocation set " + indexMetaData.activeAllocationIds(shardRouting.id()));
|
||||
}
|
||||
indexMetaData.inSyncAllocationIds(shardRouting.id()).contains(shardRouting.allocationId().getId()) == false) {
|
||||
throw new IllegalStateException("active shard routing " + shardRouting + " has no corresponding entry in the in-sync " +
|
||||
"allocation set " + indexMetaData.inSyncAllocationIds(shardRouting.id()));
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
|
@ -353,54 +359,57 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
|
|||
* Initializes a new empty index, as if it was created from an API.
|
||||
*/
|
||||
public Builder initializeAsNew(IndexMetaData indexMetaData) {
|
||||
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
|
||||
RecoverySource primaryRecoverySource = indexMetaData.getMergeSourceIndex() != null ?
|
||||
LocalShardsRecoverySource.INSTANCE :
|
||||
StoreRecoverySource.EMPTY_STORE_INSTANCE;
|
||||
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null), primaryRecoverySource);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes a new empty index, as if it was created from an API.
|
||||
* Initializes an existing index.
|
||||
*/
|
||||
public Builder initializeAsRecovery(IndexMetaData indexMetaData) {
|
||||
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null));
|
||||
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null), null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initializes a new index caused by dangling index imported.
|
||||
*/
|
||||
public Builder initializeAsFromDangling(IndexMetaData indexMetaData) {
|
||||
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.DANGLING_INDEX_IMPORTED, null));
|
||||
return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.DANGLING_INDEX_IMPORTED, null), null);
|
||||
}
|
||||
|
||||
/**
      * Initializes a new empty index, as a result of opening a closed index.
      */
     public Builder initializeAsFromCloseToOpen(IndexMetaData indexMetaData) {
-        return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, null));
+        return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, null), null);
     }

     /**
      * Initializes a new empty index, to be restored from a snapshot
      */
-    public Builder initializeAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards) {
+    public Builder initializeAsNewRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards) {
         final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED,
-            "restore_source[" + restoreSource.snapshot().getRepository() + "/" +
-                restoreSource.snapshot().getSnapshotId().getName() + "]");
-        return initializeAsRestore(indexMetaData, restoreSource, ignoreShards, true, unassignedInfo);
+            "restore_source[" + recoverySource.snapshot().getRepository() + "/" +
+                recoverySource.snapshot().getSnapshotId().getName() + "]");
+        return initializeAsRestore(indexMetaData, recoverySource, ignoreShards, true, unassignedInfo);
     }

     /**
      * Initializes an existing index, to be restored from a snapshot
      */
-    public Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+    public Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource) {
         final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED,
-            "restore_source[" + restoreSource.snapshot().getRepository() + "/" +
-                restoreSource.snapshot().getSnapshotId().getName() + "]");
-        return initializeAsRestore(indexMetaData, restoreSource, null, false, unassignedInfo);
+            "restore_source[" + recoverySource.snapshot().getRepository() + "/" +
+                recoverySource.snapshot().getSnapshotId().getName() + "]");
+        return initializeAsRestore(indexMetaData, recoverySource, null, false, unassignedInfo);
     }

     /**
      * Initializes an index, to be restored from snapshot
      */
-    private Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards, boolean asNew, UnassignedInfo unassignedInfo) {
+    private Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards, boolean asNew, UnassignedInfo unassignedInfo) {
         assert indexMetaData.getIndex().equals(index);
         if (!shards.isEmpty()) {
             throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");

@@ -409,11 +418,14 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
             ShardId shardId = new ShardId(index, shardNumber);
             IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
             for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
+                boolean primary = i == 0;
                 if (asNew && ignoreShards.contains(shardNumber)) {
                     // This shard wasn't completely snapshotted - restore it as a new shard
-                    indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo));
+                    indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
+                        primary ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE, unassignedInfo));
                 } else {
-                    indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, i == 0 ? restoreSource : null, i == 0, unassignedInfo));
+                    indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary,
+                        primary ? recoverySource : PeerRecoverySource.INSTANCE, unassignedInfo));
                 }
             }
             shards.put(shardNumber, indexShardRoutingBuilder.build());

@@ -423,17 +435,28 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
         /**
          * Initializes a new empty index, with an option to control if it's from an API or not.
          *
+         * @param primaryRecoverySource recovery source for primary shards. If null, it is automatically determined based on active
+         *                              allocation ids
         */
-        private Builder initializeEmpty(IndexMetaData indexMetaData, UnassignedInfo unassignedInfo) {
+        private Builder initializeEmpty(IndexMetaData indexMetaData, UnassignedInfo unassignedInfo, @Nullable RecoverySource primaryRecoverySource) {
            assert indexMetaData.getIndex().equals(index);
            if (!shards.isEmpty()) {
                throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
            }
            for (int shardNumber = 0; shardNumber < indexMetaData.getNumberOfShards(); shardNumber++) {
                ShardId shardId = new ShardId(index, shardNumber);
+               if (primaryRecoverySource == null) {
+                   if (indexMetaData.inSyncAllocationIds(shardNumber).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
+                       primaryRecoverySource = indexMetaData.getMergeSourceIndex() != null ? LocalShardsRecoverySource.INSTANCE : StoreRecoverySource.EMPTY_STORE_INSTANCE;
+                   } else {
+                       primaryRecoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
+                   }
+               }
               IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
               for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
-                  indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, null, i == 0, unassignedInfo));
+                  boolean primary = i == 0;
+                  indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, primary, primary ? primaryRecoverySource : PeerRecoverySource.INSTANCE, unassignedInfo));
               }
               shards.put(shardNumber, indexShardRoutingBuilder.build());
            }

@@ -445,7 +468,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
             int shardNumber = cursor.value;
             ShardId shardId = new ShardId(index, shardNumber);
             // version 0, will get updated when reroute happens
-            ShardRouting shard = ShardRouting.newUnassigned(shardId, null, false, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
+            ShardRouting shard = ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null));
             shards.put(shardNumber,
                 new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()
             );
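
Every initializer above now follows the same split: the primary copy of each shard carries an explicit recovery source, while every replica copy gets PeerRecoverySource.INSTANCE. A minimal sketch of that invariant, assuming the classes introduced in this commit (the helper name and parameters are placeholders, not part of the diff):

import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.index.shard.ShardId;

class PrimaryReplicaRecoverySplit {
    // Adds numberOfReplicas + 1 unassigned copies: copy 0 is the primary and keeps the
    // caller-chosen recovery source; all other copies recover from their primary.
    static void addCopies(IndexShardRoutingTable.Builder builder, ShardId shardId, int numberOfReplicas,
                          RecoverySource primaryRecoverySource, UnassignedInfo unassignedInfo) {
        for (int i = 0; i <= numberOfReplicas; i++) {
            boolean primary = i == 0;
            builder.addShard(ShardRouting.newUnassigned(shardId, primary,
                primary ? primaryRecoverySource : PeerRecoverySource.INSTANCE, unassignedInfo));
        }
    }
}
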
@@ -0,0 +1,250 @@ (new file: org/elasticsearch/cluster/routing/RecoverySource.java — every line below is added)
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.snapshots.Snapshot;

import java.io.IOException;
import java.util.Objects;

/**
 * Represents the recovery source of a shard. Available recovery types are:
 *
 * - {@link StoreRecoverySource} recovery from the local store (empty or with existing data)
 * - {@link PeerRecoverySource} recovery from a primary on another node
 * - {@link SnapshotRecoverySource} recovery from a snapshot
 * - {@link LocalShardsRecoverySource} recovery from other shards of another index on the same node
 */
public abstract class RecoverySource implements Writeable, ToXContent {

    @Override
    public final XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        builder.startObject();
        builder.field("type", getType());
        addAdditionalFields(builder, params);
        return builder.endObject();
    }

    /**
     * to be overridden by subclasses
     */
    public void addAdditionalFields(XContentBuilder builder, ToXContent.Params params) throws IOException {

    }

    public static RecoverySource readFrom(StreamInput in) throws IOException {
        Type type = Type.values()[in.readByte()];
        switch (type) {
            case EMPTY_STORE: return StoreRecoverySource.EMPTY_STORE_INSTANCE;
            case EXISTING_STORE: return StoreRecoverySource.EXISTING_STORE_INSTANCE;
            case PEER: return PeerRecoverySource.INSTANCE;
            case SNAPSHOT: return new SnapshotRecoverySource(in);
            case LOCAL_SHARDS: return LocalShardsRecoverySource.INSTANCE;
            default: throw new IllegalArgumentException("unknown recovery type: " + type.name());
        }
    }

    @Override
    public final void writeTo(StreamOutput out) throws IOException {
        out.writeByte((byte) getType().ordinal());
        writeAdditionalFields(out);
    }

    /**
     * to be overridden by subclasses
     */
    protected void writeAdditionalFields(StreamOutput out) throws IOException {

    }

    public enum Type {
        EMPTY_STORE,
        EXISTING_STORE,
        PEER,
        SNAPSHOT,
        LOCAL_SHARDS
    }

    public abstract Type getType();

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        RecoverySource that = (RecoverySource) o;

        return getType() == that.getType();
    }

    @Override
    public int hashCode() {
        return getType().hashCode();
    }

    /**
     * recovery from an existing on-disk store or a fresh copy
     */
    public abstract static class StoreRecoverySource extends RecoverySource {
        public static final StoreRecoverySource EMPTY_STORE_INSTANCE = new StoreRecoverySource() {
            @Override
            public Type getType() {
                return Type.EMPTY_STORE;
            }
        };
        public static final StoreRecoverySource EXISTING_STORE_INSTANCE = new StoreRecoverySource() {
            @Override
            public Type getType() {
                return Type.EXISTING_STORE;
            }
        };

        @Override
        public String toString() {
            return getType() == Type.EMPTY_STORE ? "new shard recovery" : "existing recovery";
        }
    }

    /**
     * recovery from other shards on same node (shrink index action)
     */
    public static class LocalShardsRecoverySource extends RecoverySource {

        public static final LocalShardsRecoverySource INSTANCE = new LocalShardsRecoverySource();

        private LocalShardsRecoverySource() {
        }

        @Override
        public Type getType() {
            return Type.LOCAL_SHARDS;
        }

        @Override
        public String toString() {
            return "local shards recovery";
        }

    }

    /**
     * recovery from a snapshot
     */
    public static class SnapshotRecoverySource extends RecoverySource {
        private final Snapshot snapshot;
        private final String index;
        private final Version version;

        public SnapshotRecoverySource(Snapshot snapshot, Version version, String index) {
            this.snapshot = Objects.requireNonNull(snapshot);
            this.version = Objects.requireNonNull(version);
            this.index = Objects.requireNonNull(index);
        }

        SnapshotRecoverySource(StreamInput in) throws IOException {
            snapshot = new Snapshot(in);
            version = Version.readVersion(in);
            index = in.readString();
        }

        public Snapshot snapshot() {
            return snapshot;
        }

        public String index() {
            return index;
        }

        public Version version() {
            return version;
        }

        @Override
        protected void writeAdditionalFields(StreamOutput out) throws IOException {
            snapshot.writeTo(out);
            Version.writeVersion(version, out);
            out.writeString(index);
        }

        @Override
        public Type getType() {
            return Type.SNAPSHOT;
        }

        @Override
        public void addAdditionalFields(XContentBuilder builder, ToXContent.Params params) throws IOException {
            builder.field("repository", snapshot.getRepository())
                .field("snapshot", snapshot.getSnapshotId().getName())
                .field("version", version.toString())
                .field("index", index);
        }

        @Override
        public String toString() {
            return "snapshot recovery from " + snapshot.toString();
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }

            @SuppressWarnings("unchecked") SnapshotRecoverySource that = (SnapshotRecoverySource) o;
            return snapshot.equals(that.snapshot) && index.equals(that.index) && version.equals(that.version);
        }

        @Override
        public int hashCode() {
            return Objects.hash(snapshot, index, version);
        }

    }

    /**
     * peer recovery from a primary shard
     */
    public static class PeerRecoverySource extends RecoverySource {

        public static final PeerRecoverySource INSTANCE = new PeerRecoverySource();

        private PeerRecoverySource() {
        }

        @Override
        public Type getType() {
            return Type.PEER;
        }

        @Override
        public String toString() {
            return "peer recovery";
        }
    }
}
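
Since readFrom dispatches on the type byte that writeTo emits, the wire format round-trips through one byte plus any subclass fields. A minimal round-trip sketch, assuming an Elasticsearch test classpath; the repository/snapshot/index names are made up, and the (String, SnapshotId) constructor shapes for Snapshot and SnapshotId are assumed from the 5.0-era codebase rather than shown in this diff:

import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;

public class RecoverySourceWireSketch {
    public static void main(String[] args) throws Exception {
        RecoverySource original = new SnapshotRecoverySource(
            new Snapshot("my_repo", new SnapshotId("snap_1", "snap_uuid")), Version.CURRENT, "my_index");
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            original.writeTo(out);                       // one type byte, then snapshot, version, index
            StreamInput in = out.bytes().streamInput();
            RecoverySource copy = RecoverySource.readFrom(in);
            assert copy.equals(original) && copy.getType() == RecoverySource.Type.SNAPSHOT;
        }
    }
}

Note that writeTo encodes getType().ordinal() as a single byte, so the declaration order of the Type constants is effectively part of the wire format; new types can only be appended safely.
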
@@ -1,115 +0,0 @@ (deleted file: org/elasticsearch/cluster/routing/RestoreSource.java — every line below is removed)
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing;

import org.elasticsearch.Version;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

/**
 * Represents snapshot and index from which a recovering index should be restored
 */
public class RestoreSource implements Streamable, ToXContent {

    private Snapshot snapshot;

    private String index;

    private Version version;

    RestoreSource() {
    }

    public RestoreSource(Snapshot snapshot, Version version, String index) {
        this.snapshot = Objects.requireNonNull(snapshot);
        this.version = Objects.requireNonNull(version);
        this.index = Objects.requireNonNull(index);
    }

    public Snapshot snapshot() {
        return snapshot;
    }

    public String index() {
        return index;
    }

    public Version version() {
        return version;
    }

    public static RestoreSource readOptionalRestoreSource(StreamInput in) throws IOException {
        return in.readOptionalStreamable(RestoreSource::new);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        snapshot = new Snapshot(in);
        version = Version.readVersion(in);
        index = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        snapshot.writeTo(out);
        Version.writeVersion(version, out);
        out.writeString(index);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.startObject()
            .field("repository", snapshot.getRepository())
            .field("snapshot", snapshot.getSnapshotId().getName())
            .field("version", version.toString())
            .field("index", index)
            .endObject();
    }

    @Override
    public String toString() {
        return snapshot.toString();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        @SuppressWarnings("unchecked") RestoreSource that = (RestoreSource) o;
        return snapshot.equals(that.snapshot) && index.equals(that.index);
    }

    @Override
    public int hashCode() {
        return Objects.hash(snapshot, index);
    }
}
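
The deleted class and its replacement share the same essential shape — a (Snapshot, Version, String index) constructor, the same accessors, the same XContent fields — so call sites migrate mechanically; only the optional-Streamable plumbing disappears. A hypothetical call-site migration (the method and its arguments are placeholders, not identifiers from the diff):

import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.snapshots.Snapshot;

final class RestoreSourceMigrationSketch {
    // Hypothetical: the constructor arguments carry over one-to-one.
    static SnapshotRecoverySource migrate(Snapshot snapshot, Version version, String index) {
        // old: new RestoreSource(snapshot, version, index)  -- class deleted above; the new
        // value is serialized only while the shard is UNASSIGNED or INITIALIZING
        // (see the ShardRouting hunks below).
        return new SnapshotRecoverySource(snapshot, version, index);
    }
}
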
@@ -26,7 +26,7 @@ public interface RoutingChangesObserver {
     /**
      * Called when unassigned shard is initialized. Does not include initializing relocation target shards.
      */
-    void shardInitialized(ShardRouting unassignedShard);
+    void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard);

     /**
      * Called when an initializing shard is started.

@@ -77,7 +77,7 @@ public interface RoutingChangesObserver {
     class AbstractRoutingChangesObserver implements RoutingChangesObserver {

         @Override
-        public void shardInitialized(ShardRouting unassignedShard) {
+        public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {

         }

@@ -131,9 +131,9 @@ public interface RoutingChangesObserver {
         }

         @Override
-        public void shardInitialized(ShardRouting unassignedShard) {
+        public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
             for (RoutingChangesObserver routingChangesObserver : routingChangesObservers) {
-                routingChangesObserver.shardInitialized(unassignedShard);
+                routingChangesObserver.shardInitialized(unassignedShard, initializedShard);
             }
         }
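
The widened callback lets observers see not only the unassigned entry but the concrete routing entry it became. A sketch of a custom observer built on the no-op base class above (the log line is illustrative, not from the diff):

import org.elasticsearch.cluster.routing.RoutingChangesObserver;
import org.elasticsearch.cluster.routing.ShardRouting;

class InitializationLogger extends RoutingChangesObserver.AbstractRoutingChangesObserver {
    @Override
    public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
        // The initialized entry carries the assigned node, allocation id and recovery source,
        // none of which are visible on the unassigned entry alone.
        System.out.println(unassignedShard.shardId() + " initializing on "
            + initializedShard.currentNodeId() + " via " + initializedShard.recoverySource());
    }
}
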
@@ -166,7 +166,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {

         Recoveries.getOrAdd(recoveriesPerNode, routing.currentNodeId()).addIncoming(howMany);

-        if (routing.isPeerRecovery()) {
+        if (routing.recoverySource().getType() == RecoverySource.Type.PEER) {
             // add/remove corresponding outgoing recovery on node with primary shard
             if (primary == null) {
                 throw new IllegalStateException("shard is peer recovering but primary is unassigned");

@@ -177,7 +177,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
             // primary is done relocating, move non-primary recoveries from old primary to new primary
             int numRecoveringReplicas = 0;
             for (ShardRouting assigned : assignedShards(routing.shardId())) {
-                if (assigned.primary() == false && assigned.isPeerRecovery()) {
+                if (assigned.primary() == false && assigned.initializing() &&
+                    assigned.recoverySource().getType() == RecoverySource.Type.PEER) {
                     numRecoveringReplicas++;
                 }
             }

@@ -198,7 +199,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
     @Nullable
     private ShardRouting findAssignedPrimaryIfPeerRecovery(ShardRouting routing) {
         ShardRouting primary = null;
-        if (routing.isPeerRecovery()) {
+        if (routing.recoverySource() != null && routing.recoverySource().getType() == RecoverySource.Type.PEER) {
             List<ShardRouting> shardRoutings = assignedShards.get(routing.shardId());
             if (shardRoutings != null) {
                 for (ShardRouting shardRouting : shardRoutings) {

@@ -420,7 +421,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         }
         addRecovery(initializedShard);
         assignedShardsAdd(initializedShard);
-        routingChangesObserver.shardInitialized(unassignedShard);
+        routingChangesObserver.shardInitialized(unassignedShard, initializedShard);
         return initializedShard;
     }

@@ -627,7 +628,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
     private ShardRouting promoteActiveReplicaShardToPrimary(ShardRouting replicaShard) {
         assert replicaShard.active() : "non-active shard cannot be promoted to primary: " + replicaShard;
         assert replicaShard.primary() == false : "primary shard cannot be promoted to primary: " + replicaShard;
-        ShardRouting primaryShard = replicaShard.moveToPrimary();
+        ShardRouting primaryShard = replicaShard.moveActiveReplicaToPrimary();
         updateAssigned(replicaShard, primaryShard);
         return primaryShard;
     }

@@ -701,7 +702,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         if (candidate.relocating()) {
             cancelRelocation(candidate);
         }
-        ShardRouting reinitializedShard = candidate.reinitializeShard();
+        ShardRouting reinitializedShard = candidate.reinitializePrimaryShard();
         updateAssigned(candidate, reinitializedShard);
         inactivePrimaryCount++;
         inactiveShardCount++;

@@ -738,7 +739,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         assert shard.unassigned() == false : "only assigned shards can be moved to unassigned (" + shard + ")";
         assert shard.primary() : "only primary can be demoted to replica (" + shard + ")";
         remove(shard);
-        ShardRouting unassigned = shard.moveToUnassigned(unassignedInfo).moveFromPrimary();
+        ShardRouting unassigned = shard.moveToUnassigned(unassignedInfo).moveUnassignedFromPrimary();
         unassignedShards.add(unassigned);
         return unassigned;
     }

@@ -832,7 +833,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                 currInfo.getNumFailedAllocations(), currInfo.getUnassignedTimeInNanos(),
                 currInfo.getUnassignedTimeInMillis(), currInfo.isDelayed(),
                 allocationStatus);
-            ShardRouting updatedShard = shard.updateUnassignedInfo(newInfo);
+            ShardRouting updatedShard = shard.updateUnassigned(newInfo, shard.recoverySource());
             changes.unassignedInfoUpdated(shard, newInfo);
             shard = updatedShard;
         }

@@ -891,14 +892,16 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         }

         /**
-         * updates the unassigned info on the current unassigned shard
+         * updates the unassigned info and recovery source on the current unassigned shard
          *
          * @param unassignedInfo the new unassigned info to use
+         * @param recoverySource the new recovery source to use
          * @return the shard with unassigned info updated
          */
-        public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo, RoutingChangesObserver changes) {
+        public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource,
+                                             RoutingChangesObserver changes) {
             nodes.ensureMutable();
-            ShardRouting updatedShardRouting = current.updateUnassignedInfo(unassignedInfo);
+            ShardRouting updatedShardRouting = current.updateUnassigned(unassignedInfo, recoverySource);
             changes.unassignedInfoUpdated(current, unassignedInfo);
             updateShardRouting(updatedShardRouting);
             return updatedShardRouting;

@@ -1040,9 +1043,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
             if (routing.initializing()) {
                 incoming++;
             }
-            if (routing.primary() && routing.isPeerRecovery() == false) {
+            if (routing.primary() && routing.isRelocationTarget() == false) {
                 for (ShardRouting assigned : routingNodes.assignedShards.get(routing.shardId())) {
-                    if (assigned.isPeerRecovery()) {
+                    if (assigned.initializing() && assigned.recoverySource().getType() == RecoverySource.Type.PEER) {
                         outgoing++;
                     }
                 }
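
Previously, "peer recovery" was inferred from routing state (the removed ShardRouting#isPeerRecovery returned true for an initializing shard that was either a replica or a relocation target); after this change the question is answered by explicit data. A sketch of the replacement predicate used throughout the hunks above:

import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;

final class PeerRecoveryCheck {
    // Replaces the removed ShardRouting#isPeerRecovery(): a shard copy is peer-recovering
    // iff it is initializing and its recovery source is PEER. Per the constructor invariant
    // in this diff, an initializing shard always has a non-null recovery source.
    static boolean isPeerRecovery(ShardRouting routing) {
        return routing.initializing() && routing.recoverySource().getType() == RecoverySource.Type.PEER;
    }
}
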
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -540,16 +541,16 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
         return this;
     }

-    public Builder addAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
+    public Builder addAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource) {
         IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
-            .initializeAsRestore(indexMetaData, restoreSource);
+            .initializeAsRestore(indexMetaData, recoverySource);
         add(indexRoutingBuilder);
         return this;
     }

-    public Builder addAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards) {
+    public Builder addAsNewRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards) {
         IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
-            .initializeAsNewRestore(indexMetaData, restoreSource, ignoreShards);
+            .initializeAsNewRestore(indexMetaData, recoverySource, ignoreShards);
         add(indexRoutingBuilder);
         return this;
     }
@@ -19,8 +19,8 @@

 package org.elasticsearch.cluster.routing;

-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
+import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;

@@ -51,7 +51,7 @@ public final class ShardRouting implements Writeable, ToXContent {
     private final String relocatingNodeId;
     private final boolean primary;
     private final ShardRoutingState state;
-    private final RestoreSource restoreSource;
+    private final RecoverySource recoverySource;
     private final UnassignedInfo unassignedInfo;
     private final AllocationId allocationId;
     private final transient List<ShardRouting> asList;

@@ -64,29 +64,31 @@ public final class ShardRouting implements Writeable, ToXContent {
      * by either this class or tests. Visible for testing.
      */
     ShardRouting(ShardId shardId, String currentNodeId,
-                 String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state,
+                 String relocatingNodeId, boolean primary, ShardRoutingState state, RecoverySource recoverySource,
                  UnassignedInfo unassignedInfo, AllocationId allocationId, long expectedShardSize) {
         this.shardId = shardId;
         this.currentNodeId = currentNodeId;
         this.relocatingNodeId = relocatingNodeId;
         this.primary = primary;
         this.state = state;
-        this.asList = Collections.singletonList(this);
-        this.restoreSource = restoreSource;
+        this.recoverySource = recoverySource;
         this.unassignedInfo = unassignedInfo;
         this.allocationId = allocationId;
         this.expectedShardSize = expectedShardSize;
         this.targetRelocatingShard = initializeTargetRelocatingShard();
+        this.asList = Collections.singletonList(this);
-        assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
+        assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
         assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
+        assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was " + state;
+        assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary : "replica shards always recover from primary";
     }

     @Nullable
     private ShardRouting initializeTargetRelocatingShard() {
         if (state == ShardRoutingState.RELOCATING) {
-            return new ShardRouting(shardId, relocatingNodeId, currentNodeId, restoreSource, primary,
-                ShardRoutingState.INITIALIZING, unassignedInfo, AllocationId.newTargetRelocation(allocationId), expectedShardSize);
+            return new ShardRouting(shardId, relocatingNodeId, currentNodeId, primary, ShardRoutingState.INITIALIZING,
+                PeerRecoverySource.INSTANCE, unassignedInfo, AllocationId.newTargetRelocation(allocationId), expectedShardSize);
         } else {
             return null;
         }

@@ -95,8 +97,8 @@ public final class ShardRouting implements Writeable, ToXContent {
     /**
      * Creates a new unassigned shard.
      */
-    public static ShardRouting newUnassigned(ShardId shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
-        return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
+    public static ShardRouting newUnassigned(ShardId shardId, boolean primary, RecoverySource recoverySource, UnassignedInfo unassignedInfo) {
+        return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, recoverySource, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
     }

     public Index index() {

@@ -199,13 +201,6 @@ public final class ShardRouting implements Writeable, ToXContent {
         return targetRelocatingShard;
     }

-    /**
-     * Snapshot id and repository where this shard is being restored from
-     */
-    public RestoreSource restoreSource() {
-        return restoreSource;
-    }
-
     /**
      * Additional metadata on why the shard is/was unassigned. The metadata is kept around
      * until the shard moves to STARTED.

@@ -244,32 +239,6 @@ public final class ShardRouting implements Writeable, ToXContent {
         return shardId;
     }

-    public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) {
-        if (active()) {
-            return true;
-        }
-
-        // initializing replica might not have unassignedInfo
-        assert unassignedInfo != null || (primary == false && state == ShardRoutingState.INITIALIZING);
-        if (unassignedInfo != null && unassignedInfo.getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
-            return false;
-        }
-
-        if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
-            // when no shards with this id have ever been active for this index
-            return false;
-        }
-
-        return true;
-    }
-
-    /**
-     * returns true for initializing shards that recover their data from another shard copy
-     */
-    public boolean isPeerRecovery() {
-        return state == ShardRoutingState.INITIALIZING && (primary() == false || relocatingNodeId != null);
-    }
-
     /**
      * A shard iterator with just this shard in it.
      */

@@ -283,7 +252,11 @@ public final class ShardRouting implements Writeable, ToXContent {
         relocatingNodeId = in.readOptionalString();
         primary = in.readBoolean();
         state = ShardRoutingState.fromValue(in.readByte());
-        restoreSource = RestoreSource.readOptionalRestoreSource(in);
+        if (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) {
+            recoverySource = RecoverySource.readFrom(in);
+        } else {
+            recoverySource = null;
+        }
         unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
         allocationId = in.readOptionalWriteable(AllocationId::new);
         final long shardSize;

@@ -312,7 +285,9 @@ public final class ShardRouting implements Writeable, ToXContent {
         out.writeOptionalString(relocatingNodeId);
         out.writeBoolean(primary);
         out.writeByte(state.value());
-        out.writeOptionalStreamable(restoreSource);
+        if (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) {
+            recoverySource.writeTo(out);
+        }
         out.writeOptionalWriteable(unassignedInfo);
         out.writeOptionalWriteable(allocationId);
         if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING) {
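
The recovery source is deliberately not written for STARTED or RELOCATING shards: the constructor invariant ties a non-null recoverySource to exactly the UNASSIGNED and INITIALIZING states, so the reader can reconstruct nullness from the state byte alone, with no presence flag on the wire. An illustrative, self-contained sketch of that framing technique using plain Java IO and made-up names (not the Elasticsearch stream API):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

enum SketchState { UNASSIGNED, INITIALIZING, STARTED, RELOCATING }

class StateDependentField {
    static void write(DataOutputStream out, SketchState state, String recoverySource) throws IOException {
        out.writeByte(state.ordinal());
        if (state == SketchState.UNASSIGNED || state == SketchState.INITIALIZING) {
            out.writeUTF(recoverySource);   // field present only in these states
        }
    }

    static String read(DataInputStream in) throws IOException {
        SketchState state = SketchState.values()[in.readByte()];
        // nullness is derivable from state, so nothing extra travels on the wire
        return (state == SketchState.UNASSIGNED || state == SketchState.INITIALIZING) ? in.readUTF() : null;
    }
}
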
@@ -326,10 +301,10 @@ public final class ShardRouting implements Writeable, ToXContent {
         writeToThin(out);
     }

-    public ShardRouting updateUnassignedInfo(UnassignedInfo unassignedInfo) {
+    public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource) {
         assert this.unassignedInfo != null : "can only update unassign info if they are already set";
         assert this.unassignedInfo.isDelayed() || (unassignedInfo.isDelayed() == false) : "cannot transition from non-delayed to delayed";
-        return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state,
+        return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, state, recoverySource,
             unassignedInfo, allocationId, expectedShardSize);
     }

@@ -338,7 +313,17 @@ public final class ShardRouting implements Writeable, ToXContent {
     */
    public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) {
        assert state != ShardRoutingState.UNASSIGNED : this;
-       return new ShardRouting(shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED,
+       final RecoverySource recoverySource;
+       if (active()) {
+           if (primary()) {
+               recoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE;
+           } else {
+               recoverySource = PeerRecoverySource.INSTANCE;
+           }
+       } else {
+           recoverySource = recoverySource();
+       }
+       return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, recoverySource,
            unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }
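
moveToUnassigned now has to pick a recovery source for shards that were active, since active shards carry none: a formerly active primary should reuse its on-disk data, a formerly active replica re-syncs from the primary, and a shard that never became active keeps the source it was created with. The same decision extracted as a helper, sketched under the assumption of the API above:

import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;

final class UnassignRecoverySource {
    // Same decision table as moveToUnassigned above:
    //   active primary  -> EXISTING_STORE (reuse the data it already holds)
    //   active replica  -> PEER           (re-sync from the current primary)
    //   not yet active  -> keep the recovery source it already had
    static RecoverySource forUnassigning(ShardRouting shard) {
        if (shard.active()) {
            return shard.primary() ? StoreRecoverySource.EXISTING_STORE_INSTANCE : PeerRecoverySource.INSTANCE;
        }
        return shard.recoverySource();
    }
}
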
@@ -356,7 +341,7 @@ public final class ShardRouting implements Writeable, ToXContent {
         } else {
             allocationId = AllocationId.newInitializing(existingAllocationId);
         }
-        return new ShardRouting(shardId, nodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING,
+        return new ShardRouting(shardId, nodeId, null, primary, ShardRoutingState.INITIALIZING, recoverySource,
             unassignedInfo, allocationId, expectedShardSize);
     }

@@ -367,7 +352,7 @@ public final class ShardRouting implements Writeable, ToXContent {
     */
    public ShardRouting relocate(String relocatingNodeId, long expectedShardSize) {
        assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
-       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, primary, ShardRoutingState.RELOCATING,
+       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, primary, ShardRoutingState.RELOCATING, recoverySource,
            null, AllocationId.newRelocation(allocationId), expectedShardSize);
    }

@@ -379,7 +364,7 @@ public final class ShardRouting implements Writeable, ToXContent {
        assert state == ShardRoutingState.RELOCATING : this;
        assert assignedToNode() : this;
        assert relocatingNodeId != null : this;
-       return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED,
+       return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.STARTED, recoverySource,
            null, AllocationId.cancelRelocation(allocationId), UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

@@ -393,17 +378,19 @@ public final class ShardRouting implements Writeable, ToXContent {
        assert state == ShardRoutingState.INITIALIZING : this;
        assert assignedToNode() : this;
        assert relocatingNodeId != null : this;
-       return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, state, unassignedInfo,
+       return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource, unassignedInfo,
            AllocationId.finishRelocation(allocationId), expectedShardSize);
    }

    /**
-    * Moves the shard from started to initializing
+    * Moves the primary shard from started to initializing
     */
-   public ShardRouting reinitializeShard() {
-       assert state == ShardRoutingState.STARTED;
-       return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.INITIALIZING,
-           new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null), AllocationId.newInitializing(), UNAVAILABLE_EXPECTED_SHARD_SIZE);
+   public ShardRouting reinitializePrimaryShard() {
+       assert state == ShardRoutingState.STARTED : this;
+       assert primary : this;
+       return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.INITIALIZING,
+           StoreRecoverySource.EXISTING_STORE_INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, null),
+           AllocationId.newInitializing(), UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

    /**

@@ -418,39 +405,36 @@ public final class ShardRouting implements Writeable, ToXContent {
            // relocation target
            allocationId = AllocationId.finishRelocation(allocationId);
        }
-       return new ShardRouting(shardId, currentNodeId, null, restoreSource, primary, ShardRoutingState.STARTED, null, allocationId,
+       return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.STARTED, null, null, allocationId,
            UNAVAILABLE_EXPECTED_SHARD_SIZE);
    }

    /**
-    * Make the shard primary unless it's not Primary
+    * Make the active shard primary unless it's not primary
     *
     * @throws IllegalShardRoutingStateException if shard is already a primary
     */
-   public ShardRouting moveToPrimary() {
+   public ShardRouting moveActiveReplicaToPrimary() {
+       assert active() : "expected an active shard " + this;
       if (primary) {
           throw new IllegalShardRoutingStateException(this, "Already primary, can't move to primary");
       }
-       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, true, state, unassignedInfo, allocationId,
+       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, true, state, recoverySource, unassignedInfo, allocationId,
           expectedShardSize);
    }

    /**
-    * Set the primary shard to non-primary
+    * Set the unassigned primary shard to non-primary
     *
     * @throws IllegalShardRoutingStateException if shard is already a replica
     */
-   public ShardRouting moveFromPrimary() {
+   public ShardRouting moveUnassignedFromPrimary() {
+       assert state == ShardRoutingState.UNASSIGNED : "expected an unassigned shard " + this;
       if (!primary) {
           throw new IllegalShardRoutingStateException(this, "Not primary, can't move to replica");
       }
-       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, restoreSource, false, state, unassignedInfo, allocationId,
-           expectedShardSize);
-   }
-
-   /** returns true if this routing has the same shardId as another */
-   public boolean isSameShard(ShardRouting other) {
-       return getIndexName().equals(other.getIndexName()) && id() == other.id();
+       return new ShardRouting(shardId, currentNodeId, relocatingNodeId, false, state, PeerRecoverySource.INSTANCE, unassignedInfo,
+           allocationId, expectedShardSize);
   }

   /**
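
The renames encode preconditions that were previously implicit: promotion only makes sense for an active replica, demotion only for an unassigned primary. A sketch of the failover sequence exactly as the RoutingNodes hunks above chain these methods (the routing entries are placeholders, not a runnable cluster state):

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;

final class FailoverSketch {
    // Given a failed active primary and a surviving active replica, produce the demoted
    // and promoted routing entries, mirroring the RoutingNodes hunks above.
    static ShardRouting[] demoteAndPromote(ShardRouting failedPrimary, ShardRouting activeReplica,
                                           UnassignedInfo unassignedInfo) {
        // demotion is only legal on an unassigned entry, hence moveToUnassigned(...) first
        ShardRouting demoted = failedPrimary.moveToUnassigned(unassignedInfo).moveUnassignedFromPrimary();
        // promotion is only legal on an active replica
        ShardRouting promoted = activeReplica.moveActiveReplicaToPrimary();
        return new ShardRouting[] { demoted, promoted };
    }
}
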
|
@ -490,8 +474,8 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
assert b == false || this.currentNodeId().equals(other.relocatingNodeId) :
|
||||
"ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]";
|
||||
|
||||
assert b == false || isSameShard(other) :
|
||||
"ShardRouting is a relocation target but both routings are not of the same shard. This [" + this + "], other [" + other + "]";
|
||||
assert b == false || this.shardId.equals(other.shardId) :
|
||||
"ShardRouting is a relocation target but both routings are not of the same shard id. This [" + this + "], other [" + other + "]";
|
||||
|
||||
assert b == false || this.primary == other.primary :
|
||||
"ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]";
|
||||
|
@ -517,7 +501,7 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
assert b == false || other.currentNodeId().equals(this.relocatingNodeId) :
|
||||
"ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]";
|
||||
|
||||
assert b == false || isSameShard(other) :
|
||||
assert b == false || this.shardId.equals(other.shardId) :
|
||||
"ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]";
|
||||
|
||||
assert b == false || this.primary == other.primary :
|
||||
|
@ -526,7 +510,7 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
return b;
|
||||
}
|
||||
|
||||
/** returns true if the current routing is identical to the other routing in all but meta fields, i.e., version and unassigned info */
|
||||
/** returns true if the current routing is identical to the other routing in all but meta fields, i.e., unassigned info */
|
||||
public boolean equalsIgnoringMetaData(ShardRouting other) {
|
||||
if (primary != other.primary) {
|
||||
return false;
|
||||
|
@ -546,7 +530,7 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
if (state != other.state) {
|
||||
return false;
|
||||
}
|
||||
if (restoreSource != null ? !restoreSource.equals(other.restoreSource) : other.restoreSource != null) {
|
||||
if (recoverySource != null ? !recoverySource.equals(other.recoverySource) : other.recoverySource != null) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
@ -582,7 +566,7 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
h = 31 * h + (relocatingNodeId != null ? relocatingNodeId.hashCode() : 0);
|
||||
h = 31 * h + (primary ? 1 : 0);
|
||||
h = 31 * h + (state != null ? state.hashCode() : 0);
|
||||
h = 31 * h + (restoreSource != null ? restoreSource.hashCode() : 0);
|
||||
h = 31 * h + (recoverySource != null ? recoverySource.hashCode() : 0);
|
||||
h = 31 * h + (allocationId != null ? allocationId.hashCode() : 0);
|
||||
h = 31 * h + (unassignedInfo != null ? unassignedInfo.hashCode() : 0);
|
||||
hashCode = h;
|
||||
|
@ -610,8 +594,8 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
} else {
|
||||
sb.append("[R]");
|
||||
}
|
||||
if (this.restoreSource != null) {
|
||||
sb.append(", restoring[" + restoreSource + "]");
|
||||
if (recoverySource != null) {
|
||||
sb.append(", recovery_source[").append(recoverySource).append("]");
|
||||
}
|
||||
sb.append(", s[").append(state).append("]");
|
||||
if (allocationId != null) {
|
||||
|
@ -638,9 +622,8 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
|
||||
builder.field("expected_shard_size_in_bytes", expectedShardSize);
|
||||
}
|
||||
if (restoreSource() != null) {
|
||||
builder.field("restore_source");
|
||||
restoreSource().toXContent(builder, params);
|
||||
if (recoverySource != null) {
|
||||
builder.field("recovery_source", recoverySource);
|
||||
}
|
||||
if (allocationId != null) {
|
||||
builder.field("allocation_id");
|
||||
|
@ -659,4 +642,14 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
public long getExpectedShardSize() {
|
||||
return expectedShardSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns recovery source for the given shard. Replica shards always recover from the primary {@link PeerRecoverySource}.
|
||||
*
|
||||
* @return recovery source or null if shard is {@link #active()}
|
||||
*/
|
||||
@Nullable
|
||||
public RecoverySource recoverySource() {
|
||||
return recoverySource;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -108,7 +108,11 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         /**
          * Unassigned as a result of a failed primary while the replica was initializing.
          */
-        PRIMARY_FAILED
+        PRIMARY_FAILED,
+        /**
+         * Unassigned after forcing an empty primary
+         */
+        FORCED_EMPTY_PRIMARY
     }

     /**
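
The new reason travels with the routing entry via the same two-argument constructor used throughout this diff; IndexMetaDataUpdater (below) resets the in-sync set when such a primary initializes from an empty store. A hypothetical example of tagging it (the message text is made up):

import org.elasticsearch.cluster.routing.UnassignedInfo;

final class ForcedEmptyPrimarySketch {
    // Hypothetical: how a forced empty primary could be tagged; only the enum constant
    // and the (Reason, String) constructor are taken from this diff.
    static UnassignedInfo tag() {
        return new UnassignedInfo(UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY,
            "forced empty primary via allocation command");
    }
}
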
@@ -111,7 +111,7 @@ public class AllocationService extends AbstractComponent {
         RoutingTable oldRoutingTable = allocation.routingTable();
         RoutingNodes newRoutingNodes = allocation.routingNodes();
         final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build();
-        MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges();
+        MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable);
         assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata
         logClusterHealthStateChange(
             new ClusterStateHealth(ClusterState.builder(clusterName).

@@ -124,20 +124,29 @@ public class AllocationService extends AbstractComponent {
     }

     public Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
-        return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
+        return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)),
+            Collections.emptyList());
+    }
+
+    public Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
+        return applyFailedShards(clusterState, failedShards, Collections.emptyList());
     }

     /**
      * Applies the failed shards. Note, only assigned ShardRouting instances that exist in the routing table should be
-     * provided as parameter and no duplicates should be contained.
+     * provided as parameter. Also applies a list of allocation ids to remove from the in-sync set for shard copies for which there
+     * are no routing entries in the routing table.
      *
      * <p>
      * If the same instance of the routing table is returned, then no change has been made.</p>
      */
-    public Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
-        if (failedShards.isEmpty()) {
+    public Result applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards,
+                                    List<FailedRerouteAllocation.StaleShard> staleShards) {
+        if (staleShards.isEmpty() && failedShards.isEmpty()) {
             return Result.unchanged(clusterState);
         }
+        clusterState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards);
+
         RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
         // shuffle the unassigned nodes, just so we won't have things like poison failed shards
         routingNodes.unassigned().shuffle();

@@ -209,9 +218,10 @@ public class AllocationService extends AbstractComponent {
             final long newComputedLeftDelayNanos = unassignedInfo.getRemainingDelay(allocation.getCurrentNanoTime(),
                 metaData.getIndexSafe(shardRouting.index()).getSettings());
             if (newComputedLeftDelayNanos == 0) {
-                unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(),
+                unassignedIterator.updateUnassigned(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(),
                     unassignedInfo.getFailure(), unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(),
-                    unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus()), allocation.changes());
+                    unassignedInfo.getUnassignedTimeInMillis(), false, unassignedInfo.getLastAllocationStatus()),
+                    shardRouting.recoverySource(), allocation.changes());
             }
         }
     }
@@ -58,6 +58,21 @@ public class FailedRerouteAllocation extends RoutingAllocation {
         }
     }

+    public static class StaleShard {
+        public final ShardId shardId;
+        public final String allocationId;
+
+        public StaleShard(ShardId shardId, String allocationId) {
+            this.shardId = shardId;
+            this.allocationId = allocationId;
+        }
+
+        @Override
+        public String toString() {
+            return "stale shard, shard " + shardId + ", alloc. id [" + allocationId + "]";
+        }
+    }
+
     private final List<FailedShard> failedShards;

     public FailedRerouteAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, ClusterState clusterState,
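
StaleShard exists so the master can drop an allocation id from the in-sync set even when the stale copy no longer has a routing entry to fail; the three-argument applyFailedShards above routes such entries through IndexMetaDataUpdater.removeStaleIdsWithoutRoutings. A sketch of the call, with placeholder cluster state and ids (the service instance is assumed to be available):

import java.util.Collections;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.StaleShard;
import org.elasticsearch.index.shard.ShardId;

final class StaleShardSketch {
    // Marks one shard copy stale without failing any routing entry; shardId and
    // staleAllocationId are placeholders. The returned Result (ignored here) carries
    // the updated routing table and metadata.
    static void markStale(AllocationService allocationService, ClusterState state,
                          ShardId shardId, String staleAllocationId) {
        allocationService.applyFailedShards(state, Collections.emptyList(),
            Collections.singletonList(new StaleShard(shardId, staleAllocationId)));
    }
}
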
@@ -19,15 +19,21 @@

 package org.elasticsearch.cluster.routing.allocation;

+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RoutingChangesObserver;
+import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.StaleShard;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;

+import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;

@@ -39,7 +45,7 @@ import java.util.stream.Collectors;
  * Observer that tracks changes made to RoutingNodes in order to update the primary terms and in-sync allocation ids in
  * {@link IndexMetaData} once the allocation round has completed.
  *
- * Primary terms are updated on primary initialization or primary promotion.
+ * Primary terms are updated on primary initialization or when an active primary fails.
  *
  * Allocation ids are added for shards that become active and removed for shards that stop being active.
  */

@@ -47,15 +53,16 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
     private final Map<ShardId, Updates> shardChanges = new HashMap<>();

     @Override
-    public void shardInitialized(ShardRouting unassignedShard) {
-        if (unassignedShard.primary()) {
-            increasePrimaryTerm(unassignedShard);
-        }
-    }
-
-    @Override
-    public void replicaPromoted(ShardRouting replicaShard) {
-        increasePrimaryTerm(replicaShard);
+    public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
+        assert initializedShard.isRelocationTarget() == false : "shardInitialized is not called on relocation target: " + initializedShard;
+        if (initializedShard.primary()) {
+            increasePrimaryTerm(initializedShard.shardId());
+
+            Updates updates = changes(initializedShard.shardId());
+            assert updates.initializedPrimary == null : "Primary cannot be initialized more than once in same allocation round: " +
+                "(previous: " + updates.initializedPrimary + ", next: " + initializedShard + ")";
+            updates.initializedPrimary = initializedShard;
+        }
     }

     @Override

@@ -65,8 +72,20 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting

     @Override
     public void shardFailed(ShardRouting failedShard, UnassignedInfo unassignedInfo) {
-        if (failedShard.active()) {
+        if (failedShard.active() && unassignedInfo.getReason() != UnassignedInfo.Reason.NODE_LEFT) {
             removeAllocationId(failedShard);
+
+            if (failedShard.primary()) {
+                Updates updates = changes(failedShard.shardId());
+                if (updates.firstFailedPrimary == null) {
+                    // more than one primary can be failed (because of batching, primary can be failed, replica promoted and then failed...)
+                    updates.firstFailedPrimary = failedShard;
+                }
+            }
+        }
+
+        if (failedShard.active() && failedShard.primary()) {
+            increasePrimaryTerm(failedShard.shardId());
         }
     }

@@ -82,49 +101,27 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting

     /**
      * Updates the current {@link MetaData} based on the changes of this RoutingChangesObserver. Specifically
-     * we update {@link IndexMetaData#getActiveAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on
+     * we update {@link IndexMetaData#getInSyncAllocationIds()} and {@link IndexMetaData#primaryTerm(int)} based on
      * the changes made during this allocation.
      *
      * @param oldMetaData {@link MetaData} object from before the routing nodes was changed.
+     * @param newRoutingTable {@link RoutingTable} object after routing changes were applied.
      * @return adapted {@link MetaData}, potentially the original one if no change was needed.
      */
-    public MetaData applyChanges(MetaData oldMetaData) {
+    public MetaData applyChanges(MetaData oldMetaData, RoutingTable newRoutingTable) {
         Map<Index, List<Map.Entry<ShardId, Updates>>> changesGroupedByIndex =
             shardChanges.entrySet().stream().collect(Collectors.groupingBy(e -> e.getKey().getIndex()));

         MetaData.Builder metaDataBuilder = null;
         for (Map.Entry<Index, List<Map.Entry<ShardId, Updates>>> indexChanges : changesGroupedByIndex.entrySet()) {
             Index index = indexChanges.getKey();
-            final IndexMetaData oldIndexMetaData = oldMetaData.index(index);
-            if (oldIndexMetaData == null) {
-                throw new IllegalStateException("no metadata found for index " + index);
-            }
+            final IndexMetaData oldIndexMetaData = oldMetaData.getIndexSafe(index);
             IndexMetaData.Builder indexMetaDataBuilder = null;
             for (Map.Entry<ShardId, Updates> shardEntry : indexChanges.getValue()) {
                 ShardId shardId = shardEntry.getKey();
                 Updates updates = shardEntry.getValue();
-
-                assert Sets.haveEmptyIntersection(updates.addedAllocationIds, updates.removedAllocationIds) :
-                    "Allocation ids cannot be both added and removed in the same allocation round, added ids: " +
-                    updates.addedAllocationIds + ", removed ids: " + updates.removedAllocationIds;
-
-                Set<String> activeAllocationIds = new HashSet<>(oldIndexMetaData.activeAllocationIds(shardId.id()));
-                activeAllocationIds.addAll(updates.addedAllocationIds);
-                activeAllocationIds.removeAll(updates.removedAllocationIds);
-                // only update active allocation ids if there is an active shard
-                if (activeAllocationIds.isEmpty() == false) {
-                    if (indexMetaDataBuilder == null) {
-                        indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
-                    }
-                    indexMetaDataBuilder.putActiveAllocationIds(shardId.id(), activeAllocationIds);
-                }
-
-                if (updates.increaseTerm) {
-                    if (indexMetaDataBuilder == null) {
-                        indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
-                    }
-                    indexMetaDataBuilder.primaryTerm(shardId.id(), oldIndexMetaData.primaryTerm(shardId.id()) + 1);
-                }
+                indexMetaDataBuilder = updateInSyncAllocations(newRoutingTable, oldIndexMetaData, indexMetaDataBuilder, shardId, updates);
+                indexMetaDataBuilder = updatePrimaryTerm(oldIndexMetaData, indexMetaDataBuilder, shardId, updates);
             }

             if (indexMetaDataBuilder != null) {

@@ -142,6 +139,149 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
         }
     }

+    /**
+     * Updates in-sync allocations with routing changes that were made to the routing table.
+     */
+    private IndexMetaData.Builder updateInSyncAllocations(RoutingTable newRoutingTable, IndexMetaData oldIndexMetaData,
+                                                          IndexMetaData.Builder indexMetaDataBuilder, ShardId shardId, Updates updates) {
+        assert Sets.haveEmptyIntersection(updates.addedAllocationIds, updates.removedAllocationIds) :
+            "allocation ids cannot be both added and removed in the same allocation round, added ids: " +
+            updates.addedAllocationIds + ", removed ids: " + updates.removedAllocationIds;
+
+        Set<String> oldInSyncAllocationIds = oldIndexMetaData.inSyncAllocationIds(shardId.id());
+
+        // check if we have been force-initializing an empty primary or a stale primary
+        if (updates.initializedPrimary != null && oldInSyncAllocationIds.isEmpty() == false &&
+            oldInSyncAllocationIds.contains(updates.initializedPrimary.allocationId().getId()) == false) {
+            // we're not reusing an existing in-sync allocation id to initialize a primary, which means that we're either force-allocating
+            // an empty or a stale primary (see AllocateEmptyPrimaryAllocationCommand or AllocateStalePrimaryAllocationCommand).
+            RecoverySource.Type recoverySourceType = updates.initializedPrimary.recoverySource().getType();
+            boolean emptyPrimary = recoverySourceType == RecoverySource.Type.EMPTY_STORE;
+            assert updates.addedAllocationIds.isEmpty() : (emptyPrimary ? "empty" : "stale") +
+                " primary is not force-initialized in same allocation round where shards are started";
+
+            if (indexMetaDataBuilder == null) {
+                indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
+            }
+            if (emptyPrimary) {
+                // forcing an empty primary resets the in-sync allocations to the empty set (ShardRouting.allocatedPostIndexCreate)
+                indexMetaDataBuilder.putInSyncAllocationIds(shardId.id(), Collections.emptySet());
+            } else {
+                // forcing a stale primary resets the in-sync allocations to the singleton set with the stale id
+                indexMetaDataBuilder.putInSyncAllocationIds(shardId.id(),
+                    Collections.singleton(updates.initializedPrimary.allocationId().getId()));
+            }
+        } else {
+            // standard path for updating in-sync ids
+            Set<String> inSyncAllocationIds = new HashSet<>(oldInSyncAllocationIds);
+            inSyncAllocationIds.addAll(updates.addedAllocationIds);
+            inSyncAllocationIds.removeAll(updates.removedAllocationIds);
+
+            // Prevent the set of inSyncAllocationIds from growing unboundedly. This can happen for example if we don't write to a primary
+            // but repeatedly shut down nodes that have active replicas.
+            // We use number_of_replicas + 1 (= possible active shard copies) to bound the inSyncAllocationIds set
+            int maxActiveShards = oldIndexMetaData.getNumberOfReplicas() + 1; // +1 for the primary
+            if (inSyncAllocationIds.size() > maxActiveShards) {
+                // trim entries that have no corresponding shard routing in the cluster state (i.e. trim unavailable copies)
+                List<ShardRouting> assignedShards = newRoutingTable.shardRoutingTable(shardId).assignedShards();
+                assert assignedShards.size() <= maxActiveShards :
+                    "cannot have more assigned shards " + assignedShards + " than maximum possible active shards " + maxActiveShards;
+                Set<String> assignedAllocations = assignedShards.stream().map(s -> s.allocationId().getId()).collect(Collectors.toSet());
+                inSyncAllocationIds = inSyncAllocationIds.stream()
+                    .sorted(Comparator.comparing(assignedAllocations::contains).reversed()) // values with routing entries first
+                    .limit(maxActiveShards)
+                    .collect(Collectors.toSet());
+            }
+
+            // only update in-sync allocation ids if there is at least one entry remaining. Assume for example that there only
+            // ever was a primary active and now it failed. If we were to remove the allocation id from the in-sync set, this would
+            // create an empty primary on the next allocation (see ShardRouting#allocatedPostIndexCreate)
+            if (inSyncAllocationIds.isEmpty() && oldInSyncAllocationIds.isEmpty() == false) {
+                assert updates.firstFailedPrimary != null :
+                    "in-sync set became empty but active primary wasn't failed: " + oldInSyncAllocationIds;
+                if (updates.firstFailedPrimary != null) {
+                    // add back allocation id of failed primary
+                    inSyncAllocationIds.add(updates.firstFailedPrimary.allocationId().getId());
+                }
+            }
+
+            assert inSyncAllocationIds.isEmpty() == false || oldInSyncAllocationIds.isEmpty() :
+                "in-sync allocations cannot become empty after they have been non-empty: " + oldInSyncAllocationIds;
+
+            // be extra safe here and only update in-sync set if it is non-empty
|
||||
if (inSyncAllocationIds.isEmpty() == false) {
|
||||
if (indexMetaDataBuilder == null) {
|
||||
indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
|
||||
}
|
||||
indexMetaDataBuilder.putInSyncAllocationIds(shardId.id(), inSyncAllocationIds);
|
||||
}
|
||||
}
|
||||
return indexMetaDataBuilder;
|
||||
}
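To make the trimming rule above concrete, here is a minimal, self-contained sketch using only JDK collections (the ids and replica count are made up; the stream pipeline mirrors the one in updateInSyncAllocations):

    import java.util.*;
    import java.util.stream.*;

    public class TrimInSyncIdsExample {
        public static void main(String[] args) {
            // three in-sync ids, but only "a1" still has a shard routing entry
            Set<String> inSyncAllocationIds = new HashSet<>(Arrays.asList("a1", "a2", "a3"));
            Set<String> assignedAllocations = Collections.singleton("a1");
            int maxActiveShards = 2; // number_of_replicas (1) + 1 for the primary

            Set<String> trimmed = inSyncAllocationIds.stream()
                .sorted(Comparator.comparing(assignedAllocations::contains).reversed()) // ids with routing entries first
                .limit(maxActiveShards)
                .collect(Collectors.toSet());

            // "a1" always survives the trim; the set size is now bounded by maxActiveShards
            System.out.println(trimmed);
        }
    }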
    /**
     * Removes allocation ids from the in-sync set for shard copies for which there are no routing entries in the routing table.
     * This method is called in AllocationService before any changes to the routing table are made.
     */
    public static ClusterState removeStaleIdsWithoutRoutings(ClusterState clusterState, List<StaleShard> staleShards) {
        MetaData oldMetaData = clusterState.metaData();
        RoutingTable oldRoutingTable = clusterState.routingTable();
        MetaData.Builder metaDataBuilder = null;
        // group staleShards entries by index
        for (Map.Entry<Index, List<StaleShard>> indexEntry : staleShards.stream().collect(
            Collectors.groupingBy(fs -> fs.shardId.getIndex())).entrySet()) {
            final IndexMetaData oldIndexMetaData = oldMetaData.getIndexSafe(indexEntry.getKey());
            IndexMetaData.Builder indexMetaDataBuilder = null;
            // group staleShards entries by shard id
            for (Map.Entry<ShardId, List<StaleShard>> shardEntry : indexEntry.getValue().stream().collect(
                Collectors.groupingBy(staleShard -> staleShard.shardId)).entrySet()) {
                int shardNumber = shardEntry.getKey().getId();
                Set<String> oldInSyncAllocations = oldIndexMetaData.inSyncAllocationIds(shardNumber);
                Set<String> idsToRemove = shardEntry.getValue().stream().map(e -> e.allocationId).collect(Collectors.toSet());
                assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) :
                    "removing stale ids: " + idsToRemove + ", some of which still have a routing entry: " + oldRoutingTable.prettyPrint();
                Set<String> remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove);
                assert remainingInSyncAllocations.isEmpty() == false : "Set of in-sync ids cannot become empty for shard " +
                    shardEntry.getKey() + " (before: " + oldInSyncAllocations + ", ids to remove: " + idsToRemove + ")";
                // be extra safe here: if the in-sync set were to become empty, this would create an empty primary on the next allocation
                // (see ShardRouting#allocatedPostIndexCreate)
                if (remainingInSyncAllocations.isEmpty() == false) {
                    if (indexMetaDataBuilder == null) {
                        indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
                    }
                    indexMetaDataBuilder.putInSyncAllocationIds(shardNumber, remainingInSyncAllocations);
                }
            }

            if (indexMetaDataBuilder != null) {
                if (metaDataBuilder == null) {
                    metaDataBuilder = MetaData.builder(oldMetaData);
                }
                metaDataBuilder.put(indexMetaDataBuilder);
            }
        }

        if (metaDataBuilder != null) {
            return ClusterState.builder(clusterState).metaData(metaDataBuilder).build();
        } else {
            return clusterState;
        }
    }
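A hedged sketch of how a caller might use this method (the StaleShard constructor shape is assumed from the fields referenced above; the allocation id is made up):

    // drop the in-sync id of a shard copy that no longer has a routing entry
    List<StaleShard> staleShards = Collections.singletonList(
        new StaleShard(shardId, "xJvvVq_lQGCfRfTRinsFYw")); // hypothetical stale allocation id
    ClusterState updatedState = IndexMetaDataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards);
    // updatedState is either a new state with a shrunken in-sync set, or the old state if nothing changed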
    /**
     * Increases the primary term if {@link #increasePrimaryTerm} was called for this shard id.
     */
    private IndexMetaData.Builder updatePrimaryTerm(IndexMetaData oldIndexMetaData, IndexMetaData.Builder indexMetaDataBuilder,
                                                    ShardId shardId, Updates updates) {
        if (updates.increaseTerm) {
            if (indexMetaDataBuilder == null) {
                indexMetaDataBuilder = IndexMetaData.builder(oldIndexMetaData);
            }
            indexMetaDataBuilder.primaryTerm(shardId.id(), oldIndexMetaData.primaryTerm(shardId.id()) + 1);
        }
        return indexMetaDataBuilder;
    }

    /**
     * Helper method that creates an update entry for the given shard id if such an entry does not exist yet.
     */

@@ -166,13 +306,15 @@ public class IndexMetaDataUpdater extends RoutingChangesObserver.AbstractRouting
    /**
     * Increase primary term for this shard id
     */
    private void increasePrimaryTerm(ShardRouting shardRouting) {
        changes(shardRouting.shardId()).increaseTerm = true;
    private void increasePrimaryTerm(ShardId shardId) {
        changes(shardId).increaseTerm = true;
    }

    private static class Updates {
        private boolean increaseTerm; // whether the primary term should be increased
        private Set<String> addedAllocationIds = new HashSet<>(); // allocation ids that should be added to the in-sync set
        private Set<String> removedAllocationIds = new HashSet<>(); // allocation ids that should be removed from the in-sync set
        private ShardRouting initializedPrimary = null; // primary that was initialized from unassigned
        private ShardRouting firstFailedPrimary = null; // first active primary that was failed
    }
}
@@ -307,8 +307,8 @@ public class RoutingAllocation {
    /**
     * Returns updated {@link MetaData} based on the changes that were made to the routing nodes
     */
    public MetaData updateMetaDataWithRoutingChanges() {
        return indexMetaDataUpdater.applyChanges(metaData);
    public MetaData updateMetaDataWithRoutingChanges(RoutingTable newRoutingTable) {
        return indexMetaDataUpdater.applyChanges(metaData, newRoutingTable);
    }

    /**
@@ -38,8 +38,9 @@ public class RoutingNodesChangedObserver implements RoutingChangesObserver {
    }

    @Override
    public void shardInitialized(ShardRouting unassignedShard) {
    public void shardInitialized(ShardRouting unassignedShard, ShardRouting initializedShard) {
        assert unassignedShard.unassigned() : "expected unassigned shard " + unassignedShard;
        assert initializedShard.initializing() : "expected initializing shard " + initializedShard;
        setChanged();
    }
@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;

@@ -187,7 +188,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
     * @param shardRouting the shard routing that is to be matched in unassigned shards
     */
    protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode, ShardRouting shardRouting) {
        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null);
        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null, null);
    }

    /**

@@ -198,16 +199,19 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
     * @param routingNode the node to initialize it to
     * @param shardRouting the shard routing that is to be matched in unassigned shards
     * @param unassignedInfo unassigned info to override
     * @param recoverySource recovery source to override
     */
    protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode,
                                             ShardRouting shardRouting, @Nullable UnassignedInfo unassignedInfo) {
                                             ShardRouting shardRouting, @Nullable UnassignedInfo unassignedInfo,
                                             @Nullable RecoverySource recoverySource) {
        for (RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator(); it.hasNext(); ) {
            ShardRouting unassigned = it.next();
            if (!unassigned.equalsIgnoringMetaData(shardRouting)) {
                continue;
            }
            if (unassignedInfo != null) {
                unassigned = it.updateUnassignedInfo(unassignedInfo, allocation.changes());
            if (unassignedInfo != null || recoverySource != null) {
                unassigned = it.updateUnassigned(unassignedInfo != null ? unassignedInfo : unassigned.unassignedInfo(),
                    recoverySource != null ? recoverySource : unassigned.recoverySource(), allocation.changes());
            }
            it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes());
            return;
@@ -20,6 +20,8 @@
package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;

@@ -115,21 +117,20 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
            return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned");
        }

        if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && acceptDataLoss == false) {
        if (shardRouting.recoverySource().getType() != RecoverySource.Type.EMPTY_STORE && acceptDataLoss == false) {
            return explainOrThrowRejectedCommand(explain, allocation,
                "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
        }

        UnassignedInfo unassignedInfoToUpdate = null;
        if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED) {
            // we need to move the unassigned info back to treat it as if it was index creation
            unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
        if (shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY) {
            unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY,
                "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
                shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false,
                shardRouting.unassignedInfo().getLastAllocationStatus());
        }

        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);
        initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate, StoreRecoverySource.EMPTY_STORE_INSTANCE);

        return new RerouteExplanation(this, allocation.decision(Decision.YES, name() + " (allocation command)", "ignore deciders"));
    }
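A hedged usage sketch of this command (the constructor shape — index name, shard id, node, acceptDataLoss — is assumed from the parameters used above):

    // force-allocate an empty primary for shard 0 of "my-index" on "node-1",
    // explicitly accepting that any previous data for this shard copy is lost
    AllocateEmptyPrimaryAllocationCommand command =
        new AllocateEmptyPrimaryAllocationCommand("my-index", 0, "node-1", true);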
@@ -19,8 +19,8 @@

package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;

@@ -120,8 +120,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
                "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
        }

        final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
        if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
        if (shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
            return explainOrThrowRejectedCommand(explain, allocation,
                "trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active");
        }
@@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;

@@ -120,14 +121,14 @@ public class DiskThresholdDecider extends AllocationDecider {
            logger.trace("node [{}] has {}% used disk", node.nodeId(), usedDiskPercentage);
        }

        // a flag for whether the primary shard has been previously allocated
        IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
        boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);
        // flag that determines whether the low threshold checks below can be skipped. We use this for a primary shard that is freshly
        // allocated and empty.
        boolean skipLowTresholdChecks = shardRouting.primary() &&
            shardRouting.active() == false && shardRouting.recoverySource().getType() == RecoverySource.Type.EMPTY_STORE;

        // checks for exact byte comparisons
        if (freeBytes < diskThresholdSettings.getFreeBytesThresholdLow().bytes()) {
            // If the shard is a replica or has a primary that has already been allocated before, check the low threshold
            if (!shardRouting.primary() || (shardRouting.primary() && primaryHasBeenAllocated)) {
            if (skipLowTresholdChecks == false) {
                if (logger.isDebugEnabled()) {
                    logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation",
                        diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId());

@@ -162,8 +163,8 @@ public class DiskThresholdDecider extends AllocationDecider {

        // checks for percentage comparisons
        if (freeDiskPercentage < diskThresholdSettings.getFreeDiskThresholdLow()) {
            // If the shard is a replica or has a primary that has already been allocated before, check the low threshold
            if (!shardRouting.primary() || (shardRouting.primary() && primaryHasBeenAllocated)) {
            // If the shard is a replica or is a non-empty primary, check the low threshold
            if (skipLowTresholdChecks == false) {
                if (logger.isDebugEnabled()) {
                    logger.debug("more than the allowed {} used disk threshold ({} used) on node [{}], preventing allocation",
                        Strings.format1Decimals(usedDiskThresholdLow, "%"),

@@ -378,12 +379,13 @@ public class DiskThresholdDecider extends AllocationDecider {
    public static long getExpectedShardSize(ShardRouting shard, RoutingAllocation allocation, long defaultValue) {
        final IndexMetaData metaData = allocation.metaData().getIndexSafe(shard.index());
        final ClusterInfo info = allocation.clusterInfo();
        if (metaData.getMergeSourceIndex() != null && shard.allocatedPostIndexCreate(metaData) == false) {
        if (metaData.getMergeSourceIndex() != null && shard.active() == false &&
            shard.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
            // in the shrink index case we sum up the source index shards since we basically make a copy of the shard in
            // the worst case
            long targetShardSize = 0;
            final Index mergeSourceIndex = metaData.getMergeSourceIndex();
            final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(metaData.getMergeSourceIndex());
            final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(mergeSourceIndex);
            final Set<ShardId> shardIds = IndexMetaData.selectShrinkShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards());
            for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) {
                if (shardIds.contains(shardRoutingTable.shardId())) {
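A worked example of the shrink sizing above, with hypothetical numbers:

    // Shrinking a 4-shard source index into a 2-shard target:
    // IndexMetaData.selectShrinkShards(0, sourceIndexMeta, 2) yields source shards {0, 1},
    // so the expected size of target shard 0 is size(source 0) + size(source 1),
    // the worst case, in which the shrunken shard is a full copy of both source shards.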
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import java.util.Locale;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

@@ -113,7 +114,8 @@ public class EnableAllocationDecider extends AllocationDecider {
            case NONE:
                return allocation.decision(Decision.NO, NAME, "no allocations are allowed");
            case NEW_PRIMARIES:
                if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
                if (shardRouting.primary() && shardRouting.active() == false &&
                    shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
                    return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
                } else {
                    return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden");
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

@@ -92,7 +93,7 @@ public class FilterAllocationDecider extends AllocationDecider {
        // this is a setting that can only be set within the system!
        IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
        DiscoveryNodeFilters initialRecoveryFilters = indexMd.getInitialRecoveryFilters();
        if (shardRouting.allocatedPostIndexCreate(indexMd) == false &&
        if (shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE &&
            initialRecoveryFilters != null &&
            initialRecoveryFilters.match(node.node()) == false) {
            return allocation.decision(Decision.NO, NAME, "node does not match index initial recovery filters [%s]",
@@ -19,7 +19,8 @@

package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;

@@ -45,12 +46,12 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        if (shardRouting.primary()) {
            if (shardRouting.currentNodeId() == null) {
                if (shardRouting.restoreSource() != null) {
                if (shardRouting.recoverySource() != null && shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) {
                    // restoring from a snapshot - check that the node can handle the version
                    return isVersionCompatible(shardRouting.restoreSource(), node, allocation);
                    return isVersionCompatible((SnapshotRecoverySource) shardRouting.recoverySource(), node, allocation);
                } else {
                    // fresh primary, we can allocate wherever
                    return allocation.decision(Decision.YES, NAME, "the primary shard is new and can be allocated anywhere");
                    // existing or fresh primary on the node
                    return allocation.decision(Decision.YES, NAME, "the primary shard is new or already existed on the node");
                }
            } else {
                // relocating primary, only migrate to newer host

@@ -83,14 +84,14 @@ public class NodeVersionAllocationDecider extends AllocationDecider {
        }
    }

    private Decision isVersionCompatible(RestoreSource restoreSource, final RoutingNode target, RoutingAllocation allocation) {
        if (target.node().getVersion().onOrAfter(restoreSource.version())) {
    private Decision isVersionCompatible(SnapshotRecoverySource recoverySource, final RoutingNode target, RoutingAllocation allocation) {
        if (target.node().getVersion().onOrAfter(recoverySource.version())) {
            /* we can allocate if we can restore from a snapshot that is older or on the same version */
            return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than snapshot version [%s]",
                target.node().getVersion(), restoreSource.version());
                target.node().getVersion(), recoverySource.version());
        } else {
            return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the snapshot version [%s]",
                target.node().getVersion(), restoreSource.version());
                target.node().getVersion(), recoverySource.version());
        }
    }
}
@@ -19,6 +19,7 @@

package org.elasticsearch.cluster.routing.allocation.decider;

import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;

@@ -111,7 +112,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        if (shardRouting.primary() && shardRouting.unassigned()) {
            assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery() == false;
            assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() != RecoverySource.Type.PEER;
            // primary is unassigned, means we are going to do recovery from store, snapshot or local shards
            // count *just the primaries* currently doing recovery on the node and check against primariesInitialRecoveries

@@ -132,7 +133,7 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
            }
        } else {
            // Peer recovery
            assert initializingShard(shardRouting, node.nodeId()).isPeerRecovery();
            assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == RecoverySource.Type.PEER;

            // Allocating a shard to this node will increase the incoming recoveries
            int currentInRecoveries = allocation.routingNodes().getIncomingRecoveries(node.nodeId());
@@ -431,27 +431,35 @@ public abstract class StreamInput extends InputStream {
        return map;
    }

    /**
     * Read a {@link Map} of {@code K}-type keys to {@code V}-type {@link List}s.
     * <pre><code>
     * Map<String, List<String>> map = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
     * </code></pre>
     *
     * @param keyReader The key reader
     * @param valueReader The value reader
     * @return Never {@code null}.
     */
    public <K, V> Map<K, List<V>> readMapOfLists(final Writeable.Reader<K> keyReader, final Writeable.Reader<V> valueReader)
            throws IOException {
        final int size = readVInt();
        if (size == 0) {
            return Collections.emptyMap();
        }
        final Map<K, List<V>> map = new HashMap<>(size);
        for (int i = 0; i < size; ++i) {
            map.put(keyReader.read(this), readList(valueReader));
        }
        return map;
    }

    @Nullable
    @SuppressWarnings("unchecked")
    public Map<String, Object> readMap() throws IOException {
        return (Map<String, Object>) readGenericValue();
    }

    /**
     * Read a map of strings to string lists.
     */
    public Map<String, List<String>> readMapOfLists() throws IOException {
        int size = readVInt();
        if (size == 0) {
            return Collections.emptyMap();
        }
        Map<String, List<String>> map = new HashMap<>(size);
        for (int i = 0; i < size; ++i) {
            map.put(readString(), readList(StreamInput::readString));
        }
        return map;
    }

    @SuppressWarnings({"unchecked"})
    @Nullable
    public Object readGenericValue() throws IOException {
@@ -32,6 +32,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.io.stream.Writeable.Writer;
import org.elasticsearch.common.text.Text;
import org.joda.time.DateTimeZone;
import org.joda.time.ReadableInstant;

@@ -51,6 +52,7 @@ import java.nio.file.NotDirectoryException;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

@@ -413,23 +415,50 @@ public abstract class StreamOutput extends OutputStream {
    }

    /**
     * Writes a map of strings to string lists.
     * Writes a map to the stream with a consistent key order, so that the same logical map
     * always produces the same bytes.
     * This method is compatible with {@code StreamInput.readMap} and {@code StreamInput.readGenericValue}.
     * Only the order of the top-level keys is normalized, not that of any maps contained within the map.
     */
    public void writeMapOfLists(Map<String, List<String>> map) throws IOException {
        writeVInt(map.size());

        for (Map.Entry<String, List<String>> entry : map.entrySet()) {
            writeString(entry.getKey());
            writeVInt(entry.getValue().size());
            for (String v : entry.getValue()) {
                writeString(v);
            }
    public void writeMapWithConsistentOrder(@Nullable Map<String, ? extends Object> map)
            throws IOException {
        if (map == null) {
            writeByte((byte) -1);
            return;
        }
        assert false == (map instanceof LinkedHashMap);
        this.writeByte((byte) 10);
        this.writeVInt(map.size());
        Iterator<? extends Map.Entry<String, ?>> iterator =
            map.entrySet().stream().sorted((a, b) -> a.getKey().compareTo(b.getKey())).iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, ?> next = iterator.next();
            this.writeString(next.getKey());
            this.writeGenericValue(next.getValue());
        }
    }
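A hedged sketch of what the consistent order buys (assuming BytesStreamOutput from this code base; the maps are made up):

    Map<String, Object> first = new HashMap<>();
    first.put("a", 1);
    first.put("b", 2);
    Map<String, Object> second = new HashMap<>();
    second.put("b", 2); // same entries, inserted in the opposite order
    second.put("a", 1);

    BytesStreamOutput out1 = new BytesStreamOutput();
    out1.writeMapWithConsistentOrder(first);
    BytesStreamOutput out2 = new BytesStreamOutput();
    out2.writeMapWithConsistentOrder(second);
    // keys are sorted before writing, so both maps serialize to identical bytes
    assert out1.bytes().equals(out2.bytes());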
    @FunctionalInterface
    interface Writer {
        void write(StreamOutput o, Object value) throws IOException;

    /**
     * Write a {@link Map} of {@code K}-type keys to {@code V}-type {@link List}s.
     * <pre><code>
     * Map<String, List<String>> map = ...;
     * out.writeMapOfLists(map, StreamOutput::writeString, StreamOutput::writeString);
     * </code></pre>
     *
     * @param keyWriter The key writer
     * @param valueWriter The value writer
     */
    public <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer<K> keyWriter, final Writer<V> valueWriter)
            throws IOException {
        writeVInt(map.size());

        for (final Map.Entry<K, List<V>> entry : map.entrySet()) {
            keyWriter.write(this, entry.getKey());
            writeVInt(entry.getValue().size());
            for (final V value : entry.getValue()) {
                valueWriter.write(this, value);
            }
        }
    }
    private static final Map<Class<?>, Writer> WRITERS;

@@ -549,6 +578,12 @@ public abstract class StreamOutput extends OutputStream {
        WRITERS = Collections.unmodifiableMap(writers);
    }

    /**
     * Note: when serializing a map, the same logical map can produce different byte streams depending
     * on its key-value order, so the written and read maps may not have the same stream order.
     * To get a deterministic stream order for a map, use {@code writeMapWithConsistentOrder} instead.
     */
    public void writeGenericValue(@Nullable Object value) throws IOException {
        if (value == null) {
            writeByte((byte) -1);
@@ -25,26 +25,69 @@ import java.io.IOException;
 * Implementers can be written to a {@linkplain StreamOutput} and read from a {@linkplain StreamInput}. This allows them to be "thrown
 * across the wire" using Elasticsearch's internal protocol. If the implementer also implements equals and hashCode then a copy made by
 * serializing and deserializing must be equal and have the same hashCode. It isn't required that such a copy be entirely unchanged.
 *
 * <p>
 * Prefer implementing this interface over implementing {@link Streamable} where possible. Lots of code depends on {@linkplain Streamable}
 * so this isn't always possible.
 */
public interface Writeable {

    /**
     * Write this into the {@linkplain StreamOutput}.
     */
    void writeTo(StreamOutput out) throws IOException;
    void writeTo(final StreamOutput out) throws IOException;

    /**
     * Reference to a method that can write some object to a {@link StreamOutput}.
     * <p>
     * By convention this is a method from {@link StreamOutput} itself (e.g., {@link StreamOutput#writeString}). If the value can be
     * {@code null}, then the "optional" variant of methods should be used!
     * <p>
     * Most classes should implement {@link Writeable} and the {@link Writeable#writeTo(StreamOutput)} method should <em>use</em>
     * {@link StreamOutput} methods directly or this indirectly:
     * <pre><code>
     * public void writeTo(StreamOutput out) throws IOException {
     *     out.writeVInt(someValue);
     *     out.writeMapOfLists(someMap, StreamOutput::writeString, StreamOutput::writeString);
     * }
     * </code></pre>
     */
    @FunctionalInterface
    interface Writer<V> {

        /**
         * Write {@code V}-type {@code value} to the {@code out}put stream.
         *
         * @param out Output to write the {@code value} to
         * @param value The value to add
         */
        void write(final StreamOutput out, final V value) throws IOException;

    }

    /**
     * Reference to a method that can read some object from a stream. By convention this is a constructor that takes
     * {@linkplain StreamInput} as an argument for most classes and a static method for things like enums. Returning null from one of these
     * is always wrong - for that we use methods like {@link StreamInput#readOptionalWriteable(Reader)}.
     * <p>
     * As most classes will implement this via a constructor (or a static method in the case of enumerations), it's something that should
     * look like:
     * <pre><code>
     * public MyClass(final StreamInput in) throws IOException {
     *     this.someValue = in.readVInt();
     *     this.someMap = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
     * }
     * </code></pre>
     */
    @FunctionalInterface
    interface Reader<R> {
    interface Reader<V> {

        /**
         * Read R from a stream.
         * Read {@code V}-type value from a stream.
         *
         * @param in Input to read the value from
         */
        R read(StreamInput in) throws IOException;
        V read(final StreamInput in) throws IOException;

    }

}
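A minimal sketch of a class implementing the contracts above (a hypothetical Point; note that the read constructor and writeTo must mirror each other exactly):

    public class Point implements Writeable {
        private final int x;
        private final int y;

        public Point(int x, int y) {
            this.x = x;
            this.y = y;
        }

        // the conventional Reader: a constructor taking a StreamInput
        public Point(final StreamInput in) throws IOException {
            this.x = in.readInt();
            this.y = in.readInt();
        }

        @Override
        public void writeTo(final StreamOutput out) throws IOException {
            out.writeInt(x); // same order as the read side
            out.writeInt(y);
        }
    }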
@@ -107,9 +107,6 @@ public class Setting<T> extends ToXContentToBytes {
        IndexScope
    }

    private static final ESLogger logger = Loggers.getLogger(Setting.class);
    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);

    private final Key key;
    protected final Function<Settings, String> defaultValue;
    @Nullable

@@ -322,6 +319,7 @@ public class Setting<T> extends ToXContentToBytes {
        // They're using the setting, so we need to tell them to stop
        if (this.isDeprecated() && this.exists(settings)) {
            // It would be convenient to show its replacement key, but replacement is often not so simple
            final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(getClass()));
            deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " +
                "See the breaking changes lists in the documentation for details", getKey());
        }
@@ -0,0 +1,128 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.util.concurrent;

import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.function.Consumer;

/**
 * This async IO processor allows batching IO operations and having a single writer process the write operations.
 * This can be used to ensure that threads can continue with other work while the actual IO operation is still processed
 * by a single worker. A worker in this context can be any caller of the {@link #put(Object, Consumer)} method since it will
 * hijack a worker if nobody else is currently processing queued items. If the internal queue has reached its capacity, incoming threads
 * might be blocked until other items are processed.
 */
public abstract class AsyncIOProcessor<Item> {
    private final ESLogger logger;
    private final ArrayBlockingQueue<Tuple<Item, Consumer<Exception>>> queue;
    private final Semaphore promiseSemaphore = new Semaphore(1);

    protected AsyncIOProcessor(ESLogger logger, int queueSize) {
        this.logger = logger;
        this.queue = new ArrayBlockingQueue<>(queueSize);
    }

    /**
     * Adds the given item to the queue. The listener is notified once the item is processed.
     */
    public final void put(Item item, Consumer<Exception> listener) {
        Objects.requireNonNull(item, "item must not be null");
        Objects.requireNonNull(listener, "listener must not be null");
        // the algorithm here tries to reduce the load on each individual caller.
        // we try to have only one caller that processes pending items to disk while others just add to the queue but
        // at the same time never overload the node by pushing too many items into the queue.

        // we first try to make a promise that we are responsible for the processing
        final boolean promised = promiseSemaphore.tryAcquire();
        final Tuple<Item, Consumer<Exception>> itemTuple = new Tuple<>(item, listener);
        if (promised == false) {
            // in this case we are not responsible and can just block until there is space
            try {
                queue.put(new Tuple<>(item, listener));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                listener.accept(e);
            }
        }

        // here we have to try to make the promise again otherwise there is a race when a thread puts an entry without making the promise
        // while we are draining; that means we might exit the while loop below too early if the drainAndProcess call is fast.
        if (promised || promiseSemaphore.tryAcquire()) {
            final List<Tuple<Item, Consumer<Exception>>> candidates = new ArrayList<>();
            try {
                if (promised) {
                    // we are responsible for processing; we don't need to add the tuple to the queue, we can just add it to the candidates
                    candidates.add(itemTuple);
                }
                // since we made the promise to process we gotta do it here at least once
                drainAndProcess(candidates);
            } finally {
                promiseSemaphore.release(); // now to ensure we are passing it on we release the promise so another thread can take over
            }
            while (queue.isEmpty() == false && promiseSemaphore.tryAcquire()) {
                // yet if the queue is not empty AND nobody else has yet made the promise to take over we continue processing
                try {
                    drainAndProcess(candidates);
                } finally {
                    promiseSemaphore.release();
                }
            }
        }
    }

    private void drainAndProcess(List<Tuple<Item, Consumer<Exception>>> candidates) {
        queue.drainTo(candidates);
        processList(candidates);
        candidates.clear();
    }

    private void processList(List<Tuple<Item, Consumer<Exception>>> candidates) {
        Exception exception = null;
        if (candidates.isEmpty() == false) {
            try {
                write(candidates);
            } catch (Exception ex) { // if this fails we are in deep shit - fail the request
                logger.debug("failed to write candidates", ex);
                // this exception is passed to all listeners - we don't retry. if this doesn't work we are in deep shit
                exception = ex;
            }
        }
        for (Tuple<Item, Consumer<Exception>> tuple : candidates) {
            Consumer<Exception> consumer = tuple.v2();
            try {
                consumer.accept(exception);
            } catch (Exception ex) {
                logger.warn("failed to notify callback", ex);
            }
        }
    }

    /**
     * Writes or processes the items out or to disk.
     */
    protected abstract void write(List<Tuple<Item, Consumer<Exception>>> candidates) throws IOException;
}
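A minimal usage sketch under the signatures above (the logger and the IO performed per batch are placeholders):

    AsyncIOProcessor<String> processor = new AsyncIOProcessor<String>(logger, 1024) {
        @Override
        protected void write(List<Tuple<String, Consumer<Exception>>> candidates) throws IOException {
            // one batched IO operation covering every queued item, e.g. a single fsync
            for (Tuple<String, Consumer<Exception>> candidate : candidates) {
                // process candidate.v1()
            }
        }
    };
    // callers on any thread; whichever caller holds the promise drains and writes the whole batch
    processor.put("item-1", failure -> {
        if (failure != null) {
            // the batched write failed; every listener in the batch sees the same exception
        }
    });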
@@ -269,7 +269,7 @@ public final class ThreadContext implements Closeable, Writeable {
        }

        this.requestHeaders = requestHeaders;
        this.responseHeaders = in.readMapOfLists();
        this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
        this.transientHeaders = Collections.emptyMap();
    }

@@ -370,7 +370,7 @@ public final class ThreadContext implements Closeable, Writeable {
            out.writeString(entry.getValue());
        }

        out.writeMapOfLists(responseHeaders);
        out.writeMapOfLists(responseHeaders, StreamOutput::writeString, StreamOutput::writeString);
    }
}
@@ -19,12 +19,15 @@

package org.elasticsearch.common.xcontent;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.util.Collections;
import java.util.Set;

/**
 * A generic abstraction on top of handling content, inspired by JSON and pull parsing.

@@ -42,27 +45,20 @@ public interface XContent {
     * Creates a new generator using the provided output stream.
     */
    default XContentGenerator createGenerator(OutputStream os) throws IOException {
        return createGenerator(os, null, true);
        return createGenerator(os, Collections.emptySet(), Collections.emptySet());
    }

    /**
     * Creates a new generator using the provided output stream and some
     * inclusive filters. Same as createGenerator(os, filters, true).
     */
    default XContentGenerator createGenerator(OutputStream os, String[] filters) throws IOException {
        return createGenerator(os, filters, true);
    }

    /**
     * Creates a new generator using the provided output stream and some
     * filters.
     * Creates a new generator using the provided output stream and some inclusive and/or exclusive filters. When both exclusive and
     * inclusive filters are provided, the underlying generator will first use exclusion filters to remove fields and then will check the
     * remaining fields against the inclusive filters.
     *
     * @param inclusive
     *            If true only paths matching a filter will be included in
     *            output. If false no path matching a filter will be included in
     *            output
     * @param os the output stream
     * @param includes the inclusive filters: only fields and objects that match the inclusive filters will be written to the output.
     * @param excludes the exclusive filters: only fields and objects that don't match the exclusive filters will be written to the output.
     */
    XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException;
    XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException;

    /**
     * Creates a parser over the provided string content.
     */
@@ -19,21 +19,8 @@

package org.elasticsearch.common.xcontent;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.nio.file.Path;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.io.BytesStream;

@@ -47,6 +34,21 @@ import org.joda.time.ReadableInstant;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.nio.file.Path;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

/**
 * A utility to build XContent (ie json).
 */
@@ -58,12 +60,8 @@ public final class XContentBuilder implements BytesStream, Releasable {
        return new XContentBuilder(xContent, new BytesStreamOutput());
    }

    public static XContentBuilder builder(XContent xContent, String[] filters) throws IOException {
        return new XContentBuilder(xContent, new BytesStreamOutput(), filters);
    }

    public static XContentBuilder builder(XContent xContent, String[] filters, boolean inclusive) throws IOException {
        return new XContentBuilder(xContent, new BytesStreamOutput(), filters, inclusive);
    public static XContentBuilder builder(XContent xContent, Set<String> includes, Set<String> excludes) throws IOException {
        return new XContentBuilder(xContent, new BytesStreamOutput(), includes, excludes);
    }

    private XContentGenerator generator;

@@ -77,7 +75,7 @@ public final class XContentBuilder implements BytesStream, Releasable {
     * to call {@link #close()} when the builder is done with.
     */
    public XContentBuilder(XContent xContent, OutputStream bos) throws IOException {
        this(xContent, bos, null);
        this(xContent, bos, Collections.emptySet(), Collections.emptySet());
    }

    /**

@@ -86,20 +84,24 @@ public final class XContentBuilder implements BytesStream, Releasable {
     * filter will be written to the output stream. Make sure to call
     * {@link #close()} when the builder is done with.
     */
    public XContentBuilder(XContent xContent, OutputStream bos, String[] filters) throws IOException {
        this(xContent, bos, filters, true);
    public XContentBuilder(XContent xContent, OutputStream bos, Set<String> includes) throws IOException {
        this(xContent, bos, includes, Collections.emptySet());
    }

    /**
     * Constructs a new builder using the provided xcontent, an OutputStream and
     * some filters. If {@code filters} are specified and {@code inclusive} is
     * true, only those values matching a filter will be written to the output
     * stream. If {@code inclusive} is false, those matching will be excluded.
     * Creates a new builder using the provided XContent, output stream and some inclusive and/or exclusive filters. When both exclusive and
     * inclusive filters are provided, the underlying builder will first use exclusion filters to remove fields and then will check the
     * remaining fields against the inclusive filters.
     * <p>
     * Make sure to call {@link #close()} when the builder is done with.
     *
     * @param os the output stream
     * @param includes the inclusive filters: only fields and objects that match the inclusive filters will be written to the output.
     * @param excludes the exclusive filters: only fields and objects that don't match the exclusive filters will be written to the output.
     */
    public XContentBuilder(XContent xContent, OutputStream bos, String[] filters, boolean inclusive) throws IOException {
        this.bos = bos;
        this.generator = xContent.createGenerator(bos, filters, inclusive);
    public XContentBuilder(XContent xContent, OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
        this.bos = os;
        this.generator = xContent.createGenerator(bos, includes, excludes);
    }

    public XContentType contentType() {
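A hedged usage sketch of the include/exclude filtering introduced here (field names are made up; JsonXContent.jsonXContent is the JSON implementation in this code base):

    Set<String> includes = Collections.emptySet();                // no inclusive filtering
    Set<String> excludes = Collections.singleton("user.secret");  // drop this subtree

    try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent, includes, excludes)) {
        builder.startObject()
                   .startObject("user")
                       .field("name", "alice")
                       .field("secret", "s3cr3t") // removed by the exclusion filter
                   .endObject()
               .endObject();
        // resulting JSON: {"user":{"name":"alice"}}
    }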
@@ -35,6 +35,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.util.Set;

/**
 * A CBOR based content implementation using Jackson.

@@ -70,8 +71,8 @@ public class CborXContent implements XContent {
    }

    @Override
    public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
        return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
    public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
        return new CborXContentGenerator(cborFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
    }

    @Override
@@ -20,23 +20,22 @@
package org.elasticsearch.common.xcontent.cbor;

import com.fasterxml.jackson.core.JsonGenerator;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;

import java.io.OutputStream;
import java.util.Collections;
import java.util.Set;

/**
 *
 */
public class CborXContentGenerator extends JsonXContentGenerator {

    public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
        this(jsonGenerator, os, filters, true);
    public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
        this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
    }

    public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
        super(jsonGenerator, os, filters, inclusive);
    public CborXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
        super(jsonGenerator, os, includes, excludes);
    }

    @Override
@@ -35,6 +35,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Reader;
import java.util.Set;

/**
 * A JSON based content implementation using Jackson.

@@ -92,8 +93,8 @@ public class JsonXContent implements XContent {
    }

    @Override
    public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
        return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
    public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
        return new JsonXContentGenerator(jsonFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
    }

    @Override
@@ -27,10 +27,10 @@ import com.fasterxml.jackson.core.io.SerializedString;
 import com.fasterxml.jackson.core.json.JsonWriteContext;
 import com.fasterxml.jackson.core.util.DefaultIndenter;
 import com.fasterxml.jackson.core.util.DefaultPrettyPrinter;
+import com.fasterxml.jackson.core.util.JsonGeneratorDelegate;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentGenerator;
@@ -43,6 +43,9 @@ import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.Collections;
+import java.util.Objects;
+import java.util.Set;
 
 /**
  *
@@ -72,23 +75,38 @@ public class JsonXContentGenerator implements XContentGenerator {
     private static final DefaultPrettyPrinter.Indenter INDENTER = new DefaultIndenter("  ", LF.getValue());
     private boolean prettyPrint = false;
 
-    public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
+    public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
+        this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
+    }
+
+    public JsonXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
+        Objects.requireNonNull(includes, "Including filters must not be null");
+        Objects.requireNonNull(excludes, "Excluding filters must not be null");
+        this.os = os;
         if (jsonGenerator instanceof GeneratorBase) {
             this.base = (GeneratorBase) jsonGenerator;
         } else {
             this.base = null;
         }
 
-        if (CollectionUtils.isEmpty(filters)) {
-            this.generator = jsonGenerator;
-            this.filter = null;
-        } else {
-            this.filter = new FilteringGeneratorDelegate(jsonGenerator,
-                    new FilterPathBasedFilter(filters, inclusive), true, true);
-            this.generator = this.filter;
+        JsonGenerator generator = jsonGenerator;
+
+        boolean hasExcludes = excludes.isEmpty() == false;
+        if (hasExcludes) {
+            generator = new FilteringGeneratorDelegate(generator, new FilterPathBasedFilter(excludes, false), true, true);
         }
 
-        this.os = os;
+        boolean hasIncludes = includes.isEmpty() == false;
+        if (hasIncludes) {
+            generator = new FilteringGeneratorDelegate(generator, new FilterPathBasedFilter(includes, true), true, true);
+        }
+
+        if (hasExcludes || hasIncludes) {
+            this.filter = (FilteringGeneratorDelegate) generator;
+        } else {
+            this.filter = null;
+        }
+        this.generator = generator;
     }
 
     @Override
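Note: in the new constructor above, the exclusion filter wraps the raw generator first and the inclusion filter wraps the result, so written tokens pass the include check before the exclude check; a field is emitted only if it matches the includes and does not match the excludes. A standalone restatement of the layering, using only calls visible in the hunk (`jsonFactory`, `os`, `includes` and `excludes` are assumed to be in scope):

```java
JsonGenerator generator = jsonFactory.createGenerator(os, JsonEncoding.UTF8);
if (excludes.isEmpty() == false) {
    // Innermost wrapper: drop everything matching an exclude pattern.
    generator = new FilteringGeneratorDelegate(generator, new FilterPathBasedFilter(excludes, false), true, true);
}
if (includes.isEmpty() == false) {
    // Outermost wrapper: keep only what matches an include pattern.
    generator = new FilteringGeneratorDelegate(generator, new FilterPathBasedFilter(includes, true), true, true);
}
```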
@@ -122,23 +140,34 @@ public class JsonXContentGenerator implements XContentGenerator {
         generator.writeEndArray();
     }
 
-    protected boolean isFiltered() {
+    private boolean isFiltered() {
         return filter != null;
     }
 
-    protected boolean inRoot() {
+    private JsonGenerator getLowLevelGenerator() {
         if (isFiltered()) {
-            JsonStreamContext context = filter.getFilterContext();
-            return ((context != null) && (context.inRoot() && context.getCurrentName() == null));
+            JsonGenerator delegate = filter.getDelegate();
+            if (delegate instanceof JsonGeneratorDelegate) {
+                // In case of combined inclusion and exclusion filters, we have one and only one another delegating level
+                delegate = ((JsonGeneratorDelegate) delegate).getDelegate();
+                assert delegate instanceof JsonGeneratorDelegate == false;
+            }
+            return delegate;
         }
-        return false;
+        return generator;
+    }
+
+    private boolean inRoot() {
+        JsonStreamContext context = generator.getOutputContext();
+        return ((context != null) && (context.inRoot() && context.getCurrentName() == null));
     }
 
     @Override
     public void writeStartObject() throws IOException {
-        if (isFiltered() && inRoot()) {
-            // Bypass generator to always write the root start object
-            filter.getDelegate().writeStartObject();
+        if (inRoot()) {
+            // Use the low level generator to write the startObject so that the root
+            // start object is always written even if a filtered generator is used
+            getLowLevelGenerator().writeStartObject();
+            return;
         }
         generator.writeStartObject();
@@ -146,9 +175,10 @@ public class JsonXContentGenerator implements XContentGenerator {
 
     @Override
     public void writeEndObject() throws IOException {
-        if (isFiltered() && inRoot()) {
-            // Bypass generator to always write the root end object
-            filter.getDelegate().writeEndObject();
+        if (inRoot()) {
+            // Use the low level generator to write the startObject so that the root
+            // start object is always written even if a filtered generator is used
+            getLowLevelGenerator().writeEndObject();
+            return;
         }
         generator.writeEndObject();
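Note: the two hunks above route the root start/end object around the filter chain. The reason: a Jackson filtering delegate suppresses every token until something matches, including the outermost braces, so a document with no matching field would otherwise serialize to nothing. A small jackson-core-only illustration of that behavior (not Elasticsearch code; the pointer value is arbitrary):

```java
import java.io.StringWriter;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate;
import com.fasterxml.jackson.core.filter.JsonPointerBasedFilter;

public class RootBypassDemo {
    public static void main(String[] args) throws Exception {
        StringWriter writer = new StringWriter();
        JsonGenerator generator = new FilteringGeneratorDelegate(
                new JsonFactory().createGenerator(writer),
                new JsonPointerBasedFilter("/does-not-exist"), true, true);
        generator.writeStartObject(); // suppressed: nothing matches the pointer
        generator.writeEndObject();
        generator.close();
        // Nothing was written, not even "{}", which is why the generator in the
        // diff writes the root braces through the unfiltered low-level generator.
        System.out.println(writer.toString().isEmpty()); // true
    }
}
```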
@@ -390,7 +420,8 @@ public class JsonXContentGenerator implements XContentGenerator {
         }
         if (writeLineFeedAtEnd) {
             flush();
-            generator.writeRaw(LF);
+            // Bypass generator to always write the line feed
+            getLowLevelGenerator().writeRaw(LF);
         }
         generator.close();
     }

@@ -35,6 +35,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.Reader;
+import java.util.Set;
 
 /**
  * A Smile based content implementation using Jackson.
@@ -71,8 +72,8 @@ public class SmileXContent implements XContent {
     }
 
     @Override
-    public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
-        return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
+    public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
+        return new SmileXContentGenerator(smileFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
     }
 
     @Override
@@ -20,23 +20,22 @@
 package org.elasticsearch.common.xcontent.smile;
 
 import com.fasterxml.jackson.core.JsonGenerator;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
 
 import java.io.OutputStream;
+import java.util.Collections;
+import java.util.Set;
 
 /**
  *
  */
 public class SmileXContentGenerator extends JsonXContentGenerator {
 
-    public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
-        this(jsonGenerator, os, filters, true);
+    public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
+        this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
     }
 
-    public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
-        super(jsonGenerator, os, filters, inclusive);
+    public SmileXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
+        super(jsonGenerator, os, includes, excludes);
     }
 
     @Override
@@ -21,10 +21,10 @@
 package org.elasticsearch.common.xcontent.support.filtering;
 
 import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.common.util.CollectionUtils;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 
 public class FilterPath {
 
@@ -75,8 +75,8 @@ public class FilterPath {
         return next;
     }
 
-    public static FilterPath[] compile(String... filters) {
-        if (CollectionUtils.isEmpty(filters)) {
+    public static FilterPath[] compile(Set<String> filters) {
+        if (filters == null || filters.isEmpty()) {
             return null;
         }
 
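Note: per the hunk above, `FilterPath.compile` now takes a `Set<String>` and returns `null` for a null or empty input instead of relying on `CollectionUtils`. A hypothetical call site (assumes Elasticsearch core on the classpath):

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import org.elasticsearch.common.xcontent.support.filtering.FilterPath;

public class FilterPathExample {
    public static void main(String[] args) {
        // Dot-separated path patterns; wildcards are supported as before.
        Set<String> patterns = new HashSet<>(Arrays.asList("author.name", "chapters.*.title"));
        FilterPath[] compiled = FilterPath.compile(patterns);
        System.out.println(compiled.length);
        // Null or empty input now yields null, not an empty array.
        System.out.println(FilterPath.compile(Collections.emptySet()));
    }
}
```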
@@ -24,6 +24,7 @@ import org.elasticsearch.common.util.CollectionUtils;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 
 public class FilterPathBasedFilter extends TokenFilter {
 
@@ -53,7 +54,7 @@ public class FilterPathBasedFilter extends TokenFilter {
         this.filters = filters;
     }
 
-    public FilterPathBasedFilter(String[] filters, boolean inclusive) {
+    public FilterPathBasedFilter(Set<String> filters, boolean inclusive) {
         this(FilterPath.compile(filters), inclusive);
     }
 
@@ -103,11 +104,6 @@ public class FilterPathBasedFilter extends TokenFilter {
 
     @Override
     protected boolean _includeScalar() {
-        for (FilterPath filter : filters) {
-            if (filter.matches()) {
-                return inclusive;
-            }
-        }
         return !inclusive;
     }
 }

@@ -34,6 +34,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.Reader;
+import java.util.Set;
 
 /**
  * A YAML based content implementation using Jackson.
@@ -66,8 +67,8 @@ public class YamlXContent implements XContent {
     }
 
     @Override
-    public XContentGenerator createGenerator(OutputStream os, String[] filters, boolean inclusive) throws IOException {
-        return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, filters, inclusive);
+    public XContentGenerator createGenerator(OutputStream os, Set<String> includes, Set<String> excludes) throws IOException {
+        return new YamlXContentGenerator(yamlFactory.createGenerator(os, JsonEncoding.UTF8), os, includes, excludes);
     }
 
     @Override
@@ -20,23 +20,22 @@
 package org.elasticsearch.common.xcontent.yaml;
 
 import com.fasterxml.jackson.core.JsonGenerator;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContentGenerator;
 
 import java.io.OutputStream;
+import java.util.Collections;
+import java.util.Set;
 
 /**
  *
  */
 public class YamlXContentGenerator extends JsonXContentGenerator {
 
-    public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String... filters) {
-        this(jsonGenerator, os, filters, true);
+    public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os) {
+        this(jsonGenerator, os, Collections.emptySet(), Collections.emptySet());
     }
 
-    public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, String[] filters, boolean inclusive) {
-        super(jsonGenerator, os, filters, inclusive);
+    public YamlXContentGenerator(JsonGenerator jsonGenerator, OutputStream os, Set<String> includes, Set<String> excludes) {
+        super(jsonGenerator, os, includes, excludes);
    }
 
     @Override
@@ -39,7 +39,7 @@ public class ShardLockObtainFailedException extends Exception {
 
     @Override
     public String getMessage() {
-        StringBuffer sb = new StringBuffer();
+        StringBuilder sb = new StringBuilder();
         sb.append(shardId.toString());
         sb.append(": ");
         sb.append(super.getMessage());

@@ -24,15 +24,14 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -105,11 +104,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                 continue;
             }
 
-            final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
-            // don't go wild here and create a new IndexSetting object for every shard this could cause a lot of garbage
-            // on cluster restart if we allocate a boat load of shards
-            if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
-                // when we create a fresh index
+            if (shard.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE &&
+                shard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) {
                 continue;
             }
 
@@ -121,14 +117,17 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                 continue;
             }
 
-            final Set<String> lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id());
-            final boolean snapshotRestore = shard.restoreSource() != null;
+            // don't create a new IndexSetting object for every shard as this could cause a lot of garbage
+            // on cluster restart if we allocate a boat load of shards
+            final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
+            final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(shard.id());
+            final boolean snapshotRestore = shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
             final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);
 
             final NodeShardsResult nodeShardsResult;
             final boolean enoughAllocationsFound;
 
-            if (lastActiveAllocationIds.isEmpty()) {
+            if (inSyncAllocationIds.isEmpty()) {
                 assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocated a primary with an empty allocation id set, but index is new";
                 // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
                 // fall back to old version-based allocation mode
@@ -141,18 +140,18 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                 }
                 logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard);
             } else {
-                assert lastActiveAllocationIds.isEmpty() == false;
+                assert inSyncAllocationIds.isEmpty() == false;
                 // use allocation ids to select nodes
                 nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode,
-                    allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState);
+                    allocation.getIgnoreNodes(shard.shardId()), inSyncAllocationIds, shardState);
                 enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
-                logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, lastActiveAllocationIds);
+                logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, inSyncAllocationIds);
             }
 
             if (enoughAllocationsFound == false){
                 if (snapshotRestore) {
                     // let BalancedShardsAllocator take care of allocating this shard
-                    logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
+                    logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.recoverySource());
                 } else if (recoverOnAnyNode) {
                     // let BalancedShardsAllocator take care of allocating this shard
                     logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());

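Note: the PrimaryShardAllocator hunks above replace the `allocatedPostIndexCreate` heuristic with an explicit check of the shard's declared recovery source. A standalone sketch of that gate; the enum constants mirror `RecoverySource.Type` as used in the hunks, everything else is illustrative:

```java
public class RecoverySourceGateSketch {
    /** Mirrors the constants referenced above; the real enum lives in
     *  org.elasticsearch.cluster.routing.RecoverySource. */
    enum Type { EMPTY_STORE, EXISTING_STORE, PEER, SNAPSHOT, LOCAL_SHARDS }

    /** The primary shard allocator only considers shards whose recovery source
     *  is an existing store or a snapshot; anything else is skipped. */
    static boolean handledByPrimaryShardAllocator(Type recoverySource) {
        return recoverySource == Type.EXISTING_STORE || recoverySource == Type.SNAPSHOT;
    }

    public static void main(String[] args) {
        System.out.println(handledByPrimaryShardAllocator(Type.EMPTY_STORE));    // false: freshly created shard
        System.out.println(handledByPrimaryShardAllocator(Type.EXISTING_STORE)); // true: reuse local data
        System.out.println(handledByPrimaryShardAllocator(Type.SNAPSHOT));       // true: restore path
    }
}
```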
@@ -76,8 +76,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             }
 
             // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
-            IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
-            if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
+            if (shard.unassignedInfo() != null && shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
                 continue;
             }
 
@@ -119,7 +118,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                     "existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node ["+ nodeWithHighestMatch + "]",
                     null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false, UnassignedInfo.AllocationStatus.NO_ATTEMPT);
                 // don't cancel shard in the loop as it will cause a ConcurrentModificationException
-                shardCancellationActions.add(() -> routingNodes.failShard(logger, shard, unassignedInfo, indexMetaData, allocation.changes()));
+                shardCancellationActions.add(() -> routingNodes.failShard(logger, shard, unassignedInfo, metaData.getIndexSafe(shard.index()), allocation.changes()));
             }
         }
     }
@@ -132,7 +131,6 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
     public void allocateUnassigned(RoutingAllocation allocation) {
         final RoutingNodes routingNodes = allocation.routingNodes();
         final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
-        MetaData metaData = allocation.metaData();
         while (unassignedIterator.hasNext()) {
             ShardRouting shard = unassignedIterator.next();
             if (shard.primary()) {
@@ -140,8 +138,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             }
 
             // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
-            IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
-            if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
+            if (shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
                 continue;
             }
 
@@ -24,6 +24,9 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
 
 public interface HttpServerTransport extends LifecycleComponent {
 
+    String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker";
+    String HTTP_SERVER_BOSS_THREAD_NAME_PREFIX = "http_server_boss";
+
     BoundTransportAddress boundAddress();
 
     HttpInfo info();

@@ -277,7 +277,7 @@ public abstract class Engine implements Closeable {
         }
     }
 
-    public abstract boolean index(Index operation) throws EngineException;
+    public abstract void index(Index operation) throws EngineException;
 
     public abstract void delete(Delete delete) throws EngineException;
 
@@ -847,6 +847,7 @@ public abstract class Engine implements Closeable {
     public static class Index extends Operation {
 
         private final ParsedDocument doc;
+        private boolean created;
 
         public Index(Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime) {
             super(uid, version, versionType, origin, startTime);
@@ -905,6 +906,14 @@ public abstract class Engine implements Closeable {
             return this.doc.source();
         }
 
+        public boolean isCreated() {
+            return created;
+        }
+
+        public void setCreated(boolean created) {
+            this.created = created;
+        }
+
         @Override
         protected int estimatedSizeInBytes() {
             return (id().length() + type().length()) * 2 + source().length() + 12;
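Note: the Engine hunks above change `index(Index)` to return `void` and record the outcome on the operation itself via `setCreated`/`isCreated`, so the engine can report per-operation state without widening the method signature. A minimal standalone analogue of the pattern (all names hypothetical):

```java
public class OperationResultSketch {
    /** Stand-in for Engine.Index: the operation now carries its own outcome. */
    static class IndexOperation {
        private boolean created;
        boolean isCreated() { return created; }
        void setCreated(boolean created) { this.created = created; }
    }

    /** Stand-in for Engine.index(Index): void return, result recorded on the op. */
    static void index(IndexOperation op, boolean documentAlreadyExists) {
        op.setCreated(documentAlreadyExists == false);
    }

    public static void main(String[] args) {
        IndexOperation op = new IndexOperation();
        index(op, false);
        System.out.println(op.isCreated()); // true: the document was newly created
    }
}
```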
@@ -386,16 +386,15 @@ public class InternalEngine extends Engine {
     }
 
     @Override
-    public boolean index(Index index) {
-        final boolean created;
+    public void index(Index index) {
         try (ReleasableLock lock = readLock.acquire()) {
             ensureOpen();
             if (index.origin().isRecovery()) {
                 // Don't throttle recovery operations
-                created = innerIndex(index);
+                innerIndex(index);
             } else {
                 try (Releasable r = throttle.acquireThrottle()) {
-                    created = innerIndex(index);
+                    innerIndex(index);
                 }
             }
         } catch (IllegalStateException | IOException e) {
@@ -406,10 +405,9 @@ public class InternalEngine extends Engine {
             }
             throw new IndexFailedEngineException(shardId, index.type(), index.id(), e);
         }
-        return created;
     }
 
-    private boolean innerIndex(Index index) throws IOException {
+    private void innerIndex(Index index) throws IOException {
         try (Releasable ignored = acquireLock(index.uid())) {
             lastWriteNanos = index.startTime();
             final long currentVersion;
@@ -424,15 +422,16 @@ public class InternalEngine extends Engine {
             }
 
             final long expectedVersion = index.version();
-            if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) return false;
+            if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) {
+                index.setCreated(false);
+                return;
+            }
 
             final long updatedVersion = updateVersion(index, currentVersion, expectedVersion);
 
-            final boolean created = indexOrUpdate(index, currentVersion, versionValue);
+            indexOrUpdate(index, currentVersion, versionValue);
 
             maybeAddToTranslog(index, updatedVersion, Translog.Index::new, NEW_VERSION_VALUE);
-
-            return created;
         }
     }
 
@@ -442,16 +441,14 @@ public class InternalEngine extends Engine {
         return updatedVersion;
     }
 
-    private boolean indexOrUpdate(final Index index, final long currentVersion, final VersionValue versionValue) throws IOException {
-        final boolean created;
+    private void indexOrUpdate(final Index index, final long currentVersion, final VersionValue versionValue) throws IOException {
         if (currentVersion == Versions.NOT_FOUND) {
             // document does not exists, we can optimize for create
-            created = true;
+            index.setCreated(true);
             index(index, indexWriter);
         } else {
-            created = update(index, versionValue, indexWriter);
+            update(index, versionValue, indexWriter);
         }
-        return created;
     }
 
     private static void index(final Index index, final IndexWriter indexWriter) throws IOException {
@@ -462,19 +459,17 @@ public class InternalEngine extends Engine {
         }
     }
 
-    private static boolean update(final Index index, final VersionValue versionValue, final IndexWriter indexWriter) throws IOException {
-        final boolean created;
+    private static void update(final Index index, final VersionValue versionValue, final IndexWriter indexWriter) throws IOException {
         if (versionValue != null) {
-            created = versionValue.delete(); // we have a delete which is not GC'ed...
+            index.setCreated(versionValue.delete()); // we have a delete which is not GC'ed...
         } else {
-            created = false;
+            index.setCreated(false);
         }
         if (index.docs().size() > 1) {
             indexWriter.updateDocuments(index.uid(), index.docs());
         } else {
             indexWriter.updateDocument(index.uid(), index.docs().get(0));
         }
-        return created;
     }
 
     @Override

@@ -106,7 +106,7 @@ public class ShadowEngine extends Engine {
 
 
     @Override
-    public boolean index(Index index) throws EngineException {
+    public void index(Index index) throws EngineException {
         throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine");
     }
 
@@ -41,28 +41,19 @@ import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ParentFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.mapper.TTLFieldMapper;
 import org.elasticsearch.index.mapper.TimestampFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.search.fetch.subphase.ParentFieldSubFetchPhase;
-import org.elasticsearch.search.lookup.LeafSearchLookup;
-import org.elasticsearch.search.lookup.SearchLookup;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -218,44 +209,14 @@ public final class ShardGetService extends AbstractIndexShardComponent {
                 fields.put(ParentFieldMapper.NAME, new GetField(ParentFieldMapper.NAME, Collections.singletonList(parentId)));
             }
 
-        // now, go and do the script thingy if needed
-
         if (gFields != null && gFields.length > 0) {
-            SearchLookup searchLookup = null;
             for (String field : gFields) {
-                Object value = null;
                 FieldMapper fieldMapper = docMapper.mappers().smartNameFieldMapper(field);
                 if (fieldMapper == null) {
                     if (docMapper.objectMappers().get(field) != null) {
                         // Only fail if we know it is a object field, missing paths / fields shouldn't fail.
                         throw new IllegalArgumentException("field [" + field + "] isn't a leaf field");
                     }
-                } else if (!fieldMapper.fieldType().stored() && !fieldMapper.isGenerated()) {
-                    if (searchLookup == null) {
-                        searchLookup = new SearchLookup(mapperService, null, new String[]{type});
-                        LeafSearchLookup leafSearchLookup = searchLookup.getLeafSearchLookup(docIdAndVersion.context);
-                        searchLookup.source().setSource(source);
-                        leafSearchLookup.setDocument(docIdAndVersion.docId);
-                    }
-
-                    List<Object> values = searchLookup.source().extractRawValues(field);
-                    if (!values.isEmpty()) {
-                        for (int i = 0; i < values.size(); i++) {
-                            values.set(i, fieldMapper.fieldType().valueForSearch(values.get(i)));
-                        }
-                        value = values;
-                    }
-                }
-
-                if (value != null) {
-                    if (fields == null) {
-                        fields = new HashMap<>(2);
-                    }
-                    if (value instanceof List) {
-                        fields.put(field, new GetField(field, (List) value));
-                    } else {
-                        fields.put(field, new GetField(field, Collections.singletonList(value)));
-                    }
-                }
                 }
             }
         }

@@ -291,8 +291,4 @@ public class AllFieldMapper extends MetadataFieldMapper {
         super.doMerge(mergeWith, updateAllTypes);
     }
 
-    @Override
-    public boolean isGenerated() {
-        return true;
-    }
 }

@@ -20,6 +20,7 @@
 package org.elasticsearch.index.mapper;
 
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.common.geo.GeoHashUtils;
 import org.apache.lucene.util.LegacyNumericUtils;
@@ -139,7 +140,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
 
         public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
                 Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper,
-                KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
+                FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
 
         public Y build(Mapper.BuilderContext context) {
             GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;
@@ -166,10 +167,17 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
                 }
                 geoPointFieldType.setLatLonEnabled(latMapper.fieldType(), lonMapper.fieldType());
             }
-            KeywordFieldMapper geoHashMapper = null;
+            FieldMapper geoHashMapper = null;
             if (enableGeoHash || enableGeoHashPrefix) {
                 // TODO: possible also implicitly enable geohash if geohash precision is set
-                geoHashMapper = new KeywordFieldMapper.Builder(Names.GEOHASH).index(true).includeInAll(false).store(fieldType.stored()).build(context);
+                if (context.indexCreatedVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
+                    geoHashMapper = new KeywordFieldMapper.Builder(Names.GEOHASH)
+                            .index(true).includeInAll(false).store(fieldType.stored()).build(context);
+                } else {
+                    geoHashMapper = new StringFieldMapper.Builder(Names.GEOHASH)
+                            .tokenized(false).index(true).omitNorms(true).indexOptions(IndexOptions.DOCS)
+                            .includeInAll(false).store(fieldType.stored()).build(context);
+                }
                 geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
             }
             context.path().remove();
@@ -376,12 +384,12 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
 
     protected FieldMapper lonMapper;
 
-    protected KeywordFieldMapper geoHashMapper;
+    protected FieldMapper geoHashMapper;
 
     protected Explicit<Boolean> ignoreMalformed;
 
     protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
-                                      FieldMapper latMapper, FieldMapper lonMapper, KeywordFieldMapper geoHashMapper,
+                                      FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
                                       MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
         this.latMapper = latMapper;
@@ -552,7 +560,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
     @Override
     public FieldMapper updateFieldType(Map<String, MappedFieldType> fullNameToFieldType) {
         BaseGeoPointFieldMapper updated = (BaseGeoPointFieldMapper) super.updateFieldType(fullNameToFieldType);
-        KeywordFieldMapper geoUpdated = geoHashMapper == null ? null : (KeywordFieldMapper) geoHashMapper.updateFieldType(fullNameToFieldType);
+        FieldMapper geoUpdated = geoHashMapper == null ? null : geoHashMapper.updateFieldType(fullNameToFieldType);
         FieldMapper latUpdated = latMapper == null ? null : latMapper.updateFieldType(fullNameToFieldType);
         FieldMapper lonUpdated = lonMapper == null ? null : lonMapper.updateFieldType(fullNameToFieldType);
         if (updated == this

@@ -246,6 +246,11 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
         super(simpleName);
         assert indexSettings != null;
         this.indexCreatedVersion = Version.indexCreated(indexSettings);
+        if (indexCreatedVersion.onOrAfter(Version.V_5_0_0_alpha6)) {
+            if (simpleName.isEmpty()) {
+                throw new IllegalArgumentException("name cannot be empty string");
+            }
+        }
         fieldType.freeze();
         this.fieldType = fieldType;
         defaultFieldType.freeze();
@@ -660,14 +665,4 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
         }
     }
 
-    /**
-     * Fields might not be available before indexing, for example _all, token_count,...
-     * When get is called and these fields are requested, this case needs special treatment.
-     *
-     * @return If the field is available before indexing or not.
-     */
-    public boolean isGenerated() {
-        return false;
-    }
-
 }

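Note: the FieldMapper hunk above gates the new empty-name check on the version the index was created with, so indexes from before the rule keep loading. A standalone sketch of this back-compat pattern (the integer version values are purely illustrative):

```java
public class VersionGatedValidationSketch {
    /** Enforce a new rule only for indexes created on or after the version
     *  that introduced it, so old data keeps loading. */
    static boolean isValidName(String name, int indexCreatedVersion, int versionRuleAdded) {
        return indexCreatedVersion < versionRuleAdded || name.isEmpty() == false;
    }

    public static void main(String[] args) {
        System.out.println(isValidName("", 5000001, 5000006)); // true: old index, grandfathered
        System.out.println(isValidName("", 5000006, 5000006)); // false: new index must name its field
    }
}
```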
@@ -289,8 +289,4 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
         return builder;
     }
 
-    @Override
-    public boolean isGenerated() {
-        return true;
-    }
 }

@@ -73,7 +73,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
         @Override
         public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
                 MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper,
-                FieldMapper lonMapper, KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
+                FieldMapper lonMapper, FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
                 CopyTo copyTo) {
             fieldType.setTokenized(false);
             if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
@@ -104,7 +104,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
 
     public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
                                FieldMapper latMapper, FieldMapper lonMapper,
-                               KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
+                               FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
                 ignoreMalformed, copyTo);
     }

@@ -24,6 +24,7 @@ import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -32,10 +33,14 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
+import static java.util.Collections.unmodifiableList;
 import static org.elasticsearch.index.mapper.TypeParsers.parseField;
 
 /**
@@ -45,6 +50,12 @@ public final class KeywordFieldMapper extends FieldMapper {
 
     public static final String CONTENT_TYPE = "keyword";
 
+    private static final List<String> SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING = unmodifiableList(Arrays.asList(
+            "type",
+            // common keyword parameters, for which the upgrade is straightforward
+            "index", "store", "doc_values", "omit_norms", "norms", "boost", "fields", "copy_to",
+            "include_in_all", "ignore_above", "index_options", "similarity"));
+
     public static class Defaults {
         public static final MappedFieldType FIELD_TYPE = new KeywordFieldType();
 
@@ -103,6 +114,29 @@ public final class KeywordFieldMapper extends FieldMapper {
     public static class TypeParser implements Mapper.TypeParser {
         @Override
         public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+            if (parserContext.indexVersionCreated().before(Version.V_5_0_0_alpha1)) {
+                // Downgrade "keyword" to "string" in indexes created in 2.x so you can use modern syntax against old indexes
+                Set<String> unsupportedParameters = new HashSet<>(node.keySet());
+                unsupportedParameters.removeAll(SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING);
+                if (false == SUPPORTED_PARAMETERS_FOR_AUTO_DOWNGRADE_TO_STRING.containsAll(node.keySet())) {
+                    throw new IllegalArgumentException("Automatic downgrade from [keyword] to [string] failed because parameters "
+                            + unsupportedParameters + " are not supported for automatic downgrades.");
+                }
+                { // Downgrade "index"
+                    Object index = node.get("index");
+                    if (index == null || Boolean.TRUE.equals(index)) {
+                        index = "not_analyzed";
+                    } else if (Boolean.FALSE.equals(index)) {
+                        index = "no";
+                    } else {
+                        throw new IllegalArgumentException(
+                                "Can't parse [index] value [" + index + "] for field [" + name + "], expected [true] or [false]");
+                    }
+                    node.put("index", index);
+                }
+
+                return new StringFieldMapper.TypeParser().parse(name, node, parserContext);
+            }
             KeywordFieldMapper.Builder builder = new KeywordFieldMapper.Builder(name);
             parseField(builder, name, node, parserContext);
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
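Note: the TypeParser hunk above auto-downgrades a modern `keyword` mapping to the legacy `string` type on 2.x-created indexes, refusing whenever a parameter outside the whitelist is present. A standalone sketch of the `index` parameter translation (the `legacyIndexValue` helper is hypothetical; only the value table comes from the hunk):

```java
import java.util.HashMap;
import java.util.Map;

public class KeywordDowngradeSketch {
    /** Hypothetical helper mirroring the "index" translation in the hunk:
     *  keyword's boolean flag maps onto string's legacy three-valued setting. */
    static Object legacyIndexValue(Object index) {
        if (index == null || Boolean.TRUE.equals(index)) {
            return "not_analyzed"; // indexed but not analyzed: keyword semantics
        } else if (Boolean.FALSE.equals(index)) {
            return "no";           // not indexed at all
        }
        throw new IllegalArgumentException("expected [true] or [false], got [" + index + "]");
    }

    public static void main(String[] args) {
        Map<String, Object> node = new HashMap<>();
        node.put("type", "keyword");
        node.put("index", legacyIndexValue(node.get("index")));
        System.out.println(node); // {type=keyword, index=not_analyzed}, then parsed as a legacy string field
    }
}
```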
@@ -160,6 +194,16 @@ public final class KeywordFieldMapper extends FieldMapper {
             failIfNoDocValues();
             return new DocValuesIndexFieldData.Builder();
         }
+
+        @Override
+        public Object valueForSearch(Object value) {
+            if (value == null) {
+                return null;
+            }
+            // keywords are internally stored as utf8 bytes
+            BytesRef binaryValue = (BytesRef) value;
+            return binaryValue.utf8ToString();
+        }
     }
 
     private Boolean includeInAll;
@@ -212,12 +256,14 @@ public final class KeywordFieldMapper extends FieldMapper {
             context.allEntries().addText(fieldType().name(), value, fieldType().boost());
         }
 
+        // convert to utf8 only once before feeding postings/dv/stored fields
+        final BytesRef binaryValue = new BytesRef(value);
         if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
-            Field field = new Field(fieldType().name(), value, fieldType());
+            Field field = new Field(fieldType().name(), binaryValue, fieldType());
             fields.add(field);
         }
         if (fieldType().hasDocValues()) {
-            fields.add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(value)));
+            fields.add(new SortedSetDocValuesField(fieldType().name(), binaryValue));
         }
     }
 
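Note: both KeywordFieldMapper hunks above rely on keywords living internally as UTF-8 `BytesRef` values: `parseCreateField` now converts the string once and shares the bytes across the inverted index, stored field and doc values, while `valueForSearch` converts back when rendering. A minimal Lucene-only round trip (assumes lucene-core on the classpath):

```java
import org.apache.lucene.util.BytesRef;

public class BytesRefRoundTrip {
    public static void main(String[] args) {
        // One conversion to UTF-8 bytes, shared by postings, stored field and doc values.
        BytesRef binaryValue = new BytesRef("München");
        // Converted back only when the value is rendered in a search response.
        System.out.println(binaryValue.utf8ToString()); // München
    }
}
```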
@@ -58,7 +58,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
     }
 
     public static class Defaults extends BaseGeoPointFieldMapper.Defaults{
-        public static final Explicit<Boolean> COERCE = new Explicit(false, false);
+        public static final Explicit<Boolean> COERCE = new Explicit<>(false, false);
 
         public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType();
 
@@ -100,7 +100,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
         @Override
         public LegacyGeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
                 MappedFieldType defaultFieldType, Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper,
-                KeywordFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
+                FieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
                 CopyTo copyTo) {
             fieldType.setTokenized(false);
             setupFieldType(context);
@@ -261,7 +261,7 @@ public class LegacyGeoPointFieldMapper extends BaseGeoPointFieldMapper implement
     protected Explicit<Boolean> coerce;
 
     public LegacyGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
-            Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper, KeywordFieldMapper geoHashMapper,
+            Settings indexSettings, FieldMapper latMapper, FieldMapper lonMapper, FieldMapper geoHashMapper,
             MultiFields multiFields, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, CopyTo copyTo) {
         super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
                 ignoreMalformed, copyTo);

@@ -187,9 +187,4 @@ public class LegacyTokenCountFieldMapper extends LegacyIntegerFieldMapper {
             builder.field("analyzer", analyzer());
         }
     }
 
-    @Override
-    public boolean isGenerated() {
-        return true;
-    }
-
 }

@@ -22,15 +22,14 @@ package org.elasticsearch.index.mapper;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.similarity.SimilarityProvider;
 
 import java.util.Map;
+import java.util.Objects;
 import java.util.function.Function;
 
 public abstract class Mapper implements ToXContent, Iterable<Mapper> {
@@ -172,6 +171,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
     private final String simpleName;
 
     public Mapper(String simpleName) {
+        Objects.requireNonNull(simpleName);
         this.simpleName = simpleName;
     }
 
Some files were not shown because too many files have changed in this diff.