Merge remote-tracking branch 'origin/master' into feature/client_aggs_parsing
commit 67a9696e55

@@ -83,6 +83,6 @@
 ))
 (c-basic-offset . 4)
 (c-comment-only-line-offset . (0 . 0))
-(fill-column . 100)
-(fci-rule-column . 100)
+(fill-column . 140)
+(fci-rule-column . 140)
 (compile-command . "gradle compileTestJava"))))

@@ -362,7 +362,7 @@ quality boxes available in vagrant atlas:

 * sles-11

-We're missing the follow because our tests are very linux/bash centric:
+We're missing the following because our tests are very linux/bash centric:

 * Windows Server 2012

@@ -418,8 +418,8 @@ sudo -E bats $BATS_TESTS/*rpm*.bats
 If you wanted to retest all the release artifacts on a single VM you could:

 -------------------------------------------------
-gradle vagrantSetUp
-vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
+gradle setupBats
+cd qa/vagrant; vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
 cd $BATS_ARCHIVES
 sudo -E bats $BATS_TESTS/*.bats
 -------------------------------------------------
@@ -505,4 +505,3 @@ included as part of the build by checking the projects of the build.
 ---------------------------------------------------------------------------
 gradle projects
 ---------------------------------------------------------------------------
-

@@ -88,7 +88,7 @@ class ClusterConfiguration {
 if (seedNode == node) {
 return null
 }
-ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
+ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
 resourceexists {
 file(file: seedNode.transportPortsFile.toString())
 }
@@ -136,6 +136,8 @@ class ClusterConfiguration {

 LinkedHashMap<String, Object[]> setupCommands = new LinkedHashMap<>()

+List<Object> dependencies = new ArrayList<>()
+
 @Input
 void systemProperty(String property, String value) {
 systemProperties.put(property, value)
@@ -179,4 +181,10 @@ class ClusterConfiguration {
 }
 extraConfigFiles.put(path, sourceFile)
 }
+
+/** Add dependencies that must be run before the first task setting up the cluster. */
+@Input
+void dependsOn(Object... deps) {
+dependencies.addAll(deps)
+}
 }

@@ -57,7 +57,7 @@ class ClusterFormationTasks {
 // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
 // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
 // such that snapshots survive failures / test runs and there is no simple way today to fix that.
-Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: runner.dependsOn.collect()) {
+Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: config.dependencies) {
 delete sharedDir
 doLast {
 sharedDir.mkdirs()
@@ -104,7 +104,7 @@ class ClusterFormationTasks {
 NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir)
 nodes.add(node)
 Task dependsOn = startTasks.empty ? cleanup : startTasks.get(0)
-startTasks.add(configureNode(project, prefix, runner, dependsOn, node, distro, nodes.get(0)))
+startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, nodes.get(0)))
 }

 Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks)
@@ -148,7 +148,8 @@ class ClusterFormationTasks {
 *
 * @return a task which starts the node.
 */
-static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
+static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, ClusterConfiguration config,
+Configuration distribution, NodeInfo seedNode) {

 // tasks are chained so their execution order is maintained
 Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) {
@@ -161,7 +162,7 @@ class ClusterFormationTasks {

 setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node)
 setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node)
-setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration)
+setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution)
 setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
 setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node)
 setup = configureAddKeystoreSettingTasks(prefix, project, setup, node)
@@ -205,6 +206,13 @@ class ClusterFormationTasks {
 // if we are running in the background, make sure to stop the server when the task completes
 runner.finalizedBy(stop)
 start.finalizedBy(stop)
+for (Object dependency : config.dependencies) {
+if (dependency instanceof Fixture) {
+Task depStop = ((Fixture)dependency).stopTask
+runner.finalizedBy(depStop)
+start.finalizedBy(depStop)
+}
+}
 }
 return start
 }
@@ -541,7 +549,7 @@ class ClusterFormationTasks {
 static Task configureWaitTask(String name, Project project, List<NodeInfo> nodes, List<Task> startTasks) {
 Task wait = project.tasks.create(name: name, dependsOn: startTasks)
 wait.doLast {
-ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") {
+ant.waitfor(maxwait: '60', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") {
 or {
 for (NodeInfo node : nodes) {
 resourceexists {

@@ -477,10 +477,10 @@ class VagrantTestPlugin implements Plugin<Project> {
 }
 }
 }
-packaging.doFirst {
+platform.doFirst {
 project.gradle.addListener(platformReproListener)
 }
-packaging.doLast {
+platform.doLast {
 project.gradle.removeListener(platformReproListener)
 }
 if (project.extensions.esvagrant.boxes.contains(box)) {

@@ -308,7 +308,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {

 assertEquals(RestStatus.BAD_REQUEST, exception.status());
 assertEquals("Elasticsearch exception [type=illegal_argument_exception, " +
-"reason=Can't specify parent if no parent field has been configured]", exception.getMessage());
+"reason=can't specify parent if no parent field has been configured]", exception.getMessage());
 }
 {
 ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> {

@@ -184,12 +184,7 @@ public class BulkProcessor implements Closeable {
 this.bulkActions = bulkActions;
 this.bulkSize = bulkSize.getBytes();
 this.bulkRequest = new BulkRequest();
-if (concurrentRequests == 0) {
-this.bulkRequestHandler = BulkRequestHandler.syncHandler(consumer, backoffPolicy, listener, threadPool);
-} else {
-this.bulkRequestHandler = BulkRequestHandler.asyncHandler(consumer, backoffPolicy, listener, threadPool, concurrentRequests);
-}
+this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, threadPool, concurrentRequests);

 // Start period flushing task after everything is setup
 this.cancellableFlushTask = startFlushTask(flushInterval, threadPool);

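Note on the change above: BulkProcessor now always builds the single, unified BulkRequestHandler and passes concurrentRequests straight through; a value of 0 selects synchronous behaviour inside the handler instead of a separate sync implementation. A hedged usage sketch in Java follows; it assumes the standard client-side builder API, and the class name BulkProcessorUsageSketch and the concrete sizes are illustrative only, not from this commit.

-------------------------------------------------
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;

class BulkProcessorUsageSketch {
    static BulkProcessor build(Client client) {
        return BulkProcessor.builder(client, new BulkProcessor.Listener() {
            @Override public void beforeBulk(long executionId, BulkRequest request) {}
            @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}
            @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
        })
                .setConcurrentRequests(0) // 0 = execute each bulk synchronously, >0 = that many in-flight bulks
                .setBulkActions(1000)
                .setBackoffPolicy(BackoffPolicy.exponentialBackoff())
                .build();
    }
}
-------------------------------------------------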
@@ -27,121 +27,50 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.threadpool.ThreadPool;

+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.function.BiConsumer;

 /**
-* Abstracts the low-level details of bulk request handling
+* Implements the low-level details of bulk request handling
 */
-abstract class BulkRequestHandler {
-protected final Logger logger;
-protected final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
-protected final ThreadPool threadPool;
-
-protected BulkRequestHandler(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, ThreadPool threadPool) {
-this.logger = Loggers.getLogger(getClass());
-this.consumer = consumer;
-this.threadPool = threadPool;
-}
-
-
-public abstract void execute(BulkRequest bulkRequest, long executionId);
-
-public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;
-
-
-public static BulkRequestHandler syncHandler(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
-BackoffPolicy backoffPolicy, BulkProcessor.Listener listener,
-ThreadPool threadPool) {
-return new SyncBulkRequestHandler(consumer, backoffPolicy, listener, threadPool);
-}
-
-public static BulkRequestHandler asyncHandler(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
-BackoffPolicy backoffPolicy, BulkProcessor.Listener listener,
-ThreadPool threadPool, int concurrentRequests) {
-return new AsyncBulkRequestHandler(consumer, backoffPolicy, listener, threadPool, concurrentRequests);
-}
-
-private static class SyncBulkRequestHandler extends BulkRequestHandler {
-private final BulkProcessor.Listener listener;
-private final BackoffPolicy backoffPolicy;
-
-SyncBulkRequestHandler(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BackoffPolicy backoffPolicy,
-BulkProcessor.Listener listener, ThreadPool threadPool) {
-super(consumer, threadPool);
-this.backoffPolicy = backoffPolicy;
-this.listener = listener;
-}
-
-@Override
-public void execute(BulkRequest bulkRequest, long executionId) {
-boolean afterCalled = false;
-try {
-listener.beforeBulk(executionId, bulkRequest);
-BulkResponse bulkResponse = Retry
-.on(EsRejectedExecutionException.class)
-.policy(backoffPolicy)
-.using(threadPool)
-.withSyncBackoff(consumer, bulkRequest, Settings.EMPTY);
-afterCalled = true;
-listener.afterBulk(executionId, bulkRequest, bulkResponse);
-} catch (InterruptedException e) {
-Thread.currentThread().interrupt();
-logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
-if (!afterCalled) {
-listener.afterBulk(executionId, bulkRequest, e);
-}
-} catch (Exception e) {
-logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
-if (!afterCalled) {
-listener.afterBulk(executionId, bulkRequest, e);
-}
-}
-}
-
-@Override
-public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
-// we are "closed" immediately as there is no request in flight
-return true;
-}
-}
-
-private static class AsyncBulkRequestHandler extends BulkRequestHandler {
-private final BackoffPolicy backoffPolicy;
+public final class BulkRequestHandler {
+private final Logger logger;
+private final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
 private final BulkProcessor.Listener listener;
 private final Semaphore semaphore;
+private final Retry retry;
 private final int concurrentRequests;

-private AsyncBulkRequestHandler(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BackoffPolicy backoffPolicy,
+BulkRequestHandler(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BackoffPolicy backoffPolicy,
 BulkProcessor.Listener listener, ThreadPool threadPool,
 int concurrentRequests) {
-super(consumer, threadPool);
-this.backoffPolicy = backoffPolicy;
-assert concurrentRequests > 0;
+assert concurrentRequests >= 0;
+this.logger = Loggers.getLogger(getClass());
+this.consumer = consumer;
 this.listener = listener;
 this.concurrentRequests = concurrentRequests;
-this.semaphore = new Semaphore(concurrentRequests);
+this.retry = new Retry(EsRejectedExecutionException.class, backoffPolicy, threadPool);
+this.semaphore = new Semaphore(concurrentRequests > 0 ? concurrentRequests : 1);
 }

-@Override
 public void execute(BulkRequest bulkRequest, long executionId) {
+Runnable toRelease = () -> {};
 boolean bulkRequestSetupSuccessful = false;
-boolean acquired = false;
 try {
 listener.beforeBulk(executionId, bulkRequest);
 semaphore.acquire();
-acquired = true;
-Retry.on(EsRejectedExecutionException.class)
-.policy(backoffPolicy)
-.using(threadPool)
-.withAsyncBackoff(consumer, bulkRequest, new ActionListener<BulkResponse>() {
+toRelease = semaphore::release;
+CountDownLatch latch = new CountDownLatch(1);
+retry.withBackoff(consumer, bulkRequest, new ActionListener<BulkResponse>() {
 @Override
 public void onResponse(BulkResponse response) {
 try {
 listener.afterBulk(executionId, bulkRequest, response);
 } finally {
 semaphore.release();
+latch.countDown();
 }
 }

@@ -151,10 +80,14 @@ abstract class BulkRequestHandler {
 listener.afterBulk(executionId, bulkRequest, e);
 } finally {
 semaphore.release();
+latch.countDown();
 }
 }
 }, Settings.EMPTY);
 bulkRequestSetupSuccessful = true;
+if (concurrentRequests == 0) {
+latch.await();
+}
 } catch (InterruptedException e) {
 Thread.currentThread().interrupt();
 logger.info((Supplier<?>) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e);
@@ -163,14 +96,13 @@ abstract class BulkRequestHandler {
 logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e);
 listener.afterBulk(executionId, bulkRequest, e);
 } finally {
-if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore
-semaphore.release();
+if (bulkRequestSetupSuccessful == false) { // if we fail on client.bulk() release the semaphore
+toRelease.run();
 }
 }
 }

-@Override
-public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
+boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
 if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
 semaphore.release(this.concurrentRequests);
 return true;
@@ -178,4 +110,3 @@ abstract class BulkRequestHandler {
 return false;
 }
 }
-}

@@ -25,20 +25,14 @@ import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 import java.util.function.BiConsumer;
-import java.util.function.BiFunction;
 import java.util.function.Predicate;

 /**
@@ -46,62 +40,42 @@ import java.util.function.Predicate;
 */
 public class Retry {
 private final Class<? extends Throwable> retryOnThrowable;
+private final BackoffPolicy backoffPolicy;
+private final ThreadPool threadPool;

-private BackoffPolicy backoffPolicy;
-private ThreadPool threadPool;
-
-public static Retry on(Class<? extends Throwable> retryOnThrowable) {
-return new Retry(retryOnThrowable);
-}
-
-Retry(Class<? extends Throwable> retryOnThrowable) {
+public Retry(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, ThreadPool threadPool) {
 this.retryOnThrowable = retryOnThrowable;
-}
-
-/**
-* @param backoffPolicy The backoff policy that defines how long and how often to wait for retries.
-*/
-public Retry policy(BackoffPolicy backoffPolicy) {
 this.backoffPolicy = backoffPolicy;
-return this;
-}
-
-/**
-* @param threadPool The threadPool that will be used to schedule retries.
-*/
-public Retry using(ThreadPool threadPool) {
 this.threadPool = threadPool;
-return this;
 }

 /**
-* Invokes #apply(BulkRequest, ActionListener). Backs off on the provided exception and delegates results to the
-* provided listener. Retries will be attempted using the provided schedule function
+* Invokes #accept(BulkRequest, ActionListener). Backs off on the provided exception and delegates results to the
+* provided listener. Retries will be scheduled using the class's thread pool.
 * @param consumer The consumer to which apply the request and listener
 * @param bulkRequest The bulk request that should be executed.
 * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
 * @param settings settings
 */
-public void withAsyncBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Settings settings) {
+public void withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Settings settings) {
 RetryHandler r = new RetryHandler(retryOnThrowable, backoffPolicy, consumer, listener, settings, threadPool);
 r.execute(bulkRequest);
 }

 /**
-* Invokes #apply(BulkRequest, ActionListener). Backs off on the provided exception. Retries will be attempted using
-* the provided schedule function.
+* Invokes #accept(BulkRequest, ActionListener). Backs off on the provided exception. Retries will be scheduled using
+* the class's thread pool.
 *
 * @param consumer The consumer to which apply the request and listener
 * @param bulkRequest The bulk request that should be executed.
 * @param settings settings
-* @return the bulk response as returned by the client.
-* @throws Exception Any exception thrown by the callable.
+* @return a future representing the bulk response returned by the client.
 */
-public BulkResponse withSyncBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest, Settings settings) throws Exception {
-PlainActionFuture<BulkResponse> actionFuture = PlainActionFuture.newFuture();
-RetryHandler r = new RetryHandler(retryOnThrowable, backoffPolicy, consumer, actionFuture, settings, threadPool);
-r.execute(bulkRequest);
-return actionFuture.actionGet();
+public PlainActionFuture<BulkResponse> withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest, Settings settings) {
+PlainActionFuture<BulkResponse> future = PlainActionFuture.newFuture();
+withBackoff(consumer, bulkRequest, future, settings);
+return future;
 }

 static class RetryHandler implements ActionListener<BulkResponse> {

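Note on the Retry change above: the fluent Retry.on(...).policy(...).using(...) chain is gone; callers construct Retry once and call withBackoff, either with a listener or to obtain a PlainActionFuture. A hedged sketch of the new call pattern follows; the class name RetryUsageSketch and the backoff values are chosen only for illustration.

-------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.threadpool.ThreadPool;

class RetryUsageSketch {
    static void send(Client client, ThreadPool threadPool, BulkRequest request, ActionListener<BulkResponse> listener) {
        // One Retry instance replaces the old Retry.on(...).policy(...).using(...) chain.
        Retry retry = new Retry(EsRejectedExecutionException.class,
                BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(50), 8), threadPool);
        // Asynchronous: the listener receives the final response or failure after retries.
        retry.withBackoff(client::bulk, request, listener, Settings.EMPTY);
        // Blocking callers now get a future instead of an internal sync path:
        // BulkResponse response = retry.withBackoff(client::bulk, request, Settings.EMPTY).actionGet();
    }
}
-------------------------------------------------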
@@ -279,7 +279,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
 break;
 default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]");
 }
-} catch (ElasticsearchParseException | RoutingMissingException e) {
+} catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException e) {
 BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id(), e);
 BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure);
 responses.set(i, bulkItemResponse);

@@ -134,7 +134,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
 this.mainRequest = mainRequest;
 this.listener = listener;
 BackoffPolicy backoffPolicy = buildBackoffPolicy();
-bulkRetry = Retry.on(EsRejectedExecutionException.class).policy(BackoffPolicy.wrap(backoffPolicy, task::countBulkRetry)).using(threadPool);
+bulkRetry = new Retry(EsRejectedExecutionException.class, BackoffPolicy.wrap(backoffPolicy, task::countBulkRetry), threadPool);
 scrollSource = buildScrollableResultSource(backoffPolicy);
 scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null");
 /*
@@ -337,7 +337,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
 finishHim(null);
 return;
 }
-bulkRetry.withAsyncBackoff(client::bulk, request, new ActionListener<BulkResponse>() {
+bulkRetry.withBackoff(client::bulk, request, new ActionListener<BulkResponse>() {
 @Override
 public void onResponse(BulkResponse response) {
 onBulkResponse(thisBatchStartTime, response);

@@ -491,14 +491,18 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
 }

 if (parent != null && !mappingMd.hasParentField()) {
-throw new IllegalArgumentException("Can't specify parent if no parent field has been configured");
+throw new IllegalArgumentException("can't specify parent if no parent field has been configured");
 }
 } else {
 if (parent != null) {
-throw new IllegalArgumentException("Can't specify parent if no parent field has been configured");
+throw new IllegalArgumentException("can't specify parent if no parent field has been configured");
 }
 }

+if ("".equals(id)) {
+throw new IllegalArgumentException("if _id is specified it must not be empty");
+}
+
 // generate id if not already provided
 if (id == null) {
 assert autoGeneratedTimestamp == -1 : "timestamp has already been generated!";

@@ -20,7 +20,6 @@
 package org.elasticsearch.bootstrap;

 import org.elasticsearch.SecureSM;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.network.NetworkModule;
@@ -45,11 +44,9 @@ import java.security.NoSuchAlgorithmException;
 import java.security.Permissions;
 import java.security.Policy;
 import java.security.URIParameter;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;

@@ -269,6 +266,26 @@ final class Security {
 for (Path path : environment.dataFiles()) {
 addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
 }
+/*
+* If path.data and default.path.data are set, we need read access to the paths in default.path.data to check for the existence of
+* index directories there that could have arisen from a bug in the handling of simultaneous configuration of path.data and
+* default.path.data that was introduced in Elasticsearch 5.3.0.
+*
+* If path.data is not set then default.path.data would take precedence in setting the data paths for the environment and
+* permissions would have been granted above.
+*
+* If path.data is not set and default.path.data is not set, then we would fallback to the default data directory under
+* Elasticsearch home and again permissions would have been granted above.
+*
+* If path.data is set and default.path.data is not set, there is nothing to do here.
+*/
+if (Environment.PATH_DATA_SETTING.exists(environment.settings())
+&& Environment.DEFAULT_PATH_DATA_SETTING.exists(environment.settings())) {
+for (final String path : Environment.DEFAULT_PATH_DATA_SETTING.get(environment.settings())) {
+// write permissions are not needed here, we are not going to be writing to any paths here
+addPath(policy, Environment.DEFAULT_PATH_DATA_SETTING.getKey(), getPath(path), "read,readlink");
+}
+}
 for (Path path : environment.repoFiles()) {
 addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
 }
@@ -278,6 +295,11 @@ final class Security {
 }
 }

+@SuppressForbidden(reason = "read path that is not configured in environment")
+private static Path getPath(final String path) {
+return PathUtils.get(path);
+}
+
 /**
 * Add dynamic {@link SocketPermission}s based on HTTP and transport settings.
 *

@@ -30,13 +30,17 @@ import org.elasticsearch.common.util.ByteArray;
 */
 public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable {

-public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray byteArray, int length) {
+private final Releasable releasable;
+
+public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray byteArray, int length,
+Releasable releasable) {
 super(bigarrays, byteArray, length);
+this.releasable = releasable;
 }

 @Override
 public void close() {
-Releasables.close(byteArray);
+Releasables.close(releasable);
 }

 }

@@ -20,6 +20,7 @@
 package org.elasticsearch.common.compress;

 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -31,5 +32,9 @@ public interface Compressor {

 StreamInput streamInput(StreamInput in) throws IOException;

+/**
+* Creates a new stream output that compresses the contents and writes to the provided stream
+* output. Closing the returned {@link StreamOutput} will close the provided stream output.
+*/
 StreamOutput streamOutput(StreamOutput out) throws IOException;
 }

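Note on the new Compressor#streamOutput javadoc above: the returned stream owns the wrapped output, so compression is finished by closing it. A hedged sketch of how a caller might use it; the class name CompressorUsageSketch and the byte-array input are illustrative, not from the commit.

-------------------------------------------------
import java.io.IOException;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;

class CompressorUsageSketch {
    static BytesReference compress(Compressor compressor, byte[] data) throws IOException {
        BytesStreamOutput buffer = new BytesStreamOutput();
        // streamOutput wraps the buffer; closing the returned stream finishes compression
        // and closes the wrapped buffer, so write everything before the try block ends.
        try (StreamOutput compressed = compressor.streamOutput(buffer)) {
            compressed.writeBytes(data, 0, data.length);
        }
        return buffer.bytes();
    }
}
-------------------------------------------------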
@@ -20,7 +20,6 @@
 package org.elasticsearch.common.compress;

 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.compress.Compressor;
 import org.elasticsearch.common.io.stream.InputStreamStreamInput;
 import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -88,6 +87,7 @@ public class DeflateCompressor implements Compressor {
 decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE);
 return new InputStreamStreamInput(decompressedIn) {
 final AtomicBoolean closed = new AtomicBoolean(false);
+
 public void close() throws IOException {
 try {
 super.close();
@@ -107,10 +107,11 @@ public class DeflateCompressor implements Compressor {
 final boolean nowrap = true;
 final Deflater deflater = new Deflater(LEVEL, nowrap);
 final boolean syncFlush = true;
-OutputStream compressedOut = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush);
-compressedOut = new BufferedOutputStream(compressedOut, BUFFER_SIZE);
+DeflaterOutputStream deflaterOutputStream = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush);
+OutputStream compressedOut = new BufferedOutputStream(deflaterOutputStream, BUFFER_SIZE);
 return new OutputStreamStreamOutput(compressedOut) {
 final AtomicBoolean closed = new AtomicBoolean(false);
+
 public void close() throws IOException {
 try {
 super.close();

@@ -1,32 +0,0 @@
-/*
-* Licensed to Elasticsearch under one or more contributor
-* license agreements. See the NOTICE file distributed with
-* this work for additional information regarding copyright
-* ownership. Elasticsearch licenses this file to you under
-* the Apache License, Version 2.0 (the "License"); you may
-* not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied. See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*/
-
-package org.elasticsearch.common.io;
-
-import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
-
-/**
-* A bytes stream that requires its bytes to be released once no longer used.
-*/
-public interface ReleasableBytesStream extends BytesStream {
-
-@Override
-ReleasablePagedBytesReference bytes();
-
-}

@@ -20,6 +20,9 @@
 package org.elasticsearch.common.io;

 import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStream;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.Callback;

 import java.io.BufferedReader;
@@ -236,4 +239,56 @@ public abstract class Streams {
 }
 }
 }
+
+/**
+* Wraps the given {@link BytesStream} in a {@link StreamOutput} that simply flushes when
+* close is called.
+*/
+public static BytesStream flushOnCloseStream(BytesStream os) {
+return new FlushOnCloseOutputStream(os);
+}
+
+/**
+* A wrapper around a {@link BytesStream} that makes the close operation a flush. This is
+* needed as sometimes a stream will be closed but the bytes that the stream holds still need
+* to be used and the stream cannot be closed until the bytes have been consumed.
+*/
+private static class FlushOnCloseOutputStream extends BytesStream {
+
+private final BytesStream delegate;
+
+private FlushOnCloseOutputStream(BytesStream bytesStreamOutput) {
+this.delegate = bytesStreamOutput;
+}
+
+@Override
+public void writeByte(byte b) throws IOException {
+delegate.writeByte(b);
+}
+
+@Override
+public void writeBytes(byte[] b, int offset, int length) throws IOException {
+delegate.writeBytes(b, offset, length);
+}
+
+@Override
+public void flush() throws IOException {
+delegate.flush();
+}
+
+@Override
+public void close() throws IOException {
+flush();
+}
+
+@Override
+public void reset() throws IOException {
+delegate.reset();
+}
+
+@Override
+public BytesReference bytes() {
+return delegate.bytes();
+}
+}
 }

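Note on Streams.flushOnCloseStream above: it exists so a consumer can close its stream without releasing the bytes that still need to be read. A hedged sketch under the assumption that the wrapper is handed to an XContentBuilder, which is one plausible consumer; the class name FlushOnCloseSketch and the JSON content are illustrative only.

-------------------------------------------------
import java.io.IOException;

import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStream;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

class FlushOnCloseSketch {
    static BytesReference render(BigArrays bigArrays) throws IOException {
        ReleasableBytesStreamOutput output = new ReleasableBytesStreamOutput(bigArrays);
        // Closing the wrapper only flushes, so the builder can be closed without
        // releasing the pages that still back the bytes we are about to return.
        BytesStream wrapped = Streams.flushOnCloseStream(output);
        try (XContentBuilder builder = XContentFactory.jsonBuilder(wrapped)) {
            builder.startObject().field("ok", true).endObject();
        }
        return output.bytes(); // the caller is responsible for eventually closing `output`
    }
}
-------------------------------------------------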
@@ -17,11 +17,11 @@
 * under the License.
 */

-package org.elasticsearch.common.io;
+package org.elasticsearch.common.io.stream;

 import org.elasticsearch.common.bytes.BytesReference;

-public interface BytesStream {
+public abstract class BytesStream extends StreamOutput {

-BytesReference bytes();
+public abstract BytesReference bytes();
 }

@@ -21,7 +21,6 @@ package org.elasticsearch.common.io.stream;

 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.bytes.PagedBytesReference;
-import org.elasticsearch.common.io.BytesStream;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.ByteArray;

@@ -31,7 +30,7 @@ import java.io.IOException;
 * A @link {@link StreamOutput} that uses {@link BigArrays} to acquire pages of
 * bytes, which avoids frequent reallocation & copying of the internal data.
 */
-public class BytesStreamOutput extends StreamOutput implements BytesStream {
+public class BytesStreamOutput extends BytesStream {

 protected final BigArrays bigArrays;

@@ -151,7 +150,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
 return bytes.ramBytesUsed();
 }

-private void ensureCapacity(long offset) {
+void ensureCapacity(long offset) {
 if (offset > Integer.MAX_VALUE) {
 throw new IllegalArgumentException(getClass().getSimpleName() + " cannot hold more than 2GB of data");
 }

@@ -20,29 +20,66 @@
 package org.elasticsearch.common.io.stream;

 import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
-import org.elasticsearch.common.io.ReleasableBytesStream;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ByteArray;

 /**
 * An bytes stream output that allows providing a {@link BigArrays} instance
 * expecting it to require releasing its content ({@link #bytes()}) once done.
 * <p>
-* Please note, its is the responsibility of the caller to make sure the bytes
-* reference do not "escape" and are released only once.
+* Please note, closing this stream will release the bytes that are in use by any
+* {@link ReleasablePagedBytesReference} returned from {@link #bytes()}, so this
+* stream should only be closed after the bytes have been output or copied
+* elsewhere.
 */
-public class ReleasableBytesStreamOutput extends BytesStreamOutput implements ReleasableBytesStream {
+public class ReleasableBytesStreamOutput extends BytesStreamOutput
+implements Releasable {

+private Releasable releasable;
+
 public ReleasableBytesStreamOutput(BigArrays bigarrays) {
-super(BigArrays.PAGE_SIZE_IN_BYTES, bigarrays);
+this(BigArrays.PAGE_SIZE_IN_BYTES, bigarrays);
 }

 public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigArrays) {
 super(expectedSize, bigArrays);
+this.releasable = Releasables.releaseOnce(this.bytes);
+}
+
+/**
+* Returns a {@link Releasable} implementation of a
+* {@link org.elasticsearch.common.bytes.BytesReference} that represents the current state of
+* the bytes in the stream.
+*/
+@Override
+public ReleasablePagedBytesReference bytes() {
+return new ReleasablePagedBytesReference(bigArrays, bytes, count, releasable);
 }

 @Override
-public ReleasablePagedBytesReference bytes() {
-return new ReleasablePagedBytesReference(bigArrays, bytes, count);
+public void close() {
+Releasables.close(releasable);
 }

+@Override
+void ensureCapacity(long offset) {
+final ByteArray prevBytes = this.bytes;
+super.ensureCapacity(offset);
+if (prevBytes != this.bytes) {
+// re-create the releasable with the new reference
+releasable = Releasables.releaseOnce(this.bytes);
+}
+}
+
+@Override
+public void reset() {
+final ByteArray prevBytes = this.bytes;
+super.reset();
+if (prevBytes != this.bytes) {
+// re-create the releasable with the new reference
+releasable = Releasables.releaseOnce(this.bytes);
+}
+}
 }

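Note on ReleasableBytesStreamOutput above: bytes() now returns a reference tied to a Releasables.releaseOnce wrapper and close() releases the pages, so the stream must outlive any use of the returned bytes. A minimal hedged sketch; the class name ReleasableOutputSketch and the written value are illustrative.

-------------------------------------------------
import java.io.IOException;

import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.util.BigArrays;

class ReleasableOutputSketch {
    static void roundTrip(BigArrays bigArrays) throws IOException {
        try (ReleasableBytesStreamOutput output = new ReleasableBytesStreamOutput(bigArrays)) {
            output.writeVInt(42);
            ReleasablePagedBytesReference bytes = output.bytes();
            // use `bytes` while the stream is still open
        } // close() releases the underlying ByteArray exactly once via Releasables.releaseOnce
    }
}
-------------------------------------------------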
@@ -226,10 +226,41 @@ public class DeprecationLogger {
 * @return the extracted warning value
 */
 public static String extractWarningValueFromWarningHeader(final String s) {
+/*
+* We know the exact format of the warning header, so to extract the warning value we can skip forward from the front to the first
+* quote, and skip backwards from the end to the penultimate quote:
+*
+* 299 Elasticsearch-6.0.0 "warning value" "Sat, 25, Feb 2017 10:27:43 GMT"
+* ^ ^ ^
+* firstQuote penultimateQuote lastQuote
+*
+* We do it this way rather than seeking forward after the first quote because there could be escaped quotes in the warning value
+* but since there are none in the warning date, we can skip backwards to find the quote that closes the quoted warning value.
+*
+* We parse this manually rather than using the capturing regular expression because the regular expression involves a lot of
+* backtracking and carries a performance penalty. However, when assertions are enabled, we still use the regular expression to
+* verify that we are maintaining the warning header format.
+*/
+final int firstQuote = s.indexOf('\"');
+final int lastQuote = s.lastIndexOf('\"');
+final int penultimateQuote = s.lastIndexOf('\"', lastQuote - 1);
+final String warningValue = s.substring(firstQuote + 1, penultimateQuote - 2);
+assert assertWarningValue(s, warningValue);
+return warningValue;
+}
+
+/**
+* Assert that the specified string has the warning value equal to the provided warning value.
+*
+* @param s the string representing a full warning header
+* @param warningValue the expected warning header
+* @return {@code true} if the specified string has the expected warning value
+*/
+private static boolean assertWarningValue(final String s, final String warningValue) {
 final Matcher matcher = WARNING_HEADER_PATTERN.matcher(s);
 final boolean matches = matcher.matches();
 assert matches;
-return matcher.group(1);
+return matcher.group(1).equals(warningValue);
 }

 /**

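Note on the warning-header parsing added above: the value is recovered purely from quote positions. A worked example in plain Java using the sample header from the new comment; the class name WarningHeaderSketch is illustrative.

-------------------------------------------------
class WarningHeaderSketch {
    public static void main(String[] args) {
        String header = "299 Elasticsearch-6.0.0 \"warning value\" \"Sat, 25, Feb 2017 10:27:43 GMT\"";
        int firstQuote = header.indexOf('"');                          // opens the warning value
        int lastQuote = header.lastIndexOf('"');                       // closes the quoted date
        int penultimateQuote = header.lastIndexOf('"', lastQuote - 1); // opens the quoted date
        // Mirrors the parsing above: slice between the first quote and the quote pair around the date.
        String warningValue = header.substring(firstQuote + 1, penultimateQuote - 2);
        System.out.println(warningValue); // prints: warning value
    }
}
-------------------------------------------------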
@ -29,9 +29,12 @@ import org.apache.lucene.index.TermsEnum;
|
||||||
import org.apache.lucene.search.DocIdSetIterator;
|
import org.apache.lucene.search.DocIdSetIterator;
|
||||||
import org.apache.lucene.util.Bits;
|
import org.apache.lucene.util.Bits;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion;
|
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo;
|
||||||
|
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
|
||||||
|
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
|
||||||
import org.elasticsearch.index.mapper.UidFieldMapper;
|
import org.elasticsearch.index.mapper.UidFieldMapper;
|
||||||
import org.elasticsearch.index.mapper.VersionFieldMapper;
|
import org.elasticsearch.index.mapper.VersionFieldMapper;
|
||||||
|
import org.elasticsearch.index.seqno.SequenceNumbersService;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
|
@ -43,7 +46,7 @@ import java.io.IOException;
|
||||||
* in more than one document! It will only return the first one it
|
* in more than one document! It will only return the first one it
|
||||||
* finds. */
|
* finds. */
|
||||||
|
|
||||||
final class PerThreadIDAndVersionLookup {
|
final class PerThreadIDVersionAndSeqNoLookup {
|
||||||
// TODO: do we really need to store all this stuff? some if it might not speed up anything.
|
// TODO: do we really need to store all this stuff? some if it might not speed up anything.
|
||||||
// we keep it around for now, to reduce the amount of e.g. hash lookups by field and stuff
|
// we keep it around for now, to reduce the amount of e.g. hash lookups by field and stuff
|
||||||
|
|
||||||
|
@ -51,7 +54,10 @@ final class PerThreadIDAndVersionLookup {
|
||||||
private final TermsEnum termsEnum;
|
private final TermsEnum termsEnum;
|
||||||
/** _version data */
|
/** _version data */
|
||||||
private final NumericDocValues versions;
|
private final NumericDocValues versions;
|
||||||
|
/** _seq_no data */
|
||||||
|
private final NumericDocValues seqNos;
|
||||||
|
/** _primary_term data */
|
||||||
|
private final NumericDocValues primaryTerms;
|
||||||
/** Reused for iteration (when the term exists) */
|
/** Reused for iteration (when the term exists) */
|
||||||
private PostingsEnum docsEnum;
|
private PostingsEnum docsEnum;
|
||||||
|
|
||||||
|
@ -61,7 +67,7 @@ final class PerThreadIDAndVersionLookup {
|
||||||
/**
|
/**
|
||||||
* Initialize lookup for the provided segment
|
* Initialize lookup for the provided segment
|
||||||
*/
|
*/
|
||||||
PerThreadIDAndVersionLookup(LeafReader reader) throws IOException {
|
PerThreadIDVersionAndSeqNoLookup(LeafReader reader) throws IOException {
|
||||||
Fields fields = reader.fields();
|
Fields fields = reader.fields();
|
||||||
Terms terms = fields.terms(UidFieldMapper.NAME);
|
Terms terms = fields.terms(UidFieldMapper.NAME);
|
||||||
termsEnum = terms.iterator();
|
termsEnum = terms.iterator();
|
||||||
|
@ -74,6 +80,8 @@ final class PerThreadIDAndVersionLookup {
|
||||||
throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME +
|
throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME +
|
||||||
"] field");
|
"] field");
|
||||||
}
|
}
|
||||||
|
seqNos = reader.getNumericDocValues(SeqNoFieldMapper.NAME);
|
||||||
|
primaryTerms = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
|
||||||
Object readerKey = null;
|
Object readerKey = null;
|
||||||
assert (readerKey = reader.getCoreCacheKey()) != null;
|
assert (readerKey = reader.getCoreCacheKey()) != null;
|
||||||
this.readerKey = readerKey;
|
this.readerKey = readerKey;
|
||||||
|
@ -113,4 +121,25 @@ final class PerThreadIDAndVersionLookup {
|
||||||
return DocIdSetIterator.NO_MORE_DOCS;
|
return DocIdSetIterator.NO_MORE_DOCS;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Return null if id is not found. */
|
||||||
|
DocIdAndSeqNo lookupSeqNo(BytesRef id, Bits liveDocs, LeafReaderContext context) throws IOException {
|
||||||
|
assert context.reader().getCoreCacheKey().equals(readerKey) :
|
||||||
|
"context's reader is not the same as the reader class was initialized on.";
|
||||||
|
int docID = getDocID(id, liveDocs);
|
||||||
|
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
|
||||||
|
return new DocIdAndSeqNo(docID, seqNos == null ? SequenceNumbersService.UNASSIGNED_SEQ_NO : seqNos.get(docID), context);
|
||||||
|
} else {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* returns 0 if the primary term is not found.
|
||||||
|
*
|
||||||
|
* Note that 0 is an illegal primary term. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
|
||||||
|
**/
|
||||||
|
long lookUpPrimaryTerm(int docID) throws IOException {
|
||||||
|
return primaryTerms == null ? 0 : primaryTerms.get(docID);
|
||||||
|
}
|
||||||
}
|
}
|
|
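A minimal sketch of the per-segment read the lookup class above performs, assuming a LeafReader and an already-resolved docId; apart from the mapper constants, the names here are illustrative and not part of this change:

-------------------------------------------------
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbersService;

import java.io.IOException;

class SeqNoDocValuesSketch {
    /** Reads _seq_no and _primary_term for a doc, mirroring the null guards above. */
    static long[] readSeqNoAndTerm(LeafReader reader, int docId) throws IOException {
        NumericDocValues seqNos = reader.getNumericDocValues(SeqNoFieldMapper.NAME);
        NumericDocValues primaryTerms = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
        long seqNo = seqNos == null ? SequenceNumbersService.UNASSIGNED_SEQ_NO : seqNos.get(docId);
        long term = primaryTerms == null ? 0L : primaryTerms.get(docId); // 0 means "not found"; 0 is never a valid term
        return new long[] { seqNo, term };
    }
}
-------------------------------------------------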
@ -0,0 +1,180 @@
|
||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.elasticsearch.common.lucene.uid;
|
||||||
|
|
||||||
|
import org.apache.lucene.index.IndexReader;
|
||||||
|
import org.apache.lucene.index.LeafReader;
|
||||||
|
import org.apache.lucene.index.LeafReader.CoreClosedListener;
|
||||||
|
import org.apache.lucene.index.LeafReaderContext;
|
||||||
|
import org.apache.lucene.index.Term;
|
||||||
|
import org.apache.lucene.util.CloseableThreadLocal;
|
||||||
|
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||||
|
import org.elasticsearch.index.mapper.UidFieldMapper;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.concurrent.ConcurrentMap;
|
||||||
|
|
||||||
|
import static org.elasticsearch.common.lucene.uid.Versions.NOT_FOUND;
|
||||||
|
|
||||||
|
/** Utility class to resolve the Lucene doc ID, version, seqNo and primaryTerms for a given uid. */
|
||||||
|
public final class VersionsAndSeqNoResolver {
|
||||||
|
|
||||||
|
static final ConcurrentMap<Object, CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup>> lookupStates =
|
||||||
|
ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
|
||||||
|
|
||||||
|
// Evict this reader from lookupStates once it's closed:
|
||||||
|
private static final CoreClosedListener removeLookupState = key -> {
|
||||||
|
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> ctl = lookupStates.remove(key);
|
||||||
|
if (ctl != null) {
|
||||||
|
ctl.close();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
private static PerThreadIDVersionAndSeqNoLookup getLookupState(LeafReader reader) throws IOException {
|
||||||
|
Object key = reader.getCoreCacheKey();
|
||||||
|
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> ctl = lookupStates.get(key);
|
||||||
|
if (ctl == null) {
|
||||||
|
// First time we are seeing this reader's core; make a new CTL:
|
||||||
|
ctl = new CloseableThreadLocal<>();
|
||||||
|
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> other = lookupStates.putIfAbsent(key, ctl);
|
||||||
|
if (other == null) {
|
||||||
|
// Our CTL won, we must remove it when the core is closed:
|
||||||
|
reader.addCoreClosedListener(removeLookupState);
|
||||||
|
} else {
|
||||||
|
// Another thread beat us to it: just use their CTL:
|
||||||
|
ctl = other;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
PerThreadIDVersionAndSeqNoLookup lookupState = ctl.get();
|
||||||
|
if (lookupState == null) {
|
||||||
|
lookupState = new PerThreadIDVersionAndSeqNoLookup(reader);
|
||||||
|
ctl.set(lookupState);
|
||||||
|
}
|
||||||
|
|
||||||
|
return lookupState;
|
||||||
|
}
|
||||||
|
|
||||||
|
private VersionsAndSeqNoResolver() {
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Wraps a {@link LeafReaderContext}, a doc ID <b>relative to the context doc base</b> and a version. */
|
||||||
|
public static class DocIdAndVersion {
|
||||||
|
public final int docId;
|
||||||
|
public final long version;
|
||||||
|
public final LeafReaderContext context;
|
||||||
|
|
||||||
|
DocIdAndVersion(int docId, long version, LeafReaderContext context) {
|
||||||
|
this.docId = docId;
|
||||||
|
this.version = version;
|
||||||
|
this.context = context;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Wraps a {@link LeafReaderContext}, a doc ID <b>relative to the context doc base</b> and a seqNo. */
|
||||||
|
public static class DocIdAndSeqNo {
|
||||||
|
public final int docId;
|
||||||
|
public final long seqNo;
|
||||||
|
public final LeafReaderContext context;
|
||||||
|
|
||||||
|
DocIdAndSeqNo(int docId, long seqNo, LeafReaderContext context) {
|
||||||
|
this.docId = docId;
|
||||||
|
this.seqNo = seqNo;
|
||||||
|
this.context = context;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load the internal doc ID and version for the uid from the reader, returning<ul>
|
||||||
|
* <li>null if the uid wasn't found,
|
||||||
|
* <li>a doc ID and a version otherwise
|
||||||
|
* </ul>
|
||||||
|
*/
|
||||||
|
public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term) throws IOException {
|
||||||
|
assert term.field().equals(UidFieldMapper.NAME) : "unexpected term field " + term.field();
|
||||||
|
List<LeafReaderContext> leaves = reader.leaves();
|
||||||
|
if (leaves.isEmpty()) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
// iterate backwards to optimize for the frequently updated documents
|
||||||
|
// which are likely to be in the last segments
|
||||||
|
for (int i = leaves.size() - 1; i >= 0; i--) {
|
||||||
|
LeafReaderContext context = leaves.get(i);
|
||||||
|
LeafReader leaf = context.reader();
|
||||||
|
PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf);
|
||||||
|
DocIdAndVersion result = lookup.lookupVersion(term.bytes(), leaf.getLiveDocs(), context);
|
||||||
|
if (result != null) {
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load the internal doc ID and sequence number for the uid from the reader, returning<ul>
|
||||||
|
* <li>null if the uid wasn't found,
|
||||||
|
* <li>a doc ID and the associated seqNo otherwise
|
||||||
|
* </ul>
|
||||||
|
*/
|
||||||
|
public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException {
|
||||||
|
assert term.field().equals(UidFieldMapper.NAME) : "unexpected term field " + term.field();
|
||||||
|
List<LeafReaderContext> leaves = reader.leaves();
|
||||||
|
if (leaves.isEmpty()) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
// iterate backwards to optimize for the frequently updated documents
|
||||||
|
// which are likely to be in the last segments
|
||||||
|
for (int i = leaves.size() - 1; i >= 0; i--) {
|
||||||
|
LeafReaderContext context = leaves.get(i);
|
||||||
|
LeafReader leaf = context.reader();
|
||||||
|
PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf);
|
||||||
|
DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf.getLiveDocs(), context);
|
||||||
|
if (result != null) {
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load the primaryTerm associated with the given {@link DocIdAndSeqNo}
|
||||||
|
*/
|
||||||
|
public static long loadPrimaryTerm(DocIdAndSeqNo docIdAndSeqNo) throws IOException {
|
||||||
|
LeafReader leaf = docIdAndSeqNo.context.reader();
|
||||||
|
PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf);
|
||||||
|
long result = lookup.lookUpPrimaryTerm(docIdAndSeqNo.docId);
|
||||||
|
assert result > 0 : "should always resolve a primary term for a resolved sequence number. primary_term [" + result + "]"
|
||||||
|
+ " docId [" + docIdAndSeqNo.docId + "] seqNo [" + docIdAndSeqNo.seqNo + "]";
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Load the version for the uid from the reader, returning<ul>
|
||||||
|
* <li>{@link Versions#NOT_FOUND} if no matching doc exists,
|
||||||
|
* <li>the version associated with the provided uid otherwise
|
||||||
|
* </ul>
|
||||||
|
*/
|
||||||
|
public static long loadVersion(IndexReader reader, Term term) throws IOException {
|
||||||
|
final DocIdAndVersion docIdAndVersion = loadDocIdAndVersion(reader, term);
|
||||||
|
return docIdAndVersion == null ? NOT_FOUND : docIdAndVersion.version;
|
||||||
|
}
|
||||||
|
}
|
|
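A minimal caller-side sketch of the new resolver API above, assuming an open IndexReader and a _uid term; the wrapper class and printed output are illustrative, not part of this change:

-------------------------------------------------
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver;
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo;
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
import org.elasticsearch.index.mapper.UidFieldMapper;

import java.io.IOException;

class ResolverUsageSketch {
    static void resolve(IndexReader reader, String uid) throws IOException {
        Term uidTerm = new Term(UidFieldMapper.NAME, uid);
        DocIdAndVersion version = VersionsAndSeqNoResolver.loadDocIdAndVersion(reader, uidTerm);
        if (version != null) {
            System.out.println("doc " + version.docId + " has version " + version.version);
        }
        DocIdAndSeqNo seqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(reader, uidTerm);
        if (seqNo != null) {
            // the primary term is only meaningful once a seq# has been resolved
            long term = VersionsAndSeqNoResolver.loadPrimaryTerm(seqNo);
            System.out.println("seq_no " + seqNo.seqNo + ", primary_term " + term);
        }
    }
}
-------------------------------------------------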
@ -1,263 +0,0 @@
|
||||||
/*
|
|
||||||
* Licensed to Elasticsearch under one or more contributor
|
|
||||||
* license agreements. See the NOTICE file distributed with
|
|
||||||
* this work for additional information regarding copyright
|
|
||||||
* ownership. Elasticsearch licenses this file to you under
|
|
||||||
* the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
* not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing,
|
|
||||||
* software distributed under the License is distributed on an
|
|
||||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
|
||||||
* KIND, either express or implied. See the License for the
|
|
||||||
* specific language governing permissions and limitations
|
|
||||||
* under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.common.lucene.uid;
|
|
||||||
|
|
||||||
import org.apache.lucene.index.Fields;
|
|
||||||
import org.apache.lucene.index.IndexReader;
|
|
||||||
import org.apache.lucene.index.LeafReader;
|
|
||||||
import org.apache.lucene.index.LeafReader.CoreClosedListener;
|
|
||||||
import org.apache.lucene.index.LeafReaderContext;
|
|
||||||
import org.apache.lucene.index.NumericDocValues;
|
|
||||||
import org.apache.lucene.index.PostingsEnum;
|
|
||||||
import org.apache.lucene.index.SortedNumericDocValues;
|
|
||||||
import org.apache.lucene.index.Term;
|
|
||||||
import org.apache.lucene.index.Terms;
|
|
||||||
import org.apache.lucene.index.TermsEnum;
|
|
||||||
import org.apache.lucene.search.DocIdSetIterator;
|
|
||||||
import org.apache.lucene.util.Bits;
|
|
||||||
import org.apache.lucene.util.BytesRef;
|
|
||||||
import org.apache.lucene.util.CloseableThreadLocal;
|
|
||||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
|
||||||
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
|
|
||||||
import org.elasticsearch.index.mapper.UidFieldMapper;
|
|
||||||
import org.elasticsearch.index.seqno.SequenceNumbersService;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.concurrent.ConcurrentMap;
|
|
||||||
|
|
||||||
import static org.elasticsearch.common.lucene.uid.Versions.NOT_FOUND;
|
|
||||||
|
|
||||||
/** Utility class to resolve the Lucene doc ID and version for a given uid. */
|
|
||||||
public class VersionsResolver {
|
|
||||||
|
|
||||||
static final ConcurrentMap<Object, CloseableThreadLocal<PerThreadIDAndVersionLookup>>
|
|
||||||
lookupStates = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
|
|
||||||
|
|
||||||
// Evict this reader from lookupStates once it's closed:
|
|
||||||
private static final CoreClosedListener removeLookupState = key -> {
|
|
||||||
CloseableThreadLocal<PerThreadIDAndVersionLookup> ctl = lookupStates.remove(key);
|
|
||||||
if (ctl != null) {
|
|
||||||
ctl.close();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
private static PerThreadIDAndVersionLookup getLookupState(LeafReader reader)
|
|
||||||
throws IOException {
|
|
||||||
Object key = reader.getCoreCacheKey();
|
|
||||||
CloseableThreadLocal<PerThreadIDAndVersionLookup> ctl = lookupStates.get(key);
|
|
||||||
if (ctl == null) {
|
|
||||||
// First time we are seeing this reader's core; make a
|
|
||||||
// new CTL:
|
|
||||||
ctl = new CloseableThreadLocal<>();
|
|
||||||
CloseableThreadLocal<PerThreadIDAndVersionLookup> other =
|
|
||||||
lookupStates.putIfAbsent(key, ctl);
|
|
||||||
if (other == null) {
|
|
||||||
// Our CTL won, we must remove it when the
|
|
||||||
// core is closed:
|
|
||||||
reader.addCoreClosedListener(removeLookupState);
|
|
||||||
} else {
|
|
||||||
// Another thread beat us to it: just use
|
|
||||||
// their CTL:
|
|
||||||
ctl = other;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
PerThreadIDAndVersionLookup lookupState = ctl.get();
|
|
||||||
if (lookupState == null) {
|
|
||||||
lookupState = new PerThreadIDAndVersionLookup(reader);
|
|
||||||
ctl.set(lookupState);
|
|
||||||
}
|
|
||||||
|
|
||||||
return lookupState;
|
|
||||||
}
|
|
||||||
|
|
||||||
private VersionsResolver() {
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Wraps an {@link LeafReaderContext}, a doc ID <b>relative to the context doc base</b> and
|
|
||||||
* a version.
|
|
||||||
**/
|
|
||||||
public static class DocIdAndVersion {
|
|
||||||
public final int docId;
|
|
||||||
public final long version;
|
|
||||||
public final LeafReaderContext context;
|
|
||||||
|
|
||||||
public DocIdAndVersion(int docId, long version, LeafReaderContext context) {
|
|
||||||
this.docId = docId;
|
|
||||||
this.version = version;
|
|
||||||
this.context = context;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Load the internal doc ID and version for the uid from the reader, returning<ul>
|
|
||||||
* <li>null if the uid wasn't found,
|
|
||||||
* <li>a doc ID and a version otherwise
|
|
||||||
* </ul>
|
|
||||||
*/
|
|
||||||
public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term)
|
|
||||||
throws IOException {
|
|
||||||
assert term.field().equals(UidFieldMapper.NAME);
|
|
||||||
List<LeafReaderContext> leaves = reader.leaves();
|
|
||||||
if (leaves.isEmpty()) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
// iterate backwards to optimize for the frequently updated documents
|
|
||||||
// which are likely to be in the last segments
|
|
||||||
for (int i = leaves.size() - 1; i >= 0; i--) {
|
|
||||||
LeafReaderContext context = leaves.get(i);
|
|
||||||
LeafReader leaf = context.reader();
|
|
||||||
PerThreadIDAndVersionLookup lookup = getLookupState(leaf);
|
|
||||||
DocIdAndVersion result =
|
|
||||||
lookup.lookupVersion(term.bytes(), leaf.getLiveDocs(), context);
|
|
||||||
if (result != null) {
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Load the version for the uid from the reader, returning<ul>
|
|
||||||
* <li>{@link Versions#NOT_FOUND} if no matching doc exists,
|
|
||||||
* <li>the version associated with the provided uid otherwise
|
|
||||||
* </ul>
|
|
||||||
*/
|
|
||||||
public static long loadVersion(IndexReader reader, Term term) throws IOException {
|
|
||||||
final DocIdAndVersion docIdAndVersion = loadDocIdAndVersion(reader, term);
|
|
||||||
return docIdAndVersion == null ? NOT_FOUND : docIdAndVersion.version;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the sequence number for the given uid term, returning
|
|
||||||
* {@code SequenceNumbersService.UNASSIGNED_SEQ_NO} if none is found.
|
|
||||||
*/
|
|
||||||
public static long loadSeqNo(IndexReader reader, Term term) throws IOException {
|
|
||||||
assert term.field().equals(UidFieldMapper.NAME) : "can only load _seq_no by uid";
|
|
||||||
List<LeafReaderContext> leaves = reader.leaves();
|
|
||||||
if (leaves.isEmpty()) {
|
|
||||||
return SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
|
||||||
}
|
|
||||||
|
|
||||||
// iterate backwards to optimize for the frequently updated documents
|
|
||||||
// which are likely to be in the last segments
|
|
||||||
for (int i = leaves.size() - 1; i >= 0; i--) {
|
|
||||||
LeafReader leaf = leaves.get(i).reader();
|
|
||||||
Bits liveDocs = leaf.getLiveDocs();
|
|
||||||
|
|
||||||
TermsEnum termsEnum = null;
|
|
||||||
SortedNumericDocValues dvField = null;
|
|
||||||
PostingsEnum docsEnum = null;
|
|
||||||
|
|
||||||
final Fields fields = leaf.fields();
|
|
||||||
if (fields != null) {
|
|
||||||
Terms terms = fields.terms(UidFieldMapper.NAME);
|
|
||||||
if (terms != null) {
|
|
||||||
termsEnum = terms.iterator();
|
|
||||||
assert termsEnum != null;
|
|
||||||
dvField = leaf.getSortedNumericDocValues(SeqNoFieldMapper.NAME);
|
|
||||||
assert dvField != null;
|
|
||||||
|
|
||||||
final BytesRef id = term.bytes();
|
|
||||||
if (termsEnum.seekExact(id)) {
|
|
||||||
// there may be more than one matching docID, in the
|
|
||||||
// case of nested docs, so we want the last one:
|
|
||||||
docsEnum = termsEnum.postings(docsEnum, 0);
|
|
||||||
int docID = DocIdSetIterator.NO_MORE_DOCS;
|
|
||||||
for (int d = docsEnum.nextDoc();
|
|
||||||
d != DocIdSetIterator.NO_MORE_DOCS; d = docsEnum.nextDoc()) {
|
|
||||||
if (liveDocs != null && liveDocs.get(d) == false) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
docID = d;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
|
|
||||||
dvField.setDocument(docID);
|
|
||||||
assert dvField.count() == 1 :
|
|
||||||
"expected only a single value for _seq_no but got " +
|
|
||||||
dvField.count();
|
|
||||||
return dvField.valueAt(0);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the primary term for the given uid term, returning {@code 0} if none is found.
|
|
||||||
*/
|
|
||||||
public static long loadPrimaryTerm(IndexReader reader, Term term) throws IOException {
|
|
||||||
assert term.field().equals(UidFieldMapper.NAME) : "can only load _primary_term by uid";
|
|
||||||
List<LeafReaderContext> leaves = reader.leaves();
|
|
||||||
if (leaves.isEmpty()) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// iterate backwards to optimize for the frequently updated documents
|
|
||||||
// which are likely to be in the last segments
|
|
||||||
for (int i = leaves.size() - 1; i >= 0; i--) {
|
|
||||||
LeafReader leaf = leaves.get(i).reader();
|
|
||||||
Bits liveDocs = leaf.getLiveDocs();
|
|
||||||
|
|
||||||
TermsEnum termsEnum = null;
|
|
||||||
NumericDocValues dvField = null;
|
|
||||||
PostingsEnum docsEnum = null;
|
|
||||||
|
|
||||||
final Fields fields = leaf.fields();
|
|
||||||
if (fields != null) {
|
|
||||||
Terms terms = fields.terms(UidFieldMapper.NAME);
|
|
||||||
if (terms != null) {
|
|
||||||
termsEnum = terms.iterator();
|
|
||||||
assert termsEnum != null;
|
|
||||||
dvField = leaf.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
|
|
||||||
assert dvField != null;
|
|
||||||
|
|
||||||
final BytesRef id = term.bytes();
|
|
||||||
if (termsEnum.seekExact(id)) {
|
|
||||||
// there may be more than one matching docID, in the
|
|
||||||
// case of nested docs, so we want the last one:
|
|
||||||
docsEnum = termsEnum.postings(docsEnum, 0);
|
|
||||||
int docID = DocIdSetIterator.NO_MORE_DOCS;
|
|
||||||
for (int d = docsEnum.nextDoc();
|
|
||||||
d != DocIdSetIterator.NO_MORE_DOCS;
|
|
||||||
d = docsEnum.nextDoc()) {
|
|
||||||
if (liveDocs != null && liveDocs.get(d) == false) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
docID = d;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
|
|
||||||
return dvField.get(docID);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -311,9 +311,12 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
||||||
HunspellService.HUNSPELL_IGNORE_CASE,
|
HunspellService.HUNSPELL_IGNORE_CASE,
|
||||||
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
|
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
|
||||||
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
|
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
|
||||||
|
Environment.DEFAULT_PATH_CONF_SETTING,
|
||||||
Environment.PATH_CONF_SETTING,
|
Environment.PATH_CONF_SETTING,
|
||||||
|
Environment.DEFAULT_PATH_DATA_SETTING,
|
||||||
Environment.PATH_DATA_SETTING,
|
Environment.PATH_DATA_SETTING,
|
||||||
Environment.PATH_HOME_SETTING,
|
Environment.PATH_HOME_SETTING,
|
||||||
|
Environment.DEFAULT_PATH_LOGS_SETTING,
|
||||||
Environment.PATH_LOGS_SETTING,
|
Environment.PATH_LOGS_SETTING,
|
||||||
Environment.PATH_REPO_SETTING,
|
Environment.PATH_REPO_SETTING,
|
||||||
Environment.PATH_SCRIPTS_SETTING,
|
Environment.PATH_SCRIPTS_SETTING,
|
||||||
|
|
|
@ -57,6 +57,7 @@ import java.util.HashSet;
|
||||||
import java.util.Iterator;
|
import java.util.Iterator;
|
||||||
import java.util.LinkedHashMap;
|
import java.util.LinkedHashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Locale;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.NoSuchElementException;
|
import java.util.NoSuchElementException;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
@ -442,6 +443,20 @@ public final class Settings implements ToXContent {
|
||||||
public String[] getAsArray(String settingPrefix, String[] defaultArray, Boolean commaDelimited) throws SettingsException {
|
public String[] getAsArray(String settingPrefix, String[] defaultArray, Boolean commaDelimited) throws SettingsException {
|
||||||
List<String> result = new ArrayList<>();
|
List<String> result = new ArrayList<>();
|
||||||
|
|
||||||
|
final String valueFromPrefix = get(settingPrefix);
|
||||||
|
final String valueFromPrefix0 = get(settingPrefix + ".0");
|
||||||
|
|
||||||
|
if (valueFromPrefix != null && valueFromPrefix0 != null) {
|
||||||
|
final String message = String.format(
|
||||||
|
Locale.ROOT,
|
||||||
|
"settings object contains values for [%s=%s] and [%s=%s]",
|
||||||
|
settingPrefix,
|
||||||
|
valueFromPrefix,
|
||||||
|
settingPrefix + ".0",
|
||||||
|
valueFromPrefix0);
|
||||||
|
throw new IllegalStateException(message);
|
||||||
|
}
|
||||||
|
|
||||||
if (get(settingPrefix) != null) {
|
if (get(settingPrefix) != null) {
|
||||||
if (commaDelimited) {
|
if (commaDelimited) {
|
||||||
String[] strings = Strings.splitStringByCommaToArray(get(settingPrefix));
|
String[] strings = Strings.splitStringByCommaToArray(get(settingPrefix));
|
||||||
|
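A small sketch of the conflict check added to getAsArray above: defining both the flat key and its ".0" variant now fails fast. The setting key and values are illustrative, not part of this change:

-------------------------------------------------
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;

class ArraySettingConflictSketch {
    static void demo() {
        // Both the flat key and its ".0" variant are set; the key name is illustrative.
        Settings settings = Settings.builder()
            .put("cluster.routing.allocation.awareness.attributes", "zone,rack")
            .put("cluster.routing.allocation.awareness.attributes.0", "zone")
            .build();
        try {
            settings.getAsArray("cluster.routing.allocation.awareness.attributes", Strings.EMPTY_ARRAY, true);
        } catch (IllegalStateException e) {
            // "settings object contains values for [...=zone,rack] and [....0=zone]"
        }
    }
}
-------------------------------------------------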
@ -1048,13 +1063,11 @@ public final class Settings implements ToXContent {
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
public Builder putProperties(Map<String, String> esSettings, Predicate<String> keyPredicate, Function<String, String> keyFunction) {
|
public Builder putProperties(final Map<String, String> esSettings, final Function<String, String> keyFunction) {
|
||||||
for (final Map.Entry<String, String> esSetting : esSettings.entrySet()) {
|
for (final Map.Entry<String, String> esSetting : esSettings.entrySet()) {
|
||||||
final String key = esSetting.getKey();
|
final String key = esSetting.getKey();
|
||||||
if (keyPredicate.test(key)) {
|
|
||||||
map.put(keyFunction.apply(key), esSetting.getValue());
|
map.put(keyFunction.apply(key), esSetting.getValue());
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -22,7 +22,7 @@ package org.elasticsearch.common.xcontent;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.elasticsearch.common.bytes.BytesReference;
|
import org.elasticsearch.common.bytes.BytesReference;
|
||||||
import org.elasticsearch.common.geo.GeoPoint;
|
import org.elasticsearch.common.geo.GeoPoint;
|
||||||
import org.elasticsearch.common.io.BytesStream;
|
import org.elasticsearch.common.io.stream.BytesStream;
|
||||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||||
import org.elasticsearch.common.lease.Releasable;
|
import org.elasticsearch.common.lease.Releasable;
|
||||||
import org.elasticsearch.common.text.Text;
|
import org.elasticsearch.common.text.Text;
|
||||||
|
@ -53,7 +53,7 @@ import java.util.concurrent.TimeUnit;
|
||||||
/**
|
/**
|
||||||
* A utility to build XContent (i.e. JSON).
|
* A utility to build XContent (i.e. JSON).
|
||||||
*/
|
*/
|
||||||
public final class XContentBuilder implements BytesStream, Releasable, Flushable {
|
public final class XContentBuilder implements Releasable, Flushable {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new {@link XContentBuilder} using the given {@link XContent} content.
|
* Create a new {@link XContentBuilder} using the given {@link XContent} content.
|
||||||
|
@ -1041,7 +1041,6 @@ public final class XContentBuilder implements BytesStream, Releasable, Flushable
|
||||||
return this.generator;
|
return this.generator;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public BytesReference bytes() {
|
public BytesReference bytes() {
|
||||||
close();
|
close();
|
||||||
return ((BytesStream) bos).bytes();
|
return ((BytesStream) bos).bytes();
|
||||||
|
|
|
@ -111,14 +111,14 @@ public class ElectMasterService extends AbstractComponent {
|
||||||
return minimumMasterNodes;
|
return minimumMasterNodes;
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
|
public int countMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||||
int count = 0;
|
int count = 0;
|
||||||
for (DiscoveryNode node : nodes) {
|
for (DiscoveryNode node : nodes) {
|
||||||
if (node.isMasterNode()) {
|
if (node.isMasterNode()) {
|
||||||
count++;
|
count++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes);
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean hasEnoughCandidates(Collection<MasterCandidate> candidates) {
|
public boolean hasEnoughCandidates(Collection<MasterCandidate> candidates) {
|
||||||
|
@ -149,13 +149,12 @@ public class ElectMasterService extends AbstractComponent {
|
||||||
return activeMasters.stream().min(ElectMasterService::compareNodes).get();
|
return activeMasters.stream().min(ElectMasterService::compareNodes).get();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||||
|
return minimumMasterNodes < 1 || countMasterNodes(nodes) >= minimumMasterNodes;
|
||||||
|
}
|
||||||
|
|
||||||
public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {
|
public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||||
int count = 0;
|
final int count = countMasterNodes(nodes);
|
||||||
for (DiscoveryNode node : nodes) {
|
|
||||||
if (node.isMasterNode()) {
|
|
||||||
count++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return count > 1 && minimumMasterNodes <= count / 2;
|
return count > 1 && minimumMasterNodes <= count / 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
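A standalone sketch of the quorum arithmetic behind countMasterNodes, hasEnoughMasterNodes and hasTooManyMasterNodes above, with illustrative numbers (this is not the ES class itself):

-------------------------------------------------
// Mirrors the refactored ElectMasterService checks as plain arithmetic.
final class QuorumMathSketch {
    static boolean hasEnoughMasterNodes(int masterNodeCount, int minimumMasterNodes) {
        return minimumMasterNodes < 1 || masterNodeCount >= minimumMasterNodes;
    }

    static boolean hasTooManyMasterNodes(int masterNodeCount, int minimumMasterNodes) {
        return masterNodeCount > 1 && minimumMasterNodes <= masterNodeCount / 2;
    }

    public static void main(String[] args) {
        // minimum_master_nodes = 2 with three master-eligible nodes:
        System.out.println(hasEnoughMasterNodes(3, 2));   // true  (3 >= 2)
        System.out.println(hasTooManyMasterNodes(3, 2));  // false (2 > 3 / 2)
    }
}
-------------------------------------------------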
@ -47,6 +47,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
import org.elasticsearch.common.lease.Releasables;
|
import org.elasticsearch.common.lease.Releasables;
|
||||||
|
import org.elasticsearch.common.logging.LoggerMessageFormat;
|
||||||
import org.elasticsearch.common.settings.Setting;
|
import org.elasticsearch.common.settings.Setting;
|
||||||
import org.elasticsearch.common.settings.Setting.Property;
|
import org.elasticsearch.common.settings.Setting.Property;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
|
@ -580,8 +581,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
||||||
final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder);
|
final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder);
|
||||||
|
|
||||||
final ClusterTasksResult.Builder<Task> resultBuilder = ClusterTasksResult.<Task>builder().successes(tasks);
|
final ClusterTasksResult.Builder<Task> resultBuilder = ClusterTasksResult.<Task>builder().successes(tasks);
|
||||||
if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) {
|
if (electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes()) == false) {
|
||||||
rejoin.accept("not enough master nodes");
|
final int masterNodes = electMasterService.countMasterNodes(remainingNodesClusterState.nodes());
|
||||||
|
rejoin.accept(LoggerMessageFormat.format("not enough master nodes (has [{}], but needed [{}])",
|
||||||
|
masterNodes, electMasterService.minimumMasterNodes()));
|
||||||
return resultBuilder.build(currentState);
|
return resultBuilder.build(currentState);
|
||||||
} else {
|
} else {
|
||||||
return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)));
|
return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)));
|
||||||
|
@ -920,7 +923,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
||||||
return winner.getNode();
|
return winner.getNode();
|
||||||
} else {
|
} else {
|
||||||
// if we don't have enough master nodes, we bail, because there are not enough masters to elect from
|
// if we don't have enough master nodes, we bail, because there are not enough masters to elect from
|
||||||
logger.trace("not enough master nodes [{}]", masterCandidates);
|
logger.warn("not enough master nodes discovered during pinging (found [{}], but needed [{}]), pinging again",
|
||||||
|
masterCandidates, electMaster.minimumMasterNodes());
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -49,11 +49,17 @@ import static org.elasticsearch.common.Strings.cleanPath;
|
||||||
// public+forbidden api!
|
// public+forbidden api!
|
||||||
public class Environment {
|
public class Environment {
|
||||||
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope);
|
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope);
|
||||||
public static final Setting<String> PATH_CONF_SETTING = Setting.simpleString("path.conf", Property.NodeScope);
|
public static final Setting<String> DEFAULT_PATH_CONF_SETTING = Setting.simpleString("default.path.conf", Property.NodeScope);
|
||||||
|
public static final Setting<String> PATH_CONF_SETTING =
|
||||||
|
new Setting<>("path.conf", DEFAULT_PATH_CONF_SETTING, Function.identity(), Property.NodeScope);
|
||||||
public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", Property.NodeScope);
|
public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", Property.NodeScope);
|
||||||
|
public static final Setting<List<String>> DEFAULT_PATH_DATA_SETTING =
|
||||||
|
Setting.listSetting("default.path.data", Collections.emptyList(), Function.identity(), Property.NodeScope);
|
||||||
public static final Setting<List<String>> PATH_DATA_SETTING =
|
public static final Setting<List<String>> PATH_DATA_SETTING =
|
||||||
Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope);
|
Setting.listSetting("path.data", DEFAULT_PATH_DATA_SETTING, Function.identity(), Property.NodeScope);
|
||||||
public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", Property.NodeScope);
|
public static final Setting<String> DEFAULT_PATH_LOGS_SETTING = Setting.simpleString("default.path.logs", Property.NodeScope);
|
||||||
|
public static final Setting<String> PATH_LOGS_SETTING =
|
||||||
|
new Setting<>("path.logs", DEFAULT_PATH_LOGS_SETTING, Function.identity(), Property.NodeScope);
|
||||||
public static final Setting<List<String>> PATH_REPO_SETTING =
|
public static final Setting<List<String>> PATH_REPO_SETTING =
|
||||||
Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope);
|
Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope);
|
||||||
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope);
|
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope);
|
||||||
|
@ -115,7 +121,8 @@ public class Environment {
|
||||||
throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured");
|
throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured");
|
||||||
}
|
}
|
||||||
|
|
||||||
if (PATH_CONF_SETTING.exists(settings)) {
|
// this is trappy, Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings)
|
||||||
|
if (PATH_CONF_SETTING.exists(settings) || DEFAULT_PATH_CONF_SETTING.exists(settings)) {
|
||||||
configFile = PathUtils.get(cleanPath(PATH_CONF_SETTING.get(settings)));
|
configFile = PathUtils.get(cleanPath(PATH_CONF_SETTING.get(settings)));
|
||||||
} else {
|
} else {
|
||||||
configFile = homeFile.resolve("config");
|
configFile = homeFile.resolve("config");
|
||||||
|
@ -156,7 +163,9 @@ public class Environment {
|
||||||
} else {
|
} else {
|
||||||
repoFiles = new Path[0];
|
repoFiles = new Path[0];
|
||||||
}
|
}
|
||||||
if (PATH_LOGS_SETTING.exists(settings)) {
|
|
||||||
|
// this is trappy, Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings)
|
||||||
|
if (PATH_LOGS_SETTING.exists(settings) || DEFAULT_PATH_LOGS_SETTING.exists(settings)) {
|
||||||
logsFile = PathUtils.get(cleanPath(PATH_LOGS_SETTING.get(settings)));
|
logsFile = PathUtils.get(cleanPath(PATH_LOGS_SETTING.get(settings)));
|
||||||
} else {
|
} else {
|
||||||
logsFile = homeFile.resolve("logs");
|
logsFile = homeFile.resolve("logs");
|
||||||
|
@ -174,7 +183,9 @@ public class Environment {
|
||||||
|
|
||||||
Settings.Builder finalSettings = Settings.builder().put(settings);
|
Settings.Builder finalSettings = Settings.builder().put(settings);
|
||||||
finalSettings.put(PATH_HOME_SETTING.getKey(), homeFile);
|
finalSettings.put(PATH_HOME_SETTING.getKey(), homeFile);
|
||||||
|
if (PATH_DATA_SETTING.exists(settings)) {
|
||||||
finalSettings.putArray(PATH_DATA_SETTING.getKey(), dataPaths);
|
finalSettings.putArray(PATH_DATA_SETTING.getKey(), dataPaths);
|
||||||
|
}
|
||||||
finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile);
|
finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile);
|
||||||
this.settings = finalSettings.build();
|
this.settings = finalSettings.build();
|
||||||
|
|
||||||
|
|
|
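A minimal sketch of the default.path.* fallback behaviour the hunks above rely on, and why the extra exists() check is needed; the paths are illustrative, not part of this change:

-------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

class DefaultPathFallbackSketch {
    static void demo() {
        // Only the default.* key is present.
        Settings settings = Settings.builder()
            .put("path.home", "/usr/share/elasticsearch")
            .put("default.path.conf", "/etc/elasticsearch")
            .build();
        // get() follows the fallback chain to default.path.conf ...
        String conf = Environment.PATH_CONF_SETTING.get(settings);          // "/etc/elasticsearch"
        // ... but exists() only checks the concrete key, hence the explicit
        // PATH_CONF_SETTING.exists(settings) || DEFAULT_PATH_CONF_SETTING.exists(settings) above.
        boolean explicit = Environment.PATH_CONF_SETTING.exists(settings);  // false
    }
}
-------------------------------------------------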
@ -202,7 +202,7 @@ public final class NodeEnvironment implements Closeable {
|
||||||
for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
|
for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
|
||||||
Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
|
Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
|
||||||
Path dataDir = environment.dataFiles()[dirIndex];
|
Path dataDir = environment.dataFiles()[dirIndex];
|
||||||
Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
|
Path dir = resolveNodePath(dataDir, possibleLockId);
|
||||||
Files.createDirectories(dir);
|
Files.createDirectories(dir);
|
||||||
|
|
||||||
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
|
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
|
||||||
|
@ -268,6 +268,17 @@ public final class NodeEnvironment implements Closeable {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Resolve a specific nodes/{node.id} path for the specified path and node lock id.
|
||||||
|
*
|
||||||
|
* @param path the path
|
||||||
|
* @param nodeLockId the node lock id
|
||||||
|
* @return the resolved path
|
||||||
|
*/
|
||||||
|
public static Path resolveNodePath(final Path path, final int nodeLockId) {
|
||||||
|
return path.resolve(NODES_FOLDER).resolve(Integer.toString(nodeLockId));
|
||||||
|
}
|
||||||
|
|
||||||
/** Returns true if the directory is empty */
|
/** Returns true if the directory is empty */
|
||||||
private static boolean dirEmpty(final Path path) throws IOException {
|
private static boolean dirEmpty(final Path path) throws IOException {
|
||||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
|
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
|
||||||
|
@ -724,6 +735,14 @@ public final class NodeEnvironment implements Closeable {
|
||||||
return nodePaths;
|
return nodePaths;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public int getNodeLockId() {
|
||||||
|
assertEnvIsLocked();
|
||||||
|
if (nodePaths == null || locks == null) {
|
||||||
|
throw new IllegalStateException("node is not configured to store local location");
|
||||||
|
}
|
||||||
|
return nodeLockId;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns all index paths.
|
* Returns all index paths.
|
||||||
*/
|
*/
|
||||||
|
@ -736,6 +755,8 @@ public final class NodeEnvironment implements Closeable {
|
||||||
return indexPaths;
|
return indexPaths;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns all shard paths excluding custom shard path. Note: Shards are only allocated on one of the
|
* Returns all shard paths excluding custom shard path. Note: Shards are only allocated on one of the
|
||||||
* returned paths. The returned array may contain paths to non-existing directories.
|
* returned paths. The returned array may contain paths to non-existing directories.
|
||||||
|
@ -764,6 +785,25 @@ public final class NodeEnvironment implements Closeable {
|
||||||
assertEnvIsLocked();
|
assertEnvIsLocked();
|
||||||
Set<String> indexFolders = new HashSet<>();
|
Set<String> indexFolders = new HashSet<>();
|
||||||
for (NodePath nodePath : nodePaths) {
|
for (NodePath nodePath : nodePaths) {
|
||||||
|
indexFolders.addAll(availableIndexFoldersForPath(nodePath));
|
||||||
|
}
|
||||||
|
return indexFolders;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return all directory names in the nodes/{node.id}/indices directory for the given node path.
|
||||||
|
*
|
||||||
|
* @param nodePath the path
|
||||||
|
* @return all directories that could be indices for the given node path.
|
||||||
|
* @throws IOException if an I/O exception occurs traversing the filesystem
|
||||||
|
*/
|
||||||
|
public Set<String> availableIndexFoldersForPath(final NodePath nodePath) throws IOException {
|
||||||
|
if (nodePaths == null || locks == null) {
|
||||||
|
throw new IllegalStateException("node is not configured to store local location");
|
||||||
|
}
|
||||||
|
assertEnvIsLocked();
|
||||||
|
final Set<String> indexFolders = new HashSet<>();
|
||||||
Path indicesLocation = nodePath.indicesPath;
|
Path indicesLocation = nodePath.indicesPath;
|
||||||
if (Files.isDirectory(indicesLocation)) {
|
if (Files.isDirectory(indicesLocation)) {
|
||||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
|
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
|
||||||
|
@ -774,9 +814,7 @@ public final class NodeEnvironment implements Closeable {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return indexFolders;
|
return indexFolders;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
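A minimal sketch of the new NodeEnvironment.resolveNodePath helper, assuming an illustrative data path (not part of this change):

-------------------------------------------------
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.env.NodeEnvironment;

import java.nio.file.Path;

class NodePathSketch {
    static Path demo() {
        Path dataDir = PathUtils.get("/var/lib/elasticsearch");   // illustrative data path
        // nodes/0 under the data path -- the same layout availableIndexFoldersForPath() scans under "indices"
        return NodeEnvironment.resolveNodePath(dataDir, 0);
    }
}
-------------------------------------------------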
@ -27,18 +27,13 @@ class DeleteVersionValue extends VersionValue {
|
||||||
|
|
||||||
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DeleteVersionValue.class);
|
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DeleteVersionValue.class);
|
||||||
|
|
||||||
private final long time;
|
final long time;
|
||||||
|
|
||||||
DeleteVersionValue(long version, long time) {
|
DeleteVersionValue(long version, long seqNo, long term, long time) {
|
||||||
super(version);
|
super(version, seqNo, term);
|
||||||
this.time = time;
|
this.time = time;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public long getTime() {
|
|
||||||
return this.time;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public boolean isDelete() {
|
public boolean isDelete() {
|
||||||
return true;
|
return true;
|
||||||
|
@ -52,7 +47,9 @@ class DeleteVersionValue extends VersionValue {
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return "DeleteVersionValue{" +
|
return "DeleteVersionValue{" +
|
||||||
"version=" + getVersion() +
|
"version=" + version +
|
||||||
|
", seqNo=" + seqNo +
|
||||||
|
", term=" + term +
|
||||||
",time=" + time +
|
",time=" + time +
|
||||||
'}';
|
'}';
|
||||||
}
|
}
|
||||||
|
|
|
@ -55,8 +55,8 @@ import org.elasticsearch.common.lease.Releasables;
|
||||||
import org.elasticsearch.common.logging.Loggers;
|
import org.elasticsearch.common.logging.Loggers;
|
||||||
import org.elasticsearch.common.lucene.Lucene;
|
import org.elasticsearch.common.lucene.Lucene;
|
||||||
import org.elasticsearch.common.lucene.uid.Versions;
|
import org.elasticsearch.common.lucene.uid.Versions;
|
||||||
import org.elasticsearch.common.lucene.uid.VersionsResolver;
|
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver;
|
||||||
import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion;
|
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
|
||||||
import org.elasticsearch.common.metrics.CounterMetric;
|
import org.elasticsearch.common.metrics.CounterMetric;
|
||||||
import org.elasticsearch.common.unit.TimeValue;
|
import org.elasticsearch.common.unit.TimeValue;
|
||||||
import org.elasticsearch.common.util.concurrent.ReleasableLock;
|
import org.elasticsearch.common.util.concurrent.ReleasableLock;
|
||||||
|
@ -464,7 +464,7 @@ public abstract class Engine implements Closeable {
|
||||||
final Searcher searcher = searcherFactory.apply("get");
|
final Searcher searcher = searcherFactory.apply("get");
|
||||||
final DocIdAndVersion docIdAndVersion;
|
final DocIdAndVersion docIdAndVersion;
|
||||||
try {
|
try {
|
||||||
docIdAndVersion = VersionsResolver.loadDocIdAndVersion(searcher.reader(), get.uid());
|
docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.reader(), get.uid());
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
Releasables.closeWhileHandlingException(searcher);
|
Releasables.closeWhileHandlingException(searcher);
|
||||||
//TODO: A better exception goes here
|
//TODO: A better exception goes here
|
||||||
|
|
|
@ -51,7 +51,8 @@ import org.elasticsearch.common.lucene.LoggerInfoStream;
|
||||||
import org.elasticsearch.common.lucene.Lucene;
|
import org.elasticsearch.common.lucene.Lucene;
|
||||||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||||
import org.elasticsearch.common.lucene.uid.Versions;
|
import org.elasticsearch.common.lucene.uid.Versions;
|
||||||
import org.elasticsearch.common.lucene.uid.VersionsResolver;
|
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver;
|
||||||
|
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo;
|
||||||
import org.elasticsearch.common.metrics.CounterMetric;
|
import org.elasticsearch.common.metrics.CounterMetric;
|
||||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||||
|
@ -298,7 +299,7 @@ public class InternalEngine extends Engine {
|
||||||
throw new IllegalStateException("no translog generation present in commit data but translog is expected to exist");
|
throw new IllegalStateException("no translog generation present in commit data but translog is expected to exist");
|
||||||
}
|
}
|
||||||
if (generation.translogUUID == null) {
|
if (generation.translogUUID == null) {
|
||||||
throw new IndexFormatTooOldException("trasnlog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first");
|
throw new IndexFormatTooOldException("translog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
final Translog translog = new Translog(translogConfig, generation, globalCheckpointSupplier);
|
final Translog translog = new Translog(translogConfig, generation, globalCheckpointSupplier);
|
||||||
|
@ -389,10 +390,10 @@ public class InternalEngine extends Engine {
|
||||||
if (versionValue.isDelete()) {
|
if (versionValue.isDelete()) {
|
||||||
return GetResult.NOT_EXISTS;
|
return GetResult.NOT_EXISTS;
|
||||||
}
|
}
|
||||||
if (get.versionType().isVersionConflictForReads(versionValue.getVersion(), get.version())) {
|
if (get.versionType().isVersionConflictForReads(versionValue.version, get.version())) {
|
||||||
Uid uid = Uid.createUid(get.uid().text());
|
Uid uid = Uid.createUid(get.uid().text());
|
||||||
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(),
|
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(),
|
||||||
get.versionType().explainConflictForReads(versionValue.getVersion(), get.version()));
|
get.versionType().explainConflictForReads(versionValue.version, get.version()));
|
||||||
}
|
}
|
||||||
refresh("realtime_get");
|
refresh("realtime_get");
|
||||||
}
|
}
|
||||||
|
@ -416,6 +417,43 @@ public class InternalEngine extends Engine {
|
||||||
LUCENE_DOC_NOT_FOUND
|
LUCENE_DOC_NOT_FOUND
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) throws IOException {
|
||||||
|
assert op.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "resolving ops based on seq# but no seqNo is found";
|
||||||
|
final OpVsLuceneDocStatus status;
|
||||||
|
final VersionValue versionValue = versionMap.getUnderLock(op.uid());
|
||||||
|
assert incrementVersionLookup();
|
||||||
|
if (versionValue != null) {
|
||||||
|
if (op.seqNo() > versionValue.seqNo ||
|
||||||
|
(op.seqNo() == versionValue.seqNo && op.primaryTerm() > versionValue.term)) {
|
||||||
|
status = OpVsLuceneDocStatus.OP_NEWER;
|
||||||
|
} else {
|
||||||
|
status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// load from index
|
||||||
|
assert incrementIndexVersionLookup();
|
||||||
|
try (Searcher searcher = acquireSearcher("load_seq_no")) {
|
||||||
|
DocIdAndSeqNo docAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), op.uid());
|
||||||
|
if (docAndSeqNo == null) {
|
||||||
|
status = OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND;
|
||||||
|
} else if (op.seqNo() > docAndSeqNo.seqNo) {
|
||||||
|
status = OpVsLuceneDocStatus.OP_NEWER;
|
||||||
|
} else if (op.seqNo() == docAndSeqNo.seqNo) {
|
||||||
|
// load term to tie break
|
||||||
|
final long existingTerm = VersionsAndSeqNoResolver.loadPrimaryTerm(docAndSeqNo);
|
||||||
|
if (op.primaryTerm() > existingTerm) {
|
||||||
|
status = OpVsLuceneDocStatus.OP_NEWER;
|
||||||
|
} else {
|
||||||
|
status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return status;
|
||||||
|
}
|
||||||
|
|
||||||
/** resolves the current version of the document, returning null if not found */
|
/** resolves the current version of the document, returning null if not found */
|
||||||
private VersionValue resolveDocVersion(final Operation op) throws IOException {
|
private VersionValue resolveDocVersion(final Operation op) throws IOException {
|
||||||
assert incrementVersionLookup(); // used for asserting in tests
|
assert incrementVersionLookup(); // used for asserting in tests
|
||||||
|
@ -424,11 +462,10 @@ public class InternalEngine extends Engine {
|
||||||
assert incrementIndexVersionLookup(); // used for asserting in tests
|
assert incrementIndexVersionLookup(); // used for asserting in tests
|
||||||
final long currentVersion = loadCurrentVersionFromIndex(op.uid());
|
final long currentVersion = loadCurrentVersionFromIndex(op.uid());
|
||||||
if (currentVersion != Versions.NOT_FOUND) {
|
if (currentVersion != Versions.NOT_FOUND) {
|
||||||
versionValue = new VersionValue(currentVersion);
|
versionValue = new VersionValue(currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0L);
|
||||||
}
|
}
|
||||||
} else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() &&
|
} else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() &&
|
||||||
(engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.getTime()) >
|
(engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue)versionValue).time) > getGcDeletesInMillis()) {
|
||||||
getGcDeletesInMillis()) {
|
|
||||||
versionValue = null;
|
versionValue = null;
|
||||||
}
|
}
|
||||||
return versionValue;
|
return versionValue;
|
||||||
|
@ -436,12 +473,13 @@ public class InternalEngine extends Engine {
|
||||||
|
|
||||||
private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnVersions(final Operation op)
|
private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnVersions(final Operation op)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
|
assert op.seqNo() == SequenceNumbersService.UNASSIGNED_SEQ_NO : "op is resolved based on versions but has a seq#";
|
||||||
assert op.version() >= 0 : "versions should be non-negative. got " + op.version();
|
assert op.version() >= 0 : "versions should be non-negative. got " + op.version();
|
||||||
final VersionValue versionValue = resolveDocVersion(op);
|
final VersionValue versionValue = resolveDocVersion(op);
|
||||||
if (versionValue == null) {
|
if (versionValue == null) {
|
||||||
return OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND;
|
return OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND;
|
||||||
} else {
|
} else {
|
||||||
return op.versionType().isVersionConflictForWrites(versionValue.getVersion(), op.version(), versionValue.isDelete()) ?
|
return op.versionType().isVersionConflictForWrites(versionValue.version, op.version(), versionValue.isDelete()) ?
|
||||||
OpVsLuceneDocStatus.OP_STALE_OR_EQUAL : OpVsLuceneDocStatus.OP_NEWER;
|
OpVsLuceneDocStatus.OP_STALE_OR_EQUAL : OpVsLuceneDocStatus.OP_NEWER;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -601,7 +639,16 @@ public class InternalEngine extends Engine {
|
||||||
// unlike the primary, replicas don't really care about the creation status of documents
|
// unlike the primary, replicas don't really care about the creation status of documents
|
||||||
// this allows to ignore the case where a document was found in the live version maps in
|
// this allows to ignore the case where a document was found in the live version maps in
|
||||||
// a delete state and return false for the created flag in favor of code simplicity
|
// a delete state and return false for the created flag in favor of code simplicity
|
||||||
final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnVersions(index);
|
final OpVsLuceneDocStatus opVsLucene;
|
||||||
|
if (index.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||||
|
opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index);
|
||||||
|
} else {
|
||||||
|
// This can happen if the primary is still on an old node and sends traffic without seq# or we recover from a translog
|
||||||
|
// created by an old version.
|
||||||
|
assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) :
|
||||||
|
"index is newly created but op has no sequence numbers. op: " + index;
|
||||||
|
opVsLucene = compareOpToLuceneDocBasedOnVersions(index);
|
||||||
|
}
|
||||||
if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
|
if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
|
||||||
plan = IndexingStrategy.processButSkipLucene(false, index.seqNo(), index.version());
|
plan = IndexingStrategy.processButSkipLucene(false, index.seqNo(), index.version());
|
||||||
} else {
|
} else {
|
||||||
|
@ -633,7 +680,7 @@ public class InternalEngine extends Engine {
|
||||||
currentVersion = Versions.NOT_FOUND;
|
currentVersion = Versions.NOT_FOUND;
|
||||||
currentNotFoundOrDeleted = true;
|
currentNotFoundOrDeleted = true;
|
||||||
} else {
|
} else {
|
||||||
currentVersion = versionValue.getVersion();
|
currentVersion = versionValue.version;
|
||||||
currentNotFoundOrDeleted = versionValue.isDelete();
|
currentNotFoundOrDeleted = versionValue.isDelete();
|
||||||
}
|
}
|
||||||
if (index.versionType().isVersionConflictForWrites(
|
if (index.versionType().isVersionConflictForWrites(
|
||||||
|
@ -671,9 +718,9 @@ public class InternalEngine extends Engine {
|
||||||
assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false);
|
assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false);
|
||||||
index(index.docs(), indexWriter);
|
index(index.docs(), indexWriter);
|
||||||
}
|
}
|
||||||
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(plan.versionForIndexing));
|
versionMap.putUnderLock(index.uid().bytes(),
|
||||||
return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing,
|
new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm()));
|
||||||
plan.currentNotFoundOrDeleted);
|
return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
|
||||||
} catch (Exception ex) {
|
} catch (Exception ex) {
|
||||||
if (indexWriter.getTragicException() == null) {
|
if (indexWriter.getTragicException() == null) {
|
||||||
/* There is no tragic event recorded so this must be a document failure.
|
/* There is no tragic event recorded so this must be a document failure.
|
||||||
|
@ -873,7 +920,14 @@ public class InternalEngine extends Engine {
|
||||||
// unlike the primary, replicas don't really care about the found status of documents
|
// unlike the primary, replicas don't really care about the found status of documents
|
||||||
// this allows to ignore the case where a document was found in the live version maps in
|
// this allows to ignore the case where a document was found in the live version maps in
|
||||||
// a delete state and return true for the found flag in favor of code simplicity
|
// a delete state and return true for the found flag in favor of code simplicity
|
||||||
final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnVersions(delete);
|
final OpVsLuceneDocStatus opVsLucene;
|
||||||
|
if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||||
|
opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete);
|
||||||
|
} else {
|
||||||
|
assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) :
|
||||||
|
"index is newly created but op has no sequence numbers. op: " + delete;
|
||||||
|
opVsLucene = compareOpToLuceneDocBasedOnVersions(delete);
|
||||||
|
}
|
||||||
|
|
||||||
final DeletionStrategy plan;
|
final DeletionStrategy plan;
|
||||||
if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
|
if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
|
||||||
|
@ -898,7 +952,7 @@ public class InternalEngine extends Engine {
|
||||||
currentVersion = Versions.NOT_FOUND;
|
currentVersion = Versions.NOT_FOUND;
|
||||||
currentlyDeleted = true;
|
currentlyDeleted = true;
|
||||||
} else {
|
} else {
|
||||||
currentVersion = versionValue.getVersion();
|
currentVersion = versionValue.version;
|
||||||
currentlyDeleted = versionValue.isDelete();
|
currentlyDeleted = versionValue.isDelete();
|
||||||
}
|
}
|
||||||
final DeletionStrategy plan;
|
final DeletionStrategy plan;
|
||||||
|
@ -923,7 +977,7 @@ public class InternalEngine extends Engine {
|
||||||
indexWriter.deleteDocuments(delete.uid());
|
indexWriter.deleteDocuments(delete.uid());
|
||||||
}
|
}
|
||||||
versionMap.putUnderLock(delete.uid().bytes(),
|
versionMap.putUnderLock(delete.uid().bytes(),
|
||||||
new DeleteVersionValue(plan.versionOfDeletion,
|
new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(),
|
||||||
engineConfig.getThreadPool().relativeTimeInMillis()));
|
engineConfig.getThreadPool().relativeTimeInMillis()));
|
||||||
return new DeleteResult(
|
return new DeleteResult(
|
||||||
plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
|
plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
|
||||||
|
@ -1179,12 +1233,12 @@ public class InternalEngine extends Engine {
|
||||||
try {
|
try {
|
||||||
translog.prepareCommit();
|
translog.prepareCommit();
|
||||||
logger.trace("starting commit for flush; commitTranslog=true");
|
logger.trace("starting commit for flush; commitTranslog=true");
|
||||||
commitIndexWriter(indexWriter, translog, null);
|
final long committedGeneration = commitIndexWriter(indexWriter, translog, null);
|
||||||
logger.trace("finished commit for flush");
|
logger.trace("finished commit for flush");
|
||||||
// we need to refresh in order to clear older version values
|
// we need to refresh in order to clear older version values
|
||||||
refresh("version_table_flush");
|
refresh("version_table_flush");
|
||||||
// after refresh documents can be retrieved from the index so we can now commit the translog
|
// after refresh documents can be retrieved from the index so we can now commit the translog
|
||||||
translog.commit();
|
translog.commit(committedGeneration);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
throw new FlushFailedEngineException(shardId, e);
|
throw new FlushFailedEngineException(shardId, e);
|
||||||
}
|
}
|
||||||
|
@ -1235,14 +1289,14 @@ public class InternalEngine extends Engine {
|
||||||
// TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock...
|
// TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock...
|
||||||
|
|
||||||
// we only need to prune the deletes map; the current/old version maps are cleared on refresh:
|
// we only need to prune the deletes map; the current/old version maps are cleared on refresh:
|
||||||
for (Map.Entry<BytesRef, VersionValue> entry : versionMap.getAllTombstones()) {
|
for (Map.Entry<BytesRef, DeleteVersionValue> entry : versionMap.getAllTombstones()) {
|
||||||
BytesRef uid = entry.getKey();
|
BytesRef uid = entry.getKey();
|
||||||
try (Releasable ignored = acquireLock(uid)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
|
try (Releasable ignored = acquireLock(uid)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
|
||||||
|
|
||||||
// Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator:
|
// Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator:
|
||||||
VersionValue versionValue = versionMap.getTombstoneUnderLock(uid);
|
DeleteVersionValue versionValue = versionMap.getTombstoneUnderLock(uid);
|
||||||
if (versionValue != null) {
|
if (versionValue != null) {
|
||||||
if (timeMSec - versionValue.getTime() > getGcDeletesInMillis()) {
|
if (timeMSec - versionValue.time > getGcDeletesInMillis()) {
|
||||||
versionMap.removeTombstoneUnderLock(uid);
|
versionMap.removeTombstoneUnderLock(uid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1490,7 +1544,7 @@ public class InternalEngine extends Engine {
|
||||||
private long loadCurrentVersionFromIndex(Term uid) throws IOException {
|
private long loadCurrentVersionFromIndex(Term uid) throws IOException {
|
||||||
assert incrementIndexVersionLookup();
|
assert incrementIndexVersionLookup();
|
||||||
try (Searcher searcher = acquireSearcher("load_version")) {
|
try (Searcher searcher = acquireSearcher("load_version")) {
|
||||||
return VersionsResolver.loadVersion(searcher.reader(), uid);
|
return VersionsAndSeqNoResolver.loadVersion(searcher.reader(), uid);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
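The hunks above switch the engine's conflict check from version comparison to sequence-number comparison whenever the incoming operation carries a sequence number, falling back to versions only for operations from pre-6.0 nodes or old translogs. The standalone sketch below illustrates that decision rule only; the class names, the sentinel value, and the primary-term tie-break are illustrative assumptions, not the Elasticsearch implementation.

-------------------------------------------------
// Minimal sketch: prefer seq#-based comparison when the op carries one, else compare versions.
// Op, LuceneDoc, and UNASSIGNED_SEQ_NO are hypothetical stand-ins, not Elasticsearch types.
public class OpVsDocSketch {

    static final long UNASSIGNED_SEQ_NO = -2;

    enum Status { OP_NEWER, OP_STALE_OR_EQUAL }

    record Op(long seqNo, long primaryTerm, long version) {}
    record LuceneDoc(long seqNo, long primaryTerm, long version) {}

    static Status compare(Op op, LuceneDoc doc) {
        if (op.seqNo() != UNASSIGNED_SEQ_NO) {
            if (op.seqNo() > doc.seqNo()) {
                return Status.OP_NEWER;
            }
            // tie-break on primary term is an assumption made for this sketch
            if (op.seqNo() == doc.seqNo() && op.primaryTerm() > doc.primaryTerm()) {
                return Status.OP_NEWER;
            }
            return Status.OP_STALE_OR_EQUAL;
        }
        // BWC path: no sequence number on the op, fall back to version comparison
        return op.version() > doc.version() ? Status.OP_NEWER : Status.OP_STALE_OR_EQUAL;
    }

    public static void main(String[] args) {
        System.out.println(compare(new Op(7, 1, 3), new LuceneDoc(5, 1, 9)));                  // OP_NEWER by seq#
        System.out.println(compare(new Op(UNASSIGNED_SEQ_NO, 1, 3), new LuceneDoc(5, 1, 9)));  // stale by version
    }
}
-------------------------------------------------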
@@ -1680,55 +1734,65 @@ public class InternalEngine extends Engine {
         }
     }

-    private void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
+    /**
+     * Commits the specified index writer.
+     *
+     * @param writer the index writer to commit
+     * @param translog the translog
+     * @param syncId the sync flush ID ({@code null} if not committing a synced flush)
+     * @return the minimum translog generation for the local checkpoint committed with the specified index writer
+     * @throws IOException if an I/O exception occurs committing the specfied writer
+     */
+    private long commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException {
         ensureCanFlush();
         try {
-            Translog.TranslogGeneration translogGeneration = translog.getGeneration();
-            final String translogFileGen = Long.toString(translogGeneration.translogFileGeneration);
+            final long localCheckpoint = seqNoService().getLocalCheckpoint();
+            final Translog.TranslogGeneration translogGeneration = translog.getMinGenerationForSeqNo(localCheckpoint + 1);
+            final String translogFileGeneration = Long.toString(translogGeneration.translogFileGeneration);
             final String translogUUID = translogGeneration.translogUUID;
-            final String localCheckpoint = Long.toString(seqNoService().getLocalCheckpoint());
+            final String localCheckpointValue = Long.toString(localCheckpoint);

             writer.setLiveCommitData(() -> {
                 /*
                  * The user data captured above (e.g. local checkpoint) contains data that must be evaluated *before* Lucene flushes
-                 * segments, including the local checkpoint amongst other values. The maximum sequence number is different - we never want
+                 * segments, including the local checkpoint amongst other values. The maximum sequence number is different, we never want
                  * the maximum sequence number to be less than the last sequence number to go into a Lucene commit, otherwise we run the
                  * risk of re-using a sequence number for two different documents when restoring from this commit point and subsequently
                  * writing new documents to the index. Since we only know which Lucene documents made it into the final commit after the
-                 * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the max_seq_no to the time of invocation
-                 * of the commit data iterator (which occurs after all documents have been flushed to Lucene).
+                 * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the maximum sequence number to the time
+                 * of invocation of the commit data iterator (which occurs after all documents have been flushed to Lucene).
                  */
-                final Map<String, String> commitData = new HashMap<>(6);
-                commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGen);
+                final Map<String, String> commitData = new HashMap<>(5);
+                commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGeneration);
                 commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID);
-                commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpoint);
+                commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpointValue);
                 if (syncId != null) {
                     commitData.put(Engine.SYNC_COMMIT_ID, syncId);
                 }
                 commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqNoService().getMaxSeqNo()));
-                if (logger.isTraceEnabled()) {
                     logger.trace("committing writer with commit data [{}]", commitData);
-                }
                 return commitData.entrySet().iterator();
             });

             writer.commit();
-        } catch (Exception ex) {
+            return translogGeneration.translogFileGeneration;
+        } catch (final Exception ex) {
             try {
                 failEngine("lucene commit failed", ex);
-            } catch (Exception inner) {
+            } catch (final Exception inner) {
                 ex.addSuppressed(inner);
             }
             throw ex;
-        } catch (AssertionError e) {
-            // IndexWriter throws AssertionError on commit, if asserts are enabled, if any files don't exist, but tests that
-            // randomly throw FNFE/NSFE can also hit this:
+        } catch (final AssertionError e) {
+            /*
+             * If assertions are enabled, IndexWriter throws AssertionError on commit if any files don't exist, but tests that randomly
+             * throw FileNotFoundException or NoSuchFileException can also hit this.
+             */
             if (ExceptionsHelper.stackTrace(e).contains("org.apache.lucene.index.IndexWriter.filesExist")) {
-                EngineException engineException = new EngineException(shardId, "failed to commit engine", e);
+                final EngineException engineException = new EngineException(shardId, "failed to commit engine", e);
                 try {
                     failEngine("lucene commit failed", engineException);
-                } catch (Exception inner) {
+                } catch (final Exception inner) {
                     engineException.addSuppressed(inner);
                 }
                 throw engineException;
@@ -1812,7 +1876,7 @@ public class InternalEngine extends Engine {
      * Gets the commit data from {@link IndexWriter} as a map.
      */
     private static Map<String, String> commitDataAsMap(final IndexWriter indexWriter) {
-        Map<String, String> commitData = new HashMap<>(6);
+        Map<String, String> commitData = new HashMap<>(5);
         for (Map.Entry<String, String> entry : indexWriter.getLiveCommitData()) {
             commitData.put(entry.getKey(), entry.getValue());
         }
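The commitIndexWriter hunk above installs the commit metadata as a lazily evaluated iterator so that the maximum sequence number is read only after Lucene has flushed all documents, while the local checkpoint and translog generation are captured up front. Below is a minimal sketch of that deferral pattern; the CommitTarget interface is a hypothetical stand-in for the real IndexWriter API, not Elasticsearch or Lucene code.

-------------------------------------------------
// Sketch of deferred commit data: the supplier is consulted only when the commit happens,
// so late-arriving values (here, max_seq_no) are read at commit time.
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

public class DeferredCommitDataSketch {

    interface CommitTarget {
        void setLiveCommitData(Supplier<Iterator<Map.Entry<String, String>>> commitData);
    }

    public static void main(String[] args) {
        AtomicLong maxSeqNo = new AtomicLong(10);
        Map<String, Supplier<Iterator<Map.Entry<String, String>>>> installed = new HashMap<>();

        CommitTarget writer = supplier -> installed.put("live", supplier);
        writer.setLiveCommitData(() -> {
            Map<String, String> commitData = new HashMap<>();
            commitData.put("local_checkpoint", "10");                     // captured eagerly, as in the diff
            commitData.put("max_seq_no", Long.toString(maxSeqNo.get()));  // read lazily when the iterator is pulled
            return commitData.entrySet().iterator();
        });

        maxSeqNo.set(42); // more documents arrive before the actual commit
        installed.get("live").get().forEachRemaining(e -> System.out.println(e.getKey() + "=" + e.getValue()));
        // the max_seq_no entry prints 42, showing the value is computed at commit time
    }
}
-------------------------------------------------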
@@ -55,7 +55,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
     }

     // All deletes also go here, and delete "tombstones" are retained after refresh:
-    private final Map<BytesRef,VersionValue> tombstones = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
+    private final Map<BytesRef,DeleteVersionValue> tombstones = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();

     private volatile Maps maps = new Maps();

@@ -180,7 +180,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
         final VersionValue prevTombstone;
         if (version.isDelete()) {
             // Also enroll the delete into tombstones, and account for its RAM too:
-            prevTombstone = tombstones.put(uid, version);
+            prevTombstone = tombstones.put(uid, (DeleteVersionValue)version);

             // We initially account for BytesRef/VersionValue RAM for a delete against the tombstones, because this RAM will not be freed up
             // on refresh. Later, in removeTombstoneUnderLock, if we clear the tombstone entry but the delete remains in current, we shift
@@ -225,12 +225,12 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
     }

     /** Caller has a lock, so that this uid will not be concurrently added/deleted by another thread. */
-    VersionValue getTombstoneUnderLock(BytesRef uid) {
+    DeleteVersionValue getTombstoneUnderLock(BytesRef uid) {
         return tombstones.get(uid);
     }

     /** Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones (exposed via reader but not yet GC'd). */
-    Iterable<Map.Entry<BytesRef,VersionValue>> getAllTombstones() {
+    Iterable<Map.Entry<BytesRef, DeleteVersionValue>> getAllTombstones() {
         return tombstones.entrySet();
     }
@@ -30,18 +30,17 @@ class VersionValue implements Accountable {
     private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(VersionValue.class);

     /** the version of the document. used for versioned indexed operations and as a BWC layer, where no seq# are set yet */
-    private final long version;
+    final long version;

-    VersionValue(long version) {
+    /** the seq number of the operation that last changed the associated uuid */
+    final long seqNo;
+    /** the the term of the operation that last changed the associated uuid */
+    final long term;
+
+    VersionValue(long version, long seqNo, long term) {
         this.version = version;
-    }
-
-    public long getTime() {
-        throw new UnsupportedOperationException();
-    }
-
-    public long getVersion() {
-        return version;
+        this.seqNo = seqNo;
+        this.term = term;
     }

     public boolean isDelete() {
@@ -61,6 +60,9 @@ class VersionValue implements Accountable {
     @Override
     public String toString() {
         return "VersionValue{" +
-            "version=" + version + "}";
+            "version=" + version +
+            ", seqNo=" + seqNo +
+            ", term=" + term +
+            '}';
     }
 }
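Taken together, the LiveVersionMap and VersionValue hunks give every live-version entry a (version, seqNo, term) triple and type the tombstone map to the delete subtype, so the deletion timestamp can be read directly instead of through the old getTime() that threw for non-deletes. A rough sketch of that shape, with illustrative names rather than the Elasticsearch classes:

-------------------------------------------------
// Sketch: version entries carry (version, seqNo, term); delete tombstones add a timestamp used for GC.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class VersionEntry {
    final long version, seqNo, term;
    VersionEntry(long version, long seqNo, long term) {
        this.version = version; this.seqNo = seqNo; this.term = term;
    }
    boolean isDelete() { return false; }
}

class DeleteEntry extends VersionEntry {
    final long time; // when the delete was recorded, in millis
    DeleteEntry(long version, long seqNo, long term, long time) {
        super(version, seqNo, term);
        this.time = time;
    }
    @Override boolean isDelete() { return true; }
}

class TombstoneMap {
    // Typing the map to the delete subtype (as the diff does) removes casts and failing accessors.
    private final Map<String, DeleteEntry> tombstones = new ConcurrentHashMap<>();

    void putDelete(String uid, DeleteEntry entry) { tombstones.put(uid, entry); }

    // Drop tombstones older than gcDeletesInMillis, mirroring the pruning loop in the engine hunk.
    void prune(long nowMillis, long gcDeletesInMillis) {
        tombstones.entrySet().removeIf(e -> nowMillis - e.getValue().time > gcDeletesInMillis);
    }

    public static void main(String[] args) {
        TombstoneMap map = new TombstoneMap();
        map.putDelete("doc-1", new DeleteEntry(3, 17, 1, 1_000));
        map.prune(10_000, 5_000);                  // 10_000 - 1_000 > 5_000, so the tombstone is dropped
        System.out.println(map.tombstones.size()); // prints 0
    }
}
-------------------------------------------------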
@@ -24,7 +24,7 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion;
+import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
 import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.metrics.MeanMetric;
 import org.elasticsearch.common.util.set.Sets;
@@ -254,12 +254,12 @@ public abstract class ParseContext {
         }

         @Override
-        public SeqNoFieldMapper.SequenceID seqID() {
+        public SeqNoFieldMapper.SequenceIDFields seqID() {
             return in.seqID();
         }

         @Override
-        public void seqID(SeqNoFieldMapper.SequenceID seqID) {
+        public void seqID(SeqNoFieldMapper.SequenceIDFields seqID) {
             in.seqID(seqID);
         }

@@ -310,7 +310,7 @@ public abstract class ParseContext {

         private Field version;

-        private SeqNoFieldMapper.SequenceID seqID;
+        private SeqNoFieldMapper.SequenceIDFields seqID;

         private final AllEntries allEntries;

@@ -404,12 +404,12 @@ public abstract class ParseContext {
         }

         @Override
-        public SeqNoFieldMapper.SequenceID seqID() {
+        public SeqNoFieldMapper.SequenceIDFields seqID() {
             return this.seqID;
         }

         @Override
-        public void seqID(SeqNoFieldMapper.SequenceID seqID) {
+        public void seqID(SeqNoFieldMapper.SequenceIDFields seqID) {
             this.seqID = seqID;
         }

@@ -539,9 +539,9 @@ public abstract class ParseContext {

     public abstract void version(Field version);

-    public abstract SeqNoFieldMapper.SequenceID seqID();
+    public abstract SeqNoFieldMapper.SequenceIDFields seqID();

-    public abstract void seqID(SeqNoFieldMapper.SequenceID seqID);
+    public abstract void seqID(SeqNoFieldMapper.SequenceIDFields seqID);

     public final boolean includeInAll(Boolean includeInAll, FieldMapper mapper) {
         return includeInAll(includeInAll, mapper.fieldType().indexOptions() != IndexOptions.NONE);
@@ -36,7 +36,7 @@ public class ParsedDocument {

     private final String id, type;
     private final BytesRef uid;
-    private final SeqNoFieldMapper.SequenceID seqID;
+    private final SeqNoFieldMapper.SequenceIDFields seqID;

     private final String routing;

@@ -50,7 +50,7 @@ public class ParsedDocument {
     private String parent;

     public ParsedDocument(Field version,
-                          SeqNoFieldMapper.SequenceID seqID,
+                          SeqNoFieldMapper.SequenceIDFields seqID,
                           String id,
                           String type,
                           String routing,
@@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
@@ -66,13 +65,13 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
      * A sequence ID, which is made up of a sequence number (both the searchable
      * and doc_value version of the field) and the primary term.
      */
-    public static class SequenceID {
+    public static class SequenceIDFields {

         public final Field seqNo;
         public final Field seqNoDocValue;
         public final Field primaryTerm;

-        public SequenceID(Field seqNo, Field seqNoDocValue, Field primaryTerm) {
+        public SequenceIDFields(Field seqNo, Field seqNoDocValue, Field primaryTerm) {
             Objects.requireNonNull(seqNo, "sequence number field cannot be null");
             Objects.requireNonNull(seqNoDocValue, "sequence number dv field cannot be null");
             Objects.requireNonNull(primaryTerm, "primary term field cannot be null");
@@ -81,9 +80,9 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
             this.primaryTerm = primaryTerm;
         }

-        public static SequenceID emptySeqID() {
-            return new SequenceID(new LongPoint(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
-                new SortedNumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
+        public static SequenceIDFields emptySeqID() {
+            return new SequenceIDFields(new LongPoint(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
+                new NumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO),
                 new NumericDocValuesField(PRIMARY_TERM_NAME, 0));
         }
     }
@@ -242,7 +241,7 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
     protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
         // see InternalEngine.innerIndex to see where the real version value is set
         // also see ParsedDocument.updateSeqID (called by innerIndex)
-        SequenceID seqID = SequenceID.emptySeqID();
+        SequenceIDFields seqID = SequenceIDFields.emptySeqID();
         context.seqID(seqID);
         fields.add(seqID.seqNo);
         fields.add(seqID.seqNoDocValue);
@@ -264,7 +263,7 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
         for (int i = 1; i < context.docs().size(); i++) {
             final Document doc = context.docs().get(i);
             doc.add(new LongPoint(NAME, 1));
-            doc.add(new SortedNumericDocValuesField(NAME, 1L));
+            doc.add(new NumericDocValuesField(NAME, 1L));
             doc.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0L));
         }
     }
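The SeqNoFieldMapper hunks switch the doc-values copy of the sequence number from SortedNumericDocValuesField to a single-valued NumericDocValuesField. The sketch below only shows the resulting per-document field layout using plain Lucene field classes; the field names and the sentinel value are illustrative, not the mapper's constants.

-------------------------------------------------
// Sketch of the per-document sequence-ID fields: a LongPoint for search plus single-valued
// NumericDocValuesFields for the sequence number and the primary term.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;

public class SeqNoFieldsSketch {
    public static void main(String[] args) {
        final long unassignedSeqNo = -2L; // placeholder sentinel for "not yet assigned"
        Document doc = new Document();
        doc.add(new LongPoint("_seq_no", unassignedSeqNo));              // searchable point value
        doc.add(new NumericDocValuesField("_seq_no", unassignedSeqNo));  // doc-values copy
        doc.add(new NumericDocValuesField("_primary_term", 0L));
        doc.forEach(field -> System.out.println(field.name() + " -> " + field));
    }
}
-------------------------------------------------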
@@ -34,7 +34,7 @@ import org.elasticsearch.action.termvectors.TermVectorsResponse;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion;
+import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
@@ -85,14 +85,14 @@ import java.util.stream.Stream;
 * When a translog is opened the checkpoint is use to retrieve the latest translog file generation and subsequently to open the last written file to recovery operations.
 * The {@link org.elasticsearch.index.translog.Translog.TranslogGeneration}, given when the translog is opened / constructed is compared against
 * the latest generation and all consecutive translog files singe the given generation and the last generation in the checkpoint will be recovered and preserved until the next
-* generation is committed using {@link Translog#commit()}. In the common case the translog file generation in the checkpoint and the generation passed to the translog on creation are
-* the same. The only situation when they can be different is when an actual translog commit fails in between {@link Translog#prepareCommit()} and {@link Translog#commit()}. In such a case
+* generation is committed using {@link Translog#commit(long)}. In the common case the translog file generation in the checkpoint and the generation passed to the translog on creation are
+* the same. The only situation when they can be different is when an actual translog commit fails in between {@link Translog#prepareCommit()} and {@link Translog#commit(long)}. In such a case
 * the currently being committed translog file will not be deleted since it's commit was not successful. Yet, a new/current translog file is already opened at that point such that there is more than
 * one translog file present. Such an uncommitted translog file always has a <tt>translog-${gen}.ckp</tt> associated with it which is an fsynced copy of the it's last <tt>translog.ckp</tt> such that in
 * disaster recovery last fsynced offsets, number of operation etc. are still preserved.
 * </p>
 */
-public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable, TwoPhaseCommit {
+public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable {

    /*
     * TODO
@@ -439,7 +439,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             }
             throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e);
         } finally {
-            Releasables.close(out.bytes());
+            Releasables.close(out);
         }
     }

@@ -804,6 +804,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC

         long seqNo();

+        long primaryTerm();
+
         /**
          * Reads the type and the operation from the given stream. The operation must be written with
          * {@link Operation#writeType(Operation, StreamOutput)}
@@ -953,6 +955,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             return seqNo;
         }

+        @Override
         public long primaryTerm() {
             return primaryTerm;
         }
@@ -1104,6 +1107,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             return seqNo;
         }

+        @Override
         public long primaryTerm() {
             return primaryTerm;
         }
@@ -1180,6 +1184,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             return seqNo;
         }

+        @Override
         public long primaryTerm() {
             return primaryTerm;
         }
@@ -1332,7 +1337,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                     bytes.writeTo(outStream);
                 }
             } finally {
-                Releasables.close(out.bytes());
+                Releasables.close(out);
             }

         }
@@ -1347,6 +1352,31 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         out.writeInt((int) checksum);
     }

+    /**
+     * Gets the minimum generation that could contain any sequence number after the specified sequence number, or the current generation if
+     * there is no generation that could any such sequence number.
+     *
+     * @param seqNo the sequence number
+     * @return the minimum generation for the sequence number
+     */
+    public TranslogGeneration getMinGenerationForSeqNo(final long seqNo) {
+        try (ReleasableLock ignored = writeLock.acquire()) {
+            /*
+             * When flushing, the engine will ask the translog for the minimum generation that could contain any sequence number after the
+             * local checkpoint. Immediately after flushing, there will be no such generation, so this minimum generation in this case will
+             * be the current translog generation as we do not need any prior generations to have a complete history up to the current local
+             * checkpoint.
+             */
+            long minTranslogFileGeneration = this.currentFileGeneration();
+            for (final TranslogReader reader : readers) {
+                if (seqNo <= reader.getCheckpoint().maxSeqNo) {
+                    minTranslogFileGeneration = Math.min(minTranslogFileGeneration, reader.getGeneration());
+                }
+            }
+            return new TranslogGeneration(translogUUID, minTranslogFileGeneration);
+        }
+    }
+
     /**
      * Roll the current translog generation into a new generation. This does not commit the
      * translog.
@@ -1375,27 +1405,38 @@
         }
     }

-    @Override
-    public long prepareCommit() throws IOException {
+    /**
+     * Prepares a translog commit by setting the current committing generation and rolling the translog generation.
+     *
+     * @throws IOException if an I/O exception occurred while rolling the translog generation
+     */
+    public void prepareCommit() throws IOException {
         try (ReleasableLock ignored = writeLock.acquire()) {
             ensureOpen();
             if (currentCommittingGeneration != NOT_SET_GENERATION) {
-                final String message = String.format(
-                    Locale.ROOT,
-                    "already committing a translog with generation [%d]",
-                    currentCommittingGeneration);
+                final String message =
+                    String.format(Locale.ROOT, "already committing a translog with generation [%d]", currentCommittingGeneration);
                 throw new IllegalStateException(message);
             }
             currentCommittingGeneration = current.getGeneration();
             rollGeneration();
         }
-        return 0;
     }

-    @Override
-    public long commit() throws IOException {
+    /**
+     * Commits the translog and sets the last committed translog generation to the specified generation. The specified committed generation
+     * will be used when trimming unreferenced translog generations such that generations from the committed generation will be preserved.
+     *
+     * If {@link Translog#prepareCommit()} was not called before calling commit, this method will be invoked too causing the translog
+     * generation to be rolled.
+     *
+     * @param committedGeneration the minimum translog generation to preserve after trimming unreferenced generations
+     * @throws IOException if an I/O exception occurred preparing the translog commit
+     */
+    public void commit(final long committedGeneration) throws IOException {
         try (ReleasableLock ignored = writeLock.acquire()) {
             ensureOpen();
+            assert assertCommittedGenerationIsInValidRange(committedGeneration);
             if (currentCommittingGeneration == NOT_SET_GENERATION) {
                 prepareCommit();
             }
@@ -1403,26 +1444,39 @@
             assert readers.stream().anyMatch(r -> r.getGeneration() == currentCommittingGeneration)
                 : "readers missing committing generation [" + currentCommittingGeneration + "]";
             // set the last committed generation otherwise old files will not be cleaned up
-            lastCommittedTranslogFileGeneration = currentCommittingGeneration + 1;
+            lastCommittedTranslogFileGeneration = committedGeneration;
             currentCommittingGeneration = NOT_SET_GENERATION;
             trimUnreferencedReaders();
         }
-        return 0;
     }

+    private boolean assertCommittedGenerationIsInValidRange(final long committedGeneration) {
+        assert committedGeneration <= current.generation
+            : "tried to commit generation [" + committedGeneration + "] after current generation [" + current.generation + "]";
+        final long min = readers.stream().map(TranslogReader::getGeneration).min(Long::compareTo).orElse(Long.MIN_VALUE);
+        assert committedGeneration >= min
+            : "tried to commit generation [" + committedGeneration + "] before minimum generation [" + min + "]";
+        return true;
+    }
+
+    /**
+     * Trims unreferenced translog generations. The guarantee here is that translog generations will be preserved for all outstanding views
+     * and from the last committed translog generation defined by {@link Translog#lastCommittedTranslogFileGeneration}.
+     */
     void trimUnreferencedReaders() {
         try (ReleasableLock ignored = writeLock.acquire()) {
             if (closed.get()) {
-                // we're shutdown potentially on some tragic event - don't delete anything
+                // we're shutdown potentially on some tragic event, don't delete anything
                 return;
             }
-            long minReferencedGen = outstandingViews.stream().mapToLong(View::minTranslogGeneration).min().orElse(Long.MAX_VALUE);
-            minReferencedGen = Math.min(lastCommittedTranslogFileGeneration, minReferencedGen);
-            final long finalMinReferencedGen = minReferencedGen;
-            List<TranslogReader> unreferenced = readers.stream().filter(r -> r.getGeneration() < finalMinReferencedGen).collect(Collectors.toList());
+            long minReferencedGen = Math.min(
+                lastCommittedTranslogFileGeneration,
+                outstandingViews.stream().mapToLong(View::minTranslogGeneration).min().orElse(Long.MAX_VALUE));
+            final List<TranslogReader> unreferenced =
+                readers.stream().filter(r -> r.getGeneration() < minReferencedGen).collect(Collectors.toList());
             for (final TranslogReader unreferencedReader : unreferenced) {
-                Path translogPath = unreferencedReader.path();
-                logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
+                final Path translogPath = unreferencedReader.path();
+                logger.trace("delete translog file [{}], not referenced and not current anymore", translogPath);
                 IOUtils.closeWhileHandlingException(unreferencedReader);
                 IOUtils.deleteFilesIgnoringExceptions(translogPath,
                     translogPath.resolveSibling(getCommitCheckpointFileName(unreferencedReader.getGeneration())));
@@ -1442,13 +1496,6 @@
         }
     }

-    @Override
-    public void rollback() throws IOException {
-        ensureOpen();
-        close();
-    }
-
     /**
      * References a transaction log generation
      */
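The new Translog#getMinGenerationForSeqNo and Translog#commit(long) above let the engine keep exactly the generations that may still hold operations above the local checkpoint. The following self-contained sketch reproduces that selection rule on plain data; the Reader record and the numbers are made up for illustration and are not the Elasticsearch API.

-------------------------------------------------
// Sketch: minimum generation whose checkpoint may still contain a given sequence number,
// or the current generation if none can.
import java.util.List;

public class MinGenerationSketch {

    record Reader(long generation, long maxSeqNo) {}

    static long minGenerationForSeqNo(long seqNo, long currentGeneration, List<Reader> readers) {
        long min = currentGeneration;
        for (Reader reader : readers) {
            if (seqNo <= reader.maxSeqNo()) {
                min = Math.min(min, reader.generation());
            }
        }
        return min;
    }

    public static void main(String[] args) {
        List<Reader> readers = List.of(new Reader(3, 90), new Reader(4, 120), new Reader(5, 180));
        long localCheckpoint = 100;
        // Everything up to seq# 100 is durable; generation 4 is the first one that may still
        // contain seq# 101, so generations below 4 can be trimmed after the commit.
        System.out.println(minGenerationForSeqNo(localCheckpoint + 1, 6, readers)); // prints 4
    }
}
-------------------------------------------------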
@@ -37,17 +37,12 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Function;
-import java.util.function.Predicate;
-import java.util.function.UnaryOperator;

 import static org.elasticsearch.common.Strings.cleanPath;

 public class InternalSettingsPreparer {

     private static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json"};
-    private static final String PROPERTY_DEFAULTS_PREFIX = "default.";
-    private static final Predicate<String> PROPERTY_DEFAULTS_PREDICATE = key -> key.startsWith(PROPERTY_DEFAULTS_PREFIX);
-    private static final UnaryOperator<String> STRIP_PROPERTY_DEFAULTS_PREFIX = key -> key.substring(PROPERTY_DEFAULTS_PREFIX.length());

     public static final String SECRET_PROMPT_VALUE = "${prompt.secret}";
     public static final String TEXT_PROMPT_VALUE = "${prompt.text}";
@@ -125,15 +120,16 @@ public class InternalSettingsPreparer {
     }

     /**
-     * Initializes the builder with the given input settings, and loads system properties settings if allowed.
-     * If loadDefaults is true, system property default settings are loaded.
+     * Initializes the builder with the given input settings, and applies settings from the specified map (these settings typically come
+     * from the command line).
+     *
+     * @param output the settings builder to apply the input and default settings to
+     * @param input the input settings
+     * @param esSettings a map from which to apply settings
      */
-    private static void initializeSettings(Settings.Builder output, Settings input, Map<String, String> esSettings) {
+    static void initializeSettings(final Settings.Builder output, final Settings input, final Map<String, String> esSettings) {
         output.put(input);
-        output.putProperties(esSettings,
-            PROPERTY_DEFAULTS_PREDICATE.and(key -> output.get(STRIP_PROPERTY_DEFAULTS_PREFIX.apply(key)) == null),
-            STRIP_PROPERTY_DEFAULTS_PREFIX);
-        output.putProperties(esSettings, PROPERTY_DEFAULTS_PREDICATE.negate(), Function.identity());
+        output.putProperties(esSettings, Function.identity());
         output.replacePropertyPlaceholders();
     }
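The InternalSettingsPreparer hunks drop the special handling of "default."-prefixed properties; settings supplied via the map are now applied verbatim. A small sketch contrasting the removed behaviour with the new one, using plain maps rather than the Settings builder:

-------------------------------------------------
// Sketch of the two behaviours: the old path stripped the "default." prefix and only applied such
// entries when the plain key was absent; the new path applies everything as given.
import java.util.LinkedHashMap;
import java.util.Map;

public class DefaultSettingsSketch {

    static Map<String, String> oldStyle(Map<String, String> provided) {
        Map<String, String> out = new LinkedHashMap<>();
        provided.forEach((k, v) -> { if (!k.startsWith("default.")) out.put(k, v); });
        provided.forEach((k, v) -> {
            if (k.startsWith("default.")) {
                out.putIfAbsent(k.substring("default.".length()), v); // defaults never override
            }
        });
        return out;
    }

    static Map<String, String> newStyle(Map<String, String> provided) {
        return new LinkedHashMap<>(provided); // everything applied verbatim
    }

    public static void main(String[] args) {
        Map<String, String> provided = Map.of("default.path.data", "/tmp/a", "cluster.name", "dev");
        System.out.println(oldStyle(provided));
        System.out.println(newStyle(provided));
    }
}
-------------------------------------------------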
@@ -50,6 +50,7 @@ import org.elasticsearch.cluster.routing.RoutingService;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.component.Lifecycle;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.inject.Binder;
@@ -58,6 +59,7 @@ import org.elasticsearch.common.inject.Key;
 import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.logging.DeprecationLogger;
@@ -146,7 +148,9 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
@@ -262,6 +266,9 @@ public class Node implements Closeable {
             Logger logger = Loggers.getLogger(Node.class, tmpSettings);
             final String nodeId = nodeEnvironment.nodeId();
             tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId);
+            if (DiscoveryNode.nodeRequiresLocalStorage(tmpSettings)) {
+                checkForIndexDataInDefaultPathData(tmpSettings, nodeEnvironment, logger);
+            }
             // this must be captured after the node name is possibly added to the settings
             final String nodeName = NODE_NAME_SETTING.get(tmpSettings);
             if (hadPredefinedNodeName == false) {
@@ -500,6 +507,58 @@ public class Node implements Closeable {
         }
     }

+    /**
+     * Checks for path.data and default.path.data being configured, and there being index data in any of the paths in default.path.data.
+     *
+     * @param settings the settings to check for path.data and default.path.data
+     * @param nodeEnv the current node environment
+     * @param logger a logger where messages regarding the detection will be logged
+     * @throws IOException if an I/O exception occurs reading the directory structure
+     */
+    static void checkForIndexDataInDefaultPathData(
+            final Settings settings, final NodeEnvironment nodeEnv, final Logger logger) throws IOException {
+        if (!Environment.PATH_DATA_SETTING.exists(settings) || !Environment.DEFAULT_PATH_DATA_SETTING.exists(settings)) {
+            return;
+        }
+
+        boolean clean = true;
+        for (final String defaultPathData : Environment.DEFAULT_PATH_DATA_SETTING.get(settings)) {
+            final Path nodeDirectory = NodeEnvironment.resolveNodePath(getPath(defaultPathData), nodeEnv.getNodeLockId());
+            if (Files.exists(nodeDirectory) == false) {
+                continue;
+            }
+            final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(nodeDirectory);
+            final Set<String> availableIndexFolders = nodeEnv.availableIndexFoldersForPath(nodePath);
+            if (availableIndexFolders.isEmpty()) {
+                continue;
+            }
+            clean = false;
+            logger.error("detected index data in default.path.data [{}] where there should not be any", nodePath.indicesPath);
+            for (final String availableIndexFolder : availableIndexFolders) {
+                logger.info(
+                    "index folder [{}] in default.path.data [{}] must be moved to any of {}",
+                    availableIndexFolder,
+                    nodePath.indicesPath,
+                    Arrays.stream(nodeEnv.nodePaths()).map(np -> np.indicesPath).collect(Collectors.toList()));
+            }
+        }
+
+        if (clean) {
+            return;
+        }
+
+        final String message = String.format(
+            Locale.ROOT,
+            "detected index data in default.path.data %s where there should not be any; check the logs for details",
+            Environment.DEFAULT_PATH_DATA_SETTING.get(settings));
+        throw new IllegalStateException(message);
+    }
+
+    @SuppressForbidden(reason = "read path that is not configured in environment")
+    private static Path getPath(final String path) {
+        return PathUtils.get(path);
+    }
+
     // visible for testing
     static void warnIfPreRelease(final Version version, final boolean isSnapshot, final Logger logger) {
         if (!version.isRelease() || isSnapshot) {
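The new Node#checkForIndexDataInDefaultPathData above refuses to start the node when a default data path still contains index folders. Below is a hedged standalone sketch of that kind of guard; the directory layout ("nodes/0/indices") and path handling are assumptions made for illustration, not the NodeEnvironment API.

-------------------------------------------------
// Sketch: scan candidate default data paths for leftover index folders and fail fast if any exist.
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

public class DefaultPathDataCheckSketch {

    static void checkForIndexData(List<Path> defaultDataPaths) throws IOException {
        final List<Path> offenders = new ArrayList<>();
        for (Path dataPath : defaultDataPaths) {
            Path indices = dataPath.resolve("nodes").resolve("0").resolve("indices"); // assumed layout
            if (Files.isDirectory(indices) == false) {
                continue;
            }
            try (DirectoryStream<Path> stream = Files.newDirectoryStream(indices)) {
                for (Path indexFolder : stream) {
                    offenders.add(indexFolder);
                }
            }
        }
        if (offenders.isEmpty() == false) {
            throw new IllegalStateException(
                "detected index data in default data paths where there should not be any: " + offenders);
        }
    }

    public static void main(String[] args) throws IOException {
        checkForIndexData(List.of(Path.of("/tmp/es-default-data"))); // no-op if the path is absent
    }
}
-------------------------------------------------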
@@ -20,12 +20,14 @@ package org.elasticsearch.rest;

 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;

 import java.io.IOException;
+import java.io.OutputStream;
 import java.util.Collections;
 import java.util.Set;
 import java.util.function.Predicate;
@@ -97,7 +99,9 @@ public abstract class AbstractRestChannel implements RestChannel {
             excludes = filters.stream().filter(EXCLUDE_FILTER).map(f -> f.substring(1)).collect(toSet());
         }

-        XContentBuilder builder = new XContentBuilder(XContentFactory.xContent(responseContentType), bytesOutput(), includes, excludes);
+        OutputStream unclosableOutputStream = Streams.flushOnCloseStream(bytesOutput());
+        XContentBuilder builder =
+            new XContentBuilder(XContentFactory.xContent(responseContentType), unclosableOutputStream, includes, excludes);
         if (pretty) {
             builder.prettyPrint().lfAtEnd();
         }
@@ -107,8 +111,9 @@ public abstract class AbstractRestChannel implements RestChannel {
     }

     /**
-     * A channel level bytes output that can be reused. It gets reset on each call to this
-     * method.
+     * A channel level bytes output that can be reused. The bytes output is lazily instantiated
+     * by a call to {@link #newBytesOutput()}. Once the stream is created, it gets reset on each
+     * call to this method.
      */
     @Override
     public final BytesStreamOutput bytesOutput() {
@@ -120,6 +125,14 @@ public abstract class AbstractRestChannel implements RestChannel {
         return bytesOut;
     }

+    /**
+     * An accessor to the raw value of the channel bytes output. This method will not instantiate
+     * a new stream if one does not exist and this method will not reset the stream.
+     */
+    protected final BytesStreamOutput bytesOutputOrNull() {
+        return bytesOut;
+    }
+
     protected BytesStreamOutput newBytesOutput() {
         return new BytesStreamOutput();
     }
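The AbstractRestChannel hunks wrap the reusable channel buffer in a stream whose close() only flushes, so closing the XContentBuilder cannot close the underlying buffer. A minimal sketch of that wrapper pattern follows; it is an illustration of the idea, not the Streams.flushOnCloseStream implementation.

-------------------------------------------------
// Sketch: an OutputStream wrapper whose close() flushes but deliberately does not close the delegate,
// so a builder can be closed without tearing down the channel's reusable buffer.
import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public class FlushOnCloseSketch {

    static OutputStream flushOnClose(OutputStream delegate) {
        return new FilterOutputStream(delegate) {
            @Override
            public void close() throws IOException {
                flush(); // do not close the underlying stream
            }
        };
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream channelBuffer = new ByteArrayOutputStream();
        try (OutputStream out = flushOnClose(channelBuffer)) {
            out.write("{\"ok\":true}".getBytes(StandardCharsets.UTF_8));
        }
        channelBuffer.write('\n'); // the buffer is still usable after the wrapper was "closed"
        System.out.println(channelBuffer.size());
    }
}
-------------------------------------------------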
@@ -30,7 +30,6 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
@@ -147,8 +146,8 @@ public class BytesRestResponse extends RestResponse {
         return builder;
     }

-    static BytesRestResponse createSimpleErrorResponse(RestStatus status, String errorMessage) throws IOException {
-        return new BytesRestResponse(status, JsonXContent.contentBuilder().startObject()
+    static BytesRestResponse createSimpleErrorResponse(RestChannel channel, RestStatus status, String errorMessage) throws IOException {
+        return new BytesRestResponse(status, channel.newErrorBuilder().startObject()
             .field("error", errorMessage)
             .field("status", status.getStatus())
             .endObject());
@@ -178,8 +178,9 @@ public class RestController extends AbstractComponent implements HttpServerTrans
             sendContentTypeErrorMessage(request, responseChannel);
         } else if (contentLength > 0 && handler != null && handler.supportsContentStream() &&
             request.getXContentType() != XContentType.JSON && request.getXContentType() != XContentType.SMILE) {
-            responseChannel.sendResponse(BytesRestResponse.createSimpleErrorResponse(RestStatus.NOT_ACCEPTABLE, "Content-Type [" +
-                request.getXContentType() + "] does not support stream parsing. Use JSON or SMILE instead"));
+            responseChannel.sendResponse(BytesRestResponse.createSimpleErrorResponse(responseChannel,
+                RestStatus.NOT_ACCEPTABLE, "Content-Type [" + request.getXContentType() +
+                    "] does not support stream parsing. Use JSON or SMILE instead"));
         } else {
             if (canTripCircuitBreaker(request)) {
                 inFlightRequestsBreaker(circuitBreakerService).addEstimateBytesAndMaybeBreak(contentLength, "<http_request>");

@@ -229,7 +230,8 @@ public class RestController extends AbstractComponent implements HttpServerTrans
     void dispatchRequest(final RestRequest request, final RestChannel channel, final NodeClient client, ThreadContext threadContext,
                          final RestHandler handler) throws Exception {
         if (checkRequestParameters(request, channel) == false) {
-            channel.sendResponse(BytesRestResponse.createSimpleErrorResponse(BAD_REQUEST, "error traces in responses are disabled."));
+            channel
+                .sendResponse(BytesRestResponse.createSimpleErrorResponse(channel,BAD_REQUEST, "error traces in responses are disabled."));
         } else {
             for (String key : headersToCopy) {
                 String httpHeader = request.header(key);

@@ -283,7 +285,7 @@ public class RestController extends AbstractComponent implements HttpServerTrans
             Strings.collectionToCommaDelimitedString(restRequest.getAllHeaderValues("Content-Type")) + "] is not supported";
         }

-        channel.sendResponse(BytesRestResponse.createSimpleErrorResponse(NOT_ACCEPTABLE, errorMessage));
+        channel.sendResponse(BytesRestResponse.createSimpleErrorResponse(channel, NOT_ACCEPTABLE, errorMessage));
     }

     /**

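With the extra RestChannel argument, the simple error body is built through channel.newErrorBuilder() rather than a hard-coded JSON builder, so it follows the channel's content type. A hedged sketch of a call site; the status and message are made up, and because the method is package-private this only applies to code inside org.elasticsearch.rest:

-------------------------------------------------
BytesRestResponse error =
    BytesRestResponse.createSimpleErrorResponse(channel, RestStatus.BAD_REQUEST, "unsupported request");
channel.sendResponse(error);
-------------------------------------------------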
@@ -20,8 +20,9 @@ package org.elasticsearch.rest.action.cat;

 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Table;
+import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.UTF8StreamWriter;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.BytesStream;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.BytesRestResponse;

@@ -56,7 +57,7 @@ public abstract class AbstractCatAction extends BaseRestHandler {
         return channel -> {
             Table table = getTableWithHeader(request);
             int[] width = buildHelpWidths(table, request);
-            BytesStreamOutput bytesOutput = channel.bytesOutput();
+            BytesStream bytesOutput = Streams.flushOnCloseStream(channel.bytesOutput());
             UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOutput);
             for (Table.Cell cell : table.getHeaders()) {
                 // need to do left-align always, so create new cells

@@ -22,8 +22,9 @@ package org.elasticsearch.rest.action.cat;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.Table;
+import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.UTF8StreamWriter;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.BytesStream;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.SizeValue;

@@ -82,7 +83,7 @@ public class RestTable {
         List<DisplayHeader> headers = buildDisplayHeaders(table, request);
         int[] width = buildWidths(table, request, verbose, headers);

-        BytesStreamOutput bytesOut = channel.bytesOutput();
+        BytesStream bytesOut = Streams.flushOnCloseStream(channel.bytesOutput());
         UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOut);
         int lastHeader = headers.size() - 1;
         if (verbose) {

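Both cat-handler hunks converge on the same pattern: wrap the channel's reusable stream so that closing the UTF8StreamWriter flushes it without closing the underlying output. A rough sketch, assuming a handler body that may throw IOException and a made-up table payload:

-------------------------------------------------
BytesStream bytesOutput = Streams.flushOnCloseStream(channel.bytesOutput());
UTF8StreamWriter writer = new UTF8StreamWriter().setOutput(bytesOutput);
writer.append("id  name\n");   // the rendered table body
writer.close();                // flushes into the channel's stream, which stays open for reuse
channel.sendResponse(new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, bytesOutput.bytes()));
-------------------------------------------------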
@@ -42,12 +42,12 @@ import java.util.Map;
 /**
  * A geo metric aggregator that computes a geo-centroid from a {@code geo_point} type field
  */
-public final class GeoCentroidAggregator extends MetricsAggregator {
+final class GeoCentroidAggregator extends MetricsAggregator {
     private final ValuesSource.GeoPoint valuesSource;
-    LongArray centroids;
-    LongArray counts;
+    private LongArray centroids;
+    private LongArray counts;

-    protected GeoCentroidAggregator(String name, SearchContext context, Aggregator parent,
+    GeoCentroidAggregator(String name, SearchContext context, Aggregator parent,
                                     ValuesSource.GeoPoint valuesSource, List<PipelineAggregator> pipelineAggregators,
                                     Map<String, Object> metaData) throws IOException {
         super(name, context, parent, pipelineAggregators, metaData);

@@ -32,9 +32,9 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;

-public class GeoCentroidAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource.GeoPoint, GeoCentroidAggregatorFactory> {
+class GeoCentroidAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource.GeoPoint, GeoCentroidAggregatorFactory> {

-    public GeoCentroidAggregatorFactory(String name, ValuesSourceConfig<ValuesSource.GeoPoint> config,
+    GeoCentroidAggregatorFactory(String name, ValuesSourceConfig<ValuesSource.GeoPoint> config,
                                         SearchContext context, AggregatorFactory<?> parent, AggregatorFactories.Builder subFactoriesBuilder,
                                         Map<String, Object> metaData) throws IOException {
         super(name, config, context, parent, subFactoriesBuilder, metaData);

@@ -38,7 +38,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr
     protected final GeoPoint centroid;
     protected final long count;

-    public InternalGeoCentroid(String name, GeoPoint centroid, long count, List<PipelineAggregator>
+    InternalGeoCentroid(String name, GeoPoint centroid, long count, List<PipelineAggregator>
             pipelineAggregators, Map<String, Object> metaData) {
         super(name, pipelineAggregators, metaData);
         assert (centroid == null) == (count == 0);

@@ -132,7 +132,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr
     }

     static class Fields {
-        public static final String CENTROID = "location";
+        static final String CENTROID = "location";
     }

     @Override

@@ -1,41 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.aggregations.metrics.percentiles;
-
-public class InternalPercentile implements Percentile {
-
-    private final double percent;
-    private final double value;
-
-    public InternalPercentile(double percent, double value) {
-        this.percent = percent;
-        this.value = value;
-    }
-
-    @Override
-    public double getPercent() {
-        return percent;
-    }
-
-    @Override
-    public double getValue() {
-        return value;
-    }
-}

@@ -19,10 +19,41 @@

 package org.elasticsearch.search.aggregations.metrics.percentiles;

-public interface Percentile {
+import java.util.Objects;

-    double getPercent();
+public class Percentile {

-    double getValue();
+    private final double percent;
+    private final double value;
+
+    public Percentile(double percent, double value) {
+        this.percent = percent;
+        this.value = value;
+    }
+
+    public double getPercent() {
+        return percent;
+    }
+
+    public double getValue() {
+        return value;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        Percentile that = (Percentile) o;
+        return Double.compare(that.percent, percent) == 0
+            && Double.compare(that.value, value) == 0;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(percent, value);
+    }
 }

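Since Percentile is now a concrete value class and InternalPercentile is removed, callers can construct and compare percentile points directly. A small self-contained example that uses only the class shown in the hunk above; the values are made up:

-------------------------------------------------
import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;

public class PercentileValueExample {
    public static void main(String[] args) {
        Percentile p50 = new Percentile(50.0, 123.4);
        Percentile same = new Percentile(50.0, 123.4);
        // equals() and hashCode() are value-based, as introduced by this change
        System.out.println(p50.equals(same));                   // true
        System.out.println(p50.hashCode() == same.hashCode());  // true
        System.out.println(p50.getPercent() + " => " + p50.getValue());
    }
}
-------------------------------------------------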
@@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.hdr;
 import org.HdrHistogram.DoubleHistogram;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.DocValueFormat;
-import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -109,7 +108,7 @@ public class InternalHDRPercentileRanks extends AbstractInternalHDRPercentiles i

         @Override
         public Percentile next() {
-            final Percentile next = new InternalPercentile(percentileRank(state, values[i]), values[i]);
+            final Percentile next = new Percentile(percentileRank(state, values[i]), values[i]);
             ++i;
             return next;
         }

@@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.hdr;
 import org.HdrHistogram.DoubleHistogram;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.DocValueFormat;
-import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -99,7 +98,7 @@ public class InternalHDRPercentiles extends AbstractInternalHDRPercentiles imple

         @Override
         public Percentile next() {
-            final Percentile next = new InternalPercentile(percents[i], state.getValueAtPercentile(percents[i]));
+            final Percentile next = new Percentile(percents[i], state.getValueAtPercentile(percents[i]));
             ++i;
             return next;
         }

@@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest;

 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.DocValueFormat;
-import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -106,7 +105,7 @@ public class InternalTDigestPercentileRanks extends AbstractInternalTDigestPerce

         @Override
         public Percentile next() {
-            final Percentile next = new InternalPercentile(percentileRank(state, values[i]), values[i]);
+            final Percentile next = new Percentile(percentileRank(state, values[i]), values[i]);
             ++i;
             return next;
         }

@@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest;

 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.DocValueFormat;
-import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -95,7 +94,7 @@ public class InternalTDigestPercentiles extends AbstractInternalTDigestPercentil

         @Override
         public Percentile next() {
-            final Percentile next = new InternalPercentile(percents[i], state.quantile(percents[i] / 100));
+            final Percentile next = new Percentile(percents[i], state.quantile(percents[i] / 100));
             ++i;
             return next;
         }

@@ -26,7 +26,6 @@ import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
 import org.elasticsearch.search.aggregations.metrics.max.InternalMax;
-import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile;
 import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

@@ -136,7 +135,7 @@ public class InternalPercentilesBucket extends InternalNumericMetricsAggregation

         @Override
         public Percentile next() {
-            final Percentile next = new InternalPercentile(percents[i], percentiles[i]);
+            final Percentile next = new Percentile(percents[i], percentiles[i]);
             ++i;
             return next;
         }

@@ -183,7 +183,7 @@ public final class TaskInfo implements Writeable, ToXContent {
     }

     public static final ConstructingObjectParser<TaskInfo, Void> PARSER = new ConstructingObjectParser<>(
-            "task_info", a -> {
+            "task_info", true, a -> {
                 int i = 0;
                 TaskId id = new TaskId((String) a[i++], (Long) a[i++]);
                 String type = (String) a[i++];

@@ -196,11 +196,11 @@ public final class TaskInfo implements Writeable, ToXContent {
                 String parentTaskIdString = (String) a[i++];

                 RawTaskStatus status = statusBytes == null ? null : new RawTaskStatus(statusBytes);
-                TaskId parentTaskId = parentTaskIdString == null ? TaskId.EMPTY_TASK_ID : new TaskId((String) parentTaskIdString);
+                TaskId parentTaskId = parentTaskIdString == null ? TaskId.EMPTY_TASK_ID : new TaskId(parentTaskIdString);
                 return new TaskInfo(id, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId);
             });
     static {
-        // Note for the future: this has to be backwards compatible with all changes to the task storage format
+        // Note for the future: this has to be backwards and forwards compatible with all changes to the task storage format
         PARSER.declareString(constructorArg(), new ParseField("node"));
         PARSER.declareLong(constructorArg(), new ParseField("id"));
         PARSER.declareString(constructorArg(), new ParseField("type"));

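The extra true argument switches the parser to lenient mode, so unknown fields emitted by a newer node are skipped instead of failing the parse. A hedged, self-contained illustration with a made-up "point" object, not Elasticsearch code:

-------------------------------------------------
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;

public class LenientPointParser {
    // 'true' = ignore unknown fields, mirroring the TaskInfo parser above.
    public static final ConstructingObjectParser<int[], Void> PARSER =
        new ConstructingObjectParser<>("point", true, a -> new int[] { (Integer) a[0], (Integer) a[1] });

    static {
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("x"));
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("y"));
    }
}
-------------------------------------------------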
@@ -41,7 +41,7 @@ import org.elasticsearch.common.component.Lifecycle;
 import org.elasticsearch.common.compress.Compressor;
 import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.compress.NotCompressedException;
-import org.elasticsearch.common.io.ReleasableBytesStream;
+import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

@@ -1025,10 +1025,8 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
         }
         status = TransportStatus.setRequest(status);
         ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays);
-        // we wrap this in a release once since if the onRequestSent callback throws an exception
-        // we might release things twice and this should be prevented
-        final Releasable toRelease = Releasables.releaseOnce(() -> Releasables.close(bStream.bytes()));
-        StreamOutput stream = bStream;
+        boolean addedReleaseListener = false;
+        StreamOutput stream = Streams.flushOnCloseStream(bStream);
         try {
             // only compress if asked, and, the request is not bytes, since then only
             // the header part is compressed, and the "body" can't be extracted as compressed

@@ -1047,12 +1045,17 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
             stream.writeString(action);
             BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream, bStream);
             final TransportRequestOptions finalOptions = options;
+            final StreamOutput finalStream = stream;
             // this might be called in a different thread
-            SendListener onRequestSent = new SendListener(toRelease,
+            SendListener onRequestSent = new SendListener(
+                () -> IOUtils.closeWhileHandlingException(finalStream, bStream),
                 () -> transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions));
             internalSendMessage(targetChannel, message, onRequestSent);
+            addedReleaseListener = true;
         } finally {
-            IOUtils.close(stream);
+            if (!addedReleaseListener) {
+                IOUtils.close(stream, bStream);
+            }
         }
     }

@@ -1114,10 +1117,8 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
         }
         status = TransportStatus.setResponse(status); // TODO share some code with sendRequest
         ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays);
-        // we wrap this in a release once since if the onRequestSent callback throws an exception
-        // we might release things twice and this should be prevented
-        final Releasable toRelease = Releasables.releaseOnce(() -> Releasables.close(bStream.bytes()));
-        StreamOutput stream = bStream;
+        boolean addedReleaseListener = false;
+        StreamOutput stream = Streams.flushOnCloseStream(bStream);
         try {
             if (options.compress()) {
                 status = TransportStatus.setCompress(status);

@@ -1128,12 +1129,16 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
             BytesReference reference = buildMessage(requestId, status, nodeVersion, response, stream, bStream);

             final TransportResponseOptions finalOptions = options;
+            final StreamOutput finalStream = stream;
             // this might be called in a different thread
-            SendListener listener = new SendListener(toRelease,
+            SendListener listener = new SendListener(() -> IOUtils.closeWhileHandlingException(finalStream, bStream),
                 () -> transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions));
             internalSendMessage(channel, reference, listener);
+            addedReleaseListener = true;
         } finally {
-            IOUtils.close(stream);
+            if (!addedReleaseListener) {
+                IOUtils.close(stream, bStream);
+            }
         }
     }

@@ -1161,7 +1166,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
      * Serializes the given message into a bytes representation
      */
     private BytesReference buildMessage(long requestId, byte status, Version nodeVersion, TransportMessage message, StreamOutput stream,
-                                        ReleasableBytesStream writtenBytes) throws IOException {
+                                        ReleasableBytesStreamOutput writtenBytes) throws IOException {
         final BytesReference zeroCopyBuffer;
         if (message instanceof BytesTransportRequest) { // what a shitty optimization - we should use a direct send method instead
             BytesTransportRequest bRequest = (BytesTransportRequest) message;

@@ -80,43 +80,37 @@ public class RetryTests extends ESTestCase {
         return request;
     }

-    public void testSyncRetryBacksOff() throws Exception {
+    public void testRetryBacksOff() throws Exception {
         BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL);

         BulkRequest bulkRequest = createBulkRequest();
-        BulkResponse response = Retry
-                .on(EsRejectedExecutionException.class)
-                .policy(backoff)
-                .using(bulkClient.threadPool())
-                .withSyncBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings());
+        BulkResponse response = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool())
+                .withBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings())
+                .actionGet();

         assertFalse(response.hasFailures());
         assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions()));
     }

-    public void testSyncRetryFailsAfterBackoff() throws Exception {
+    public void testRetryFailsAfterBackoff() throws Exception {
         BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1);

         BulkRequest bulkRequest = createBulkRequest();
-        BulkResponse response = Retry
-                .on(EsRejectedExecutionException.class)
-                .policy(backoff)
-                .using(bulkClient.threadPool())
-                .withSyncBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings());
+        BulkResponse response = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool())
+                .withBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings())
+                .actionGet();

         assertTrue(response.hasFailures());
         assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions()));
     }

-    public void testAsyncRetryBacksOff() throws Exception {
+    public void testRetryWithListenerBacksOff() throws Exception {
         BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL);
         AssertingListener listener = new AssertingListener();

         BulkRequest bulkRequest = createBulkRequest();
-        Retry.on(EsRejectedExecutionException.class)
-                .policy(backoff)
-                .using(bulkClient.threadPool())
-                .withAsyncBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings());
+        Retry retry = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool());
+        retry.withBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings());

         listener.awaitCallbacksCalled();
         listener.assertOnResponseCalled();

@@ -125,15 +119,13 @@ public class RetryTests extends ESTestCase {
         listener.assertOnFailureNeverCalled();
     }

-    public void testAsyncRetryFailsAfterBacksOff() throws Exception {
+    public void testRetryWithListenerFailsAfterBacksOff() throws Exception {
         BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1);
         AssertingListener listener = new AssertingListener();

         BulkRequest bulkRequest = createBulkRequest();
-        Retry.on(EsRejectedExecutionException.class)
-                .policy(backoff)
-                .using(bulkClient.threadPool())
-                .withAsyncBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings());
+        Retry retry = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool());
+        retry.withBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings());

         listener.awaitCallbacksCalled();

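The test changes above track the reworked Retry API: the on(...).policy(...).using(...) builder is replaced by a constructor, and the sync/async entry points collapse into overloads of withBackoff. A sketch of the two call styles, assuming client, bulkRequest, listener and threadPool already exist in scope, with made-up backoff values:

-------------------------------------------------
BackoffPolicy backoff = BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(50), 3);
Retry retry = new Retry(EsRejectedExecutionException.class, backoff, threadPool);

// Blocking style: withBackoff(...) returns a future, actionGet() waits on it.
BulkResponse response = retry.withBackoff(client::bulk, bulkRequest, client.settings()).actionGet();

// Listener style replaces the old withAsyncBackoff(...).
retry.withBackoff(client::bulk, bulkRequest, listener, client.settings());
-------------------------------------------------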
@@ -166,6 +166,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
         Files.createDirectories(multiDataPath[0]);
         Files.createDirectories(multiDataPath[1]);
         logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]);
+        ensureGreen();
     }

     void upgradeIndexFolder() throws Exception {

@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.MockBigArrays;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+public class ReleasableBytesStreamOutputTests extends ESTestCase {
+
+    public void testRelease() throws Exception {
+        MockBigArrays mockBigArrays =
+            new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
+        try (ReleasableBytesStreamOutput output =
+                 getRandomReleasableBytesStreamOutput(mockBigArrays)) {
+            output.writeBoolean(randomBoolean());
+        }
+        MockBigArrays.ensureAllArraysAreReleased();
+    }
+
+    private ReleasableBytesStreamOutput getRandomReleasableBytesStreamOutput(
+            MockBigArrays mockBigArrays) throws IOException {
+        ReleasableBytesStreamOutput output = new ReleasableBytesStreamOutput(mockBigArrays);
+        if (randomBoolean()) {
+            for (int i = 0; i < scaledRandomIntBetween(1, 32); i++) {
+                output.write(randomByte());
+            }
+        }
+        return output;
+    }
+}

@@ -31,7 +31,7 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion;
+import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
 import org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.index.mapper.VersionFieldMapper;
 import org.elasticsearch.test.ESTestCase;

@@ -53,7 +53,7 @@ public class VersionLookupTests extends ESTestCase {
         writer.addDocument(doc);
         DirectoryReader reader = DirectoryReader.open(writer);
         LeafReaderContext segment = reader.leaves().get(0);
-        PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader());
+        PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader());
         // found doc
         DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), null, segment);
         assertNotNull(result);

@@ -81,7 +81,7 @@ public class VersionLookupTests extends ESTestCase {
         writer.addDocument(doc);
         DirectoryReader reader = DirectoryReader.open(writer);
         LeafReaderContext segment = reader.leaves().get(0);
-        PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader());
+        PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader());
         // return the last doc when there are duplicates
         DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), null, segment);
         assertNotNull(result);

@@ -38,8 +38,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;

-import static org.elasticsearch.common.lucene.uid.VersionsResolver.loadDocIdAndVersion;
-import static org.elasticsearch.common.lucene.uid.VersionsResolver.loadVersion;
+import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion;
+import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadVersion;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.nullValue;

@@ -145,7 +145,7 @@ public class VersionsTests extends ESTestCase {

     /** Test that version map cache works, is evicted on close, etc */
     public void testCache() throws Exception {
-        int size = VersionsResolver.lookupStates.size();
+        int size = VersionsAndSeqNoResolver.lookupStates.size();

         Directory dir = newDirectory();
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

@@ -156,21 +156,21 @@ public class VersionsTests extends ESTestCase {
         DirectoryReader reader = DirectoryReader.open(writer);
         // should increase cache size by 1
         assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
-        assertEquals(size+1, VersionsResolver.lookupStates.size());
+        assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size());
         // should be cache hit
         assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
-        assertEquals(size+1, VersionsResolver.lookupStates.size());
+        assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size());

         reader.close();
         writer.close();
         // core should be evicted from the map
-        assertEquals(size, VersionsResolver.lookupStates.size());
+        assertEquals(size, VersionsAndSeqNoResolver.lookupStates.size());
         dir.close();
     }

     /** Test that version map cache behaves properly with a filtered reader */
     public void testCacheFilterReader() throws Exception {
-        int size = VersionsResolver.lookupStates.size();
+        int size = VersionsAndSeqNoResolver.lookupStates.size();

         Directory dir = newDirectory();
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

@@ -180,17 +180,17 @@ public class VersionsTests extends ESTestCase {
         writer.addDocument(doc);
         DirectoryReader reader = DirectoryReader.open(writer);
         assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6")));
-        assertEquals(size+1, VersionsResolver.lookupStates.size());
+        assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size());
         // now wrap the reader
         DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(reader, new ShardId("bogus", "_na_", 5));
         assertEquals(87, loadVersion(wrapped, new Term(UidFieldMapper.NAME, "6")));
         // same size map: core cache key is shared
-        assertEquals(size+1, VersionsResolver.lookupStates.size());
+        assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size());

         reader.close();
         writer.close();
         // core should be evicted from the map
-        assertEquals(size, VersionsResolver.lookupStates.size());
+        assertEquals(size, VersionsAndSeqNoResolver.lookupStates.size());
         dir.close();
     }
 }

@@ -562,4 +562,16 @@ public class SettingsTests extends ESTestCase {
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings));
         assertTrue(e.getMessage().contains("must be stored inside the Elasticsearch keystore"));
     }
+
+    public void testGetAsArrayFailsOnDuplicates() {
+        final Settings settings =
+            Settings.builder()
+                .put("foobar.0", "bar")
+                .put("foobar.1", "baz")
+                .put("foobar", "foo")
+                .build();
+        final IllegalStateException e = expectThrows(IllegalStateException.class, () -> settings.getAsArray("foobar"));
+        assertThat(e, hasToString(containsString("settings object contains values for [foobar=foo] and [foobar.0=bar]")));
+    }
+
 }

@@ -139,4 +139,19 @@ public class ElectMasterServiceTests extends ESTestCase {
             }
         }
     }
+
+    public void testCountMasterNodes() {
+        List<DiscoveryNode> nodes = generateRandomNodes();
+        ElectMasterService service = electMasterService();
+
+        int masterNodes = 0;
+
+        for (DiscoveryNode node : nodes) {
+            if (node.isMasterNode()) {
+                masterNodes++;
+            }
+        }
+
+        assertEquals(masterNodes, service.countMasterNodes(nodes));
+    }
 }

@@ -108,6 +108,8 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
         final ClusterStateTaskExecutor.ClusterTasksResult<ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task> result =
                 executor.execute(clusterState, tasks);
         verify(electMasterService).hasEnoughMasterNodes(eq(remainingNodesClusterState.get().nodes()));
+        verify(electMasterService).countMasterNodes(eq(remainingNodesClusterState.get().nodes()));
+        verify(electMasterService).minimumMasterNodes();
         verifyNoMoreInteractions(electMasterService);

         // ensure that we did not reroute

@@ -23,10 +23,12 @@ import org.elasticsearch.test.ESTestCase;

 import java.io.IOException;
 import java.net.URL;
+import java.nio.file.Path;

 import static org.hamcrest.CoreMatchers.endsWith;
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.CoreMatchers.nullValue;
+import static org.hamcrest.Matchers.equalTo;

 /**
  * Simple unit-tests for Environment.java

@@ -71,4 +73,104 @@ public class EnvironmentTests extends ESTestCase {
         assertThat(environment.resolveRepoURL(new URL("jar:http://localhost/test/../repo1?blah!/repo/")), nullValue());
     }

+    public void testDefaultPathData() {
+        final Path defaultPathData = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir().toAbsolutePath())
+            .put("default.path.data", defaultPathData)
+            .build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.dataFiles(), equalTo(new Path[] { defaultPathData }));
+    }
+
+    public void testPathDataOverrideDefaultPathData() {
+        final Path pathData = createTempDir().toAbsolutePath();
+        final Path defaultPathData = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir().toAbsolutePath())
+            .put("path.data", pathData)
+            .put("default.path.data", defaultPathData)
+            .build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.dataFiles(), equalTo(new Path[] { pathData }));
+    }
+
+    public void testPathDataWhenNotSet() {
+        final Path pathHome = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder().put("path.home", pathHome).build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.dataFiles(), equalTo(new Path[]{pathHome.resolve("data")}));
+    }
+
+    public void testPathDataNotSetInEnvironmentIfNotSet() {
+        final Path defaultPathData = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir().toAbsolutePath())
+            .put("default.path.data", defaultPathData)
+            .build();
+        assertFalse(Environment.PATH_DATA_SETTING.exists(settings));
+        assertTrue(Environment.DEFAULT_PATH_DATA_SETTING.exists(settings));
+        final Environment environment = new Environment(settings);
+        assertFalse(Environment.PATH_DATA_SETTING.exists(environment.settings()));
+        assertTrue(Environment.DEFAULT_PATH_DATA_SETTING.exists(environment.settings()));
+    }
+
+    public void testDefaultPathLogs() {
+        final Path defaultPathLogs = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir().toAbsolutePath())
+            .put("default.path.logs", defaultPathLogs)
+            .build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.logsFile(), equalTo(defaultPathLogs));
+    }
+
+    public void testPathLogsOverrideDefaultPathLogs() {
+        final Path pathLogs = createTempDir().toAbsolutePath();
+        final Path defaultPathLogs = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir().toAbsolutePath())
+            .put("path.logs", pathLogs)
+            .put("default.path.logs", defaultPathLogs)
+            .build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.logsFile(), equalTo(pathLogs));
+    }
+
+    public void testPathLogsWhenNotSet() {
+        final Path pathHome = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder().put("path.home", pathHome).build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.logsFile(), equalTo(pathHome.resolve("logs")));
+    }
+
+    public void testDefaultPathConf() {
+        final Path defaultPathConf = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir().toAbsolutePath())
+            .put("default.path.conf", defaultPathConf)
+            .build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.configFile(), equalTo(defaultPathConf));
+    }
+
+    public void testPathConfOverrideDefaultPathConf() {
+        final Path pathConf = createTempDir().toAbsolutePath();
+        final Path defaultPathConf = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder()
+            .put("path.home", createTempDir().toAbsolutePath())
+            .put("path.conf", pathConf)
+            .put("default.path.conf", defaultPathConf)
+            .build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.configFile(), equalTo(pathConf));
+    }
+
+    public void testPathConfWhenNotSet() {
+        final Path pathHome = createTempDir().toAbsolutePath();
+        final Settings settings = Settings.builder().put("path.home", pathHome).build();
+        final Environment environment = new Environment(settings);
+        assertThat(environment.configFile(), equalTo(pathHome.resolve("config")));
+    }
+
 }

@@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.startsWith;
 public class IndexingSlowLogTests extends ESTestCase {
     public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException {
         BytesReference source = JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject().bytes();
-        ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceID.emptySeqID(), "id",
+        ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
             "test", null, null, source, XContentType.JSON, null);
         Index index = new Index("foo", "123");
         // Turning off document logging doesn't log source[]

@@ -83,7 +83,8 @@ import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.uid.Versions;
-import org.elasticsearch.common.lucene.uid.VersionsResolver;
+import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver;
+import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;

@@ -150,6 +151,7 @@ import java.util.Base64;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;

@@ -165,6 +167,8 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiFunction;
 import java.util.function.LongSupplier;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;

 import static java.util.Collections.emptyMap;
 import static java.util.Collections.shuffle;

@@ -292,7 +296,7 @@ public class InternalEngineTests extends ESTestCase {
     private static ParsedDocument testParsedDocument(String id, String type, String routing, Document document, BytesReference source, Mapping mappingUpdate) {
         Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
         Field versionField = new NumericDocValuesField("_version", 0);
-        SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
+        SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
         document.add(uidField);
         document.add(versionField);
         document.add(seqID.seqNo);

@@ -833,6 +837,58 @@
         }
     }

+    public void testTranslogRecoveryWithMultipleGenerations() throws IOException {
+        final int docs = randomIntBetween(1, 4096);
+        final List<Long> seqNos = LongStream.range(0, docs).boxed().collect(Collectors.toList());
+        Randomness.shuffle(seqNos);
+        engine.close();
+        Engine initialEngine = null;
+        try {
+            final AtomicInteger counter = new AtomicInteger();
+            initialEngine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG)) {
+                @Override
+                public SequenceNumbersService seqNoService() {
+                    return new SequenceNumbersService(
+                            engine.shardId,
+                            engine.config().getIndexSettings(),
+                            SequenceNumbersService.NO_OPS_PERFORMED,
+                            SequenceNumbersService.NO_OPS_PERFORMED,
+                            SequenceNumbersService.UNASSIGNED_SEQ_NO) {
+                        @Override
+                        public long generateSeqNo() {
+                            return seqNos.get(counter.getAndIncrement());
+                        }
+                    };
+                }
+            };
+            for (int i = 0; i < docs; i++) {
+                final String id = Integer.toString(i);
+                final ParsedDocument doc = testParsedDocument(id, "test", null, testDocumentWithTextField(), SOURCE, null);
+                initialEngine.index(indexForDoc(doc));
+                if (rarely()) {
+                    initialEngine.getTranslog().rollGeneration();
+                } else if (rarely()) {
+                    initialEngine.flush();
+                }
+            }
+        } finally {
+            IOUtils.close(initialEngine);
+        }
+
+        Engine recoveringEngine = null;
+        try {
+            recoveringEngine = new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
+            recoveringEngine.recoverFromTranslog();
+            try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) {
+                TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs);
+                assertEquals(docs, topDocs.totalHits);
+            }
+        } finally {
+            IOUtils.close(recoveringEngine);
+        }
+
+    }
+
     public void testConcurrentGetAndFlush() throws Exception {
         ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
         engine.index(indexForDoc(doc));
|
engine.index(indexForDoc(doc));
|
||||||
|
@ -1369,19 +1425,10 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
|
|
||||||
public void testOutOfOrderDocsOnReplica() throws IOException {
|
public void testOutOfOrderDocsOnReplica() throws IOException {
|
||||||
final List<Engine.Operation> ops = generateSingleDocHistory(true,
|
final List<Engine.Operation> ops = generateSingleDocHistory(true,
|
||||||
randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 2, 20);
|
randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE), false, 2, 2, 20);
|
||||||
assertOpsOnReplica(ops, replicaEngine, true);
|
assertOpsOnReplica(ops, replicaEngine, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testNonStandardVersioningOnReplica() throws IOException {
|
|
||||||
// TODO: this can be folded into testOutOfOrderDocsOnReplica once out of order
|
|
||||||
// is detected using seq#
|
|
||||||
final List<Engine.Operation> ops = generateSingleDocHistory(true,
|
|
||||||
randomFrom(VersionType.EXTERNAL_GTE, VersionType.FORCE), false, 2, 2, 20);
|
|
||||||
assertOpsOnReplica(ops, replicaEngine, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
public void testOutOfOrderDocsOnReplicaOldPrimary() throws IOException {
|
public void testOutOfOrderDocsOnReplicaOldPrimary() throws IOException {
|
||||||
IndexSettings oldSettings = IndexSettingsModule.newIndexSettings("testOld", Settings.builder()
|
IndexSettings oldSettings = IndexSettingsModule.newIndexSettings("testOld", Settings.builder()
|
||||||
.put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us
|
.put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us
|
||||||
|
@ -3357,18 +3404,24 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
searchResult.close();
|
searchResult.close();
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws BrokenBarrierException, InterruptedException, IOException {
|
/**
|
||||||
engine.close();
|
* A sequence number service that will generate a sequence number and if {@code stall} is set to {@code true} will wait on the barrier
|
||||||
final int docs = randomIntBetween(1, 32);
|
* and the referenced latch before returning. If the local checkpoint should advance (because {@code stall} is {@code false}), then the
|
||||||
InternalEngine initialEngine = null;
|
* value of {@code expectedLocalCheckpoint} is set accordingly.
|
||||||
try {
|
*
|
||||||
final CountDownLatch latch = new CountDownLatch(1);
|
* @param latchReference to latch the thread for the purpose of stalling
|
||||||
final CyclicBarrier barrier = new CyclicBarrier(2);
|
* @param barrier to signal the thread has generated a new sequence number
|
||||||
final AtomicBoolean skip = new AtomicBoolean();
|
* @param stall whether or not the thread should stall
|
||||||
final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED);
|
* @param expectedLocalCheckpoint the expected local checkpoint after generating a new sequence
|
||||||
final List<Thread> threads = new ArrayList<>();
|
* number
|
||||||
final SequenceNumbersService seqNoService =
|
* @return a sequence number service
|
||||||
new SequenceNumbersService(
|
*/
|
||||||
|
private SequenceNumbersService getStallingSeqNoService(
|
||||||
|
final AtomicReference<CountDownLatch> latchReference,
|
||||||
|
final CyclicBarrier barrier,
|
||||||
|
final AtomicBoolean stall,
|
||||||
|
final AtomicLong expectedLocalCheckpoint) {
|
||||||
|
return new SequenceNumbersService(
|
||||||
shardId,
|
shardId,
|
||||||
defaultSettings,
|
defaultSettings,
|
||||||
SequenceNumbersService.NO_OPS_PERFORMED,
|
SequenceNumbersService.NO_OPS_PERFORMED,
|
||||||
|
@ -3377,7 +3430,8 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
@Override
|
@Override
|
||||||
public long generateSeqNo() {
|
public long generateSeqNo() {
|
||||||
final long seqNo = super.generateSeqNo();
|
final long seqNo = super.generateSeqNo();
|
||||||
if (skip.get()) {
|
final CountDownLatch latch = latchReference.get();
|
||||||
|
if (stall.get()) {
|
||||||
try {
|
try {
|
||||||
barrier.await();
|
barrier.await();
|
||||||
latch.await();
|
latch.await();
|
||||||
|
@ -3392,13 +3446,26 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
return seqNo;
|
return seqNo;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws BrokenBarrierException, InterruptedException, IOException {
|
||||||
|
engine.close();
|
||||||
|
final int docs = randomIntBetween(1, 32);
|
||||||
|
InternalEngine initialEngine = null;
|
||||||
|
try {
|
||||||
|
final AtomicReference<CountDownLatch> latchReference = new AtomicReference<>(new CountDownLatch(1));
|
||||||
|
final CyclicBarrier barrier = new CyclicBarrier(2);
|
||||||
|
final AtomicBoolean stall = new AtomicBoolean();
|
||||||
|
final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED);
|
||||||
|
final List<Thread> threads = new ArrayList<>();
|
||||||
|
final SequenceNumbersService seqNoService = getStallingSeqNoService(latchReference, barrier, stall, expectedLocalCheckpoint);
|
||||||
initialEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, () -> seqNoService);
|
initialEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, () -> seqNoService);
|
||||||
final InternalEngine finalInitialEngine = initialEngine;
|
final InternalEngine finalInitialEngine = initialEngine;
|
||||||
for (int i = 0; i < docs; i++) {
|
for (int i = 0; i < docs; i++) {
|
||||||
final String id = Integer.toString(i);
|
final String id = Integer.toString(i);
|
||||||
final ParsedDocument doc = testParsedDocument(id, "test", null, testDocumentWithTextField(), SOURCE, null);
|
final ParsedDocument doc = testParsedDocument(id, "test", null, testDocumentWithTextField(), SOURCE, null);
|
||||||
|
|
||||||
skip.set(randomBoolean());
|
stall.set(randomBoolean());
|
||||||
final Thread thread = new Thread(() -> {
|
final Thread thread = new Thread(() -> {
|
||||||
try {
|
try {
|
||||||
finalInitialEngine.index(indexForDoc(doc));
|
finalInitialEngine.index(indexForDoc(doc));
|
||||||
|
@ -3407,7 +3474,7 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
thread.start();
|
thread.start();
|
||||||
if (skip.get()) {
|
if (stall.get()) {
|
||||||
threads.add(thread);
|
threads.add(thread);
|
||||||
barrier.await();
|
barrier.await();
|
||||||
} else {
|
} else {
|
||||||
|
@ -3419,7 +3486,7 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
assertThat(initialEngine.seqNoService().getMaxSeqNo(), equalTo((long) (docs - 1)));
|
assertThat(initialEngine.seqNoService().getMaxSeqNo(), equalTo((long) (docs - 1)));
|
||||||
initialEngine.flush(true, true);
|
initialEngine.flush(true, true);
|
||||||
|
|
||||||
latch.countDown();
|
latchReference.get().countDown();
|
||||||
for (final Thread thread : threads) {
|
for (final Thread thread : threads) {
|
||||||
thread.join();
|
thread.join();
|
||||||
}
|
}
|
||||||
|
@ -3594,6 +3661,78 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierException, InterruptedException {
|
||||||
|
engine.close();
|
||||||
|
final int numberOfTriplets = randomIntBetween(1, 32);
|
||||||
|
InternalEngine actualEngine = null;
|
||||||
|
try {
|
||||||
|
final AtomicReference<CountDownLatch> latchReference = new AtomicReference<>();
|
||||||
|
final CyclicBarrier barrier = new CyclicBarrier(2);
|
||||||
|
final AtomicBoolean stall = new AtomicBoolean();
|
||||||
|
final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED);
|
||||||
|
final Map<Thread, CountDownLatch> threads = new LinkedHashMap<>();
|
||||||
|
final SequenceNumbersService seqNoService = getStallingSeqNoService(latchReference, barrier, stall, expectedLocalCheckpoint);
|
||||||
|
actualEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, () -> seqNoService);
|
||||||
|
final InternalEngine finalActualEngine = actualEngine;
|
||||||
|
final Translog translog = finalActualEngine.getTranslog();
|
||||||
|
final long generation = finalActualEngine.getTranslog().currentFileGeneration();
|
||||||
|
for (int i = 0; i < numberOfTriplets; i++) {
|
||||||
|
/*
|
||||||
|
* Index three documents with the first and last landing in the same generation and the middle document being stalled until
|
||||||
|
* a later generation.
|
||||||
|
*/
|
||||||
|
stall.set(false);
|
||||||
|
index(finalActualEngine, 3 * i);
|
||||||
|
|
||||||
|
final CountDownLatch latch = new CountDownLatch(1);
|
||||||
|
latchReference.set(latch);
|
||||||
|
final int skipId = 3 * i + 1;
|
||||||
|
stall.set(true);
|
||||||
|
final Thread thread = new Thread(() -> {
|
||||||
|
try {
|
||||||
|
index(finalActualEngine, skipId);
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new AssertionError(e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
thread.start();
|
||||||
|
threads.put(thread, latch);
|
||||||
|
barrier.await();
|
||||||
|
|
||||||
|
stall.set(false);
|
||||||
|
index(finalActualEngine, 3 * i + 2);
|
||||||
|
finalActualEngine.flush();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This sequence number landed in the last generation, but the lower and upper bounds for an earlier generation straddle
|
||||||
|
* this sequence number.
|
||||||
|
*/
|
||||||
|
assertThat(translog.getMinGenerationForSeqNo(3 * i + 1).translogFileGeneration, equalTo(i + generation));
|
||||||
|
}
|
||||||
|
|
||||||
|
int i = 0;
|
||||||
|
for (final Map.Entry<Thread, CountDownLatch> entry : threads.entrySet()) {
|
||||||
|
final Map<String, String> userData = finalActualEngine.commitStats().getUserData();
|
||||||
|
assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(Long.toString(3 * i)));
|
||||||
|
assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo(Long.toString(i + generation)));
|
||||||
|
entry.getValue().countDown();
|
||||||
|
entry.getKey().join();
|
||||||
|
finalActualEngine.flush();
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
|
||||||
|
} finally {
|
||||||
|
IOUtils.close(actualEngine);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private void index(final InternalEngine engine, final int id) throws IOException {
|
||||||
|
final String docId = Integer.toString(id);
|
||||||
|
final ParsedDocument doc =
|
||||||
|
testParsedDocument(docId, "test", null, testDocumentWithTextField(), SOURCE, null);
|
||||||
|
engine.index(indexForDoc(doc));
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return a tuple representing the sequence ID for the given {@code Get}
|
* Return a tuple representing the sequence ID for the given {@code Get}
|
||||||
* operation. The first value in the tuple is the sequence number, the
|
* operation. The first value in the tuple is the sequence number, the
|
||||||
|
@ -3601,9 +3740,17 @@ public class InternalEngineTests extends ESTestCase {
|
||||||
*/
|
*/
|
||||||
private Tuple<Long, Long> getSequenceID(Engine engine, Engine.Get get) throws EngineException {
|
private Tuple<Long, Long> getSequenceID(Engine engine, Engine.Get get) throws EngineException {
|
||||||
try (Searcher searcher = engine.acquireSearcher("get")) {
|
try (Searcher searcher = engine.acquireSearcher("get")) {
|
||||||
long seqNum = VersionsResolver.loadSeqNo(searcher.reader(), get.uid());
|
final long primaryTerm;
|
||||||
long primaryTerm = VersionsResolver.loadPrimaryTerm(searcher.reader(), get.uid());
|
final long seqNo;
|
||||||
return new Tuple<>(seqNum, primaryTerm);
|
DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), get.uid());
|
||||||
|
if (docIdAndSeqNo == null) {
|
||||||
|
primaryTerm = 0;
|
||||||
|
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
||||||
|
} else {
|
||||||
|
seqNo = docIdAndSeqNo.seqNo;
|
||||||
|
primaryTerm = VersionsAndSeqNoResolver.loadPrimaryTerm(docIdAndSeqNo);
|
||||||
|
}
|
||||||
|
return new Tuple<>(seqNo, primaryTerm);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
throw new EngineException(shardId, "unable to retrieve sequence id", e);
|
throw new EngineException(shardId, "unable to retrieve sequence id", e);
|
||||||
}
|
}
|
||||||
|
|
|
@ -33,7 +33,7 @@ public class LiveVersionMapTests extends ESTestCase {
|
||||||
for (int i = 0; i < 100000; ++i) {
|
for (int i = 0; i < 100000; ++i) {
|
||||||
BytesRefBuilder uid = new BytesRefBuilder();
|
BytesRefBuilder uid = new BytesRefBuilder();
|
||||||
uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
|
uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
|
||||||
VersionValue version = new VersionValue(randomLong());
|
VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong());
|
||||||
map.putUnderLock(uid.toBytesRef(), version);
|
map.putUnderLock(uid.toBytesRef(), version);
|
||||||
}
|
}
|
||||||
long actualRamBytesUsed = RamUsageTester.sizeOf(map);
|
long actualRamBytesUsed = RamUsageTester.sizeOf(map);
|
||||||
|
@ -48,7 +48,7 @@ public class LiveVersionMapTests extends ESTestCase {
|
||||||
for (int i = 0; i < 100000; ++i) {
|
for (int i = 0; i < 100000; ++i) {
|
||||||
BytesRefBuilder uid = new BytesRefBuilder();
|
BytesRefBuilder uid = new BytesRefBuilder();
|
||||||
uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
|
uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
|
||||||
VersionValue version = new VersionValue(randomLong());
|
VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong());
|
||||||
map.putUnderLock(uid.toBytesRef(), version);
|
map.putUnderLock(uid.toBytesRef(), version);
|
||||||
}
|
}
|
||||||
actualRamBytesUsed = RamUsageTester.sizeOf(map);
|
actualRamBytesUsed = RamUsageTester.sizeOf(map);
|
||||||
|
|
|
@ -25,12 +25,12 @@ import org.elasticsearch.test.ESTestCase;
|
||||||
public class VersionValueTests extends ESTestCase {
|
public class VersionValueTests extends ESTestCase {
|
||||||
|
|
||||||
public void testRamBytesUsed() {
|
public void testRamBytesUsed() {
|
||||||
VersionValue versionValue = new VersionValue(randomLong());
|
VersionValue versionValue = new VersionValue(randomLong(), randomLong(), randomLong());
|
||||||
assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed());
|
assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed());
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testDeleteRamBytesUsed() {
|
public void testDeleteRamBytesUsed() {
|
||||||
DeleteVersionValue versionValue = new DeleteVersionValue(randomLong(), randomLong());
|
DeleteVersionValue versionValue = new DeleteVersionValue(randomLong(), randomLong(), randomLong(), randomLong());
|
||||||
assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed());
|
assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -27,13 +27,9 @@ import org.elasticsearch.action.DocWriteResponse;
|
||||||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||||
import org.elasticsearch.action.bulk.BulkItemRequest;
|
import org.elasticsearch.action.bulk.BulkItemRequest;
|
||||||
import org.elasticsearch.action.bulk.BulkItemResponse;
|
import org.elasticsearch.action.bulk.BulkItemResponse;
|
||||||
import org.elasticsearch.action.bulk.BulkRequest;
|
|
||||||
import org.elasticsearch.action.bulk.BulkResponse;
|
|
||||||
import org.elasticsearch.action.bulk.BulkShardRequest;
|
import org.elasticsearch.action.bulk.BulkShardRequest;
|
||||||
import org.elasticsearch.action.bulk.BulkShardResponse;
|
import org.elasticsearch.action.bulk.BulkShardResponse;
|
||||||
import org.elasticsearch.action.bulk.TransportShardBulkActionTests;
|
import org.elasticsearch.action.bulk.TransportShardBulkActionTests;
|
||||||
import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction;
|
|
||||||
import org.elasticsearch.action.delete.DeleteRequest;
|
|
||||||
import org.elasticsearch.action.index.IndexRequest;
|
import org.elasticsearch.action.index.IndexRequest;
|
||||||
import org.elasticsearch.action.index.IndexResponse;
|
import org.elasticsearch.action.index.IndexResponse;
|
||||||
import org.elasticsearch.action.support.PlainActionFuture;
|
import org.elasticsearch.action.support.PlainActionFuture;
|
||||||
|
@ -98,6 +94,10 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
|
||||||
}
|
}
|
||||||
|
|
||||||
protected IndexMetaData buildIndexMetaData(int replicas) throws IOException {
|
protected IndexMetaData buildIndexMetaData(int replicas) throws IOException {
|
||||||
|
return buildIndexMetaData(replicas, indexMapping);
|
||||||
|
}
|
||||||
|
|
||||||
|
protected IndexMetaData buildIndexMetaData(int replicas, Map<String, String> mappings) throws IOException {
|
||||||
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas)
|
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas)
|
||||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||||
|
@ -105,7 +105,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
|
||||||
IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName())
|
IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName())
|
||||||
.settings(settings)
|
.settings(settings)
|
||||||
.primaryTerm(0, 1);
|
.primaryTerm(0, 1);
|
||||||
for (Map.Entry<String, String> typeMapping : indexMapping.entrySet()) {
|
for (Map.Entry<String, String> typeMapping : mappings.entrySet()) {
|
||||||
metaData.putMapping(typeMapping.getKey(), typeMapping.getValue());
|
metaData.putMapping(typeMapping.getKey(), typeMapping.getValue());
|
||||||
}
|
}
|
||||||
return metaData.build();
|
return metaData.build();
|
||||||
|
@ -224,15 +224,24 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
|
||||||
updateAllocationIDsOnPrimary();
|
updateAllocationIDsOnPrimary();
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized IndexShard addReplica() throws IOException {
|
public IndexShard addReplica() throws IOException {
|
||||||
final ShardRouting replicaRouting = createShardRouting("s" + replicaId.incrementAndGet(), false);
|
final ShardRouting replicaRouting = createShardRouting("s" + replicaId.incrementAndGet(), false);
|
||||||
final IndexShard replica =
|
final IndexShard replica =
|
||||||
newShard(replicaRouting, indexMetaData, null, this::syncGlobalCheckpoint, getEngineFactory(replicaRouting));
|
newShard(replicaRouting, indexMetaData, null, this::syncGlobalCheckpoint, getEngineFactory(replicaRouting));
|
||||||
replicas.add(replica);
|
addReplica(replica);
|
||||||
updateAllocationIDsOnPrimary();
|
|
||||||
return replica;
|
return replica;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public synchronized void addReplica(IndexShard replica) {
|
||||||
|
assert shardRoutings().stream()
|
||||||
|
.filter(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())).findFirst().isPresent() == false :
|
||||||
|
"replica with aId [" + replica.routingEntry().allocationId() + "] already exists";
|
||||||
|
replica.updatePrimaryTerm(primary.getPrimaryTerm());
|
||||||
|
replicas.add(replica);
|
||||||
|
updateAllocationIDsOnPrimary();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardPath, final String nodeId) throws IOException {
|
public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardPath, final String nodeId) throws IOException {
|
||||||
final ShardRouting shardRouting = TestShardRouting.newShardRouting(
|
final ShardRouting shardRouting = TestShardRouting.newShardRouting(
|
||||||
shardId,
|
shardId,
|
||||||
|
@ -264,6 +273,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
|
||||||
}
|
}
|
||||||
boolean found = replicas.remove(replica);
|
boolean found = replicas.remove(replica);
|
||||||
assert found;
|
assert found;
|
||||||
|
closeShards(primary);
|
||||||
primary = replica;
|
primary = replica;
|
||||||
replica.updateRoutingEntry(replica.routingEntry().moveActiveReplicaToPrimary());
|
replica.updateRoutingEntry(replica.routingEntry().moveActiveReplicaToPrimary());
|
||||||
updateAllocationIDsOnPrimary();
|
updateAllocationIDsOnPrimary();
|
||||||
|
|
|
@ -18,6 +18,9 @@
|
||||||
*/
|
*/
|
||||||
package org.elasticsearch.index.replication;
|
package org.elasticsearch.index.replication;
|
||||||
|
|
||||||
|
import org.apache.lucene.index.Term;
|
||||||
|
import org.apache.lucene.search.TermQuery;
|
||||||
|
import org.apache.lucene.search.TopDocs;
|
||||||
import org.elasticsearch.action.DocWriteResponse;
|
import org.elasticsearch.action.DocWriteResponse;
|
||||||
import org.elasticsearch.action.index.IndexRequest;
|
import org.elasticsearch.action.index.IndexRequest;
|
||||||
import org.elasticsearch.action.index.IndexResponse;
|
import org.elasticsearch.action.index.IndexResponse;
|
||||||
|
@ -37,6 +40,7 @@ import org.elasticsearch.indices.recovery.RecoveryTarget;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.Map;
|
||||||
import java.util.concurrent.CountDownLatch;
|
import java.util.concurrent.CountDownLatch;
|
||||||
import java.util.concurrent.Future;
|
import java.util.concurrent.Future;
|
||||||
|
|
||||||
|
@ -152,4 +156,28 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testConflictingOpsOnReplica() throws Exception {
|
||||||
|
Map<String, String> mappings =
|
||||||
|
Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
|
||||||
|
try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(2, mappings))) {
|
||||||
|
shards.startAll();
|
||||||
|
IndexShard replica1 = shards.getReplicas().get(0);
|
||||||
|
logger.info("--> isolated replica " + replica1.routingEntry());
|
||||||
|
shards.removeReplica(replica1);
|
||||||
|
IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON);
|
||||||
|
shards.index(indexRequest);
|
||||||
|
shards.addReplica(replica1);
|
||||||
|
logger.info("--> promoting replica to primary " + replica1.routingEntry());
|
||||||
|
shards.promoteReplicaToPrimary(replica1);
|
||||||
|
indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"2\"}", XContentType.JSON);
|
||||||
|
shards.index(indexRequest);
|
||||||
|
shards.refresh("test");
|
||||||
|
for (IndexShard shard : shards) {
|
||||||
|
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
|
||||||
|
TopDocs search = searcher.searcher().search(new TermQuery(new Term("f", "2")), 10);
|
||||||
|
assertEquals("shard " + shard.routingEntry() + " misses new version", 1, search.totalHits);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -107,7 +107,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
|
||||||
Mapping mappingUpdate) {
|
Mapping mappingUpdate) {
|
||||||
Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
|
Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
|
||||||
Field versionField = new NumericDocValuesField("_version", 0);
|
Field versionField = new NumericDocValuesField("_version", 0);
|
||||||
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
|
SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
|
||||||
document.add(uidField);
|
document.add(uidField);
|
||||||
document.add(versionField);
|
document.add(versionField);
|
||||||
document.add(seqID.seqNo);
|
document.add(seqID.seqNo);
|
||||||
|
|
|
@ -551,7 +551,7 @@ public class IndexShardTests extends IndexShardTestCase {
|
||||||
ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
|
ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
|
||||||
Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
|
Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
|
||||||
Field versionField = new NumericDocValuesField("_version", 0);
|
Field versionField = new NumericDocValuesField("_version", 0);
|
||||||
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
|
SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
|
||||||
document.add(uidField);
|
document.add(uidField);
|
||||||
document.add(versionField);
|
document.add(versionField);
|
||||||
document.add(seqID.seqNo);
|
document.add(seqID.seqNo);
|
||||||
|
|
|
@ -332,7 +332,7 @@ public class RefreshListenersTests extends ESTestCase {
|
||||||
document.add(new TextField("test", testFieldValue, Field.Store.YES));
|
document.add(new TextField("test", testFieldValue, Field.Store.YES));
|
||||||
Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
|
Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
|
||||||
Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
|
Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
|
||||||
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
|
SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
|
||||||
document.add(uidField);
|
document.add(uidField);
|
||||||
document.add(versionField);
|
document.add(versionField);
|
||||||
document.add(seqID.seqNo);
|
document.add(seqID.seqNo);
|
||||||
|
|
|
@ -36,8 +36,10 @@ import org.apache.lucene.util.LineFileDocs;
|
||||||
import org.apache.lucene.util.LuceneTestCase;
|
import org.apache.lucene.util.LuceneTestCase;
|
||||||
import org.elasticsearch.Version;
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||||
|
import org.elasticsearch.common.Randomness;
|
||||||
import org.elasticsearch.common.bytes.BytesArray;
|
import org.elasticsearch.common.bytes.BytesArray;
|
||||||
import org.elasticsearch.common.bytes.BytesReference;
|
import org.elasticsearch.common.bytes.BytesReference;
|
||||||
|
import org.elasticsearch.common.collect.Tuple;
|
||||||
import org.elasticsearch.common.io.FileSystemUtils;
|
import org.elasticsearch.common.io.FileSystemUtils;
|
||||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
|
@ -84,6 +86,7 @@ import java.util.ArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
import java.util.HashSet;
|
import java.util.HashSet;
|
||||||
import java.util.Iterator;
|
import java.util.Iterator;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
@ -101,6 +104,7 @@ import java.util.concurrent.atomic.AtomicInteger;
|
||||||
import java.util.concurrent.atomic.AtomicLong;
|
import java.util.concurrent.atomic.AtomicLong;
|
||||||
import java.util.concurrent.atomic.AtomicReference;
|
import java.util.concurrent.atomic.AtomicReference;
|
||||||
import java.util.stream.Collectors;
|
import java.util.stream.Collectors;
|
||||||
|
import java.util.stream.LongStream;
|
||||||
|
|
||||||
import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE;
|
import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE;
|
||||||
import static org.hamcrest.Matchers.containsString;
|
import static org.hamcrest.Matchers.containsString;
|
||||||
|
@ -124,7 +128,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
|
|
||||||
if (translog.isOpen()) {
|
if (translog.isOpen()) {
|
||||||
if (translog.currentFileGeneration() > 1) {
|
if (translog.currentFileGeneration() > 1) {
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
assertFileDeleted(translog, translog.currentFileGeneration() - 1);
|
assertFileDeleted(translog, translog.currentFileGeneration() - 1);
|
||||||
}
|
}
|
||||||
translog.close();
|
translog.close();
|
||||||
|
@ -287,7 +291,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
|
assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
|
||||||
assertThat(snapshot.totalOperations(), equalTo(ops.size()));
|
assertThat(snapshot.totalOperations(), equalTo(ops.size()));
|
||||||
|
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
snapshot = translog.newSnapshot();
|
snapshot = translog.newSnapshot();
|
||||||
assertThat(snapshot, SnapshotMatchers.size(0));
|
assertThat(snapshot, SnapshotMatchers.size(0));
|
||||||
assertThat(snapshot.totalOperations(), equalTo(0));
|
assertThat(snapshot.totalOperations(), equalTo(0));
|
||||||
|
@ -373,7 +377,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
{
|
{
|
||||||
final TranslogStats stats = stats();
|
final TranslogStats stats = stats();
|
||||||
assertThat(stats.estimatedNumberOfOperations(), equalTo(0L));
|
assertThat(stats.estimatedNumberOfOperations(), equalTo(0L));
|
||||||
|
@ -446,7 +450,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
|
|
||||||
try (Translog.View view = translog.newView()) {
|
try (Translog.View view = translog.newView()) {
|
||||||
Translog.Snapshot snapshot2 = translog.newSnapshot();
|
Translog.Snapshot snapshot2 = translog.newSnapshot();
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
assertThat(snapshot2, SnapshotMatchers.equalsTo(ops));
|
assertThat(snapshot2, SnapshotMatchers.equalsTo(ops));
|
||||||
assertThat(snapshot2.totalOperations(), equalTo(ops.size()));
|
assertThat(snapshot2.totalOperations(), equalTo(ops.size()));
|
||||||
}
|
}
|
||||||
|
@ -821,7 +825,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
}
|
}
|
||||||
} finally {
|
} finally {
|
||||||
run.set(false);
|
run.set(false);
|
||||||
|
@ -858,7 +862,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
assertTrue("we only synced a previous operation yet", translog.syncNeeded());
|
assertTrue("we only synced a previous operation yet", translog.syncNeeded());
|
||||||
}
|
}
|
||||||
if (rarely()) {
|
if (rarely()) {
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now
|
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now
|
||||||
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
|
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
|
||||||
}
|
}
|
||||||
|
@ -878,7 +882,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
ArrayList<Location> locations = new ArrayList<>();
|
ArrayList<Location> locations = new ArrayList<>();
|
||||||
for (int op = 0; op < translogOperations; op++) {
|
for (int op = 0; op < translogOperations; op++) {
|
||||||
if (rarely()) {
|
if (rarely()) {
|
||||||
translog.commit(); // do this first so that there is at least one pending tlog entry
|
translog.commit(translog.currentFileGeneration()); // do this first so that there is at least one pending tlog entry
|
||||||
}
|
}
|
||||||
final Translog.Location location = translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
|
final Translog.Location location = translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
|
||||||
locations.add(location);
|
locations.add(location);
|
||||||
|
@ -889,7 +893,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream()));
|
assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream()));
|
||||||
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
|
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
|
||||||
} else if (rarely()) {
|
} else if (rarely()) {
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now
|
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now
|
||||||
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
|
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
|
||||||
} else {
|
} else {
|
||||||
|
@ -909,7 +913,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
for (int op = 0; op < translogOperations; op++) {
|
for (int op = 0; op < translogOperations; op++) {
|
||||||
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
|
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
|
||||||
if (rarely() && translogOperations > op + 1) {
|
if (rarely() && translogOperations > op + 1) {
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Collections.shuffle(locations, random());
|
Collections.shuffle(locations, random());
|
||||||
|
@ -1074,7 +1078,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
|
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
|
||||||
final boolean commit = commitOften ? frequently() : rarely();
|
final boolean commit = commitOften ? frequently() : rarely();
|
||||||
if (commit && op < translogOperations - 1) {
|
if (commit && op < translogOperations - 1) {
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
minUncommittedOp = op + 1;
|
minUncommittedOp = op + 1;
|
||||||
translogGeneration = translog.getGeneration();
|
translogGeneration = translog.getGeneration();
|
||||||
}
|
}
|
||||||
|
@ -1300,7 +1304,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
for (int op = 0; op < translogOperations; op++) {
|
for (int op = 0; op < translogOperations; op++) {
|
||||||
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
|
locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
|
||||||
if (randomBoolean()) {
|
if (randomBoolean()) {
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
firstUncommitted = op + 1;
|
firstUncommitted = op + 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1483,7 +1487,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
translog.commit();
|
translog.commit(translog.currentFileGeneration());
|
||||||
fail("already closed");
|
fail("already closed");
|
||||||
} catch (AlreadyClosedException ex) {
|
} catch (AlreadyClosedException ex) {
|
||||||
assertNotNull(ex.getCause());
|
assertNotNull(ex.getCause());
|
||||||
|
@ -1930,7 +1934,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
if (randomBoolean()) {
|
if (randomBoolean()) {
|
||||||
failableTLog.prepareCommit();
|
failableTLog.prepareCommit();
|
||||||
}
|
}
|
||||||
failableTLog.commit();
|
failableTLog.commit(translog.currentFileGeneration());
|
||||||
syncedDocs.clear();
|
syncedDocs.clear();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2048,7 +2052,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
|
|
||||||
public void testTranslogOpSerialization() throws Exception {
|
public void testTranslogOpSerialization() throws Exception {
|
||||||
BytesReference B_1 = new BytesArray(new byte[]{1});
|
BytesReference B_1 = new BytesArray(new byte[]{1});
|
||||||
SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
|
SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
|
||||||
assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers";
|
assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers";
|
||||||
long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong();
|
long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong();
|
||||||
long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong();
|
long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong();
|
||||||
|
@ -2110,12 +2114,13 @@ public class TranslogTests extends ESTestCase {
|
||||||
for (int i = 0; i <= rolls; i++) {
|
for (int i = 0; i <= rolls; i++) {
|
||||||
assertFileIsPresent(translog, generation + i);
|
assertFileIsPresent(translog, generation + i);
|
||||||
}
|
}
|
||||||
translog.commit();
|
translog.commit(generation + rolls);
|
||||||
assertThat(translog.currentFileGeneration(), equalTo(generation + rolls + 1));
|
assertThat(translog.currentFileGeneration(), equalTo(generation + rolls + 1));
|
||||||
assertThat(translog.totalOperations(), equalTo(0));
|
assertThat(translog.totalOperations(), equalTo(0));
|
||||||
for (int i = 0; i <= rolls; i++) {
|
for (int i = 0; i < rolls; i++) {
|
||||||
assertFileDeleted(translog, generation + i);
|
assertFileDeleted(translog, generation + i);
|
||||||
}
|
}
|
||||||
|
assertFileIsPresent(translog, generation + rolls);
|
||||||
assertFileIsPresent(translog, generation + rolls + 1);
|
assertFileIsPresent(translog, generation + rolls + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2167,7 +2172,7 @@ public class TranslogTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
translog.commit();
|
translog.commit(generation + rollsBefore + 1);
|
||||||
|
|
||||||
for (int i = 0; i <= rollsBefore; i++) {
|
for (int i = 0; i <= rollsBefore; i++) {
|
||||||
assertFileDeleted(translog, generation + i);
|
assertFileDeleted(translog, generation + i);
|
||||||
|
@ -2178,4 +2183,130 @@ public class TranslogTests extends ESTestCase {
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testMinGenerationForSeqNo() throws IOException {
|
||||||
|
final long initialGeneration = translog.getGeneration().translogFileGeneration;
|
||||||
|
final int operations = randomIntBetween(1, 4096);
|
||||||
|
final List<Long> shuffledSeqNos = LongStream.range(0, operations).boxed().collect(Collectors.toList());
|
||||||
|
Randomness.shuffle(shuffledSeqNos);
|
||||||
|
final List<Tuple<Long, Long>> seqNos = new ArrayList<>();
|
||||||
|
final Map<Long, Long> terms = new HashMap<>();
|
||||||
|
for (final Long seqNo : shuffledSeqNos) {
|
||||||
|
seqNos.add(Tuple.tuple(seqNo, terms.computeIfAbsent(seqNo, k -> 0L)));
|
||||||
|
Long repeatingTermSeqNo = randomFrom(seqNos.stream().map(Tuple::v1).collect(Collectors.toList()));
|
||||||
|
seqNos.add(Tuple.tuple(repeatingTermSeqNo, terms.computeIfPresent(repeatingTermSeqNo, (s, t) -> t + 1)));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (final Tuple<Long, Long> tuple : seqNos) {
|
||||||
|
translog.add(new Translog.NoOp(tuple.v1(), tuple.v2(), "test"));
|
||||||
|
if (rarely()) {
|
||||||
|
translog.rollGeneration();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Map<Long, Set<Tuple<Long, Long>>> generations = new HashMap<>();
|
||||||
|
|
||||||
|
translog.commit(initialGeneration);
|
||||||
|
for (long seqNo = 0; seqNo < operations; seqNo++) {
|
||||||
|
final Set<Tuple<Long, Long>> seenSeqNos = new HashSet<>();
|
||||||
|
final long generation = translog.getMinGenerationForSeqNo(seqNo).translogFileGeneration;
|
||||||
|
for (long g = generation; g < translog.currentFileGeneration(); g++) {
|
||||||
|
if (!generations.containsKey(g)) {
|
||||||
|
final Set<Tuple<Long, Long>> generationSeenSeqNos = new HashSet<>();
|
||||||
|
final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.getCommitCheckpointFileName(g)));
|
||||||
|
try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(g)), checkpoint)) {
|
||||||
|
Translog.Snapshot snapshot = reader.newSnapshot();
|
||||||
|
Translog.Operation operation;
|
||||||
|
while ((operation = snapshot.next()) != null) {
|
||||||
|
generationSeenSeqNos.add(Tuple.tuple(operation.seqNo(), operation.primaryTerm()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
generations.put(g, generationSeenSeqNos);
|
||||||
|
|
||||||
|
}
|
||||||
|
seenSeqNos.addAll(generations.get(g));
|
||||||
|
}
|
||||||
|
|
||||||
|
final long seqNoLowerBound = seqNo;
|
||||||
|
final Set<Tuple<Long, Long>> expected = seqNos.stream().filter(t -> t.v1() >= seqNoLowerBound).collect(Collectors.toSet());
|
||||||
|
seenSeqNos.retainAll(expected);
|
||||||
|
assertThat(seenSeqNos, equalTo(expected));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSimpleCommit() throws IOException {
|
||||||
|
final int operations = randomIntBetween(1, 4096);
|
||||||
|
long seqNo = 0;
|
||||||
|
for (int i = 0; i < operations; i++) {
|
||||||
|
translog.add(new Translog.NoOp(seqNo++, 0, "test'"));
|
||||||
|
if (rarely()) {
|
||||||
|
translog.rollGeneration();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
final long generation =
|
||||||
|
randomIntBetween(1, Math.toIntExact(translog.currentFileGeneration()));
|
||||||
|
translog.commit(generation);
|
||||||
|
for (long i = 0; i < generation; i++) {
|
||||||
|
assertFileDeleted(translog, i);
|
||||||
|
}
|
||||||
|
for (long i = generation; i <= translog.currentFileGeneration(); i++) {
|
||||||
|
assertFileIsPresent(translog, i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testPrepareCommitAndCommit() throws IOException {
|
||||||
|
final int operations = randomIntBetween(1, 4096);
|
||||||
|
long seqNo = 0;
|
||||||
|
long last = -1;
|
||||||
|
for (int i = 0; i < operations; i++) {
|
||||||
|
translog.add(new Translog.NoOp(seqNo++, 0, "test"));
|
||||||
|
if (rarely()) {
|
||||||
|
final long generation = translog.currentFileGeneration();
|
||||||
|
translog.prepareCommit();
|
||||||
|
if (rarely()) {
|
||||||
|
// simulate generation filling up and rolling between preparing the commit and committing
|
||||||
|
translog.rollGeneration();
|
||||||
|
}
|
||||||
|
final int committedGeneration = randomIntBetween(Math.max(1, Math.toIntExact(last)), Math.toIntExact(generation));
|
||||||
|
translog.commit(committedGeneration);
|
||||||
|
last = committedGeneration;
|
||||||
|
for (long g = 0; g < committedGeneration; g++) {
|
||||||
|
assertFileDeleted(translog, g);
|
||||||
|
}
|
||||||
|
for (long g = generation; g < translog.currentFileGeneration(); g++) {
|
||||||
|
assertFileIsPresent(translog, g);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testCommitWithOpenView() throws IOException {
|
||||||
|
final int operations = randomIntBetween(1, 4096);
|
||||||
|
long seqNo = 0;
|
||||||
|
long lastCommittedGeneration = -1;
|
||||||
|
for (int i = 0; i < operations; i++) {
|
||||||
|
translog.add(new Translog.NoOp(seqNo++, 0, "test"));
|
||||||
|
if (rarely()) {
|
||||||
|
try (Translog.View ignored = translog.newView()) {
|
||||||
|
final long viewGeneration = lastCommittedGeneration;
|
||||||
|
translog.prepareCommit();
|
||||||
|
final long committedGeneration = randomIntBetween(
|
||||||
|
Math.max(1, Math.toIntExact(lastCommittedGeneration)),
|
||||||
|
Math.toIntExact(translog.currentFileGeneration()));
|
||||||
|
translog.commit(committedGeneration);
|
||||||
|
lastCommittedGeneration = committedGeneration;
|
||||||
|
// with an open view, committing should preserve generations back to the older of the view generation and the committed generation
|
||||||
|
for (long g = 1; g < Math.min(lastCommittedGeneration, viewGeneration); g++) {
|
||||||
|
assertFileDeleted(translog, g);
|
||||||
|
}
|
||||||
|
// the view generation could be -1 if no commit has been performed
|
||||||
|
final long max = Math.max(1, Math.min(lastCommittedGeneration, viewGeneration));
|
||||||
|
for (long g = max; g < translog.currentFileGeneration(); g++) {
|
||||||
|
assertFileIsPresent(translog, g);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -91,7 +91,11 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
|
||||||
RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId());
|
RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId());
|
||||||
if (localRoutingNode != null) {
|
if (localRoutingNode != null) {
|
||||||
if (enableRandomFailures == false) {
|
if (enableRandomFailures == false) {
|
||||||
assertThat("failed shard cache should be empty", failedShardsCache.values(), empty());
|
// initializing a shard should succeed when enableRandomFailures is disabled
|
||||||
|
// active shards can be failed if state persistence was disabled in an earlier CS update
|
||||||
|
if (failedShardsCache.values().stream().anyMatch(ShardRouting::initializing)) {
|
||||||
|
fail("failed shard cache should not contain initializing shard routing: " + failedShardsCache.values());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// check that all shards in local routing nodes have been allocated
|
// check that all shards in local routing nodes have been allocated
|
||||||
for (ShardRouting shardRouting : localRoutingNode) {
|
for (ShardRouting shardRouting : localRoutingNode) {
|
||||||
|
@ -100,16 +104,23 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
|
||||||
|
|
||||||
MockIndexShard shard = indicesService.getShardOrNull(shardRouting.shardId());
|
MockIndexShard shard = indicesService.getShardOrNull(shardRouting.shardId());
|
||||||
ShardRouting failedShard = failedShardsCache.get(shardRouting.shardId());
|
ShardRouting failedShard = failedShardsCache.get(shardRouting.shardId());
|
||||||
if (enableRandomFailures) {
|
|
||||||
if (shard == null && failedShard == null) {
|
if (state.blocks().disableStatePersistence()) {
|
||||||
fail("Shard with id " + shardRouting + " expected but missing in indicesService and failedShardsCache");
|
if (shard != null) {
|
||||||
|
fail("Shard with id " + shardRouting + " should be removed from indicesService due to disabled state persistence");
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
if (failedShard != null && failedShard.isSameAllocation(shardRouting) == false) {
|
if (failedShard != null && failedShard.isSameAllocation(shardRouting) == false) {
|
||||||
fail("Shard cache has not been properly cleaned for " + failedShard);
|
fail("Shard cache has not been properly cleaned for " + failedShard);
|
||||||
}
|
}
|
||||||
} else {
|
if (shard == null && failedShard == null) {
|
||||||
if (shard == null) {
|
// shard must either be there or there must be a failure
|
||||||
fail("Shard with id " + shardRouting + " expected but missing in indicesService");
|
fail("Shard with id " + shardRouting + " expected but missing in indicesService and failedShardsCache");
|
||||||
|
}
|
||||||
|
if (enableRandomFailures == false) {
|
||||||
|
if (shard == null && shardRouting.initializing() && failedShard == shardRouting) {
|
||||||
|
// initializing a shard should succeed when enableRandomFailures is disabled
|
||||||
|
fail("Shard with id " + shardRouting + " expected but missing in indicesService " + failedShard);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -132,16 +143,22 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
|
||||||
.map(r -> r.allocationId().getId()).collect(Collectors.toSet());
|
.map(r -> r.allocationId().getId()).collect(Collectors.toSet());
|
||||||
Set<String> initializingIds = shardRoutingTable.getAllInitializingShards().stream()
|
Set<String> initializingIds = shardRoutingTable.getAllInitializingShards().stream()
|
||||||
.map(r -> r.allocationId().getId()).collect(Collectors.toSet());
|
.map(r -> r.allocationId().getId()).collect(Collectors.toSet());
|
||||||
assertThat(shard.routingEntry() + " isn't updated with active aIDs", shard.activeAllocationIds, equalTo(activeIds));
|
assertThat(shard.routingEntry() + " isn't updated with active aIDs", shard.activeAllocationIds,
|
||||||
|
equalTo(activeIds));
|
||||||
assertThat(shard.routingEntry() + " isn't updated with init aIDs", shard.initializingAllocationIds,
|
assertThat(shard.routingEntry() + " isn't updated with init aIDs", shard.initializingAllocationIds,
|
||||||
equalTo(initializingIds));
|
equalTo(initializingIds));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// all other shards / indices have been cleaned up
|
// all other shards / indices have been cleaned up
|
||||||
for (AllocatedIndex<? extends Shard> indexService : indicesService) {
|
for (AllocatedIndex<? extends Shard> indexService : indicesService) {
|
||||||
|
if (state.blocks().disableStatePersistence()) {
|
||||||
|
fail("Index service " + indexService.index() + " should be removed from indicesService due to disabled state persistence");
|
||||||
|
}
|
||||||
|
|
||||||
assertTrue(state.metaData().getIndexSafe(indexService.index()) != null);
|
assertTrue(state.metaData().getIndexSafe(indexService.index()) != null);
|
||||||
|
|
||||||
boolean shardsFound = false;
|
boolean shardsFound = false;
|
||||||
|
@ -158,13 +175,9 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
|
||||||
}
|
}
|
||||||
|
|
||||||
if (shardsFound == false) {
|
if (shardsFound == false) {
|
||||||
if (enableRandomFailures) {
|
|
||||||
// check if we have shards of that index in failedShardsCache
|
// check if we have shards of that index in failedShardsCache
|
||||||
// if yes, we might not have cleaned the index as failedShardsCache can be populated by another thread
|
// if yes, we might not have cleaned the index as failedShardsCache can be populated by another thread
|
||||||
assertFalse(failedShardsCache.keySet().stream().noneMatch(shardId -> shardId.getIndex().equals(indexService.index())));
|
assertFalse(failedShardsCache.keySet().stream().noneMatch(shardId -> shardId.getIndex().equals(indexService.index())));
|
||||||
} else {
|
|
||||||
fail("index service for index " + indexService.index() + " has no shards");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -32,6 +32,8 @@ import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -44,6 +46,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
 import org.elasticsearch.repositories.RepositoriesService;
@@ -231,6 +234,23 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
     public ClusterState randomlyUpdateClusterState(ClusterState state,
                                                    Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
                                                    Supplier<MockIndicesService> indicesServiceSupplier) {
+        // randomly remove no_master blocks
+        if (randomBoolean() && state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)) {
+            state = ClusterState.builder(state).blocks(
+                ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)).build();
+        }
+
+        // randomly add no_master blocks
+        if (rarely() && state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID) == false) {
+            ClusterBlock block = randomBoolean() ? DiscoverySettings.NO_MASTER_BLOCK_ALL : DiscoverySettings.NO_MASTER_BLOCK_WRITES;
+            state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build();
+        }
+
+        // if no_master block is in place, make no other cluster state changes
+        if (state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)) {
+            return state;
+        }
+
         // randomly create new indices (until we have 200 max)
         for (int i = 0; i < randomInt(5); i++) {
             if (state.metaData().indices().size() > 200) {
@@ -204,7 +204,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         document.add(new TextField("test", "test", Field.Store.YES));
         final Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
         final Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
-        final SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
+        final SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
         document.add(uidField);
         document.add(versionField);
         document.add(seqID.seqNo);
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.node.internal;
+package org.elasticsearch.node;

 import org.elasticsearch.cli.MockTerminal;
 import org.elasticsearch.cluster.ClusterName;
@@ -182,18 +182,11 @@ public class InternalSettingsPreparerTests extends ESTestCase {
         assertEquals("secret", fakeSetting.get(env.settings()).toString());
     }

-    public void testDefaultProperties() throws Exception {
+    public void testDefaultPropertiesDoNothing() throws Exception {
         Map<String, String> props = Collections.singletonMap("default.setting", "foo");
         Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props);
-        assertEquals("foo", env.settings().get("setting"));
+        assertEquals("foo", env.settings().get("default.setting"));
+        assertNull(env.settings().get("setting"));
     }

-    public void testDefaultPropertiesOverride() throws Exception {
-        Path configDir = homeDir.resolve("config");
-        Files.createDirectories(configDir);
-        Files.write(configDir.resolve("elasticsearch.yml"), Collections.singletonList("setting: bar"), StandardCharsets.UTF_8);
-        Map<String, String> props = Collections.singletonMap("default.setting", "foo");
-        Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props);
-        assertEquals("bar", env.settings().get("setting"));
-    }
 }
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.sampler;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregationTestCase;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+
+import java.util.List;
+import java.util.Map;
+
+public class InternalSamplerTests extends InternalSingleBucketAggregationTestCase<InternalSampler> {
+    @Override
+    protected InternalSampler createTestInstance(String name, long docCount, InternalAggregations aggregations,
+            List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
+        return new InternalSampler(name, docCount, aggregations, pipelineAggregators, metaData);
+    }
+
+    @Override
+    protected void extraAssertReduced(InternalSampler reduced, List<InternalSampler> inputs) {
+        // Nothing extra to assert
+    }
+
+    @Override
+    protected Writeable.Reader<InternalSampler> instanceReader() {
+        return InternalSampler::new;
+    }
+}
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics.geocentroid;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.LatLonDocValuesField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.index.mapper.GeoPointFieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.search.aggregations.AggregatorTestCase;
+import org.elasticsearch.test.geo.RandomGeoGenerator;
+
+import java.io.IOException;
+
+public class GeoCentroidAggregatorTests extends AggregatorTestCase {
+
+    private static final double GEOHASH_TOLERANCE = 1E-4D;
+
+    public void testEmpty() throws Exception {
+        try (Directory dir = newDirectory();
+                RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            GeoCentroidAggregationBuilder aggBuilder = new GeoCentroidAggregationBuilder("my_agg")
+                    .field("field");
+
+            MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
+            fieldType.setHasDocValues(true);
+            fieldType.setName("field");
+            try (IndexReader reader = w.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+                InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertNull(result.centroid());
+            }
+        }
+    }
+
+    public void testUnmapped() throws Exception {
+        try (Directory dir = newDirectory();
+                RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            GeoCentroidAggregationBuilder aggBuilder = new GeoCentroidAggregationBuilder("my_agg")
+                    .field("another_field");
+
+            Document document = new Document();
+            document.add(new LatLonDocValuesField("field", 10, 10));
+            w.addDocument(document);
+            try (IndexReader reader = w.getReader()) {
+                IndexSearcher searcher = new IndexSearcher(reader);
+
+                MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
+                fieldType.setHasDocValues(true);
+                fieldType.setName("another_field");
+                InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertNull(result.centroid());
+
+                fieldType = new GeoPointFieldMapper.GeoPointFieldType();
+                fieldType.setHasDocValues(true);
+                fieldType.setName("field");
+                result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+                assertNull(result.centroid());
+            }
+        }
+    }
+
+    public void testSingleValuedField() throws Exception {
+        int numDocs = scaledRandomIntBetween(64, 256);
+        int numUniqueGeoPoints = randomIntBetween(1, numDocs);
+        try (Directory dir = newDirectory();
+                RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            GeoPoint expectedCentroid = new GeoPoint(0, 0);
+            GeoPoint[] singleValues = new GeoPoint[numUniqueGeoPoints];
+            for (int i = 0 ; i < singleValues.length; i++) {
+                singleValues[i] = RandomGeoGenerator.randomPoint(random());
+            }
+            GeoPoint singleVal;
+            for (int i = 0; i < numDocs; i++) {
+                singleVal = singleValues[i % numUniqueGeoPoints];
+                Document document = new Document();
+                document.add(new LatLonDocValuesField("field", singleVal.getLat(), singleVal.getLon()));
+                w.addDocument(document);
+                expectedCentroid = expectedCentroid.reset(expectedCentroid.lat() + (singleVal.lat() - expectedCentroid.lat()) / (i + 1),
+                        expectedCentroid.lon() + (singleVal.lon() - expectedCentroid.lon()) / (i + 1));
+            }
+            assertCentroid(w, expectedCentroid);
+        }
+    }
+
+    public void testMultiValuedField() throws Exception {
+        int numDocs = scaledRandomIntBetween(64, 256);
+        int numUniqueGeoPoints = randomIntBetween(1, numDocs);
+        try (Directory dir = newDirectory();
+                RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+
+            GeoPoint expectedCentroid = new GeoPoint(0, 0);
+            GeoPoint[] multiValues = new GeoPoint[numUniqueGeoPoints];
+            for (int i = 0 ; i < multiValues.length; i++) {
+                multiValues[i] = RandomGeoGenerator.randomPoint(random());
+            }
+            final GeoPoint[] multiVal = new GeoPoint[2];
+            for (int i = 0; i < numDocs; i++) {
+                multiVal[0] = multiValues[i % numUniqueGeoPoints];
+                multiVal[1] = multiValues[(i+1) % numUniqueGeoPoints];
+                Document document = new Document();
+                document.add(new LatLonDocValuesField("field", multiVal[0].getLat(), multiVal[0].getLon()));
+                document.add(new LatLonDocValuesField("field", multiVal[1].getLat(), multiVal[1].getLon()));
+                w.addDocument(document);
+                double newMVLat = (multiVal[0].lat() + multiVal[1].lat())/2d;
+                double newMVLon = (multiVal[0].lon() + multiVal[1].lon())/2d;
+                expectedCentroid = expectedCentroid.reset(expectedCentroid.lat() + (newMVLat - expectedCentroid.lat()) / (i + 1),
+                        expectedCentroid.lon() + (newMVLon - expectedCentroid.lon()) / (i + 1));
+            }
+            assertCentroid(w, expectedCentroid);
+        }
+    }
+
+    private void assertCentroid(RandomIndexWriter w, GeoPoint expectedCentroid) throws IOException {
+        MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType();
+        fieldType.setHasDocValues(true);
+        fieldType.setName("field");
+        GeoCentroidAggregationBuilder aggBuilder = new GeoCentroidAggregationBuilder("my_agg")
+                .field("field");
+        try (IndexReader reader = w.getReader()) {
+            IndexSearcher searcher = new IndexSearcher(reader);
+            InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType);
+
+            assertEquals("my_agg", result.getName());
+            GeoPoint centroid = result.centroid();
+            assertNotNull(centroid);
+            assertEquals(expectedCentroid.getLat(), centroid.getLat(), GEOHASH_TOLERANCE);
+            assertEquals(expectedCentroid.getLon(), centroid.getLon(), GEOHASH_TOLERANCE);
+        }
+    }
+
+}
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest;
+
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.search.aggregations.InternalAggregationTestCase;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+
+import java.util.List;
+import java.util.Map;
+
+public class InternalTDigestPercentilesTests extends InternalAggregationTestCase<InternalTDigestPercentiles> {
+
+    private final double[] percents = randomPercents();
+
+    @Override
+    protected InternalTDigestPercentiles createTestInstance(String name,
+                                                            List<PipelineAggregator> pipelineAggregators,
+                                                            Map<String, Object> metaData) {
+        boolean keyed = randomBoolean();
+        DocValueFormat format = DocValueFormat.RAW;
+        TDigestState state = new TDigestState(100);
+
+        int numValues = randomInt(10);
+        for (int i = 0; i < numValues; ++i) {
+            state.add(randomDouble() * 100);
+        }
+        assertEquals(state.centroidCount(), numValues);
+        return new InternalTDigestPercentiles(name, percents, state, keyed, format, pipelineAggregators, metaData);
+    }
+
+    @Override
+    protected void assertReduced(InternalTDigestPercentiles reduced, List<InternalTDigestPercentiles> inputs) {
+        final TDigestState expectedState = new TDigestState(reduced.state.compression());
+
+        long totalCount = 0;
+        for (InternalTDigestPercentiles input : inputs) {
+            assertArrayEquals(reduced.keys, input.keys, 0d);
+            expectedState.add(input.state);
+            totalCount += input.state.size();
+        }
+
+        assertEquals(totalCount, reduced.state.size());
+        if (totalCount > 0) {
+            assertEquals(expectedState.quantile(0), reduced.state.quantile(0), 0d);
+            assertEquals(expectedState.quantile(1), reduced.state.quantile(1), 0d);
+        }
+    }
+
+    @Override
+    protected Writeable.Reader<InternalTDigestPercentiles> instanceReader() {
+        return InternalTDigestPercentiles::new;
+    }
+
+    private static double[] randomPercents() {
+        List<Double> randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d);
+        double[] percents = new double[randomCdfValues.size()];
+        for (int i = 0; i < randomCdfValues.size(); i++) {
+            percents[i] = randomCdfValues.get(i);
+        }
+        return percents;
+    }
+}
@@ -155,7 +155,8 @@ public class InternalTopHitsTests extends InternalAggregationTestCase<InternalTo
         for (int i = 0; i < expectedHitsHits.length; i++) {
             expectedHitsHits[i] = allHits.get(i).v2();
         }
-        SearchHits expectedHits = new SearchHits(expectedHitsHits, totalHits, maxScore);
+        // Lucene's TopDocs initializes the maxScore to Float.NaN, if there is no maxScore
+        SearchHits expectedHits = new SearchHits(expectedHitsHits, totalHits, maxScore == Float.MIN_VALUE ? Float.NaN : maxScore);
         assertEqualsWithErrorMessageFromXContent(expectedHits, actualHits);
     }

@@ -1182,13 +1182,13 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
             client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get();
             fail();
         } catch (IllegalArgumentException e) {
-            assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured"));
+            assertThat(e.toString(), containsString("can't specify parent if no parent field has been configured"));
         }
         try {
             client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get();
             fail();
         } catch (IllegalArgumentException e) {
-            assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured"));
+            assertThat(e.toString(), containsString("can't specify parent if no parent field has been configured"));
         }

         refresh();
@@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.test.ESTestCase;
@@ -74,6 +75,42 @@ public class TaskResultTests extends ESTestCase {
         assertEquals(result, read);
     }

+    public void testTaskInfoIsForwardCompatible() throws IOException {
+        TaskInfo taskInfo = randomTaskInfo();
+        TaskInfo read;
+        try (XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.values()).xContent())) {
+            builder.startObject();
+            taskInfo.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+            try (XContentBuilder withExtraFields = addRandomUnknownFields(builder)) {
+                try (XContentBuilder shuffled = shuffleXContent(withExtraFields)) {
+                    try (XContentParser parser = createParser(shuffled)) {
+                        read = TaskInfo.PARSER.apply(parser, null);
+                    }
+                }
+            }
+        } catch (IOException e) {
+            throw new IOException("Error processing [" + taskInfo + "]", e);
+        }
+        assertEquals(taskInfo, read);
+    }
+
+    private XContentBuilder addRandomUnknownFields(XContentBuilder builder) throws IOException {
+        try (XContentParser parser = createParser(builder)) {
+            Map<String, Object> map = parser.mapOrdered();
+            int numberOfNewFields = randomIntBetween(2, 10);
+            for (int i = 0; i < numberOfNewFields; i++) {
+                if (randomBoolean()) {
+                    map.put("unknown_field" + i, randomAlphaOfLength(20));
+                } else {
+                    map.put("unknown_field" + i, Collections.singletonMap("inner", randomAlphaOfLength(20)));
+                }
+            }
+            XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType());
+            return xContentBuilder.map(map);
+        }
+    }
+
     private static TaskResult randomTaskResult() throws IOException {
         switch (between(0, 2)) {
             case 0:
@@ -325,7 +325,7 @@ public class SimpleVersioningIT extends ESIntegTestCase {
             ids = new IDSource() {
                 @Override
                 public String next() {
-                    return TestUtil.randomSimpleString(random);
+                    return TestUtil.randomSimpleString(random, 1, 10);
                 }
             };
             break;
@@ -335,7 +335,7 @@ public class SimpleVersioningIT extends ESIntegTestCase {
             ids = new IDSource() {
                 @Override
                 public String next() {
-                    return TestUtil.randomRealisticUnicodeString(random);
+                    return TestUtil.randomRealisticUnicodeString(random, 1, 20);
                 }
             };
             break;
@@ -127,13 +127,13 @@ configure(distributions) {
   apply plugin: 'elasticsearch.standalone-rest-test'
   apply plugin: 'elasticsearch.rest-test'
   project.integTest {
-    dependsOn project.assemble
     includePackaged project.name == 'integ-test-zip'
     if (project.name != 'integ-test-zip') {
       mustRunAfter ':distribution:integ-test-zip:integTest'
     }
   }
   project.integTestCluster {
+    dependsOn project.assemble
     distribution = project.name
   }

@@ -30,7 +30,7 @@ import org.elasticsearch.gradle.LoggedExec
 apply plugin: 'distribution'

 File checkoutDir = file("${buildDir}/bwc/checkout-5.x")
-task createClone(type: Exec) {
+task createClone(type: LoggedExec) {
   onlyIf { checkoutDir.exists() == false }
   commandLine = ['git', 'clone', rootDir, checkoutDir]
 }
@@ -32,10 +32,6 @@

 # The following variables can be overwritten in $DEFAULT

-# Run Elasticsearch as this user ID and group ID
-ES_USER=elasticsearch
-ES_GROUP=elasticsearch
-
 # Directory where the Elasticsearch binary distribution resides
 ES_HOME=/usr/share/$NAME

@@ -76,6 +72,12 @@ if [ ! -z "$CONF_FILE" ]; then
     exit 1
 fi

+# ES_USER and ES_GROUP settings were removed
+if [ ! -z "$ES_USER" ] || [ ! -z "$ES_GROUP" ]; then
+    echo "ES_USER and ES_GROUP settings are no longer supported. To run as a custom user/group use the archive distribution of Elasticsearch."
+    exit 1
+fi
+
 # Define other required variables
 PID_FILE="$PID_DIR/$NAME.pid"
 DAEMON=$ES_HOME/bin/elasticsearch
@@ -119,10 +121,10 @@ case "$1" in

     # Ensure that the PID_DIR exists (it is cleaned at OS startup time)
     if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
-        mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
+        mkdir -p "$PID_DIR" && chown elasticsearch:elasticsearch "$PID_DIR"
     fi
     if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then
-        touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
+        touch "$PID_FILE" && chown elasticsearch:elasticsearch "$PID_FILE"
     fi

     if [ -n "$MAX_OPEN_FILES" ]; then
@@ -138,7 +140,7 @@ case "$1" in
     fi

     # Start Daemon
-    start-stop-daemon -d $ES_HOME --start --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
+    start-stop-daemon -d $ES_HOME --start --user elasticsearch -c elasticsearch --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
     return=$?
     if [ $return -eq 0 ]; then
         i=0
@@ -162,7 +164,7 @@ case "$1" in

     if [ -f "$PID_FILE" ]; then
         start-stop-daemon --stop --pidfile "$PID_FILE" \
-            --user "$ES_USER" \
+            --user elasticsearch \
             --quiet \
             --retry forever/TERM/20 > /dev/null
         if [ $? -eq 1 ]; then
@@ -32,8 +32,6 @@ if [ -f /etc/rc.d/init.d/functions ]; then
 fi

 # Sets the default values for elasticsearch variables used in this script
-ES_USER="elasticsearch"
-ES_GROUP="elasticsearch"
 ES_HOME="/usr/share/elasticsearch"
 MAX_OPEN_FILES=65536
 MAX_MAP_COUNT=262144
@@ -55,6 +53,12 @@ if [ ! -z "$CONF_FILE" ]; then
     exit 1
 fi

+# ES_USER and ES_GROUP settings were removed
+if [ ! -z "$ES_USER" ] || [ ! -z "$ES_GROUP" ]; then
+    echo "ES_USER and ES_GROUP settings are no longer supported. To run as a custom user/group use the archive distribution of Elasticsearch."
+    exit 1
+fi
+
 exec="$ES_HOME/bin/elasticsearch"
 prog="elasticsearch"
 pidfile="$PID_DIR/${prog}.pid"
@@ -67,11 +71,6 @@ export ES_STARTUP_SLEEP_TIME

 lockfile=/var/lock/subsys/$prog

-# backwards compatibility for old config sysconfig files, pre 0.90.1
-if [ -n $USER ] && [ -z $ES_USER ] ; then
-    ES_USER=$USER
-fi
-
 if [ ! -x "$exec" ]; then
     echo "The elasticsearch startup script does not exists or it is not executable, tried: $exec"
     exit 1
@@ -106,16 +105,16 @@ start() {

     # Ensure that the PID_DIR exists (it is cleaned at OS startup time)
     if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
-        mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
+        mkdir -p "$PID_DIR" && chown elasticsearch:elasticsearch "$PID_DIR"
     fi
     if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then
-        touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"
+        touch "$pidfile" && chown elasticsearch:elasticsearch "$pidfile"
     fi

     cd $ES_HOME
     echo -n $"Starting $prog: "
     # if not running, start it up here, usually something like "daemon $exec"
-    daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR
+    daemon --user elasticsearch --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR
     retval=$?
     echo
     [ $retval -eq 0 ] && touch $lockfile
@@ -32,14 +32,6 @@

 # SysV init.d
 #
-# When executing the init script, this user will be used to run the elasticsearch service.
-# The default value is 'elasticsearch' and is declared in the init.d file.
-# Note that this setting is only used by the init script. If changed, make sure that
-# the configured user can read and write into the data, work, plugins and log directories.
-# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service
-#ES_USER=elasticsearch
-#ES_GROUP=elasticsearch
-
 # The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
 ES_STARTUP_SLEEP_TIME=5

@@ -10,10 +10,6 @@



-# Sets the default values for elasticsearch variables used in this script
-ES_USER="elasticsearch"
-ES_GROUP="elasticsearch"
-
 # Source the default env file
 ES_ENV_FILE="${path.env}"
 if [ -f "$ES_ENV_FILE" ]; then
@@ -110,9 +106,9 @@ elif [ "$RESTART_ON_UPGRADE" = "true" ]; then
     echo " OK"
 fi

-chown -R $ES_USER:$ES_GROUP /var/lib/elasticsearch
-chown -R $ES_USER:$ES_GROUP /var/log/elasticsearch
-chown -R root:$ES_GROUP /etc/elasticsearch
+chown -R elasticsearch:elasticsearch /var/lib/elasticsearch
+chown -R elasticsearch:elasticsearch /var/log/elasticsearch
+chown -R root:elasticsearch /etc/elasticsearch
 chmod 0750 /etc/elasticsearch
 chmod 0750 /etc/elasticsearch/scripts

@@ -46,8 +46,6 @@ case "$1" in
 esac

 # Sets the default values for elasticsearch variables used in this script
-ES_USER="elasticsearch"
-ES_GROUP="elasticsearch"
 LOG_DIR="/var/log/elasticsearch"
 PLUGINS_DIR="/usr/share/elasticsearch/plugins"
 PID_DIR="/var/run/elasticsearch"
@@ -95,12 +93,12 @@ if [ "$REMOVE_DIRS" = "true" ]; then
 fi

 if [ "$REMOVE_USER_AND_GROUP" = "true" ]; then
-    if id "$ES_USER" > /dev/null 2>&1 ; then
-        userdel "$ES_USER"
+    if id elasticsearch > /dev/null 2>&1 ; then
+        userdel elasticsearch
     fi

-    if getent group "$ES_GROUP" > /dev/null 2>&1 ; then
-        groupdel "$ES_GROUP"
+    if getent group elasticsearch > /dev/null 2>&1 ; then
+        groupdel elasticsearch
     fi
 fi

@@ -11,10 +11,6 @@



-# Sets the default values for elasticsearch variables used in this script
-ES_USER="elasticsearch"
-ES_GROUP="elasticsearch"
-
 # Source the default env file
 ES_ENV_FILE="${path.env}"
 if [ -f "$ES_ENV_FILE" ]; then
@@ -27,22 +23,22 @@ case "$1" in
     install|upgrade)

         # Create elasticsearch group if not existing
-        if ! getent group "$ES_GROUP" > /dev/null 2>&1 ; then
-            echo -n "Creating $ES_GROUP group..."
-            addgroup --quiet --system "$ES_GROUP"
+        if ! getent group elasticsearch > /dev/null 2>&1 ; then
+            echo -n "Creating elasticsearch group..."
+            addgroup --quiet --system elasticsearch
             echo " OK"
         fi

         # Create elasticsearch user if not existing
-        if ! id $ES_USER > /dev/null 2>&1 ; then
-            echo -n "Creating $ES_USER user..."
+        if ! id elasticsearch > /dev/null 2>&1 ; then
+            echo -n "Creating elasticsearch user..."
             adduser --quiet \
                     --system \
                     --no-create-home \
-                    --ingroup "$ES_GROUP" \
+                    --ingroup elasticsearch \
                     --disabled-password \
                     --shell /bin/false \
-                    "$ES_USER"
+                    elasticsearch
             echo " OK"
         fi
     ;;
@@ -53,21 +49,21 @@ case "$1" in
     1|2)

         # Create elasticsearch group if not existing
-        if ! getent group "$ES_GROUP" > /dev/null 2>&1 ; then
-            echo -n "Creating $ES_GROUP group..."
-            groupadd -r "$ES_GROUP"
+        if ! getent group elasticsearch > /dev/null 2>&1 ; then
+            echo -n "Creating elasticsearch group..."
+            groupadd -r elasticsearch
             echo " OK"
         fi

         # Create elasticsearch user if not existing
-        if ! id $ES_USER > /dev/null 2>&1 ; then
-            echo -n "Creating $ES_USER user..."
+        if ! id elasticsearch > /dev/null 2>&1 ; then
+            echo -n "Creating elasticsearch user..."
             useradd -r \
                     -M \
-                    --gid "$ES_GROUP" \
+                    --gid elasticsearch \
                     --shell /sbin/nologin \
                     --comment "elasticsearch user" \
-                    "$ES_USER"
+                    elasticsearch
             echo " OK"
         fi
     ;;
Some files were not shown because too many files have changed in this diff.