diff --git a/.dir-locals.el b/.dir-locals.el index 0728ce905dd..2fdca14f5dd 100644 --- a/.dir-locals.el +++ b/.dir-locals.el @@ -83,6 +83,6 @@ )) (c-basic-offset . 4) (c-comment-only-line-offset . (0 . 0)) - (fill-column . 100) - (fci-rule-column . 100) + (fill-column . 140) + (fci-rule-column . 140) (compile-command . "gradle compileTestJava")))) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index a1a01a8f231..43b53fd360f 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -362,7 +362,7 @@ quality boxes available in vagrant atlas: * sles-11 -We're missing the follow because our tests are very linux/bash centric: +We're missing the following because our tests are very linux/bash centric: * Windows Server 2012 @@ -418,8 +418,8 @@ sudo -E bats $BATS_TESTS/*rpm*.bats If you wanted to retest all the release artifacts on a single VM you could: ------------------------------------------------- -gradle vagrantSetUp -vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404 +gradle setupBats +cd qa/vagrant; vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404 cd $BATS_ARCHIVES sudo -E bats $BATS_TESTS/*.bats ------------------------------------------------- @@ -505,4 +505,3 @@ included as part of the build by checking the projects of the build. --------------------------------------------------------------------------- gradle projects --------------------------------------------------------------------------- - diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 4a884735259..c9965caa96e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -88,7 +88,7 @@ class ClusterConfiguration { if (seedNode == node) { return null } - ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') { + ant.waitfor(maxwait: '40', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') { resourceexists { file(file: seedNode.transportPortsFile.toString()) } @@ -136,6 +136,8 @@ class ClusterConfiguration { LinkedHashMap setupCommands = new LinkedHashMap<>() + List dependencies = new ArrayList<>() + @Input void systemProperty(String property, String value) { systemProperties.put(property, value) @@ -179,4 +181,10 @@ class ClusterConfiguration { } extraConfigFiles.put(path, sourceFile) } + + /** Add dependencies that must be run before the first task setting up the cluster. */ + @Input + void dependsOn(Object... deps) { + dependencies.addAll(deps) + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index e8061b02f3d..ea01ba6daf8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -57,7 +57,7 @@ class ClusterFormationTasks { // first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything // in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk // such that snapshots survive failures / test runs and there is no simple way today to fix that. 
- Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: runner.dependsOn.collect()) { + Task cleanup = project.tasks.create(name: "${prefix}#prepareCluster.cleanShared", type: Delete, dependsOn: config.dependencies) { delete sharedDir doLast { sharedDir.mkdirs() @@ -104,7 +104,7 @@ class ClusterFormationTasks { NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir) nodes.add(node) Task dependsOn = startTasks.empty ? cleanup : startTasks.get(0) - startTasks.add(configureNode(project, prefix, runner, dependsOn, node, distro, nodes.get(0))) + startTasks.add(configureNode(project, prefix, runner, dependsOn, node, config, distro, nodes.get(0))) } Task wait = configureWaitTask("${prefix}#wait", project, nodes, startTasks) @@ -148,7 +148,8 @@ class ClusterFormationTasks { * * @return a task which starts the node. */ - static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) { + static Task configureNode(Project project, String prefix, Task runner, Object dependsOn, NodeInfo node, ClusterConfiguration config, + Configuration distribution, NodeInfo seedNode) { // tasks are chained so their execution order is maintained Task setup = project.tasks.create(name: taskName(prefix, node, 'clean'), type: Delete, dependsOn: dependsOn) { @@ -161,7 +162,7 @@ class ClusterFormationTasks { setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) - setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration) + setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, distribution) setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode) setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node) setup = configureAddKeystoreSettingTasks(prefix, project, setup, node) @@ -205,6 +206,13 @@ class ClusterFormationTasks { // if we are running in the background, make sure to stop the server when the task completes runner.finalizedBy(stop) start.finalizedBy(stop) + for (Object dependency : config.dependencies) { + if (dependency instanceof Fixture) { + Task depStop = ((Fixture)dependency).stopTask + runner.finalizedBy(depStop) + start.finalizedBy(depStop) + } + } } return start } @@ -541,7 +549,7 @@ class ClusterFormationTasks { static Task configureWaitTask(String name, Project project, List nodes, List startTasks) { Task wait = project.tasks.create(name: name, dependsOn: startTasks) wait.doLast { - ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { + ant.waitfor(maxwait: '60', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: "failed${name}") { or { for (NodeInfo node : nodes) { resourceexists { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 66574f5f289..336ee207abf 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -477,10 +477,10 @@ class VagrantTestPlugin implements 
Plugin { } } } - packaging.doFirst { + platform.doFirst { project.gradle.addListener(platformReproListener) } - packaging.doLast { + platform.doLast { project.gradle.removeListener(platformReproListener) } if (project.extensions.esvagrant.boxes.contains(box)) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 9e4b7569c16..8db50535cc8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -308,7 +308,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase { assertEquals(RestStatus.BAD_REQUEST, exception.status()); assertEquals("Elasticsearch exception [type=illegal_argument_exception, " + - "reason=Can't specify parent if no parent field has been configured]", exception.getMessage()); + "reason=can't specify parent if no parent field has been configured]", exception.getMessage()); } { ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 0e448de686f..3269fbc9500 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -184,12 +184,7 @@ public class BulkProcessor implements Closeable { this.bulkActions = bulkActions; this.bulkSize = bulkSize.getBytes(); this.bulkRequest = new BulkRequest(); - - if (concurrentRequests == 0) { - this.bulkRequestHandler = BulkRequestHandler.syncHandler(consumer, backoffPolicy, listener, threadPool); - } else { - this.bulkRequestHandler = BulkRequestHandler.asyncHandler(consumer, backoffPolicy, listener, threadPool, concurrentRequests); - } + this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, threadPool, concurrentRequests); // Start period flushing task after everything is setup this.cancellableFlushTask = startFlushTask(flushInterval, threadPool); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index e1755bfb8bf..52a83b00483 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -27,155 +27,86 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.threadpool.ThreadPool; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; /** - * Abstracts the low-level details of bulk request handling + * Implements the low-level details of bulk request handling */ -abstract class BulkRequestHandler { - protected final Logger logger; - protected final BiConsumer> consumer; - protected final ThreadPool threadPool; +public final class BulkRequestHandler { + private final Logger logger; + private final BiConsumer> consumer; + private final BulkProcessor.Listener listener; + private final Semaphore semaphore; + private final Retry retry; + private final int concurrentRequests; - protected BulkRequestHandler(BiConsumer> consumer, ThreadPool threadPool) { + BulkRequestHandler(BiConsumer> 
consumer, BackoffPolicy backoffPolicy, + BulkProcessor.Listener listener, ThreadPool threadPool, + int concurrentRequests) { + assert concurrentRequests >= 0; this.logger = Loggers.getLogger(getClass()); this.consumer = consumer; - this.threadPool = threadPool; + this.listener = listener; + this.concurrentRequests = concurrentRequests; + this.retry = new Retry(EsRejectedExecutionException.class, backoffPolicy, threadPool); + this.semaphore = new Semaphore(concurrentRequests > 0 ? concurrentRequests : 1); } - - public abstract void execute(BulkRequest bulkRequest, long executionId); - - public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException; - - - public static BulkRequestHandler syncHandler(BiConsumer> consumer, - BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, - ThreadPool threadPool) { - return new SyncBulkRequestHandler(consumer, backoffPolicy, listener, threadPool); - } - - public static BulkRequestHandler asyncHandler(BiConsumer> consumer, - BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, - ThreadPool threadPool, int concurrentRequests) { - return new AsyncBulkRequestHandler(consumer, backoffPolicy, listener, threadPool, concurrentRequests); - } - - private static class SyncBulkRequestHandler extends BulkRequestHandler { - private final BulkProcessor.Listener listener; - private final BackoffPolicy backoffPolicy; - - SyncBulkRequestHandler(BiConsumer> consumer, BackoffPolicy backoffPolicy, - BulkProcessor.Listener listener, ThreadPool threadPool) { - super(consumer, threadPool); - this.backoffPolicy = backoffPolicy; - this.listener = listener; - } - - @Override - public void execute(BulkRequest bulkRequest, long executionId) { - boolean afterCalled = false; - try { - listener.beforeBulk(executionId, bulkRequest); - BulkResponse bulkResponse = Retry - .on(EsRejectedExecutionException.class) - .policy(backoffPolicy) - .using(threadPool) - .withSyncBackoff(consumer, bulkRequest, Settings.EMPTY); - afterCalled = true; - listener.afterBulk(executionId, bulkRequest, bulkResponse); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); - if (!afterCalled) { - listener.afterBulk(executionId, bulkRequest, e); + public void execute(BulkRequest bulkRequest, long executionId) { + Runnable toRelease = () -> {}; + boolean bulkRequestSetupSuccessful = false; + try { + listener.beforeBulk(executionId, bulkRequest); + semaphore.acquire(); + toRelease = semaphore::release; + CountDownLatch latch = new CountDownLatch(1); + retry.withBackoff(consumer, bulkRequest, new ActionListener() { + @Override + public void onResponse(BulkResponse response) { + try { + listener.afterBulk(executionId, bulkRequest, response); + } finally { + semaphore.release(); + latch.countDown(); + } } - } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); - if (!afterCalled) { - listener.afterBulk(executionId, bulkRequest, e); + + @Override + public void onFailure(Exception e) { + try { + listener.afterBulk(executionId, bulkRequest, e); + } finally { + semaphore.release(); + latch.countDown(); + } } + }, Settings.EMPTY); + bulkRequestSetupSuccessful = true; + if (concurrentRequests == 0) { + latch.await(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} 
has been cancelled.", executionId), e); + listener.afterBulk(executionId, bulkRequest, e); + } catch (Exception e) { + logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); + listener.afterBulk(executionId, bulkRequest, e); + } finally { + if (bulkRequestSetupSuccessful == false) { // if we fail on client.bulk() release the semaphore + toRelease.run(); } } + } - @Override - public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { - // we are "closed" immediately as there is no request in flight + boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { + semaphore.release(this.concurrentRequests); return true; } - } - - private static class AsyncBulkRequestHandler extends BulkRequestHandler { - private final BackoffPolicy backoffPolicy; - private final BulkProcessor.Listener listener; - private final Semaphore semaphore; - private final int concurrentRequests; - - private AsyncBulkRequestHandler(BiConsumer> consumer, BackoffPolicy backoffPolicy, - BulkProcessor.Listener listener, ThreadPool threadPool, - int concurrentRequests) { - super(consumer, threadPool); - this.backoffPolicy = backoffPolicy; - assert concurrentRequests > 0; - this.listener = listener; - this.concurrentRequests = concurrentRequests; - this.semaphore = new Semaphore(concurrentRequests); - } - - @Override - public void execute(BulkRequest bulkRequest, long executionId) { - boolean bulkRequestSetupSuccessful = false; - boolean acquired = false; - try { - listener.beforeBulk(executionId, bulkRequest); - semaphore.acquire(); - acquired = true; - Retry.on(EsRejectedExecutionException.class) - .policy(backoffPolicy) - .using(threadPool) - .withAsyncBackoff(consumer, bulkRequest, new ActionListener() { - @Override - public void onResponse(BulkResponse response) { - try { - listener.afterBulk(executionId, bulkRequest, response); - } finally { - semaphore.release(); - } - } - - @Override - public void onFailure(Exception e) { - try { - listener.afterBulk(executionId, bulkRequest, e); - } finally { - semaphore.release(); - } - } - }, Settings.EMPTY); - bulkRequestSetupSuccessful = true; - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); - listener.afterBulk(executionId, bulkRequest, e); - } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); - listener.afterBulk(executionId, bulkRequest, e); - } finally { - if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore - semaphore.release(); - } - } - } - - @Override - public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { - if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { - semaphore.release(this.concurrentRequests); - return true; - } - return false; - } + return false; } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java index e1ba1a6bee1..8a9ef245f36 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -25,20 +25,14 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.logging.Loggers; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; -import java.util.function.BiFunction; import java.util.function.Predicate; /** @@ -46,62 +40,42 @@ import java.util.function.Predicate; */ public class Retry { private final Class retryOnThrowable; + private final BackoffPolicy backoffPolicy; + private final ThreadPool threadPool; - private BackoffPolicy backoffPolicy; - private ThreadPool threadPool; - public static Retry on(Class retryOnThrowable) { - return new Retry(retryOnThrowable); - } - - Retry(Class retryOnThrowable) { + public Retry(Class retryOnThrowable, BackoffPolicy backoffPolicy, ThreadPool threadPool) { this.retryOnThrowable = retryOnThrowable; - } - - /** - * @param backoffPolicy The backoff policy that defines how long and how often to wait for retries. - */ - public Retry policy(BackoffPolicy backoffPolicy) { this.backoffPolicy = backoffPolicy; - return this; - } - - /** - * @param threadPool The threadPool that will be used to schedule retries. - */ - public Retry using(ThreadPool threadPool) { this.threadPool = threadPool; - return this; } /** - * Invokes #apply(BulkRequest, ActionListener). Backs off on the provided exception and delegates results to the - * provided listener. Retries will be attempted using the provided schedule function + * Invokes #accept(BulkRequest, ActionListener). Backs off on the provided exception and delegates results to the + * provided listener. Retries will be scheduled using the class's thread pool. * @param consumer The consumer to which apply the request and listener * @param bulkRequest The bulk request that should be executed. * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not * @param settings settings */ - public void withAsyncBackoff(BiConsumer> consumer, BulkRequest bulkRequest, ActionListener listener, Settings settings) { + public void withBackoff(BiConsumer> consumer, BulkRequest bulkRequest, ActionListener listener, Settings settings) { RetryHandler r = new RetryHandler(retryOnThrowable, backoffPolicy, consumer, listener, settings, threadPool); r.execute(bulkRequest); } /** - * Invokes #apply(BulkRequest, ActionListener). Backs off on the provided exception. Retries will be attempted using - * the provided schedule function. + * Invokes #accept(BulkRequest, ActionListener). Backs off on the provided exception. Retries will be scheduled using + * the class's thread pool. * * @param consumer The consumer to which apply the request and listener * @param bulkRequest The bulk request that should be executed. * @param settings settings - * @return the bulk response as returned by the client. - * @throws Exception Any exception thrown by the callable. + * @return a future representing the bulk response returned by the client. 
*/ - public BulkResponse withSyncBackoff(BiConsumer> consumer, BulkRequest bulkRequest, Settings settings) throws Exception { - PlainActionFuture actionFuture = PlainActionFuture.newFuture(); - RetryHandler r = new RetryHandler(retryOnThrowable, backoffPolicy, consumer, actionFuture, settings, threadPool); - r.execute(bulkRequest); - return actionFuture.actionGet(); + public PlainActionFuture withBackoff(BiConsumer> consumer, BulkRequest bulkRequest, Settings settings) { + PlainActionFuture future = PlainActionFuture.newFuture(); + withBackoff(consumer, bulkRequest, future, settings); + return future; } static class RetryHandler implements ActionListener { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index ec529e59ae7..a9b82c514a6 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -279,7 +279,7 @@ public class TransportBulkAction extends HandledTransportAction() { + bulkRetry.withBackoff(client::bulk, request, new ActionListener() { @Override public void onResponse(BulkResponse response) { onBulkResponse(thisBatchStartTime, response); diff --git a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java index ca9e41c8e8a..41780ca0c7a 100644 --- a/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -491,14 +491,18 @@ public class IndexRequest extends ReplicatedWriteRequest implement } if (parent != null && !mappingMd.hasParentField()) { - throw new IllegalArgumentException("Can't specify parent if no parent field has been configured"); + throw new IllegalArgumentException("can't specify parent if no parent field has been configured"); } } else { if (parent != null) { - throw new IllegalArgumentException("Can't specify parent if no parent field has been configured"); + throw new IllegalArgumentException("can't specify parent if no parent field has been configured"); } } + if ("".equals(id)) { + throw new IllegalArgumentException("if _id is specified it must not be empty"); + } + // generate id if not already provided if (id == null) { assert autoGeneratedTimestamp == -1 : "timestamp has already been generated!"; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index de16bbe76aa..2af6ee33b37 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -20,7 +20,6 @@ package org.elasticsearch.bootstrap; import org.elasticsearch.SecureSM; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.network.NetworkModule; @@ -45,11 +44,9 @@ import java.security.NoSuchAlgorithmException; import java.security.Permissions; import java.security.Policy; import java.security.URIParameter; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashSet; -import java.util.List; import java.util.Map; import java.util.Set; @@ -269,6 +266,26 @@ final class Security { for (Path path : environment.dataFiles()) { addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, 
"read,readlink,write,delete"); } + /* + * If path.data and default.path.data are set, we need read access to the paths in default.path.data to check for the existence of + * index directories there that could have arisen from a bug in the handling of simultaneous configuration of path.data and + * default.path.data that was introduced in Elasticsearch 5.3.0. + * + * If path.data is not set then default.path.data would take precedence in setting the data paths for the environment and + * permissions would have been granted above. + * + * If path.data is not set and default.path.data is not set, then we would fallback to the default data directory under + * Elasticsearch home and again permissions would have been granted above. + * + * If path.data is set and default.path.data is not set, there is nothing to do here. + */ + if (Environment.PATH_DATA_SETTING.exists(environment.settings()) + && Environment.DEFAULT_PATH_DATA_SETTING.exists(environment.settings())) { + for (final String path : Environment.DEFAULT_PATH_DATA_SETTING.get(environment.settings())) { + // write permissions are not needed here, we are not going to be writing to any paths here + addPath(policy, Environment.DEFAULT_PATH_DATA_SETTING.getKey(), getPath(path), "read,readlink"); + } + } for (Path path : environment.repoFiles()) { addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete"); } @@ -278,6 +295,11 @@ final class Security { } } + @SuppressForbidden(reason = "read path that is not configured in environment") + private static Path getPath(final String path) { + return PathUtils.get(path); + } + /** * Add dynamic {@link SocketPermission}s based on HTTP and transport settings. * diff --git a/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java index 2700ea4dc13..ac90e546f7e 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java @@ -30,13 +30,17 @@ import org.elasticsearch.common.util.ByteArray; */ public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable { - public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray byteArray, int length) { + private final Releasable releasable; + + public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray byteArray, int length, + Releasable releasable) { super(bigarrays, byteArray, length); + this.releasable = releasable; } @Override public void close() { - Releasables.close(byteArray); + Releasables.close(releasable); } } diff --git a/core/src/main/java/org/elasticsearch/common/compress/Compressor.java b/core/src/main/java/org/elasticsearch/common/compress/Compressor.java index 05706debd37..b39e7f6e142 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/Compressor.java +++ b/core/src/main/java/org/elasticsearch/common/compress/Compressor.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.compress; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -31,5 +32,9 @@ public interface Compressor { StreamInput streamInput(StreamInput in) throws IOException; + /** + * Creates a new stream output that compresses the contents and writes to the 
provided stream + * output. Closing the returned {@link StreamOutput} will close the provided stream output. + */ StreamOutput streamOutput(StreamOutput out) throws IOException; } diff --git a/core/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java b/core/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java index 42e2efa358c..794a8db4960 100644 --- a/core/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java +++ b/core/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.compress; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -47,7 +46,7 @@ public class DeflateCompressor implements Compressor { // It needs to be different from other compressors and to not be specific // enough so that no stream starting with these bytes could be detected as // a XContent - private static final byte[] HEADER = new byte[] { 'D', 'F', 'L', '\0' }; + private static final byte[] HEADER = new byte[]{'D', 'F', 'L', '\0'}; // 3 is a good trade-off between speed and compression ratio private static final int LEVEL = 3; // We use buffering on the input and output of in/def-laters in order to @@ -88,6 +87,7 @@ public class DeflateCompressor implements Compressor { decompressedIn = new BufferedInputStream(decompressedIn, BUFFER_SIZE); return new InputStreamStreamInput(decompressedIn) { final AtomicBoolean closed = new AtomicBoolean(false); + public void close() throws IOException { try { super.close(); @@ -107,10 +107,11 @@ public class DeflateCompressor implements Compressor { final boolean nowrap = true; final Deflater deflater = new Deflater(LEVEL, nowrap); final boolean syncFlush = true; - OutputStream compressedOut = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush); - compressedOut = new BufferedOutputStream(compressedOut, BUFFER_SIZE); + DeflaterOutputStream deflaterOutputStream = new DeflaterOutputStream(out, deflater, BUFFER_SIZE, syncFlush); + OutputStream compressedOut = new BufferedOutputStream(deflaterOutputStream, BUFFER_SIZE); return new OutputStreamStreamOutput(compressedOut) { final AtomicBoolean closed = new AtomicBoolean(false); + public void close() throws IOException { try { super.close(); diff --git a/core/src/main/java/org/elasticsearch/common/io/ReleasableBytesStream.java b/core/src/main/java/org/elasticsearch/common/io/ReleasableBytesStream.java deleted file mode 100644 index e31f206bcad..00000000000 --- a/core/src/main/java/org/elasticsearch/common/io/ReleasableBytesStream.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.io; - -import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; - -/** - * A bytes stream that requires its bytes to be released once no longer used. - */ -public interface ReleasableBytesStream extends BytesStream { - - @Override - ReleasablePagedBytesReference bytes(); - -} diff --git a/core/src/main/java/org/elasticsearch/common/io/Streams.java b/core/src/main/java/org/elasticsearch/common/io/Streams.java index f922fde3e75..f24b703251b 100644 --- a/core/src/main/java/org/elasticsearch/common/io/Streams.java +++ b/core/src/main/java/org/elasticsearch/common/io/Streams.java @@ -20,6 +20,9 @@ package org.elasticsearch.common.io; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStream; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Callback; import java.io.BufferedReader; @@ -236,4 +239,56 @@ public abstract class Streams { } } } + + /** + * Wraps the given {@link BytesStream} in a {@link StreamOutput} that simply flushes when + * close is called. + */ + public static BytesStream flushOnCloseStream(BytesStream os) { + return new FlushOnCloseOutputStream(os); + } + + /** + * A wrapper around a {@link BytesStream} that makes the close operation a flush. This is + * needed as sometimes a stream will be closed but the bytes that the stream holds still need + * to be used and the stream cannot be closed until the bytes have been consumed. + */ + private static class FlushOnCloseOutputStream extends BytesStream { + + private final BytesStream delegate; + + private FlushOnCloseOutputStream(BytesStream bytesStreamOutput) { + this.delegate = bytesStreamOutput; + } + + @Override + public void writeByte(byte b) throws IOException { + delegate.writeByte(b); + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + delegate.writeBytes(b, offset, length); + } + + @Override + public void flush() throws IOException { + delegate.flush(); + } + + @Override + public void close() throws IOException { + flush(); + } + + @Override + public void reset() throws IOException { + delegate.reset(); + } + + @Override + public BytesReference bytes() { + return delegate.bytes(); + } + } } diff --git a/core/src/main/java/org/elasticsearch/common/io/BytesStream.java b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStream.java similarity index 85% rename from core/src/main/java/org/elasticsearch/common/io/BytesStream.java rename to core/src/main/java/org/elasticsearch/common/io/stream/BytesStream.java index 903c1dcb799..c20dcf62c9b 100644 --- a/core/src/main/java/org/elasticsearch/common/io/BytesStream.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStream.java @@ -17,11 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.common.io; +package org.elasticsearch.common.io.stream; import org.elasticsearch.common.bytes.BytesReference; -public interface BytesStream { +public abstract class BytesStream extends StreamOutput { - BytesReference bytes(); -} \ No newline at end of file + public abstract BytesReference bytes(); +} diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index e65e8efb27b..ab9a1896ef7 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.io.stream; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.PagedBytesReference; -import org.elasticsearch.common.io.BytesStream; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; @@ -31,7 +30,7 @@ import java.io.IOException; * A @link {@link StreamOutput} that uses {@link BigArrays} to acquire pages of * bytes, which avoids frequent reallocation & copying of the internal data. */ -public class BytesStreamOutput extends StreamOutput implements BytesStream { +public class BytesStreamOutput extends BytesStream { protected final BigArrays bigArrays; @@ -50,7 +49,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { /** * Create a non recycling {@link BytesStreamOutput} with enough initial pages acquired * to satisfy the capacity given by expected size. - * + * * @param expectedSize the expected maximum size of the stream in bytes. */ public BytesStreamOutput(int expectedSize) { @@ -129,7 +128,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { /** * Returns the current size of the buffer. - * + * * @return the value of the count field, which is the number of valid * bytes in this output stream. * @see java.io.ByteArrayOutputStream#count @@ -151,7 +150,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { return bytes.ramBytesUsed(); } - private void ensureCapacity(long offset) { + void ensureCapacity(long offset) { if (offset > Integer.MAX_VALUE) { throw new IllegalArgumentException(getClass().getSimpleName() + " cannot hold more than 2GB of data"); } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java index 674ff18f0fc..0bfe15d6dc2 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java @@ -20,29 +20,66 @@ package org.elasticsearch.common.io.stream; import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; -import org.elasticsearch.common.io.ReleasableBytesStream; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ByteArray; /** * An bytes stream output that allows providing a {@link BigArrays} instance * expecting it to require releasing its content ({@link #bytes()}) once done. *
<p>
- * Please note, its is the responsibility of the caller to make sure the bytes - * reference do not "escape" and are released only once. + * Please note, closing this stream will release the bytes that are in use by any + * {@link ReleasablePagedBytesReference} returned from {@link #bytes()}, so this + * stream should only be closed after the bytes have been output or copied + * elsewhere. */ -public class ReleasableBytesStreamOutput extends BytesStreamOutput implements ReleasableBytesStream { +public class ReleasableBytesStreamOutput extends BytesStreamOutput + implements Releasable { + + private Releasable releasable; public ReleasableBytesStreamOutput(BigArrays bigarrays) { - super(BigArrays.PAGE_SIZE_IN_BYTES, bigarrays); + this(BigArrays.PAGE_SIZE_IN_BYTES, bigarrays); } public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigArrays) { super(expectedSize, bigArrays); + this.releasable = Releasables.releaseOnce(this.bytes); + } + + /** + * Returns a {@link Releasable} implementation of a + * {@link org.elasticsearch.common.bytes.BytesReference} that represents the current state of + * the bytes in the stream. + */ + @Override + public ReleasablePagedBytesReference bytes() { + return new ReleasablePagedBytesReference(bigArrays, bytes, count, releasable); } @Override - public ReleasablePagedBytesReference bytes() { - return new ReleasablePagedBytesReference(bigArrays, bytes, count); + public void close() { + Releasables.close(releasable); } + @Override + void ensureCapacity(long offset) { + final ByteArray prevBytes = this.bytes; + super.ensureCapacity(offset); + if (prevBytes != this.bytes) { + // re-create the releasable with the new reference + releasable = Releasables.releaseOnce(this.bytes); + } + } + + @Override + public void reset() { + final ByteArray prevBytes = this.bytes; + super.reset(); + if (prevBytes != this.bytes) { + // re-create the releasable with the new reference + releasable = Releasables.releaseOnce(this.bytes); + } + } } diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index 8d412a0587b..7594f96e2df 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -226,10 +226,41 @@ public class DeprecationLogger { * @return the extracted warning value */ public static String extractWarningValueFromWarningHeader(final String s) { + /* + * We know the exact format of the warning header, so to extract the warning value we can skip forward from the front to the first + * quote, and skip backwards from the end to the penultimate quote: + * + * 299 Elasticsearch-6.0.0 "warning value" "Sat, 25, Feb 2017 10:27:43 GMT" + * ^ ^ ^ + * firstQuote penultimateQuote lastQuote + * + * We do it this way rather than seeking forward after the first quote because there could be escaped quotes in the warning value + * but since there are none in the warning date, we can skip backwards to find the quote that closes the quoted warning value. + * + * We parse this manually rather than using the capturing regular expression because the regular expression involves a lot of + * backtracking and carries a performance penalty. However, when assertions are enabled, we still use the regular expression to + * verify that we are maintaining the warning header format. 
+ */ + final int firstQuote = s.indexOf('\"'); + final int lastQuote = s.lastIndexOf('\"'); + final int penultimateQuote = s.lastIndexOf('\"', lastQuote - 1); + final String warningValue = s.substring(firstQuote + 1, penultimateQuote - 2); + assert assertWarningValue(s, warningValue); + return warningValue; + } + + /** + * Assert that the specified string has the warning value equal to the provided warning value. + * + * @param s the string representing a full warning header + * @param warningValue the expected warning header + * @return {@code true} if the specified string has the expected warning value + */ + private static boolean assertWarningValue(final String s, final String warningValue) { final Matcher matcher = WARNING_HEADER_PATTERN.matcher(s); final boolean matches = matcher.matches(); assert matches; - return matcher.group(1); + return matcher.group(1).equals(warningValue); } /** diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java similarity index 73% rename from core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java rename to core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index caaf7fc84af..80977618c4b 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -29,9 +29,12 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbersService; import java.io.IOException; @@ -43,7 +46,7 @@ import java.io.IOException; * in more than one document! It will only return the first one it * finds. */ -final class PerThreadIDAndVersionLookup { +final class PerThreadIDVersionAndSeqNoLookup { // TODO: do we really need to store all this stuff? some if it might not speed up anything. // we keep it around for now, to reduce the amount of e.g. 
hash lookups by field and stuff @@ -51,7 +54,10 @@ final class PerThreadIDAndVersionLookup { private final TermsEnum termsEnum; /** _version data */ private final NumericDocValues versions; - + /** _seq_no data */ + private final NumericDocValues seqNos; + /** _primary_term data */ + private final NumericDocValues primaryTerms; /** Reused for iteration (when the term exists) */ private PostingsEnum docsEnum; @@ -61,7 +67,7 @@ final class PerThreadIDAndVersionLookup { /** * Initialize lookup for the provided segment */ - PerThreadIDAndVersionLookup(LeafReader reader) throws IOException { + PerThreadIDVersionAndSeqNoLookup(LeafReader reader) throws IOException { Fields fields = reader.fields(); Terms terms = fields.terms(UidFieldMapper.NAME); termsEnum = terms.iterator(); @@ -74,6 +80,8 @@ final class PerThreadIDAndVersionLookup { throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field"); } + seqNos = reader.getNumericDocValues(SeqNoFieldMapper.NAME); + primaryTerms = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); Object readerKey = null; assert (readerKey = reader.getCoreCacheKey()) != null; this.readerKey = readerKey; @@ -113,4 +121,25 @@ final class PerThreadIDAndVersionLookup { return DocIdSetIterator.NO_MORE_DOCS; } } + + /** Return null if id is not found. */ + DocIdAndSeqNo lookupSeqNo(BytesRef id, Bits liveDocs, LeafReaderContext context) throws IOException { + assert context.reader().getCoreCacheKey().equals(readerKey) : + "context's reader is not the same as the reader class was initialized on."; + int docID = getDocID(id, liveDocs); + if (docID != DocIdSetIterator.NO_MORE_DOCS) { + return new DocIdAndSeqNo(docID, seqNos == null ? SequenceNumbersService.UNASSIGNED_SEQ_NO : seqNos.get(docID), context); + } else { + return null; + } + } + + /** + * returns 0 if the primary term is not found. + * + * Note that 0 is an illegal primary term. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)} + **/ + long lookUpPrimaryTerm(int docID) throws IOException { + return primaryTerms == null ? 0 : primaryTerms.get(docID); + } } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java new file mode 100644 index 00000000000..1cbae29a3da --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.lucene.uid; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReader.CoreClosedListener; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.util.CloseableThreadLocal; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.index.mapper.UidFieldMapper; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.ConcurrentMap; + +import static org.elasticsearch.common.lucene.uid.Versions.NOT_FOUND; + +/** Utility class to resolve the Lucene doc ID, version, seqNo and primaryTerms for a given uid. */ +public final class VersionsAndSeqNoResolver { + + static final ConcurrentMap> lookupStates = + ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); + + // Evict this reader from lookupStates once it's closed: + private static final CoreClosedListener removeLookupState = key -> { + CloseableThreadLocal ctl = lookupStates.remove(key); + if (ctl != null) { + ctl.close(); + } + }; + + private static PerThreadIDVersionAndSeqNoLookup getLookupState(LeafReader reader) throws IOException { + Object key = reader.getCoreCacheKey(); + CloseableThreadLocal ctl = lookupStates.get(key); + if (ctl == null) { + // First time we are seeing this reader's core; make a new CTL: + ctl = new CloseableThreadLocal<>(); + CloseableThreadLocal other = lookupStates.putIfAbsent(key, ctl); + if (other == null) { + // Our CTL won, we must remove it when the core is closed: + reader.addCoreClosedListener(removeLookupState); + } else { + // Another thread beat us to it: just use their CTL: + ctl = other; + } + } + + PerThreadIDVersionAndSeqNoLookup lookupState = ctl.get(); + if (lookupState == null) { + lookupState = new PerThreadIDVersionAndSeqNoLookup(reader); + ctl.set(lookupState); + } + + return lookupState; + } + + private VersionsAndSeqNoResolver() { + } + + /** Wraps an {@link LeafReaderContext}, a doc ID relative to the context doc base and a version. */ + public static class DocIdAndVersion { + public final int docId; + public final long version; + public final LeafReaderContext context; + + DocIdAndVersion(int docId, long version, LeafReaderContext context) { + this.docId = docId; + this.version = version; + this.context = context; + } + } + + /** Wraps an {@link LeafReaderContext}, a doc ID relative to the context doc base and a seqNo. */ + public static class DocIdAndSeqNo { + public final int docId; + public final long seqNo; + public final LeafReaderContext context; + + DocIdAndSeqNo(int docId, long seqNo, LeafReaderContext context) { + this.docId = docId; + this.seqNo = seqNo; + this.context = context; + } + } + + + /** + * Load the internal doc ID and version for the uid from the reader, returning

+     * <ul>
+     * <li>null if the uid wasn't found,
+     * <li>a doc ID and a version otherwise
+     * </ul>
+ */ + public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term) throws IOException { + assert term.field().equals(UidFieldMapper.NAME) : "unexpected term field " + term.field(); + List leaves = reader.leaves(); + if (leaves.isEmpty()) { + return null; + } + // iterate backwards to optimize for the frequently updated documents + // which are likely to be in the last segments + for (int i = leaves.size() - 1; i >= 0; i--) { + LeafReaderContext context = leaves.get(i); + LeafReader leaf = context.reader(); + PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf); + DocIdAndVersion result = lookup.lookupVersion(term.bytes(), leaf.getLiveDocs(), context); + if (result != null) { + return result; + } + } + return null; + } + + /** + * Load the internal doc ID and sequence number for the uid from the reader, returning
    + *
+     * <ul>
+     * <li>null if the uid wasn't found,
+     * <li>a doc ID and the associated seqNo otherwise
+     * </ul>
+ */ + public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException { + assert term.field().equals(UidFieldMapper.NAME) : "unexpected term field " + term.field(); + List leaves = reader.leaves(); + if (leaves.isEmpty()) { + return null; + } + // iterate backwards to optimize for the frequently updated documents + // which are likely to be in the last segments + for (int i = leaves.size() - 1; i >= 0; i--) { + LeafReaderContext context = leaves.get(i); + LeafReader leaf = context.reader(); + PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf); + DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf.getLiveDocs(), context); + if (result != null) { + return result; + } + } + return null; + } + + /** + * Load the primaryTerm associated with the given {@link DocIdAndSeqNo} + */ + public static long loadPrimaryTerm(DocIdAndSeqNo docIdAndSeqNo) throws IOException { + LeafReader leaf = docIdAndSeqNo.context.reader(); + PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf); + long result = lookup.lookUpPrimaryTerm(docIdAndSeqNo.docId); + assert result > 0 : "should always resolve a primary term for a resolved sequence number. primary_term [" + result + "]" + + " docId [" + docIdAndSeqNo.docId + "] seqNo [" + docIdAndSeqNo.seqNo + "]"; + return result; + } + + /** + * Load the version for the uid from the reader, returning
    + *
+     * <ul>
+     * <li>{@link Versions#NOT_FOUND} if no matching doc exists,
+     * <li>the version associated with the provided uid otherwise
+     * </ul>
+ */ + public static long loadVersion(IndexReader reader, Term term) throws IOException { + final DocIdAndVersion docIdAndVersion = loadDocIdAndVersion(reader, term); + return docIdAndVersion == null ? NOT_FOUND : docIdAndVersion.version; + } +} diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsResolver.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsResolver.java deleted file mode 100644 index fb5875cbae5..00000000000 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/VersionsResolver.java +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.uid; - -import org.apache.lucene.index.Fields; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReader.CoreClosedListener; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.PostingsEnum; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.Term; -import org.apache.lucene.index.Terms; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.CloseableThreadLocal; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.index.mapper.SeqNoFieldMapper; -import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.seqno.SequenceNumbersService; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.ConcurrentMap; - -import static org.elasticsearch.common.lucene.uid.Versions.NOT_FOUND; - -/** Utility class to resolve the Lucene doc ID and version for a given uid. 
*/ -public class VersionsResolver { - - static final ConcurrentMap> - lookupStates = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); - - // Evict this reader from lookupStates once it's closed: - private static final CoreClosedListener removeLookupState = key -> { - CloseableThreadLocal ctl = lookupStates.remove(key); - if (ctl != null) { - ctl.close(); - } - }; - - private static PerThreadIDAndVersionLookup getLookupState(LeafReader reader) - throws IOException { - Object key = reader.getCoreCacheKey(); - CloseableThreadLocal ctl = lookupStates.get(key); - if (ctl == null) { - // First time we are seeing this reader's core; make a - // new CTL: - ctl = new CloseableThreadLocal<>(); - CloseableThreadLocal other = - lookupStates.putIfAbsent(key, ctl); - if (other == null) { - // Our CTL won, we must remove it when the - // core is closed: - reader.addCoreClosedListener(removeLookupState); - } else { - // Another thread beat us to it: just use - // their CTL: - ctl = other; - } - } - - PerThreadIDAndVersionLookup lookupState = ctl.get(); - if (lookupState == null) { - lookupState = new PerThreadIDAndVersionLookup(reader); - ctl.set(lookupState); - } - - return lookupState; - } - - private VersionsResolver() { - } - - /** - * Wraps an {@link LeafReaderContext}, a doc ID relative to the context doc base and - * a version. - **/ - public static class DocIdAndVersion { - public final int docId; - public final long version; - public final LeafReaderContext context; - - public DocIdAndVersion(int docId, long version, LeafReaderContext context) { - this.docId = docId; - this.version = version; - this.context = context; - } - } - - /** - * Load the internal doc ID and version for the uid from the reader, returning
- * <li>null if the uid wasn't found,
- * <li>a doc ID and a version otherwise
- * </ul>
- */ - public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term) - throws IOException { - assert term.field().equals(UidFieldMapper.NAME); - List leaves = reader.leaves(); - if (leaves.isEmpty()) { - return null; - } - // iterate backwards to optimize for the frequently updated documents - // which are likely to be in the last segments - for (int i = leaves.size() - 1; i >= 0; i--) { - LeafReaderContext context = leaves.get(i); - LeafReader leaf = context.reader(); - PerThreadIDAndVersionLookup lookup = getLookupState(leaf); - DocIdAndVersion result = - lookup.lookupVersion(term.bytes(), leaf.getLiveDocs(), context); - if (result != null) { - return result; - } - } - return null; - } - - /** - * Load the version for the uid from the reader, returning
- * <li>{@link Versions#NOT_FOUND} if no matching doc exists,
- * <li>the version associated with the provided uid otherwise
- * </ul>
- */ - public static long loadVersion(IndexReader reader, Term term) throws IOException { - final DocIdAndVersion docIdAndVersion = loadDocIdAndVersion(reader, term); - return docIdAndVersion == null ? NOT_FOUND : docIdAndVersion.version; - } - - /** - * Returns the sequence number for the given uid term, returning - * {@code SequenceNumbersService.UNASSIGNED_SEQ_NO} if none is found. - */ - public static long loadSeqNo(IndexReader reader, Term term) throws IOException { - assert term.field().equals(UidFieldMapper.NAME) : "can only load _seq_no by uid"; - List leaves = reader.leaves(); - if (leaves.isEmpty()) { - return SequenceNumbersService.UNASSIGNED_SEQ_NO; - } - - // iterate backwards to optimize for the frequently updated documents - // which are likely to be in the last segments - for (int i = leaves.size() - 1; i >= 0; i--) { - LeafReader leaf = leaves.get(i).reader(); - Bits liveDocs = leaf.getLiveDocs(); - - TermsEnum termsEnum = null; - SortedNumericDocValues dvField = null; - PostingsEnum docsEnum = null; - - final Fields fields = leaf.fields(); - if (fields != null) { - Terms terms = fields.terms(UidFieldMapper.NAME); - if (terms != null) { - termsEnum = terms.iterator(); - assert termsEnum != null; - dvField = leaf.getSortedNumericDocValues(SeqNoFieldMapper.NAME); - assert dvField != null; - - final BytesRef id = term.bytes(); - if (termsEnum.seekExact(id)) { - // there may be more than one matching docID, in the - // case of nested docs, so we want the last one: - docsEnum = termsEnum.postings(docsEnum, 0); - int docID = DocIdSetIterator.NO_MORE_DOCS; - for (int d = docsEnum.nextDoc(); - d != DocIdSetIterator.NO_MORE_DOCS; d = docsEnum.nextDoc()) { - if (liveDocs != null && liveDocs.get(d) == false) { - continue; - } - docID = d; - } - - if (docID != DocIdSetIterator.NO_MORE_DOCS) { - dvField.setDocument(docID); - assert dvField.count() == 1 : - "expected only a single value for _seq_no but got " + - dvField.count(); - return dvField.valueAt(0); - } - } - } - } - - } - return SequenceNumbersService.UNASSIGNED_SEQ_NO; - } - - /** - * Returns the primary term for the given uid term, returning {@code 0} if none is found. 
- */ - public static long loadPrimaryTerm(IndexReader reader, Term term) throws IOException { - assert term.field().equals(UidFieldMapper.NAME) : "can only load _primary_term by uid"; - List leaves = reader.leaves(); - if (leaves.isEmpty()) { - return 0; - } - - // iterate backwards to optimize for the frequently updated documents - // which are likely to be in the last segments - for (int i = leaves.size() - 1; i >= 0; i--) { - LeafReader leaf = leaves.get(i).reader(); - Bits liveDocs = leaf.getLiveDocs(); - - TermsEnum termsEnum = null; - NumericDocValues dvField = null; - PostingsEnum docsEnum = null; - - final Fields fields = leaf.fields(); - if (fields != null) { - Terms terms = fields.terms(UidFieldMapper.NAME); - if (terms != null) { - termsEnum = terms.iterator(); - assert termsEnum != null; - dvField = leaf.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); - assert dvField != null; - - final BytesRef id = term.bytes(); - if (termsEnum.seekExact(id)) { - // there may be more than one matching docID, in the - // case of nested docs, so we want the last one: - docsEnum = termsEnum.postings(docsEnum, 0); - int docID = DocIdSetIterator.NO_MORE_DOCS; - for (int d = docsEnum.nextDoc(); - d != DocIdSetIterator.NO_MORE_DOCS; - d = docsEnum.nextDoc()) { - if (liveDocs != null && liveDocs.get(d) == false) { - continue; - } - docID = d; - } - - if (docID != DocIdSetIterator.NO_MORE_DOCS) { - return dvField.get(docID); - } - } - } - } - - } - return 0; - } -} diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 8478a790689..1d894481373 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -311,9 +311,12 @@ public final class ClusterSettings extends AbstractScopedSettings { HunspellService.HUNSPELL_IGNORE_CASE, HunspellService.HUNSPELL_DICTIONARY_OPTIONS, IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, + Environment.DEFAULT_PATH_CONF_SETTING, Environment.PATH_CONF_SETTING, + Environment.DEFAULT_PATH_DATA_SETTING, Environment.PATH_DATA_SETTING, Environment.PATH_HOME_SETTING, + Environment.DEFAULT_PATH_LOGS_SETTING, Environment.PATH_LOGS_SETTING, Environment.PATH_REPO_SETTING, Environment.PATH_SCRIPTS_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index 374d923d30a..d3071eb58f0 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -57,6 +57,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; @@ -442,6 +443,20 @@ public final class Settings implements ToXContent { public String[] getAsArray(String settingPrefix, String[] defaultArray, Boolean commaDelimited) throws SettingsException { List result = new ArrayList<>(); + final String valueFromPrefix = get(settingPrefix); + final String valueFromPreifx0 = get(settingPrefix + ".0"); + + if (valueFromPrefix != null && valueFromPreifx0 != null) { + final String message = String.format( + Locale.ROOT, + "settings object contains values for [%s=%s] and [%s=%s]", + settingPrefix, + valueFromPrefix, + settingPrefix + ".0", + 
valueFromPreifx0); + throw new IllegalStateException(message); + } + if (get(settingPrefix) != null) { if (commaDelimited) { String[] strings = Strings.splitStringByCommaToArray(get(settingPrefix)); @@ -1048,12 +1063,10 @@ public final class Settings implements ToXContent { return this; } - public Builder putProperties(Map esSettings, Predicate keyPredicate, Function keyFunction) { + public Builder putProperties(final Map esSettings, final Function keyFunction) { for (final Map.Entry esSetting : esSettings.entrySet()) { final String key = esSetting.getKey(); - if (keyPredicate.test(key)) { - map.put(keyFunction.apply(key), esSetting.getValue()); - } + map.put(keyFunction.apply(key), esSetting.getValue()); } return this; } diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index 189e9d3c8d5..f0427ce2466 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -22,7 +22,7 @@ package org.elasticsearch.common.xcontent; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.io.BytesStream; +import org.elasticsearch.common.io.stream.BytesStream; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.text.Text; @@ -53,7 +53,7 @@ import java.util.concurrent.TimeUnit; /** * A utility to build XContent (ie json). */ -public final class XContentBuilder implements BytesStream, Releasable, Flushable { +public final class XContentBuilder implements Releasable, Flushable { /** * Create a new {@link XContentBuilder} using the given {@link XContent} content. 
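
As a minimal sketch of the new conflict check in Settings#getAsArray above (the key and paths are invented for illustration), a settings object carrying both the flat form and the array form of the same key is now expected to fail fast instead of silently preferring one of the two values:

import org.elasticsearch.common.settings.Settings;

public class GetAsArrayConflictSketch {
    public static void main(final String[] args) {
        // hypothetical settings mixing the flat form and the array form of one key
        final Settings settings = Settings.builder()
                .put("path.data", "/var/data/a")
                .put("path.data.0", "/var/data/b")
                .build();
        try {
            settings.getAsArray("path.data");
        } catch (final IllegalStateException e) {
            // e.g. "settings object contains values for [path.data=/var/data/a] and [path.data.0=/var/data/b]"
            System.out.println(e.getMessage());
        }
    }
}
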
@@ -1041,7 +1041,6 @@ public final class XContentBuilder implements BytesStream, Releasable, Flushable return this.generator; } - @Override public BytesReference bytes() { close(); return ((BytesStream) bos).bytes(); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index f0de15da9ae..92b20c5199b 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -111,14 +111,14 @@ public class ElectMasterService extends AbstractComponent { return minimumMasterNodes; } - public boolean hasEnoughMasterNodes(Iterable nodes) { + public int countMasterNodes(Iterable nodes) { int count = 0; for (DiscoveryNode node : nodes) { if (node.isMasterNode()) { count++; } } - return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes); + return count; } public boolean hasEnoughCandidates(Collection candidates) { @@ -149,13 +149,12 @@ public class ElectMasterService extends AbstractComponent { return activeMasters.stream().min(ElectMasterService::compareNodes).get(); } + public boolean hasEnoughMasterNodes(Iterable nodes) { + return minimumMasterNodes < 1 || countMasterNodes(nodes) >= minimumMasterNodes; + } + public boolean hasTooManyMasterNodes(Iterable nodes) { - int count = 0; - for (DiscoveryNode node : nodes) { - if (node.isMasterNode()) { - count++; - } - } + final int count = countMasterNodes(nodes); return count > 1 && minimumMasterNodes <= count / 2; } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 72c33c873d4..f5dbaac6242 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -580,8 +581,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder); final ClusterTasksResult.Builder resultBuilder = ClusterTasksResult.builder().successes(tasks); - if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) { - rejoin.accept("not enough master nodes"); + if (electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes()) == false) { + final int masterNodes = electMasterService.countMasterNodes(remainingNodesClusterState.nodes()); + rejoin.accept(LoggerMessageFormat.format("not enough master nodes (has [{}], but needed [{}])", + masterNodes, electMasterService.minimumMasterNodes())); return resultBuilder.build(currentState); } else { return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks))); @@ -920,7 +923,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return winner.getNode(); } else { // if we don't have enough master nodes, 
we bail, because there are not enough master to elect from - logger.trace("not enough master nodes [{}]", masterCandidates); + logger.warn("not enough master nodes discovered during pinging (found [{}], but needed [{}]), pinging again", + masterCandidates, electMaster.minimumMasterNodes()); return null; } } else { diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java index 4b544aa3882..f431a7f646e 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -49,11 +49,17 @@ import static org.elasticsearch.common.Strings.cleanPath; // public+forbidden api! public class Environment { public static final Setting PATH_HOME_SETTING = Setting.simpleString("path.home", Property.NodeScope); - public static final Setting PATH_CONF_SETTING = Setting.simpleString("path.conf", Property.NodeScope); + public static final Setting DEFAULT_PATH_CONF_SETTING = Setting.simpleString("default.path.conf", Property.NodeScope); + public static final Setting PATH_CONF_SETTING = + new Setting<>("path.conf", DEFAULT_PATH_CONF_SETTING, Function.identity(), Property.NodeScope); public static final Setting PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", Property.NodeScope); + public static final Setting> DEFAULT_PATH_DATA_SETTING = + Setting.listSetting("default.path.data", Collections.emptyList(), Function.identity(), Property.NodeScope); public static final Setting> PATH_DATA_SETTING = - Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), Property.NodeScope); - public static final Setting PATH_LOGS_SETTING = Setting.simpleString("path.logs", Property.NodeScope); + Setting.listSetting("path.data", DEFAULT_PATH_DATA_SETTING, Function.identity(), Property.NodeScope); + public static final Setting DEFAULT_PATH_LOGS_SETTING = Setting.simpleString("default.path.logs", Property.NodeScope); + public static final Setting PATH_LOGS_SETTING = + new Setting<>("path.logs", DEFAULT_PATH_LOGS_SETTING, Function.identity(), Property.NodeScope); public static final Setting> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), Property.NodeScope); public static final Setting PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", Property.NodeScope); @@ -115,7 +121,8 @@ public class Environment { throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured"); } - if (PATH_CONF_SETTING.exists(settings)) { + // this is trappy, Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings) + if (PATH_CONF_SETTING.exists(settings) || DEFAULT_PATH_CONF_SETTING.exists(settings)) { configFile = PathUtils.get(cleanPath(PATH_CONF_SETTING.get(settings))); } else { configFile = homeFile.resolve("config"); @@ -156,7 +163,9 @@ public class Environment { } else { repoFiles = new Path[0]; } - if (PATH_LOGS_SETTING.exists(settings)) { + + // this is trappy, Setting#get(Settings) will get a fallback setting yet return false for Settings#exists(Settings) + if (PATH_LOGS_SETTING.exists(settings) || DEFAULT_PATH_LOGS_SETTING.exists(settings)) { logsFile = PathUtils.get(cleanPath(PATH_LOGS_SETTING.get(settings))); } else { logsFile = homeFile.resolve("logs"); @@ -174,7 +183,9 @@ public class Environment { Settings.Builder finalSettings = Settings.builder().put(settings); finalSettings.put(PATH_HOME_SETTING.getKey(), homeFile); - 
finalSettings.putArray(PATH_DATA_SETTING.getKey(), dataPaths); + if (PATH_DATA_SETTING.exists(settings)) { + finalSettings.putArray(PATH_DATA_SETTING.getKey(), dataPaths); + } finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile); this.settings = finalSettings.build(); diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index ab969b17d49..dec59f97f42 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -202,7 +202,7 @@ public final class NodeEnvironment implements Closeable { for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) { Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex]; Path dataDir = environment.dataFiles()[dirIndex]; - Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId)); + Path dir = resolveNodePath(dataDir, possibleLockId); Files.createDirectories(dir); try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { @@ -268,6 +268,17 @@ public final class NodeEnvironment implements Closeable { } } + /** + * Resolve a specific nodes/{node.id} path for the specified path and node lock id. + * + * @param path the path + * @param nodeLockId the node lock id + * @return the resolved path + */ + public static Path resolveNodePath(final Path path, final int nodeLockId) { + return path.resolve(NODES_FOLDER).resolve(Integer.toString(nodeLockId)); + } + /** Returns true if the directory is empty */ private static boolean dirEmpty(final Path path) throws IOException { try (DirectoryStream stream = Files.newDirectoryStream(path)) { @@ -724,6 +735,14 @@ public final class NodeEnvironment implements Closeable { return nodePaths; } + public int getNodeLockId() { + assertEnvIsLocked(); + if (nodePaths == null || locks == null) { + throw new IllegalStateException("node is not configured to store local location"); + } + return nodeLockId; + } + /** * Returns all index paths. */ @@ -736,6 +755,8 @@ public final class NodeEnvironment implements Closeable { return indexPaths; } + + /** * Returns all shard paths excluding custom shard path. Note: Shards are only allocated on one of the * returned paths. The returned array may contain paths to non-existing directories. @@ -764,19 +785,36 @@ public final class NodeEnvironment implements Closeable { assertEnvIsLocked(); Set indexFolders = new HashSet<>(); for (NodePath nodePath : nodePaths) { - Path indicesLocation = nodePath.indicesPath; - if (Files.isDirectory(indicesLocation)) { - try (DirectoryStream stream = Files.newDirectoryStream(indicesLocation)) { - for (Path index : stream) { - if (Files.isDirectory(index)) { - indexFolders.add(index.getFileName().toString()); - } + indexFolders.addAll(availableIndexFoldersForPath(nodePath)); + } + return indexFolders; + + } + + /** + * Return all directory names in the nodes/{node.id}/indices directory for the given node path. + * + * @param nodePath the path + * @return all directories that could be indices for the given node path. 
+ * @throws IOException if an I/O exception occurs traversing the filesystem + */ + public Set availableIndexFoldersForPath(final NodePath nodePath) throws IOException { + if (nodePaths == null || locks == null) { + throw new IllegalStateException("node is not configured to store local location"); + } + assertEnvIsLocked(); + final Set indexFolders = new HashSet<>(); + Path indicesLocation = nodePath.indicesPath; + if (Files.isDirectory(indicesLocation)) { + try (DirectoryStream stream = Files.newDirectoryStream(indicesLocation)) { + for (Path index : stream) { + if (Files.isDirectory(index)) { + indexFolders.add(index.getFileName().toString()); } } } } return indexFolders; - } /** diff --git a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java index 45b28d96ba2..899c06eb196 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/DeleteVersionValue.java @@ -27,18 +27,13 @@ class DeleteVersionValue extends VersionValue { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DeleteVersionValue.class); - private final long time; + final long time; - DeleteVersionValue(long version, long time) { - super(version); + DeleteVersionValue(long version,long seqNo, long term, long time) { + super(version, seqNo, term); this.time = time; } - @Override - public long getTime() { - return this.time; - } - @Override public boolean isDelete() { return true; @@ -52,7 +47,9 @@ class DeleteVersionValue extends VersionValue { @Override public String toString() { return "DeleteVersionValue{" + - "version=" + getVersion() + + "version=" + version + + ", seqNo=" + seqNo + + ", term=" + term + ",time=" + time + '}'; } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 217bc459282..59655abf289 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -55,8 +55,8 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.lucene.uid.VersionsResolver; -import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; @@ -464,7 +464,7 @@ public abstract class Engine implements Closeable { final Searcher searcher = searcherFactory.apply("get"); final DocIdAndVersion docIdAndVersion; try { - docIdAndVersion = VersionsResolver.loadDocIdAndVersion(searcher.reader(), get.uid()); + docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.reader(), get.uid()); } catch (Exception e) { Releasables.closeWhileHandlingException(searcher); //TODO: A better exception goes here diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 333dd769eaf..5e5b2ed3fec 100644 --- 
a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -51,7 +51,8 @@ import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.lucene.uid.VersionsResolver; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -298,7 +299,7 @@ public class InternalEngine extends Engine { throw new IllegalStateException("no translog generation present in commit data but translog is expected to exist"); } if (generation.translogUUID == null) { - throw new IndexFormatTooOldException("trasnlog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first"); + throw new IndexFormatTooOldException("translog", "translog has no generation nor a UUID - this might be an index from a previous version consider upgrading to N-1 first"); } } final Translog translog = new Translog(translogConfig, generation, globalCheckpointSupplier); @@ -389,10 +390,10 @@ public class InternalEngine extends Engine { if (versionValue.isDelete()) { return GetResult.NOT_EXISTS; } - if (get.versionType().isVersionConflictForReads(versionValue.getVersion(), get.version())) { + if (get.versionType().isVersionConflictForReads(versionValue.version, get.version())) { Uid uid = Uid.createUid(get.uid().text()); throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), - get.versionType().explainConflictForReads(versionValue.getVersion(), get.version())); + get.versionType().explainConflictForReads(versionValue.version, get.version())); } refresh("realtime_get"); } @@ -416,6 +417,43 @@ public class InternalEngine extends Engine { LUCENE_DOC_NOT_FOUND } + private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) throws IOException { + assert op.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "resolving ops based on seq# but no seqNo is found"; + final OpVsLuceneDocStatus status; + final VersionValue versionValue = versionMap.getUnderLock(op.uid()); + assert incrementVersionLookup(); + if (versionValue != null) { + if (op.seqNo() > versionValue.seqNo || + (op.seqNo() == versionValue.seqNo && op.primaryTerm() > versionValue.term)) + status = OpVsLuceneDocStatus.OP_NEWER; + else { + status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; + } + } else { + // load from index + assert incrementIndexVersionLookup(); + try (Searcher searcher = acquireSearcher("load_seq_no")) { + DocIdAndSeqNo docAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), op.uid()); + if (docAndSeqNo == null) { + status = OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND; + } else if (op.seqNo() > docAndSeqNo.seqNo) { + status = OpVsLuceneDocStatus.OP_NEWER; + } else if (op.seqNo() == docAndSeqNo.seqNo) { + // load term to tie break + final long existingTerm = VersionsAndSeqNoResolver.loadPrimaryTerm(docAndSeqNo); + if (op.primaryTerm() > existingTerm) { + status = OpVsLuceneDocStatus.OP_NEWER; + } else { + status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; + } + } else { + status = 
OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; + } + } + } + return status; + } + /** resolves the current version of the document, returning null if not found */ private VersionValue resolveDocVersion(final Operation op) throws IOException { assert incrementVersionLookup(); // used for asserting in tests @@ -424,11 +462,10 @@ public class InternalEngine extends Engine { assert incrementIndexVersionLookup(); // used for asserting in tests final long currentVersion = loadCurrentVersionFromIndex(op.uid()); if (currentVersion != Versions.NOT_FOUND) { - versionValue = new VersionValue(currentVersion); + versionValue = new VersionValue(currentVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0L); } } else if (engineConfig.isEnableGcDeletes() && versionValue.isDelete() && - (engineConfig.getThreadPool().relativeTimeInMillis() - versionValue.getTime()) > - getGcDeletesInMillis()) { + (engineConfig.getThreadPool().relativeTimeInMillis() - ((DeleteVersionValue)versionValue).time) > getGcDeletesInMillis()) { versionValue = null; } return versionValue; @@ -436,12 +473,13 @@ public class InternalEngine extends Engine { private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnVersions(final Operation op) throws IOException { + assert op.seqNo() == SequenceNumbersService.UNASSIGNED_SEQ_NO : "op is resolved based on versions but have a seq#"; assert op.version() >= 0 : "versions should be non-negative. got " + op.version(); final VersionValue versionValue = resolveDocVersion(op); if (versionValue == null) { return OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND; } else { - return op.versionType().isVersionConflictForWrites(versionValue.getVersion(), op.version(), versionValue.isDelete()) ? + return op.versionType().isVersionConflictForWrites(versionValue.version, op.version(), versionValue.isDelete()) ? OpVsLuceneDocStatus.OP_STALE_OR_EQUAL : OpVsLuceneDocStatus.OP_NEWER; } } @@ -601,7 +639,16 @@ public class InternalEngine extends Engine { // unlike the primary, replicas don't really care to about creation status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return false for the created flag in favor of code simplicity - final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnVersions(index); + final OpVsLuceneDocStatus opVsLucene; + if (index.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); + } else { + // This can happen if the primary is still on an old node and send traffic without seq# or we recover from translog + // created by an old version. + assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) : + "index is newly created but op has no sequence numbers. 
op: " + index; + opVsLucene = compareOpToLuceneDocBasedOnVersions(index); + } if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { plan = IndexingStrategy.processButSkipLucene(false, index.seqNo(), index.version()); } else { @@ -633,7 +680,7 @@ public class InternalEngine extends Engine { currentVersion = Versions.NOT_FOUND; currentNotFoundOrDeleted = true; } else { - currentVersion = versionValue.getVersion(); + currentVersion = versionValue.version; currentNotFoundOrDeleted = versionValue.isDelete(); } if (index.versionType().isVersionConflictForWrites( @@ -671,9 +718,9 @@ public class InternalEngine extends Engine { assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false); index(index.docs(), indexWriter); } - versionMap.putUnderLock(index.uid().bytes(), new VersionValue(plan.versionForIndexing)); - return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, - plan.currentNotFoundOrDeleted); + versionMap.putUnderLock(index.uid().bytes(), + new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm())); + return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { /* There is no tragic event recorded so this must be a document failure. @@ -873,7 +920,14 @@ public class InternalEngine extends Engine { // unlike the primary, replicas don't really care to about found status of documents // this allows to ignore the case where a document was found in the live version maps in // a delete state and return true for the found flag in favor of code simplicity - final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnVersions(delete); + final OpVsLuceneDocStatus opVsLucene; + if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) { + opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); + } else { + assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) : + "index is newly created but op has no sequence numbers. 
op: " + delete; + opVsLucene = compareOpToLuceneDocBasedOnVersions(delete); + } final DeletionStrategy plan; if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { @@ -898,7 +952,7 @@ public class InternalEngine extends Engine { currentVersion = Versions.NOT_FOUND; currentlyDeleted = true; } else { - currentVersion = versionValue.getVersion(); + currentVersion = versionValue.version; currentlyDeleted = versionValue.isDelete(); } final DeletionStrategy plan; @@ -923,7 +977,7 @@ public class InternalEngine extends Engine { indexWriter.deleteDocuments(delete.uid()); } versionMap.putUnderLock(delete.uid().bytes(), - new DeleteVersionValue(plan.versionOfDeletion, + new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(), engineConfig.getThreadPool().relativeTimeInMillis())); return new DeleteResult( plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false); @@ -1179,12 +1233,12 @@ public class InternalEngine extends Engine { try { translog.prepareCommit(); logger.trace("starting commit for flush; commitTranslog=true"); - commitIndexWriter(indexWriter, translog, null); + final long committedGeneration = commitIndexWriter(indexWriter, translog, null); logger.trace("finished commit for flush"); // we need to refresh in order to clear older version values refresh("version_table_flush"); // after refresh documents can be retrieved from the index so we can now commit the translog - translog.commit(); + translog.commit(committedGeneration); } catch (Exception e) { throw new FlushFailedEngineException(shardId, e); } @@ -1235,14 +1289,14 @@ public class InternalEngine extends Engine { // TODO: not good that we reach into LiveVersionMap here; can we move this inside VersionMap instead? problem is the dirtyLock... // we only need to prune the deletes map; the current/old version maps are cleared on refresh: - for (Map.Entry entry : versionMap.getAllTombstones()) { + for (Map.Entry entry : versionMap.getAllTombstones()) { BytesRef uid = entry.getKey(); try (Releasable ignored = acquireLock(uid)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set? // Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator: - VersionValue versionValue = versionMap.getTombstoneUnderLock(uid); + DeleteVersionValue versionValue = versionMap.getTombstoneUnderLock(uid); if (versionValue != null) { - if (timeMSec - versionValue.getTime() > getGcDeletesInMillis()) { + if (timeMSec - versionValue.time > getGcDeletesInMillis()) { versionMap.removeTombstoneUnderLock(uid); } } @@ -1490,7 +1544,7 @@ public class InternalEngine extends Engine { private long loadCurrentVersionFromIndex(Term uid) throws IOException { assert incrementIndexVersionLookup(); try (Searcher searcher = acquireSearcher("load_version")) { - return VersionsResolver.loadVersion(searcher.reader(), uid); + return VersionsAndSeqNoResolver.loadVersion(searcher.reader(), uid); } } @@ -1680,55 +1734,65 @@ public class InternalEngine extends Engine { } } - private void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { + /** + * Commits the specified index writer. 
+ * + * @param writer the index writer to commit + * @param translog the translog + * @param syncId the sync flush ID ({@code null} if not committing a synced flush) + * @return the minimum translog generation for the local checkpoint committed with the specified index writer + * @throws IOException if an I/O exception occurs committing the specfied writer + */ + private long commitIndexWriter(final IndexWriter writer, final Translog translog, @Nullable final String syncId) throws IOException { ensureCanFlush(); try { - Translog.TranslogGeneration translogGeneration = translog.getGeneration(); - - final String translogFileGen = Long.toString(translogGeneration.translogFileGeneration); + final long localCheckpoint = seqNoService().getLocalCheckpoint(); + final Translog.TranslogGeneration translogGeneration = translog.getMinGenerationForSeqNo(localCheckpoint + 1); + final String translogFileGeneration = Long.toString(translogGeneration.translogFileGeneration); final String translogUUID = translogGeneration.translogUUID; - final String localCheckpoint = Long.toString(seqNoService().getLocalCheckpoint()); + final String localCheckpointValue = Long.toString(localCheckpoint); writer.setLiveCommitData(() -> { /* * The user data captured above (e.g. local checkpoint) contains data that must be evaluated *before* Lucene flushes - * segments, including the local checkpoint amongst other values. The maximum sequence number is different - we never want + * segments, including the local checkpoint amongst other values. The maximum sequence number is different, we never want * the maximum sequence number to be less than the last sequence number to go into a Lucene commit, otherwise we run the * risk of re-using a sequence number for two different documents when restoring from this commit point and subsequently - * writing new documents to the index. Since we only know which Lucene documents made it into the final commit after the - * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the max_seq_no to the time of invocation - * of the commit data iterator (which occurs after all documents have been flushed to Lucene). + * writing new documents to the index. Since we only know which Lucene documents made it into the final commit after the + * {@link IndexWriter#commit()} call flushes all documents, we defer computation of the maximum sequence number to the time + * of invocation of the commit data iterator (which occurs after all documents have been flushed to Lucene). 
*/ - final Map commitData = new HashMap<>(6); - commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGen); + final Map commitData = new HashMap<>(5); + commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogFileGeneration); commitData.put(Translog.TRANSLOG_UUID_KEY, translogUUID); - commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpoint); + commitData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, localCheckpointValue); if (syncId != null) { commitData.put(Engine.SYNC_COMMIT_ID, syncId); } commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqNoService().getMaxSeqNo())); - if (logger.isTraceEnabled()) { - logger.trace("committing writer with commit data [{}]", commitData); - } + logger.trace("committing writer with commit data [{}]", commitData); return commitData.entrySet().iterator(); }); writer.commit(); - } catch (Exception ex) { + return translogGeneration.translogFileGeneration; + } catch (final Exception ex) { try { failEngine("lucene commit failed", ex); - } catch (Exception inner) { + } catch (final Exception inner) { ex.addSuppressed(inner); } throw ex; - } catch (AssertionError e) { - // IndexWriter throws AssertionError on commit, if asserts are enabled, if any files don't exist, but tests that - // randomly throw FNFE/NSFE can also hit this: + } catch (final AssertionError e) { + /* + * If assertions are enabled, IndexWriter throws AssertionError on commit if any files don't exist, but tests that randomly + * throw FileNotFoundException or NoSuchFileException can also hit this. + */ if (ExceptionsHelper.stackTrace(e).contains("org.apache.lucene.index.IndexWriter.filesExist")) { - EngineException engineException = new EngineException(shardId, "failed to commit engine", e); + final EngineException engineException = new EngineException(shardId, "failed to commit engine", e); try { failEngine("lucene commit failed", engineException); - } catch (Exception inner) { + } catch (final Exception inner) { engineException.addSuppressed(inner); } throw engineException; @@ -1812,7 +1876,7 @@ public class InternalEngine extends Engine { * Gets the commit data from {@link IndexWriter} as a map. 
*/ private static Map commitDataAsMap(final IndexWriter indexWriter) { - Map commitData = new HashMap<>(6); + Map commitData = new HashMap<>(5); for (Map.Entry entry : indexWriter.getLiveCommitData()) { commitData.put(entry.getKey(), entry.getValue()); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 7233420309c..9ee4bd43c21 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/core/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -55,7 +55,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable { } // All deletes also go here, and delete "tombstones" are retained after refresh: - private final Map tombstones = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); + private final Map tombstones = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); private volatile Maps maps = new Maps(); @@ -180,7 +180,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable { final VersionValue prevTombstone; if (version.isDelete()) { // Also enroll the delete into tombstones, and account for its RAM too: - prevTombstone = tombstones.put(uid, version); + prevTombstone = tombstones.put(uid, (DeleteVersionValue)version); // We initially account for BytesRef/VersionValue RAM for a delete against the tombstones, because this RAM will not be freed up // on refresh. Later, in removeTombstoneUnderLock, if we clear the tombstone entry but the delete remains in current, we shift @@ -225,12 +225,12 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable { } /** Caller has a lock, so that this uid will not be concurrently added/deleted by another thread. */ - VersionValue getTombstoneUnderLock(BytesRef uid) { + DeleteVersionValue getTombstoneUnderLock(BytesRef uid) { return tombstones.get(uid); } /** Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones (exposed via reader but not yet GC'd). */ - Iterable> getAllTombstones() { + Iterable> getAllTombstones() { return tombstones.entrySet(); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java index 53550578cc3..1c2fa300520 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/core/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -30,18 +30,17 @@ class VersionValue implements Accountable { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(VersionValue.class); /** the version of the document. 
used for versioned indexed operations and as a BWC layer, where no seq# are set yet */ - private final long version; + final long version; - VersionValue(long version) { + /** the seq number of the operation that last changed the associated uuid */ + final long seqNo; + /** the the term of the operation that last changed the associated uuid */ + final long term; + + VersionValue(long version, long seqNo, long term) { this.version = version; - } - - public long getTime() { - throw new UnsupportedOperationException(); - } - - public long getVersion() { - return version; + this.seqNo = seqNo; + this.term = term; } public boolean isDelete() { @@ -61,6 +60,9 @@ class VersionValue implements Accountable { @Override public String toString() { return "VersionValue{" + - "version=" + version + "}"; + "version=" + version + + ", seqNo=" + seqNo + + ", term=" + term + + '}'; } } diff --git a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 33f55c7a916..6d3e1e3ab6a 100644 --- a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -24,7 +24,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.util.set.Sets; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index f1b5760e901..64c4932e470 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -254,12 +254,12 @@ public abstract class ParseContext { } @Override - public SeqNoFieldMapper.SequenceID seqID() { + public SeqNoFieldMapper.SequenceIDFields seqID() { return in.seqID(); } @Override - public void seqID(SeqNoFieldMapper.SequenceID seqID) { + public void seqID(SeqNoFieldMapper.SequenceIDFields seqID) { in.seqID(seqID); } @@ -310,7 +310,7 @@ public abstract class ParseContext { private Field version; - private SeqNoFieldMapper.SequenceID seqID; + private SeqNoFieldMapper.SequenceIDFields seqID; private final AllEntries allEntries; @@ -404,12 +404,12 @@ public abstract class ParseContext { } @Override - public SeqNoFieldMapper.SequenceID seqID() { + public SeqNoFieldMapper.SequenceIDFields seqID() { return this.seqID; } @Override - public void seqID(SeqNoFieldMapper.SequenceID seqID) { + public void seqID(SeqNoFieldMapper.SequenceIDFields seqID) { this.seqID = seqID; } @@ -539,9 +539,9 @@ public abstract class ParseContext { public abstract void version(Field version); - public abstract SeqNoFieldMapper.SequenceID seqID(); + public abstract SeqNoFieldMapper.SequenceIDFields seqID(); - public abstract void seqID(SeqNoFieldMapper.SequenceID seqID); + public abstract void seqID(SeqNoFieldMapper.SequenceIDFields seqID); public final boolean includeInAll(Boolean includeInAll, FieldMapper mapper) { return includeInAll(includeInAll, mapper.fieldType().indexOptions() != IndexOptions.NONE); diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index f7d5804be0d..91cf2aa4fa4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -36,7 +36,7 @@ public class ParsedDocument { private final String id, type; private final BytesRef uid; - private final SeqNoFieldMapper.SequenceID seqID; + private final SeqNoFieldMapper.SequenceIDFields seqID; private final String routing; @@ -50,7 +50,7 @@ public class ParsedDocument { private String parent; public ParsedDocument(Field version, - SeqNoFieldMapper.SequenceID seqID, + SeqNoFieldMapper.SequenceIDFields seqID, String id, String type, String routing, diff --git a/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 9f844a3371e..9612d94e661 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; @@ -66,13 +65,13 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { * A sequence ID, which is made up of a sequence number (both the searchable * and doc_value version of the field) and the primary term. */ - public static class SequenceID { + public static class SequenceIDFields { public final Field seqNo; public final Field seqNoDocValue; public final Field primaryTerm; - public SequenceID(Field seqNo, Field seqNoDocValue, Field primaryTerm) { + public SequenceIDFields(Field seqNo, Field seqNoDocValue, Field primaryTerm) { Objects.requireNonNull(seqNo, "sequence number field cannot be null"); Objects.requireNonNull(seqNoDocValue, "sequence number dv field cannot be null"); Objects.requireNonNull(primaryTerm, "primary term field cannot be null"); @@ -81,9 +80,9 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { this.primaryTerm = primaryTerm; } - public static SequenceID emptySeqID() { - return new SequenceID(new LongPoint(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO), - new SortedNumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO), + public static SequenceIDFields emptySeqID() { + return new SequenceIDFields(new LongPoint(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO), + new NumericDocValuesField(NAME, SequenceNumbersService.UNASSIGNED_SEQ_NO), new NumericDocValuesField(PRIMARY_TERM_NAME, 0)); } } @@ -242,7 +241,7 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { protected void parseCreateField(ParseContext context, List fields) throws IOException { // see InternalEngine.innerIndex to see where the real version value is set // also see ParsedDocument.updateSeqID (called by innerIndex) - SequenceID seqID = SequenceID.emptySeqID(); + SequenceIDFields seqID = SequenceIDFields.emptySeqID(); context.seqID(seqID); fields.add(seqID.seqNo); fields.add(seqID.seqNoDocValue); @@ -264,7 +263,7 @@ public class SeqNoFieldMapper extends MetadataFieldMapper { for (int i = 1; i < context.docs().size(); i++) { final 
Document doc = context.docs().get(i); doc.add(new LongPoint(NAME, 1)); - doc.add(new SortedNumericDocValuesField(NAME, 1L)); + doc.add(new NumericDocValuesField(NAME, 1L)); doc.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0L)); } } diff --git a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index 77d8204e45d..6351282a38a 100644 --- a/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/core/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -34,7 +34,7 @@ import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index d9a8cc408f8..a5e6255ecb6 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -85,14 +85,14 @@ import java.util.stream.Stream; * When a translog is opened the checkpoint is use to retrieve the latest translog file generation and subsequently to open the last written file to recovery operations. * The {@link org.elasticsearch.index.translog.Translog.TranslogGeneration}, given when the translog is opened / constructed is compared against * the latest generation and all consecutive translog files singe the given generation and the last generation in the checkpoint will be recovered and preserved until the next - * generation is committed using {@link Translog#commit()}. In the common case the translog file generation in the checkpoint and the generation passed to the translog on creation are - * the same. The only situation when they can be different is when an actual translog commit fails in between {@link Translog#prepareCommit()} and {@link Translog#commit()}. In such a case + * generation is committed using {@link Translog#commit(long)}. In the common case the translog file generation in the checkpoint and the generation passed to the translog on creation are + * the same. The only situation when they can be different is when an actual translog commit fails in between {@link Translog#prepareCommit()} and {@link Translog#commit(long)}. In such a case * the currently being committed translog file will not be deleted since it's commit was not successful. Yet, a new/current translog file is already opened at that point such that there is more than * one translog file present. Such an uncommitted translog file always has a translog-${gen}.ckp associated with it which is an fsynced copy of the it's last translog.ckp such that in * disaster recovery last fsynced offsets, number of operation etc. are still preserved. *

*/ -public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable, TwoPhaseCommit { +public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable { /* * TODO @@ -439,7 +439,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); } finally { - Releasables.close(out.bytes()); + Releasables.close(out); } } @@ -804,6 +804,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC long seqNo(); + long primaryTerm(); + /** * Reads the type and the operation from the given stream. The operation must be written with * {@link Operation#writeType(Operation, StreamOutput)} @@ -953,6 +955,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return seqNo; } + @Override public long primaryTerm() { return primaryTerm; } @@ -1104,6 +1107,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return seqNo; } + @Override public long primaryTerm() { return primaryTerm; } @@ -1180,6 +1184,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return seqNo; } + @Override public long primaryTerm() { return primaryTerm; } @@ -1332,7 +1337,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC bytes.writeTo(outStream); } } finally { - Releasables.close(out.bytes()); + Releasables.close(out); } } @@ -1347,6 +1352,31 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC out.writeInt((int) checksum); } + /** + * Gets the minimum generation that could contain any sequence number after the specified sequence number, or the current generation if + * there is no generation that could any such sequence number. + * + * @param seqNo the sequence number + * @return the minimum generation for the sequence number + */ + public TranslogGeneration getMinGenerationForSeqNo(final long seqNo) { + try (ReleasableLock ignored = writeLock.acquire()) { + /* + * When flushing, the engine will ask the translog for the minimum generation that could contain any sequence number after the + * local checkpoint. Immediately after flushing, there will be no such generation, so this minimum generation in this case will + * be the current translog generation as we do not need any prior generations to have a complete history up to the current local + * checkpoint. + */ + long minTranslogFileGeneration = this.currentFileGeneration(); + for (final TranslogReader reader : readers) { + if (seqNo <= reader.getCheckpoint().maxSeqNo) { + minTranslogFileGeneration = Math.min(minTranslogFileGeneration, reader.getGeneration()); + } + } + return new TranslogGeneration(translogUUID, minTranslogFileGeneration); + } + } + /** * Roll the current translog generation into a new generation. This does not commit the * translog. @@ -1375,27 +1405,38 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - @Override - public long prepareCommit() throws IOException { + /** + * Prepares a translog commit by setting the current committing generation and rolling the translog generation. 
+ * + * @throws IOException if an I/O exception occurred while rolling the translog generation + */ + public void prepareCommit() throws IOException { try (ReleasableLock ignored = writeLock.acquire()) { ensureOpen(); if (currentCommittingGeneration != NOT_SET_GENERATION) { - final String message = String.format( - Locale.ROOT, - "already committing a translog with generation [%d]", - currentCommittingGeneration); + final String message = + String.format(Locale.ROOT, "already committing a translog with generation [%d]", currentCommittingGeneration); throw new IllegalStateException(message); } currentCommittingGeneration = current.getGeneration(); rollGeneration(); } - return 0; } - @Override - public long commit() throws IOException { + /** + * Commits the translog and sets the last committed translog generation to the specified generation. The specified committed generation + * will be used when trimming unreferenced translog generations such that generations from the committed generation will be preserved. + * + * If {@link Translog#prepareCommit()} was not called before calling commit, this method will be invoked too causing the translog + * generation to be rolled. + * + * @param committedGeneration the minimum translog generation to preserve after trimming unreferenced generations + * @throws IOException if an I/O exception occurred preparing the translog commit + */ + public void commit(final long committedGeneration) throws IOException { try (ReleasableLock ignored = writeLock.acquire()) { ensureOpen(); + assert assertCommittedGenerationIsInValidRange(committedGeneration); if (currentCommittingGeneration == NOT_SET_GENERATION) { prepareCommit(); } @@ -1403,26 +1444,39 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC assert readers.stream().anyMatch(r -> r.getGeneration() == currentCommittingGeneration) : "readers missing committing generation [" + currentCommittingGeneration + "]"; // set the last committed generation otherwise old files will not be cleaned up - lastCommittedTranslogFileGeneration = currentCommittingGeneration + 1; + lastCommittedTranslogFileGeneration = committedGeneration; currentCommittingGeneration = NOT_SET_GENERATION; trimUnreferencedReaders(); } - return 0; } + private boolean assertCommittedGenerationIsInValidRange(final long committedGeneration) { + assert committedGeneration <= current.generation + : "tried to commit generation [" + committedGeneration + "] after current generation [" + current.generation + "]"; + final long min = readers.stream().map(TranslogReader::getGeneration).min(Long::compareTo).orElse(Long.MIN_VALUE); + assert committedGeneration >= min + : "tried to commit generation [" + committedGeneration + "] before minimum generation [" + min + "]"; + return true; + } + + /** + * Trims unreferenced translog generations. The guarantee here is that translog generations will be preserved for all outstanding views + * and from the last committed translog generation defined by {@link Translog#lastCommittedTranslogFileGeneration}. 
+ */ void trimUnreferencedReaders() { try (ReleasableLock ignored = writeLock.acquire()) { if (closed.get()) { - // we're shutdown potentially on some tragic event - don't delete anything + // we're shutdown potentially on some tragic event, don't delete anything return; } - long minReferencedGen = outstandingViews.stream().mapToLong(View::minTranslogGeneration).min().orElse(Long.MAX_VALUE); - minReferencedGen = Math.min(lastCommittedTranslogFileGeneration, minReferencedGen); - final long finalMinReferencedGen = minReferencedGen; - List unreferenced = readers.stream().filter(r -> r.getGeneration() < finalMinReferencedGen).collect(Collectors.toList()); + long minReferencedGen = Math.min( + lastCommittedTranslogFileGeneration, + outstandingViews.stream().mapToLong(View::minTranslogGeneration).min().orElse(Long.MAX_VALUE)); + final List unreferenced = + readers.stream().filter(r -> r.getGeneration() < minReferencedGen).collect(Collectors.toList()); for (final TranslogReader unreferencedReader : unreferenced) { - Path translogPath = unreferencedReader.path(); - logger.trace("delete translog file - not referenced and not current anymore {}", translogPath); + final Path translogPath = unreferencedReader.path(); + logger.trace("delete translog file [{}], not referenced and not current anymore", translogPath); IOUtils.closeWhileHandlingException(unreferencedReader); IOUtils.deleteFilesIgnoringExceptions(translogPath, translogPath.resolveSibling(getCommitCheckpointFileName(unreferencedReader.getGeneration()))); @@ -1442,13 +1496,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - - @Override - public void rollback() throws IOException { - ensureOpen(); - close(); - } - /** * References a transaction log generation */ diff --git a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java index f5501437bcc..d9cefc6b22e 100644 --- a/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java @@ -37,17 +37,12 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Function; -import java.util.function.Predicate; -import java.util.function.UnaryOperator; import static org.elasticsearch.common.Strings.cleanPath; public class InternalSettingsPreparer { private static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json"}; - private static final String PROPERTY_DEFAULTS_PREFIX = "default."; - private static final Predicate PROPERTY_DEFAULTS_PREDICATE = key -> key.startsWith(PROPERTY_DEFAULTS_PREFIX); - private static final UnaryOperator STRIP_PROPERTY_DEFAULTS_PREFIX = key -> key.substring(PROPERTY_DEFAULTS_PREFIX.length()); public static final String SECRET_PROMPT_VALUE = "${prompt.secret}"; public static final String TEXT_PROMPT_VALUE = "${prompt.text}"; @@ -125,15 +120,16 @@ public class InternalSettingsPreparer { } /** - * Initializes the builder with the given input settings, and loads system properties settings if allowed. - * If loadDefaults is true, system property default settings are loaded. + * Initializes the builder with the given input settings, and applies settings from the specified map (these settings typically come + * from the command line). 
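A compact sketch of that flow (illustrative only; cluster.name is a made-up example of a setting passed on the command line with -E), showing that every key in the map is now applied verbatim and the old "default." prefix handling is gone:

    import java.util.Collections;
    import java.util.Map;
    import java.util.function.Function;
    import org.elasticsearch.common.settings.Settings;

    static Settings initializeSettingsSketch(final Settings input) {
        // a made-up command-line setting, as if passed with -Ecluster.name=prod
        final Map<String, String> esSettings = Collections.singletonMap("cluster.name", "prod");
        final Settings.Builder output = Settings.builder();
        output.put(input);                                       // settings loaded from elasticsearch.yml
        output.putProperties(esSettings, Function.identity());   // applied as-is, no "default." stripping
        output.replacePropertyPlaceholders();
        return output.build();
    }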
+ * + * @param output the settings builder to apply the input and default settings to + * @param input the input settings + * @param esSettings a map from which to apply settings */ - private static void initializeSettings(Settings.Builder output, Settings input, Map esSettings) { + static void initializeSettings(final Settings.Builder output, final Settings input, final Map esSettings) { output.put(input); - output.putProperties(esSettings, - PROPERTY_DEFAULTS_PREDICATE.and(key -> output.get(STRIP_PROPERTY_DEFAULTS_PREFIX.apply(key)) == null), - STRIP_PROPERTY_DEFAULTS_PREFIX); - output.putProperties(esSettings, PROPERTY_DEFAULTS_PREDICATE.negate(), Function.identity()); + output.putProperties(esSettings, Function.identity()); output.replacePropertyPlaceholders(); } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index bf65f5b9441..2508872eed1 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -50,6 +50,7 @@ import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.StopWatch; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Binder; @@ -58,6 +59,7 @@ import org.elasticsearch.common.inject.Key; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.inject.util.Providers; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; @@ -146,7 +148,9 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -262,6 +266,9 @@ public class Node implements Closeable { Logger logger = Loggers.getLogger(Node.class, tmpSettings); final String nodeId = nodeEnvironment.nodeId(); tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId); + if (DiscoveryNode.nodeRequiresLocalStorage(tmpSettings)) { + checkForIndexDataInDefaultPathData(tmpSettings, nodeEnvironment, logger); + } // this must be captured after the node name is possibly added to the settings final String nodeName = NODE_NAME_SETTING.get(tmpSettings); if (hadPredefinedNodeName == false) { @@ -500,6 +507,58 @@ public class Node implements Closeable { } } + /** + * Checks for path.data and default.path.data being configured, and there being index data in any of the paths in default.path.data. 
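Illustrative only (the paths are invented): the check is a no-op unless both settings are present; a node started with a configuration like the following fails at startup if the default data location still holds index folders for this node:

    import org.elasticsearch.common.settings.Settings;

    final Settings settings = Settings.builder()
            .put("path.home", "/usr/share/elasticsearch")
            .put("path.data", "/mnt/data")                        // explicitly configured by the user
            .put("default.path.data", "/var/lib/elasticsearch")   // typically injected by the startup scripts
            .build();
    // With index folders still under /var/lib/elasticsearch, checkForIndexDataInDefaultPathData(settings, nodeEnv, logger)
    // logs the offending folders and throws an IllegalStateException.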
+ * + * @param settings the settings to check for path.data and default.path.data + * @param nodeEnv the current node environment + * @param logger a logger where messages regarding the detection will be logged + * @throws IOException if an I/O exception occurs reading the directory structure + */ + static void checkForIndexDataInDefaultPathData( + final Settings settings, final NodeEnvironment nodeEnv, final Logger logger) throws IOException { + if (!Environment.PATH_DATA_SETTING.exists(settings) || !Environment.DEFAULT_PATH_DATA_SETTING.exists(settings)) { + return; + } + + boolean clean = true; + for (final String defaultPathData : Environment.DEFAULT_PATH_DATA_SETTING.get(settings)) { + final Path nodeDirectory = NodeEnvironment.resolveNodePath(getPath(defaultPathData), nodeEnv.getNodeLockId()); + if (Files.exists(nodeDirectory) == false) { + continue; + } + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(nodeDirectory); + final Set availableIndexFolders = nodeEnv.availableIndexFoldersForPath(nodePath); + if (availableIndexFolders.isEmpty()) { + continue; + } + clean = false; + logger.error("detected index data in default.path.data [{}] where there should not be any", nodePath.indicesPath); + for (final String availableIndexFolder : availableIndexFolders) { + logger.info( + "index folder [{}] in default.path.data [{}] must be moved to any of {}", + availableIndexFolder, + nodePath.indicesPath, + Arrays.stream(nodeEnv.nodePaths()).map(np -> np.indicesPath).collect(Collectors.toList())); + } + } + + if (clean) { + return; + } + + final String message = String.format( + Locale.ROOT, + "detected index data in default.path.data %s where there should not be any; check the logs for details", + Environment.DEFAULT_PATH_DATA_SETTING.get(settings)); + throw new IllegalStateException(message); + } + + @SuppressForbidden(reason = "read path that is not configured in environment") + private static Path getPath(final String path) { + return PathUtils.get(path); + } + // visible for testing static void warnIfPreRelease(final Version version, final boolean isSnapshot, final Logger logger) { if (!version.isRelease() || isSnapshot) { diff --git a/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index bdc78c82dd5..4db9aec6e93 100644 --- a/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/core/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -20,12 +20,14 @@ package org.elasticsearch.rest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.io.OutputStream; import java.util.Collections; import java.util.Set; import java.util.function.Predicate; @@ -97,7 +99,9 @@ public abstract class AbstractRestChannel implements RestChannel { excludes = filters.stream().filter(EXCLUDE_FILTER).map(f -> f.substring(1)).collect(toSet()); } - XContentBuilder builder = new XContentBuilder(XContentFactory.xContent(responseContentType), bytesOutput(), includes, excludes); + OutputStream unclosableOutputStream = Streams.flushOnCloseStream(bytesOutput()); + XContentBuilder builder = + new 
XContentBuilder(XContentFactory.xContent(responseContentType), unclosableOutputStream, includes, excludes); if (pretty) { builder.prettyPrint().lfAtEnd(); } @@ -107,8 +111,9 @@ public abstract class AbstractRestChannel implements RestChannel { } /** - * A channel level bytes output that can be reused. It gets reset on each call to this - * method. + * A channel level bytes output that can be reused. The bytes output is lazily instantiated + * by a call to {@link #newBytesOutput()}. Once the stream is created, it gets reset on each + * call to this method. */ @Override public final BytesStreamOutput bytesOutput() { @@ -120,6 +125,14 @@ public abstract class AbstractRestChannel implements RestChannel { return bytesOut; } + /** + * An accessor to the raw value of the channel bytes output. This method will not instantiate + * a new stream if one does not exist and this method will not reset the stream. + */ + protected final BytesStreamOutput bytesOutputOrNull() { + return bytesOut; + } + protected BytesStreamOutput newBytesOutput() { return new BytesStreamOutput(); } diff --git a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java index 72ee7efc489..55991b35413 100644 --- a/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java +++ b/core/src/main/java/org/elasticsearch/rest/BytesRestResponse.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -147,8 +146,8 @@ public class BytesRestResponse extends RestResponse { return builder; } - static BytesRestResponse createSimpleErrorResponse(RestStatus status, String errorMessage) throws IOException { - return new BytesRestResponse(status, JsonXContent.contentBuilder().startObject() + static BytesRestResponse createSimpleErrorResponse(RestChannel channel, RestStatus status, String errorMessage) throws IOException { + return new BytesRestResponse(status, channel.newErrorBuilder().startObject() .field("error", errorMessage) .field("status", status.getStatus()) .endObject()); diff --git a/core/src/main/java/org/elasticsearch/rest/RestController.java b/core/src/main/java/org/elasticsearch/rest/RestController.java index ea603cf949f..a3d8a4b7db5 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestController.java +++ b/core/src/main/java/org/elasticsearch/rest/RestController.java @@ -178,8 +178,9 @@ public class RestController extends AbstractComponent implements HttpServerTrans sendContentTypeErrorMessage(request, responseChannel); } else if (contentLength > 0 && handler != null && handler.supportsContentStream() && request.getXContentType() != XContentType.JSON && request.getXContentType() != XContentType.SMILE) { - responseChannel.sendResponse(BytesRestResponse.createSimpleErrorResponse(RestStatus.NOT_ACCEPTABLE, "Content-Type [" + - request.getXContentType() + "] does not support stream parsing. Use JSON or SMILE instead")); + responseChannel.sendResponse(BytesRestResponse.createSimpleErrorResponse(responseChannel, + RestStatus.NOT_ACCEPTABLE, "Content-Type [" + request.getXContentType() + + "] does not support stream parsing. 
Use JSON or SMILE instead")); } else { if (canTripCircuitBreaker(request)) { inFlightRequestsBreaker(circuitBreakerService).addEstimateBytesAndMaybeBreak(contentLength, ""); @@ -229,7 +230,8 @@ public class RestController extends AbstractComponent implements HttpServerTrans void dispatchRequest(final RestRequest request, final RestChannel channel, final NodeClient client, ThreadContext threadContext, final RestHandler handler) throws Exception { if (checkRequestParameters(request, channel) == false) { - channel.sendResponse(BytesRestResponse.createSimpleErrorResponse(BAD_REQUEST, "error traces in responses are disabled.")); + channel + .sendResponse(BytesRestResponse.createSimpleErrorResponse(channel,BAD_REQUEST, "error traces in responses are disabled.")); } else { for (String key : headersToCopy) { String httpHeader = request.header(key); @@ -283,7 +285,7 @@ public class RestController extends AbstractComponent implements HttpServerTrans Strings.collectionToCommaDelimitedString(restRequest.getAllHeaderValues("Content-Type")) + "] is not supported"; } - channel.sendResponse(BytesRestResponse.createSimpleErrorResponse(NOT_ACCEPTABLE, errorMessage)); + channel.sendResponse(BytesRestResponse.createSimpleErrorResponse(channel, NOT_ACCEPTABLE, errorMessage)); } /** diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java index 806a89166d5..58dc861126b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/AbstractCatAction.java @@ -20,8 +20,9 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Table; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.UTF8StreamWriter; -import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.BytesStream; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; @@ -56,7 +57,7 @@ public abstract class AbstractCatAction extends BaseRestHandler { return channel -> { Table table = getTableWithHeader(request); int[] width = buildHelpWidths(table, request); - BytesStreamOutput bytesOutput = channel.bytesOutput(); + BytesStream bytesOutput = Streams.flushOnCloseStream(channel.bytesOutput()); UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOutput); for (Table.Cell cell : table.getHeaders()) { // need to do left-align always, so create new cells diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java index 5ebf36c63a1..00e56f3773c 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestTable.java @@ -22,8 +22,9 @@ package org.elasticsearch.rest.action.cat; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.UTF8StreamWriter; -import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.BytesStream; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.SizeValue; @@ -82,7 
+83,7 @@ public class RestTable { List headers = buildDisplayHeaders(table, request); int[] width = buildWidths(table, request, verbose, headers); - BytesStreamOutput bytesOut = channel.bytesOutput(); + BytesStream bytesOut = Streams.flushOnCloseStream(channel.bytesOutput()); UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOut); int lastHeader = headers.size() - 1; if (verbose) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java index 9d537030fa9..4b23d042d53 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java @@ -42,12 +42,12 @@ import java.util.Map; /** * A geo metric aggregator that computes a geo-centroid from a {@code geo_point} type field */ -public final class GeoCentroidAggregator extends MetricsAggregator { +final class GeoCentroidAggregator extends MetricsAggregator { private final ValuesSource.GeoPoint valuesSource; - LongArray centroids; - LongArray counts; + private LongArray centroids; + private LongArray counts; - protected GeoCentroidAggregator(String name, SearchContext context, Aggregator parent, + GeoCentroidAggregator(String name, SearchContext context, Aggregator parent, ValuesSource.GeoPoint valuesSource, List pipelineAggregators, Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java index c21999d3fb4..d153da3afa3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorFactory.java @@ -32,9 +32,9 @@ import java.io.IOException; import java.util.List; import java.util.Map; -public class GeoCentroidAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoCentroidAggregatorFactory extends ValuesSourceAggregatorFactory { - public GeoCentroidAggregatorFactory(String name, ValuesSourceConfig config, + GeoCentroidAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java index a5a8058ed28..597ad6176bd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java @@ -38,7 +38,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr protected final GeoPoint centroid; protected final long count; - public InternalGeoCentroid(String name, GeoPoint centroid, long count, List + InternalGeoCentroid(String name, GeoPoint centroid, long count, List pipelineAggregators, Map 
metaData) { super(name, pipelineAggregators, metaData); assert (centroid == null) == (count == 0); @@ -132,7 +132,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr } static class Fields { - public static final String CENTROID = "location"; + static final String CENTROID = "location"; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentile.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentile.java deleted file mode 100644 index bb8876d82fd..00000000000 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentile.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.metrics.percentiles; - -public class InternalPercentile implements Percentile { - - private final double percent; - private final double value; - - public InternalPercentile(double percent, double value) { - this.percent = percent; - this.value = value; - } - - @Override - public double getPercent() { - return percent; - } - - @Override - public double getValue() { - return value; - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentile.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentile.java index 96ad4f261a6..ca62ca6b200 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentile.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/Percentile.java @@ -19,10 +19,41 @@ package org.elasticsearch.search.aggregations.metrics.percentiles; -public interface Percentile { +import java.util.Objects; - double getPercent(); +public class Percentile { - double getValue(); + private final double percent; + private final double value; + public Percentile(double percent, double value) { + this.percent = percent; + this.value = value; + } + + public double getPercent() { + return percent; + } + + public double getValue() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Percentile that = (Percentile) o; + return Double.compare(that.percent, percent) == 0 + && Double.compare(that.value, value) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(percent, value); + } } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentileRanks.java 
b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentileRanks.java index 35234d73acc..cb058128c5a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentileRanks.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentileRanks.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -109,7 +108,7 @@ public class InternalHDRPercentileRanks extends AbstractInternalHDRPercentiles i @Override public Percentile next() { - final Percentile next = new InternalPercentile(percentileRank(state, values[i]), values[i]); + final Percentile next = new Percentile(percentileRank(state, values[i]), values[i]); ++i; return next; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentiles.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentiles.java index 579f25c1666..3967e94e488 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentiles.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/InternalHDRPercentiles.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.hdr; import org.HdrHistogram.DoubleHistogram; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -99,7 +98,7 @@ public class InternalHDRPercentiles extends AbstractInternalHDRPercentiles imple @Override public Percentile next() { - final Percentile next = new InternalPercentile(percents[i], state.getValueAtPercentile(percents[i])); + final Percentile next = new Percentile(percents[i], state.getValueAtPercentile(percents[i])); ++i; return next; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentileRanks.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentileRanks.java index 9e24ba5d86e..666993f41fd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentileRanks.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentileRanks.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile; import 
org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -106,7 +105,7 @@ public class InternalTDigestPercentileRanks extends AbstractInternalTDigestPerce @Override public Percentile next() { - final Percentile next = new InternalPercentile(percentileRank(state, values[i]), values[i]); + final Percentile next = new Percentile(percentileRank(state, values[i]), values[i]); ++i; return next; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentiles.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentiles.java index ec619219111..5a62f24933b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentiles.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentiles.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -95,7 +94,7 @@ public class InternalTDigestPercentiles extends AbstractInternalTDigestPercentil @Override public Percentile next() { - final Percentile next = new InternalPercentile(percents[i], state.quantile(percents[i] / 100)); + final Percentile next = new Percentile(percents[i], state.quantile(percents[i] / 100)); ++i; return next; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java index 059d72a8e10..375011c4e8e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/percentile/InternalPercentilesBucket.java @@ -26,7 +26,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.max.InternalMax; -import org.elasticsearch.search.aggregations.metrics.percentiles.InternalPercentile; import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -136,7 +135,7 @@ public class InternalPercentilesBucket extends InternalNumericMetricsAggregation @Override public Percentile next() { - final Percentile next = new InternalPercentile(percents[i], percentiles[i]); + final Percentile next = new Percentile(percents[i], percentiles[i]); ++i; return next; } diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java index 59b3ef95306..bfd2addb9c5 100644 --- 
a/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java +++ b/core/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -183,7 +183,7 @@ public final class TaskInfo implements Writeable, ToXContent { } public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "task_info", a -> { + "task_info", true, a -> { int i = 0; TaskId id = new TaskId((String) a[i++], (Long) a[i++]); String type = (String) a[i++]; @@ -196,11 +196,11 @@ public final class TaskInfo implements Writeable, ToXContent { String parentTaskIdString = (String) a[i++]; RawTaskStatus status = statusBytes == null ? null : new RawTaskStatus(statusBytes); - TaskId parentTaskId = parentTaskIdString == null ? TaskId.EMPTY_TASK_ID : new TaskId((String) parentTaskIdString); + TaskId parentTaskId = parentTaskIdString == null ? TaskId.EMPTY_TASK_ID : new TaskId(parentTaskIdString); return new TaskInfo(id, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId); }); static { - // Note for the future: this has to be backwards compatible with all changes to the task storage format + // Note for the future: this has to be backwards and forwards compatible with all changes to the task storage format PARSER.declareString(constructorArg(), new ParseField("node")); PARSER.declareLong(constructorArg(), new ParseField("id")); PARSER.declareString(constructorArg(), new ParseField("type")); diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index dd75ae29556..cbdc0dfa178 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -41,7 +41,7 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.compress.NotCompressedException; -import org.elasticsearch.common.io.ReleasableBytesStream; +import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -1025,10 +1025,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } status = TransportStatus.setRequest(status); ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - // we wrap this in a release once since if the onRequestSent callback throws an exception - // we might release things twice and this should be prevented - final Releasable toRelease = Releasables.releaseOnce(() -> Releasables.close(bStream.bytes())); - StreamOutput stream = bStream; + boolean addedReleaseListener = false; + StreamOutput stream = Streams.flushOnCloseStream(bStream); try { // only compress if asked, and, the request is not bytes, since then only // the header part is compressed, and the "body" can't be extracted as compressed @@ -1047,12 +1045,17 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i stream.writeString(action); BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream, bStream); final TransportRequestOptions finalOptions = options; + final StreamOutput finalStream = stream; // this might be called in a different thread - SendListener onRequestSent = new SendListener(toRelease, + SendListener onRequestSent = new SendListener( + 
() -> IOUtils.closeWhileHandlingException(finalStream, bStream), () -> transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions)); internalSendMessage(targetChannel, message, onRequestSent); + addedReleaseListener = true; } finally { - IOUtils.close(stream); + if (!addedReleaseListener) { + IOUtils.close(stream, bStream); + } } } @@ -1114,10 +1117,8 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i } status = TransportStatus.setResponse(status); // TODO share some code with sendRequest ReleasableBytesStreamOutput bStream = new ReleasableBytesStreamOutput(bigArrays); - // we wrap this in a release once since if the onRequestSent callback throws an exception - // we might release things twice and this should be prevented - final Releasable toRelease = Releasables.releaseOnce(() -> Releasables.close(bStream.bytes())); - StreamOutput stream = bStream; + boolean addedReleaseListener = false; + StreamOutput stream = Streams.flushOnCloseStream(bStream); try { if (options.compress()) { status = TransportStatus.setCompress(status); @@ -1128,12 +1129,16 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i BytesReference reference = buildMessage(requestId, status, nodeVersion, response, stream, bStream); final TransportResponseOptions finalOptions = options; + final StreamOutput finalStream = stream; // this might be called in a different thread - SendListener listener = new SendListener(toRelease, + SendListener listener = new SendListener(() -> IOUtils.closeWhileHandlingException(finalStream, bStream), () -> transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions)); internalSendMessage(channel, reference, listener); + addedReleaseListener = true; } finally { - IOUtils.close(stream); + if (!addedReleaseListener) { + IOUtils.close(stream, bStream); + } } } @@ -1161,7 +1166,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i * Serializes the given message into a bytes representation */ private BytesReference buildMessage(long requestId, byte status, Version nodeVersion, TransportMessage message, StreamOutput stream, - ReleasableBytesStream writtenBytes) throws IOException { + ReleasableBytesStreamOutput writtenBytes) throws IOException { final BytesReference zeroCopyBuffer; if (message instanceof BytesTransportRequest) { // what a shitty optimization - we should use a direct send method instead BytesTransportRequest bRequest = (BytesTransportRequest) message; diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 51d3709ba1b..136097a2926 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -80,43 +80,37 @@ public class RetryTests extends ESTestCase { return request; } - public void testSyncRetryBacksOff() throws Exception { + public void testRetryBacksOff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); BulkRequest bulkRequest = createBulkRequest(); - BulkResponse response = Retry - .on(EsRejectedExecutionException.class) - .policy(backoff) - .using(bulkClient.threadPool()) - .withSyncBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings()); + BulkResponse response = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool()) + .withBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings()) + .actionGet(); 
assertFalse(response.hasFailures()); assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); } - public void testSyncRetryFailsAfterBackoff() throws Exception { + public void testRetryFailsAfterBackoff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); BulkRequest bulkRequest = createBulkRequest(); - BulkResponse response = Retry - .on(EsRejectedExecutionException.class) - .policy(backoff) - .using(bulkClient.threadPool()) - .withSyncBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings()); + BulkResponse response = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool()) + .withBackoff(bulkClient::bulk, bulkRequest, bulkClient.settings()) + .actionGet(); assertTrue(response.hasFailures()); assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); } - public void testAsyncRetryBacksOff() throws Exception { + public void testRetryWithListenerBacksOff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); AssertingListener listener = new AssertingListener(); BulkRequest bulkRequest = createBulkRequest(); - Retry.on(EsRejectedExecutionException.class) - .policy(backoff) - .using(bulkClient.threadPool()) - .withAsyncBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings()); + Retry retry = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool()); + retry.withBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings()); listener.awaitCallbacksCalled(); listener.assertOnResponseCalled(); @@ -125,15 +119,13 @@ public class RetryTests extends ESTestCase { listener.assertOnFailureNeverCalled(); } - public void testAsyncRetryFailsAfterBacksOff() throws Exception { + public void testRetryWithListenerFailsAfterBacksOff() throws Exception { BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); AssertingListener listener = new AssertingListener(); BulkRequest bulkRequest = createBulkRequest(); - Retry.on(EsRejectedExecutionException.class) - .policy(backoff) - .using(bulkClient.threadPool()) - .withAsyncBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings()); + Retry retry = new Retry(EsRejectedExecutionException.class, backoff, bulkClient.threadPool()); + retry.withBackoff(bulkClient::bulk, bulkRequest, listener, bulkClient.settings()); listener.awaitCallbacksCalled(); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 1d6a634a877..11d42516331 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -166,6 +166,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { Files.createDirectories(multiDataPath[0]); Files.createDirectories(multiDataPath[1]); logger.info("--> Multi data paths: {}, {}", multiDataPath[0], multiDataPath[1]); + ensureGreen(); } void upgradeIndexFolder() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutputTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutputTests.java new file mode 100644 index 00000000000..557721a0241 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutputTests.java @@ -0,0 +1,51 
@@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.io.stream; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class ReleasableBytesStreamOutputTests extends ESTestCase { + + public void testRelease() throws Exception { + MockBigArrays mockBigArrays = + new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()); + try (ReleasableBytesStreamOutput output = + getRandomReleasableBytesStreamOutput(mockBigArrays)) { + output.writeBoolean(randomBoolean()); + } + MockBigArrays.ensureAllArraysAreReleased(); + } + + private ReleasableBytesStreamOutput getRandomReleasableBytesStreamOutput( + MockBigArrays mockBigArrays) throws IOException { + ReleasableBytesStreamOutput output = new ReleasableBytesStreamOutput(mockBigArrays); + if (randomBoolean()) { + for (int i = 0; i < scaledRandomIntBetween(1, 32); i++) { + output.write(randomByte()); + } + } + return output; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java index d771ced56ff..8b68e769570 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionLookupTests.java @@ -31,7 +31,7 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.uid.VersionsResolver.DocIdAndVersion; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.test.ESTestCase; @@ -53,7 +53,7 @@ public class VersionLookupTests extends ESTestCase { writer.addDocument(doc); DirectoryReader reader = DirectoryReader.open(writer); LeafReaderContext segment = reader.leaves().get(0); - PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(segment.reader()); + PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader()); // found doc DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), null, segment); assertNotNull(result); @@ -81,7 +81,7 @@ public class VersionLookupTests extends ESTestCase { writer.addDocument(doc); DirectoryReader reader = DirectoryReader.open(writer); LeafReaderContext segment = reader.leaves().get(0); - PerThreadIDAndVersionLookup lookup = new 
PerThreadIDAndVersionLookup(segment.reader()); + PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(segment.reader()); // return the last doc when there are duplicates DocIdAndVersion result = lookup.lookupVersion(new BytesRef("6"), null, segment); assertNotNull(result); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index 6b9960294e4..c5e66a3bf2a 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -38,8 +38,8 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import static org.elasticsearch.common.lucene.uid.VersionsResolver.loadDocIdAndVersion; -import static org.elasticsearch.common.lucene.uid.VersionsResolver.loadVersion; +import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion; +import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadVersion; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -145,7 +145,7 @@ public class VersionsTests extends ESTestCase { /** Test that version map cache works, is evicted on close, etc */ public void testCache() throws Exception { - int size = VersionsResolver.lookupStates.size(); + int size = VersionsAndSeqNoResolver.lookupStates.size(); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); @@ -156,21 +156,21 @@ public class VersionsTests extends ESTestCase { DirectoryReader reader = DirectoryReader.open(writer); // should increase cache size by 1 assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); - assertEquals(size+1, VersionsResolver.lookupStates.size()); + assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); // should be cache hit assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); - assertEquals(size+1, VersionsResolver.lookupStates.size()); + assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); reader.close(); writer.close(); // core should be evicted from the map - assertEquals(size, VersionsResolver.lookupStates.size()); + assertEquals(size, VersionsAndSeqNoResolver.lookupStates.size()); dir.close(); } /** Test that version map cache behaves properly with a filtered reader */ public void testCacheFilterReader() throws Exception { - int size = VersionsResolver.lookupStates.size(); + int size = VersionsAndSeqNoResolver.lookupStates.size(); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); @@ -180,17 +180,17 @@ public class VersionsTests extends ESTestCase { writer.addDocument(doc); DirectoryReader reader = DirectoryReader.open(writer); assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); - assertEquals(size+1, VersionsResolver.lookupStates.size()); + assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); // now wrap the reader DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(reader, new ShardId("bogus", "_na_", 5)); assertEquals(87, loadVersion(wrapped, new Term(UidFieldMapper.NAME, "6"))); // same size map: core cache key is shared - assertEquals(size+1, VersionsResolver.lookupStates.size()); + assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); reader.close(); 
writer.close(); // core should be evicted from the map - assertEquals(size, VersionsResolver.lookupStates.size()); + assertEquals(size, VersionsAndSeqNoResolver.lookupStates.size()); dir.close(); } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index c1dc07116ec..6eec34a90e9 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -562,4 +562,16 @@ public class SettingsTests extends ESTestCase { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings)); assertTrue(e.getMessage().contains("must be stored inside the Elasticsearch keystore")); } + + public void testGetAsArrayFailsOnDuplicates() { + final Settings settings = + Settings.builder() + .put("foobar.0", "bar") + .put("foobar.1", "baz") + .put("foobar", "foo") + .build(); + final IllegalStateException e = expectThrows(IllegalStateException.class, () -> settings.getAsArray("foobar")); + assertThat(e, hasToString(containsString("settings object contains values for [foobar=foo] and [foobar.0=bar]"))); + } + } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index f59cc1cc6fb..2f3bd44f475 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -139,4 +139,19 @@ public class ElectMasterServiceTests extends ESTestCase { } } } + + public void testCountMasterNodes() { + List nodes = generateRandomNodes(); + ElectMasterService service = electMasterService(); + + int masterNodes = 0; + + for (DiscoveryNode node : nodes) { + if (node.isMasterNode()) { + masterNodes++; + } + } + + assertEquals(masterNodes, service.countMasterNodes(nodes)); + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java index 70b291fee60..bf25a7babd1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java @@ -108,6 +108,8 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { final ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(clusterState, tasks); verify(electMasterService).hasEnoughMasterNodes(eq(remainingNodesClusterState.get().nodes())); + verify(electMasterService).countMasterNodes(eq(remainingNodesClusterState.get().nodes())); + verify(electMasterService).minimumMasterNodes(); verifyNoMoreInteractions(electMasterService); // ensure that we did not reroute diff --git a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java index 855ce83343b..083e2ad5cc0 100644 --- a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java @@ -23,10 +23,12 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.net.URL; +import java.nio.file.Path; import static org.hamcrest.CoreMatchers.endsWith; import static 
org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.equalTo; /** * Simple unit-tests for Environment.java @@ -71,4 +73,104 @@ public class EnvironmentTests extends ESTestCase { assertThat(environment.resolveRepoURL(new URL("jar:http://localhost/test/../repo1?blah!/repo/")), nullValue()); } + public void testDefaultPathData() { + final Path defaultPathData = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir().toAbsolutePath()) + .put("default.path.data", defaultPathData) + .build(); + final Environment environment = new Environment(settings); + assertThat(environment.dataFiles(), equalTo(new Path[] { defaultPathData })); + } + + public void testPathDataOverrideDefaultPathData() { + final Path pathData = createTempDir().toAbsolutePath(); + final Path defaultPathData = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir().toAbsolutePath()) + .put("path.data", pathData) + .put("default.path.data", defaultPathData) + .build(); + final Environment environment = new Environment(settings); + assertThat(environment.dataFiles(), equalTo(new Path[] { pathData })); + } + + public void testPathDataWhenNotSet() { + final Path pathHome = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder().put("path.home", pathHome).build(); + final Environment environment = new Environment(settings); + assertThat(environment.dataFiles(), equalTo(new Path[]{pathHome.resolve("data")})); + } + + public void testPathDataNotSetInEnvironmentIfNotSet() { + final Path defaultPathData = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir().toAbsolutePath()) + .put("default.path.data", defaultPathData) + .build(); + assertFalse(Environment.PATH_DATA_SETTING.exists(settings)); + assertTrue(Environment.DEFAULT_PATH_DATA_SETTING.exists(settings)); + final Environment environment = new Environment(settings); + assertFalse(Environment.PATH_DATA_SETTING.exists(environment.settings())); + assertTrue(Environment.DEFAULT_PATH_DATA_SETTING.exists(environment.settings())); + } + + public void testDefaultPathLogs() { + final Path defaultPathLogs = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir().toAbsolutePath()) + .put("default.path.logs", defaultPathLogs) + .build(); + final Environment environment = new Environment(settings); + assertThat(environment.logsFile(), equalTo(defaultPathLogs)); + } + + public void testPathLogsOverrideDefaultPathLogs() { + final Path pathLogs = createTempDir().toAbsolutePath(); + final Path defaultPathLogs = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir().toAbsolutePath()) + .put("path.logs", pathLogs) + .put("default.path.logs", defaultPathLogs) + .build(); + final Environment environment = new Environment(settings); + assertThat(environment.logsFile(), equalTo(pathLogs)); + } + + public void testPathLogsWhenNotSet() { + final Path pathHome = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder().put("path.home", pathHome).build(); + final Environment environment = new Environment(settings); + assertThat(environment.logsFile(), equalTo(pathHome.resolve("logs"))); + } + + public void testDefaultPathConf() { + final Path defaultPathConf = 
createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir().toAbsolutePath()) + .put("default.path.conf", defaultPathConf) + .build(); + final Environment environment = new Environment(settings); + assertThat(environment.configFile(), equalTo(defaultPathConf)); + } + + public void testPathConfOverrideDefaultPathConf() { + final Path pathConf = createTempDir().toAbsolutePath(); + final Path defaultPathConf = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", createTempDir().toAbsolutePath()) + .put("path.conf", pathConf) + .put("default.path.conf", defaultPathConf) + .build(); + final Environment environment = new Environment(settings); + assertThat(environment.configFile(), equalTo(pathConf)); + } + + public void testPathConfWhenNotSet() { + final Path pathHome = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder().put("path.home", pathHome).build(); + final Environment environment = new Environment(settings); + assertThat(environment.configFile(), equalTo(pathHome.resolve("config"))); + } + } diff --git a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index c815b2b55f9..daf196da7ce 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -41,7 +41,7 @@ import static org.hamcrest.Matchers.startsWith; public class IndexingSlowLogTests extends ESTestCase { public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { BytesReference source = JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject().bytes(); - ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceID.emptySeqID(), "id", + ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", "test", null, null, source, XContentType.JSON, null); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index c78374a0e9e..30d22dfb731 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -83,7 +83,8 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.lucene.uid.VersionsResolver; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; +import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -150,6 +151,7 @@ import java.util.Base64; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -165,6 +167,8 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.LongSupplier; import 
java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.LongStream; import static java.util.Collections.emptyMap; import static java.util.Collections.shuffle; @@ -292,7 +296,7 @@ public class InternalEngineTests extends ESTestCase { private static ParsedDocument testParsedDocument(String id, String type, String routing, Document document, BytesReference source, Mapping mappingUpdate) { Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", 0); - SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); document.add(uidField); document.add(versionField); document.add(seqID.seqNo); @@ -833,6 +837,58 @@ public class InternalEngineTests extends ESTestCase { } } + public void testTranslogRecoveryWithMultipleGenerations() throws IOException { + final int docs = randomIntBetween(1, 4096); + final List seqNos = LongStream.range(0, docs).boxed().collect(Collectors.toList()); + Randomness.shuffle(seqNos); + engine.close(); + Engine initialEngine = null; + try { + final AtomicInteger counter = new AtomicInteger(); + initialEngine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG)) { + @Override + public SequenceNumbersService seqNoService() { + return new SequenceNumbersService( + engine.shardId, + engine.config().getIndexSettings(), + SequenceNumbersService.NO_OPS_PERFORMED, + SequenceNumbersService.NO_OPS_PERFORMED, + SequenceNumbersService.UNASSIGNED_SEQ_NO) { + @Override + public long generateSeqNo() { + return seqNos.get(counter.getAndIncrement()); + } + }; + } + }; + for (int i = 0; i < docs; i++) { + final String id = Integer.toString(i); + final ParsedDocument doc = testParsedDocument(id, "test", null, testDocumentWithTextField(), SOURCE, null); + initialEngine.index(indexForDoc(doc)); + if (rarely()) { + initialEngine.getTranslog().rollGeneration(); + } else if (rarely()) { + initialEngine.flush(); + } + } + } finally { + IOUtils.close(initialEngine); + } + + Engine recoveringEngine = null; + try { + recoveringEngine = new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)); + recoveringEngine.recoverFromTranslog(); + try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { + TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), docs); + assertEquals(docs, topDocs.totalHits); + } + } finally { + IOUtils.close(recoveringEngine); + } + + } + public void testConcurrentGetAndFlush() throws Exception { ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); @@ -1369,19 +1425,10 @@ public class InternalEngineTests extends ESTestCase { public void testOutOfOrderDocsOnReplica() throws IOException { final List ops = generateSingleDocHistory(true, - randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL), false, 2, 2, 20); + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE), false, 2, 2, 20); assertOpsOnReplica(ops, replicaEngine, true); } - public void testNonStandardVersioningOnReplica() throws IOException { - // TODO: this can be folded into testOutOfOrderDocsOnReplica once out of order - // is detected using seq# - final List ops = generateSingleDocHistory(true, - randomFrom(VersionType.EXTERNAL_GTE, 
VersionType.FORCE), false, 2, 2, 20); - assertOpsOnReplica(ops, replicaEngine, false); - } - - public void testOutOfOrderDocsOnReplicaOldPrimary() throws IOException { IndexSettings oldSettings = IndexSettingsModule.newIndexSettings("testOld", Settings.builder() .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us @@ -3357,48 +3404,68 @@ public class InternalEngineTests extends ESTestCase { searchResult.close(); } + /** + * A sequence number service that will generate a sequence number and if {@code stall} is set to {@code true} will wait on the barrier + * and the referenced latch before returning. If the local checkpoint should advance (because {@code stall} is {@code false}), then the + * value of {@code expectedLocalCheckpoint} is set accordingly. + * + * @param latchReference to latch the thread for the purpose of stalling + * @param barrier to signal the thread has generated a new sequence number + * @param stall whether or not the thread should stall + * @param expectedLocalCheckpoint the expected local checkpoint after generating a new sequence + * number + * @return a sequence number service + */ + private SequenceNumbersService getStallingSeqNoService( + final AtomicReference latchReference, + final CyclicBarrier barrier, + final AtomicBoolean stall, + final AtomicLong expectedLocalCheckpoint) { + return new SequenceNumbersService( + shardId, + defaultSettings, + SequenceNumbersService.NO_OPS_PERFORMED, + SequenceNumbersService.NO_OPS_PERFORMED, + SequenceNumbersService.UNASSIGNED_SEQ_NO) { + @Override + public long generateSeqNo() { + final long seqNo = super.generateSeqNo(); + final CountDownLatch latch = latchReference.get(); + if (stall.get()) { + try { + barrier.await(); + latch.await(); + } catch (BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + } else { + if (expectedLocalCheckpoint.get() + 1 == seqNo) { + expectedLocalCheckpoint.set(seqNo); + } + } + return seqNo; + } + }; + } + public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws BrokenBarrierException, InterruptedException, IOException { engine.close(); final int docs = randomIntBetween(1, 32); InternalEngine initialEngine = null; try { - final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference latchReference = new AtomicReference<>(new CountDownLatch(1)); final CyclicBarrier barrier = new CyclicBarrier(2); - final AtomicBoolean skip = new AtomicBoolean(); + final AtomicBoolean stall = new AtomicBoolean(); final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); final List threads = new ArrayList<>(); - final SequenceNumbersService seqNoService = - new SequenceNumbersService( - shardId, - defaultSettings, - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.NO_OPS_PERFORMED, - SequenceNumbersService.UNASSIGNED_SEQ_NO) { - @Override - public long generateSeqNo() { - final long seqNo = super.generateSeqNo(); - if (skip.get()) { - try { - barrier.await(); - latch.await(); - } catch (BrokenBarrierException | InterruptedException e) { - throw new RuntimeException(e); - } - } else { - if (expectedLocalCheckpoint.get() + 1 == seqNo) { - expectedLocalCheckpoint.set(seqNo); - } - } - return seqNo; - } - }; + final SequenceNumbersService seqNoService = getStallingSeqNoService(latchReference, barrier, stall, expectedLocalCheckpoint); initialEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, () -> 
seqNoService); final InternalEngine finalInitialEngine = initialEngine; for (int i = 0; i < docs; i++) { final String id = Integer.toString(i); final ParsedDocument doc = testParsedDocument(id, "test", null, testDocumentWithTextField(), SOURCE, null); - skip.set(randomBoolean()); + stall.set(randomBoolean()); final Thread thread = new Thread(() -> { try { finalInitialEngine.index(indexForDoc(doc)); @@ -3407,7 +3474,7 @@ public class InternalEngineTests extends ESTestCase { } }); thread.start(); - if (skip.get()) { + if (stall.get()) { threads.add(thread); barrier.await(); } else { @@ -3419,7 +3486,7 @@ public class InternalEngineTests extends ESTestCase { assertThat(initialEngine.seqNoService().getMaxSeqNo(), equalTo((long) (docs - 1))); initialEngine.flush(true, true); - latch.countDown(); + latchReference.get().countDown(); for (final Thread thread : threads) { thread.join(); } @@ -3594,6 +3661,78 @@ public class InternalEngineTests extends ESTestCase { } } + public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierException, InterruptedException { + engine.close(); + final int numberOfTriplets = randomIntBetween(1, 32); + InternalEngine actualEngine = null; + try { + final AtomicReference latchReference = new AtomicReference<>(); + final CyclicBarrier barrier = new CyclicBarrier(2); + final AtomicBoolean stall = new AtomicBoolean(); + final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED); + final Map threads = new LinkedHashMap<>(); + final SequenceNumbersService seqNoService = getStallingSeqNoService(latchReference, barrier, stall, expectedLocalCheckpoint); + actualEngine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, () -> seqNoService); + final InternalEngine finalActualEngine = actualEngine; + final Translog translog = finalActualEngine.getTranslog(); + final long generation = finalActualEngine.getTranslog().currentFileGeneration(); + for (int i = 0; i < numberOfTriplets; i++) { + /* + * Index three documents with the first and last landing in the same generation and the middle document being stalled until + * a later generation. + */ + stall.set(false); + index(finalActualEngine, 3 * i); + + final CountDownLatch latch = new CountDownLatch(1); + latchReference.set(latch); + final int skipId = 3 * i + 1; + stall.set(true); + final Thread thread = new Thread(() -> { + try { + index(finalActualEngine, skipId); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + thread.start(); + threads.put(thread, latch); + barrier.await(); + + stall.set(false); + index(finalActualEngine, 3 * i + 2); + finalActualEngine.flush(); + + /* + * This sequence number landed in the last generation, but the lower and upper bounds for an earlier generation straddle + * this sequence number. 
+ */ + assertThat(translog.getMinGenerationForSeqNo(3 * i + 1).translogFileGeneration, equalTo(i + generation)); + } + + int i = 0; + for (final Map.Entry entry : threads.entrySet()) { + final Map userData = finalActualEngine.commitStats().getUserData(); + assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(Long.toString(3 * i))); + assertThat(userData.get(Translog.TRANSLOG_GENERATION_KEY), equalTo(Long.toString(i + generation))); + entry.getValue().countDown(); + entry.getKey().join(); + finalActualEngine.flush(); + i++; + } + + } finally { + IOUtils.close(actualEngine); + } + } + + private void index(final InternalEngine engine, final int id) throws IOException { + final String docId = Integer.toString(id); + final ParsedDocument doc = + testParsedDocument(docId, "test", null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc)); + } + /** * Return a tuple representing the sequence ID for the given {@code Get} * operation. The first value in the tuple is the sequence number, the @@ -3601,9 +3740,17 @@ public class InternalEngineTests extends ESTestCase { */ private Tuple getSequenceID(Engine engine, Engine.Get get) throws EngineException { try (Searcher searcher = engine.acquireSearcher("get")) { - long seqNum = VersionsResolver.loadSeqNo(searcher.reader(), get.uid()); - long primaryTerm = VersionsResolver.loadPrimaryTerm(searcher.reader(), get.uid()); - return new Tuple<>(seqNum, primaryTerm); + final long primaryTerm; + final long seqNo; + DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), get.uid()); + if (docIdAndSeqNo == null) { + primaryTerm = 0; + seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO; + } else { + seqNo = docIdAndSeqNo.seqNo; + primaryTerm = VersionsAndSeqNoResolver.loadPrimaryTerm(docIdAndSeqNo); + } + return new Tuple<>(seqNo, primaryTerm); } catch (Exception e) { throw new EngineException(shardId, "unable to retrieve sequence id", e); } diff --git a/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java index 9161bc413c8..97799f8c46a 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTests.java @@ -33,7 +33,7 @@ public class LiveVersionMapTests extends ESTestCase { for (int i = 0; i < 100000; ++i) { BytesRefBuilder uid = new BytesRefBuilder(); uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); - VersionValue version = new VersionValue(randomLong()); + VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong()); map.putUnderLock(uid.toBytesRef(), version); } long actualRamBytesUsed = RamUsageTester.sizeOf(map); @@ -48,7 +48,7 @@ public class LiveVersionMapTests extends ESTestCase { for (int i = 0; i < 100000; ++i) { BytesRefBuilder uid = new BytesRefBuilder(); uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20)); - VersionValue version = new VersionValue(randomLong()); + VersionValue version = new VersionValue(randomLong(), randomLong(), randomLong()); map.putUnderLock(uid.toBytesRef(), version); } actualRamBytesUsed = RamUsageTester.sizeOf(map); diff --git a/core/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java b/core/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java index 7af8ebc7580..3b953edece1 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/engine/VersionValueTests.java @@ -25,12 +25,12 @@ import org.elasticsearch.test.ESTestCase; public class VersionValueTests extends ESTestCase { public void testRamBytesUsed() { - VersionValue versionValue = new VersionValue(randomLong()); + VersionValue versionValue = new VersionValue(randomLong(), randomLong(), randomLong()); assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed()); } public void testDeleteRamBytesUsed() { - DeleteVersionValue versionValue = new DeleteVersionValue(randomLong(), randomLong()); + DeleteVersionValue versionValue = new DeleteVersionValue(randomLong(), randomLong(), randomLong(), randomLong()); assertEquals(RamUsageTester.sizeOf(versionValue), versionValue.ramBytesUsed()); } diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 1d1af2b2fc5..c35f72d2085 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -27,13 +27,9 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.bulk.BulkItemRequest; import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.bulk.BulkShardResponse; import org.elasticsearch.action.bulk.TransportShardBulkActionTests; -import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; -import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.PlainActionFuture; @@ -98,6 +94,10 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } protected IndexMetaData buildIndexMetaData(int replicas) throws IOException { + return buildIndexMetaData(replicas, indexMapping); + } + + protected IndexMetaData buildIndexMetaData(int replicas, Map mappings) throws IOException { Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) @@ -105,7 +105,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName()) .settings(settings) .primaryTerm(0, 1); - for (Map.Entry typeMapping : indexMapping.entrySet()) { + for (Map.Entry typeMapping : mappings.entrySet()) { metaData.putMapping(typeMapping.getKey(), typeMapping.getValue()); } return metaData.build(); @@ -224,15 +224,24 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase updateAllocationIDsOnPrimary(); } - public synchronized IndexShard addReplica() throws IOException { + public IndexShard addReplica() throws IOException { final ShardRouting replicaRouting = createShardRouting("s" + replicaId.incrementAndGet(), false); final IndexShard replica = newShard(replicaRouting, indexMetaData, null, this::syncGlobalCheckpoint, getEngineFactory(replicaRouting)); - replicas.add(replica); - updateAllocationIDsOnPrimary(); + 
addReplica(replica); return replica; } + public synchronized void addReplica(IndexShard replica) { + assert shardRoutings().stream() + .filter(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())).findFirst().isPresent() == false : + "replica with aId [" + replica.routingEntry().allocationId() + "] already exists"; + replica.updatePrimaryTerm(primary.getPrimaryTerm()); + replicas.add(replica); + updateAllocationIDsOnPrimary(); + } + + public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardPath, final String nodeId) throws IOException { final ShardRouting shardRouting = TestShardRouting.newShardRouting( shardId, @@ -264,6 +273,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase } boolean found = replicas.remove(replica); assert found; + closeShards(primary); primary = replica; replica.updateRoutingEntry(replica.routingEntry().moveActiveReplicaToPrimary()); updateAllocationIDsOnPrimary(); diff --git a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 7a11f89b73b..5f69370a5ca 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -18,6 +18,9 @@ */ package org.elasticsearch.index.replication; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; @@ -37,6 +40,7 @@ import org.elasticsearch.indices.recovery.RecoveryTarget; import java.io.IOException; import java.util.Collections; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; @@ -152,4 +156,28 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase } } + public void testConflictingOpsOnReplica() throws Exception { + Map mappings = + Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}"); + try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(2, mappings))) { + shards.startAll(); + IndexShard replica1 = shards.getReplicas().get(0); + logger.info("--> isolated replica " + replica1.routingEntry()); + shards.removeReplica(replica1); + IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON); + shards.index(indexRequest); + shards.addReplica(replica1); + logger.info("--> promoting replica to primary " + replica1.routingEntry()); + shards.promoteReplicaToPrimary(replica1); + indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"2\"}", XContentType.JSON); + shards.index(indexRequest); + shards.refresh("test"); + for (IndexShard shard : shards) { + try (Engine.Searcher searcher = shard.acquireSearcher("test")) { + TopDocs search = searcher.searcher().search(new TermQuery(new Term("f", "2")), 10); + assertEquals("shard " + shard.routingEntry() + " misses new version", 1, search.totalHits); + } + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index d203832fb15..fec0b766d34 100644 --- 
a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -107,7 +107,7 @@ public class IndexShardIT extends ESSingleNodeTestCase { Mapping mappingUpdate) { Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", 0); - SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); document.add(uidField); document.add(versionField); document.add(seqID.seqNo); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b328e86e58d..629a8af3e0d 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -551,7 +551,7 @@ public class IndexShardTests extends IndexShardTestCase { ParseContext.Document document, BytesReference source, Mapping mappingUpdate) { Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", 0); - SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); document.add(uidField); document.add(versionField); document.add(seqID.seqNo); diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 846ca6be201..b7e20cf75c8 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -332,7 +332,7 @@ public class RefreshListenersTests extends ESTestCase { document.add(new TextField("test", testFieldValue, Field.Store.YES)); Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY); - SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); document.add(uidField); document.add(versionField); document.add(seqID.seqNo); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 6b2aa5e5921..1e2d81705df 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -36,8 +36,10 @@ import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -84,6 +86,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import 
java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -101,6 +104,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import java.util.stream.LongStream; import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.hamcrest.Matchers.containsString; @@ -124,7 +128,7 @@ public class TranslogTests extends ESTestCase { if (translog.isOpen()) { if (translog.currentFileGeneration() > 1) { - translog.commit(); + translog.commit(translog.currentFileGeneration()); assertFileDeleted(translog, translog.currentFileGeneration() - 1); } translog.close(); @@ -287,7 +291,7 @@ public class TranslogTests extends ESTestCase { assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.totalOperations(), equalTo(ops.size())); - translog.commit(); + translog.commit(translog.currentFileGeneration()); snapshot = translog.newSnapshot(); assertThat(snapshot, SnapshotMatchers.size(0)); assertThat(snapshot.totalOperations(), equalTo(0)); @@ -373,7 +377,7 @@ public class TranslogTests extends ESTestCase { } } - translog.commit(); + translog.commit(translog.currentFileGeneration()); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(0L)); @@ -446,7 +450,7 @@ public class TranslogTests extends ESTestCase { try (Translog.View view = translog.newView()) { Translog.Snapshot snapshot2 = translog.newSnapshot(); - translog.commit(); + translog.commit(translog.currentFileGeneration()); assertThat(snapshot2, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot2.totalOperations(), equalTo(ops.size())); } @@ -821,7 +825,7 @@ public class TranslogTests extends ESTestCase { break; } } - translog.commit(); + translog.commit(translog.currentFileGeneration()); } } finally { run.set(false); @@ -858,7 +862,7 @@ public class TranslogTests extends ESTestCase { assertTrue("we only synced a previous operation yet", translog.syncNeeded()); } if (rarely()) { - translog.commit(); + translog.commit(translog.currentFileGeneration()); assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); } @@ -878,7 +882,7 @@ public class TranslogTests extends ESTestCase { ArrayList locations = new ArrayList<>(); for (int op = 0; op < translogOperations; op++) { if (rarely()) { - translog.commit(); // do this first so that there is at least one pending tlog entry + translog.commit(translog.currentFileGeneration()); // do this first so that there is at least one pending tlog entry } final Translog.Location location = translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); locations.add(location); @@ -889,7 +893,7 @@ public class TranslogTests extends ESTestCase { assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream())); assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced } else if (rarely()) { - translog.commit(); + translog.commit(translog.currentFileGeneration()); assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now assertFalse("no 
sync needed since no operations in current translog", translog.syncNeeded()); } else { @@ -909,7 +913,7 @@ public class TranslogTests extends ESTestCase { for (int op = 0; op < translogOperations; op++) { locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); if (rarely() && translogOperations > op + 1) { - translog.commit(); + translog.commit(translog.currentFileGeneration()); } } Collections.shuffle(locations, random()); @@ -1074,7 +1078,7 @@ public class TranslogTests extends ESTestCase { locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); final boolean commit = commitOften ? frequently() : rarely(); if (commit && op < translogOperations - 1) { - translog.commit(); + translog.commit(translog.currentFileGeneration()); minUncommittedOp = op + 1; translogGeneration = translog.getGeneration(); } @@ -1300,7 +1304,7 @@ public class TranslogTests extends ESTestCase { for (int op = 0; op < translogOperations; op++) { locations.add(translog.add(new Translog.Index("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { - translog.commit(); + translog.commit(translog.currentFileGeneration()); firstUncommitted = op + 1; } } @@ -1483,7 +1487,7 @@ public class TranslogTests extends ESTestCase { } try { - translog.commit(); + translog.commit(translog.currentFileGeneration()); fail("already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); @@ -1930,7 +1934,7 @@ public class TranslogTests extends ESTestCase { if (randomBoolean()) { failableTLog.prepareCommit(); } - failableTLog.commit(); + failableTLog.commit(translog.currentFileGeneration()); syncedDocs.clear(); } } @@ -2048,7 +2052,7 @@ public class TranslogTests extends ESTestCase { public void testTranslogOpSerialization() throws Exception { BytesReference B_1 = new BytesArray(new byte[]{1}); - SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers"; long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong(); long randomPrimaryTerm = randomBoolean() ? 
0 : randomNonNegativeLong(); @@ -2110,12 +2114,13 @@ public class TranslogTests extends ESTestCase { for (int i = 0; i <= rolls; i++) { assertFileIsPresent(translog, generation + i); } - translog.commit(); + translog.commit(generation + rolls); assertThat(translog.currentFileGeneration(), equalTo(generation + rolls + 1)); assertThat(translog.totalOperations(), equalTo(0)); - for (int i = 0; i <= rolls; i++) { + for (int i = 0; i < rolls; i++) { assertFileDeleted(translog, generation + i); } + assertFileIsPresent(translog, generation + rolls); assertFileIsPresent(translog, generation + rolls + 1); } @@ -2167,7 +2172,7 @@ public class TranslogTests extends ESTestCase { } } - translog.commit(); + translog.commit(generation + rollsBefore + 1); for (int i = 0; i <= rollsBefore; i++) { assertFileDeleted(translog, generation + i); @@ -2178,4 +2183,130 @@ public class TranslogTests extends ESTestCase { } + public void testMinGenerationForSeqNo() throws IOException { + final long initialGeneration = translog.getGeneration().translogFileGeneration; + final int operations = randomIntBetween(1, 4096); + final List shuffledSeqNos = LongStream.range(0, operations).boxed().collect(Collectors.toList()); + Randomness.shuffle(shuffledSeqNos); + final List> seqNos = new ArrayList<>(); + final Map terms = new HashMap<>(); + for (final Long seqNo : shuffledSeqNos) { + seqNos.add(Tuple.tuple(seqNo, terms.computeIfAbsent(seqNo, k -> 0L))); + Long repeatingTermSeqNo = randomFrom(seqNos.stream().map(Tuple::v1).collect(Collectors.toList())); + seqNos.add(Tuple.tuple(repeatingTermSeqNo, terms.computeIfPresent(repeatingTermSeqNo, (s, t) -> t + 1))); + } + + for (final Tuple tuple : seqNos) { + translog.add(new Translog.NoOp(tuple.v1(), tuple.v2(), "test")); + if (rarely()) { + translog.rollGeneration(); + } + } + + Map>> generations = new HashMap<>(); + + translog.commit(initialGeneration); + for (long seqNo = 0; seqNo < operations; seqNo++) { + final Set> seenSeqNos = new HashSet<>(); + final long generation = translog.getMinGenerationForSeqNo(seqNo).translogFileGeneration; + for (long g = generation; g < translog.currentFileGeneration(); g++) { + if (!generations.containsKey(g)) { + final Set> generationSeenSeqNos = new HashSet<>(); + final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.getCommitCheckpointFileName(g))); + try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(g)), checkpoint)) { + Translog.Snapshot snapshot = reader.newSnapshot(); + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + generationSeenSeqNos.add(Tuple.tuple(operation.seqNo(), operation.primaryTerm())); + } + } + generations.put(g, generationSeenSeqNos); + + } + seenSeqNos.addAll(generations.get(g)); + } + + final long seqNoLowerBound = seqNo; + final Set> expected = seqNos.stream().filter(t -> t.v1() >= seqNoLowerBound).collect(Collectors.toSet()); + seenSeqNos.retainAll(expected); + assertThat(seenSeqNos, equalTo(expected)); + } + } + + public void testSimpleCommit() throws IOException { + final int operations = randomIntBetween(1, 4096); + long seqNo = 0; + for (int i = 0; i < operations; i++) { + translog.add(new Translog.NoOp(seqNo++, 0, "test'")); + if (rarely()) { + translog.rollGeneration(); + } + } + + final long generation = + randomIntBetween(1, Math.toIntExact(translog.currentFileGeneration())); + translog.commit(generation); + for (long i = 0; i < generation; i++) { + assertFileDeleted(translog, i); + } + for (long i = 
generation; i <= translog.currentFileGeneration(); i++) { + assertFileIsPresent(translog, i); + } + } + + public void testPrepareCommitAndCommit() throws IOException { + final int operations = randomIntBetween(1, 4096); + long seqNo = 0; + long last = -1; + for (int i = 0; i < operations; i++) { + translog.add(new Translog.NoOp(seqNo++, 0, "test")); + if (rarely()) { + final long generation = translog.currentFileGeneration(); + translog.prepareCommit(); + if (rarely()) { + // simulate generation filling up and rolling between preparing the commit and committing + translog.rollGeneration(); + } + final int committedGeneration = randomIntBetween(Math.max(1, Math.toIntExact(last)), Math.toIntExact(generation)); + translog.commit(committedGeneration); + last = committedGeneration; + for (long g = 0; g < generation; g++) { + assertFileDeleted(translog, g); + } + for (long g = generation; g < translog.currentFileGeneration(); g++) { + assertFileIsPresent(translog, g); + } + } + } + } + + public void testCommitWithOpenView() throws IOException { + final int operations = randomIntBetween(1, 4096); + long seqNo = 0; + long lastCommittedGeneration = -1; + for (int i = 0; i < operations; i++) { + translog.add(new Translog.NoOp(seqNo++, 0, "test")); + if (rarely()) { + try (Translog.View ignored = translog.newView()) { + final long viewGeneration = lastCommittedGeneration; + translog.prepareCommit(); + final long committedGeneration = randomIntBetween( + Math.max(1, Math.toIntExact(lastCommittedGeneration)), + Math.toIntExact(translog.currentFileGeneration())); + translog.commit(committedGeneration); + lastCommittedGeneration = committedGeneration; + // with an open view, committing should preserve generations back to the last committed generation + for (long g = 1; g < Math.min(lastCommittedGeneration, viewGeneration); g++) { + assertFileDeleted(translog, g); + } + // the view generation could be -1 if no commit has been performed + final long max = Math.max(1, Math.min(lastCommittedGeneration, viewGeneration)); + for (long g = max; g < translog.currentFileGeneration(); g++) { + assertFileIsPresent(translog, g); + } + } + } + } + } + } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 2449fdfc290..e1657955d90 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -91,7 +91,11 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId()); if (localRoutingNode != null) { if (enableRandomFailures == false) { - assertThat("failed shard cache should be empty", failedShardsCache.values(), empty()); + // initializing a shard should succeed when enableRandomFailures is disabled + // active shards can be failed if state persistence was disabled in an earlier CS update + if (failedShardsCache.values().stream().anyMatch(ShardRouting::initializing)) { + fail("failed shard cache should not contain initializing shard routing: " + failedShardsCache.values()); + } } // check that all shards in local routing nodes have been allocated for (ShardRouting shardRouting : localRoutingNode) { @@ -100,41 +104,50 @@ public abstract class 
AbstractIndicesClusterStateServiceTestCase extends ESTestC MockIndexShard shard = indicesService.getShardOrNull(shardRouting.shardId()); ShardRouting failedShard = failedShardsCache.get(shardRouting.shardId()); - if (enableRandomFailures) { - if (shard == null && failedShard == null) { - fail("Shard with id " + shardRouting + " expected but missing in indicesService and failedShardsCache"); + + if (state.blocks().disableStatePersistence()) { + if (shard != null) { + fail("Shard with id " + shardRouting + " should be removed from indicesService due to disabled state persistence"); } + } else { if (failedShard != null && failedShard.isSameAllocation(shardRouting) == false) { fail("Shard cache has not been properly cleaned for " + failedShard); } - } else { - if (shard == null) { - fail("Shard with id " + shardRouting + " expected but missing in indicesService"); + if (shard == null && failedShard == null) { + // shard must either be there or there must be a failure + fail("Shard with id " + shardRouting + " expected but missing in indicesService and failedShardsCache"); } - } - - if (shard != null) { - AllocatedIndex indexService = indicesService.indexService(index); - assertTrue("Index " + index + " expected but missing in indicesService", indexService != null); - - // index metadata has been updated - assertThat(indexService.getIndexSettings().getIndexMetaData(), equalTo(indexMetaData)); - // shard has been created - if (enableRandomFailures == false || failedShard == null) { - assertTrue("Shard with id " + shardRouting + " expected but missing in indexService", shard != null); - // shard has latest shard routing - assertThat(shard.routingEntry(), equalTo(shardRouting)); + if (enableRandomFailures == false) { + if (shard == null && shardRouting.initializing() && failedShard == shardRouting) { + // initializing a shard should succeed when enableRandomFailures is disabled + fail("Shard with id " + shardRouting + " expected but missing in indicesService " + failedShard); + } } - if (shard.routingEntry().primary() && shard.routingEntry().active()) { - IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shard.shardId()); - Set activeIds = shardRoutingTable.activeShards().stream() - .map(r -> r.allocationId().getId()).collect(Collectors.toSet()); - Set initializingIds = shardRoutingTable.getAllInitializingShards().stream() - .map(r -> r.allocationId().getId()).collect(Collectors.toSet()); - assertThat(shard.routingEntry() + " isn't updated with active aIDs", shard.activeAllocationIds, equalTo(activeIds)); - assertThat(shard.routingEntry() + " isn't updated with init aIDs", shard.initializingAllocationIds, - equalTo(initializingIds)); + if (shard != null) { + AllocatedIndex indexService = indicesService.indexService(index); + assertTrue("Index " + index + " expected but missing in indicesService", indexService != null); + + // index metadata has been updated + assertThat(indexService.getIndexSettings().getIndexMetaData(), equalTo(indexMetaData)); + // shard has been created + if (enableRandomFailures == false || failedShard == null) { + assertTrue("Shard with id " + shardRouting + " expected but missing in indexService", shard != null); + // shard has latest shard routing + assertThat(shard.routingEntry(), equalTo(shardRouting)); + } + + if (shard.routingEntry().primary() && shard.routingEntry().active()) { + IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shard.shardId()); + Set activeIds = shardRoutingTable.activeShards().stream() 
+ .map(r -> r.allocationId().getId()).collect(Collectors.toSet()); + Set initializingIds = shardRoutingTable.getAllInitializingShards().stream() + .map(r -> r.allocationId().getId()).collect(Collectors.toSet()); + assertThat(shard.routingEntry() + " isn't updated with active aIDs", shard.activeAllocationIds, + equalTo(activeIds)); + assertThat(shard.routingEntry() + " isn't updated with init aIDs", shard.initializingAllocationIds, + equalTo(initializingIds)); + } } } } @@ -142,6 +155,10 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC // all other shards / indices have been cleaned up for (AllocatedIndex indexService : indicesService) { + if (state.blocks().disableStatePersistence()) { + fail("Index service " + indexService.index() + " should be removed from indicesService due to disabled state persistence"); + } + assertTrue(state.metaData().getIndexSafe(indexService.index()) != null); boolean shardsFound = false; @@ -158,13 +175,9 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC } if (shardsFound == false) { - if (enableRandomFailures) { - // check if we have shards of that index in failedShardsCache - // if yes, we might not have cleaned the index as failedShardsCache can be populated by another thread - assertFalse(failedShardsCache.keySet().stream().noneMatch(shardId -> shardId.getIndex().equals(indexService.index()))); - } else { - fail("index service for index " + indexService.index() + " has no shards"); - } + // check if we have shards of that index in failedShardsCache + // if yes, we might not have cleaned the index as failedShardsCache can be populated by another thread + assertFalse(failedShardsCache.keySet().stream().noneMatch(shardId -> shardId.getIndex().equals(indexService.index()))); } } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 55e47dabdbf..f2608f68ee5 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -32,6 +32,8 @@ import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -44,6 +46,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.Index; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.repositories.RepositoriesService; @@ -231,6 +234,23 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice public ClusterState randomlyUpdateClusterState(ClusterState state, Map clusterStateServiceMap, Supplier indicesServiceSupplier) { + // randomly remove no_master blocks + if (randomBoolean() && 
state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)) { + state = ClusterState.builder(state).blocks( + ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)).build(); + } + + // randomly add no_master blocks + if (rarely() && state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID) == false) { + ClusterBlock block = randomBoolean() ? DiscoverySettings.NO_MASTER_BLOCK_ALL : DiscoverySettings.NO_MASTER_BLOCK_WRITES; + state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build(); + } + + // if no_master block is in place, make no other cluster state changes + if (state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)) { + return state; + } + // randomly create new indices (until we have 200 max) for (int i = 0; i < randomInt(5); i++) { if (state.metaData().indices().size() > 200) { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 743510e373c..40a92b11e73 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -204,7 +204,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { document.add(new TextField("test", "test", Field.Store.YES)); final Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); final Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY); - final SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); + final SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); document.add(uidField); document.add(versionField); document.add(seqID.seqNo); diff --git a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java similarity index 92% rename from core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java rename to core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java index daaeab80143..033fa9cdf20 100644 --- a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java +++ b/core/src/test/java/org/elasticsearch/node/InternalSettingsPreparerTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.node.internal; +package org.elasticsearch.node; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cluster.ClusterName; @@ -182,18 +182,11 @@ public class InternalSettingsPreparerTests extends ESTestCase { assertEquals("secret", fakeSetting.get(env.settings()).toString()); } - public void testDefaultProperties() throws Exception { + public void testDefaultPropertiesDoNothing() throws Exception { Map props = Collections.singletonMap("default.setting", "foo"); Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props); - assertEquals("foo", env.settings().get("setting")); + assertEquals("foo", env.settings().get("default.setting")); + assertNull(env.settings().get("setting")); } - public void testDefaultPropertiesOverride() throws Exception { - Path configDir = homeDir.resolve("config"); - Files.createDirectories(configDir); - Files.write(configDir.resolve("elasticsearch.yml"), Collections.singletonList("setting: bar"), StandardCharsets.UTF_8); - Map props = Collections.singletonMap("default.setting", "foo"); - Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props); - assertEquals("bar", env.settings().get("setting")); - } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSamplerTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSamplerTests.java new file mode 100644 index 00000000000..1c4fb6d2a65 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSamplerTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.bucket.sampler; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregationTestCase; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.util.List; +import java.util.Map; + +public class InternalSamplerTests extends InternalSingleBucketAggregationTestCase { + @Override + protected InternalSampler createTestInstance(String name, long docCount, InternalAggregations aggregations, + List pipelineAggregators, Map metaData) { + return new InternalSampler(name, docCount, aggregations, pipelineAggregators, metaData); + } + + @Override + protected void extraAssertReduced(InternalSampler reduced, List inputs) { + // Nothing extra to assert + } + + @Override + protected Writeable.Reader instanceReader() { + return InternalSampler::new; + } +} \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorTests.java new file mode 100644 index 00000000000..5ba9b4b01e7 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregatorTests.java @@ -0,0 +1,153 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.metrics.geocentroid; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.LatLonDocValuesField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.index.mapper.GeoPointFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.test.geo.RandomGeoGenerator; + +import java.io.IOException; + +public class GeoCentroidAggregatorTests extends AggregatorTestCase { + + private static final double GEOHASH_TOLERANCE = 1E-4D; + + public void testEmpty() throws Exception { + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + GeoCentroidAggregationBuilder aggBuilder = new GeoCentroidAggregationBuilder("my_agg") + .field("field"); + + MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType(); + fieldType.setHasDocValues(true); + fieldType.setName("field"); + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertNull(result.centroid()); + } + } + } + + public void testUnmapped() throws Exception { + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + GeoCentroidAggregationBuilder aggBuilder = new GeoCentroidAggregationBuilder("my_agg") + .field("another_field"); + + Document document = new Document(); + document.add(new LatLonDocValuesField("field", 10, 10)); + w.addDocument(document); + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + + MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType(); + fieldType.setHasDocValues(true); + fieldType.setName("another_field"); + InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertNull(result.centroid()); + + fieldType = new GeoPointFieldMapper.GeoPointFieldType(); + fieldType.setHasDocValues(true); + fieldType.setName("field"); + result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + assertNull(result.centroid()); + } + } + } + + public void testSingleValuedField() throws Exception { + int numDocs = scaledRandomIntBetween(64, 256); + int numUniqueGeoPoints = randomIntBetween(1, numDocs); + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + GeoPoint expectedCentroid = new GeoPoint(0, 0); + GeoPoint[] singleValues = new GeoPoint[numUniqueGeoPoints]; + for (int i = 0 ; i < singleValues.length; i++) { + singleValues[i] = RandomGeoGenerator.randomPoint(random()); + } + GeoPoint singleVal; + for (int i = 0; i < numDocs; i++) { + singleVal = singleValues[i % numUniqueGeoPoints]; + Document document = new Document(); + document.add(new LatLonDocValuesField("field", singleVal.getLat(), singleVal.getLon())); + w.addDocument(document); + expectedCentroid = expectedCentroid.reset(expectedCentroid.lat() + (singleVal.lat() - expectedCentroid.lat()) / (i + 1), + expectedCentroid.lon() + (singleVal.lon() - expectedCentroid.lon()) / (i + 1)); + } + assertCentroid(w, expectedCentroid); + } + } + + 
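// Aside, not part of this patch: testSingleValuedField above derives its expected centroid with an
// incremental running mean, lat += (pointLat - lat) / (i + 1), and likewise for lon, so the test never
// has to retain the full list of points. Below is a minimal standalone sketch of that update; the class
// and method names are illustrative only and do not exist in the Elasticsearch code base.
class RunningCentroidSketch {
    private double lat;
    private double lon;
    private long count;

    // Fold one point into the running mean of both coordinates.
    void add(final double pointLat, final double pointLon) {
        count++;
        lat += (pointLat - lat) / count;
        lon += (pointLon - lon) / count;
    }

    double lat() {
        return lat;
    }

    double lon() {
        return lon;
    }
}
// For example, adding (0, 0), (10, 20) and (20, 40) yields (10, 20), the arithmetic mean of the points;
// the test compares this running mean against the aggregation's centroid within GEOHASH_TOLERANCE.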
public void testMultiValuedField() throws Exception { + int numDocs = scaledRandomIntBetween(64, 256); + int numUniqueGeoPoints = randomIntBetween(1, numDocs); + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + + GeoPoint expectedCentroid = new GeoPoint(0, 0); + GeoPoint[] multiValues = new GeoPoint[numUniqueGeoPoints]; + for (int i = 0 ; i < multiValues.length; i++) { + multiValues[i] = RandomGeoGenerator.randomPoint(random()); + } + final GeoPoint[] multiVal = new GeoPoint[2]; + for (int i = 0; i < numDocs; i++) { + multiVal[0] = multiValues[i % numUniqueGeoPoints]; + multiVal[1] = multiValues[(i+1) % numUniqueGeoPoints]; + Document document = new Document(); + document.add(new LatLonDocValuesField("field", multiVal[0].getLat(), multiVal[0].getLon())); + document.add(new LatLonDocValuesField("field", multiVal[1].getLat(), multiVal[1].getLon())); + w.addDocument(document); + double newMVLat = (multiVal[0].lat() + multiVal[1].lat())/2d; + double newMVLon = (multiVal[0].lon() + multiVal[1].lon())/2d; + expectedCentroid = expectedCentroid.reset(expectedCentroid.lat() + (newMVLat - expectedCentroid.lat()) / (i + 1), + expectedCentroid.lon() + (newMVLon - expectedCentroid.lon()) / (i + 1)); + } + assertCentroid(w, expectedCentroid); + } + } + + private void assertCentroid(RandomIndexWriter w, GeoPoint expectedCentroid) throws IOException { + MappedFieldType fieldType = new GeoPointFieldMapper.GeoPointFieldType(); + fieldType.setHasDocValues(true); + fieldType.setName("field"); + GeoCentroidAggregationBuilder aggBuilder = new GeoCentroidAggregationBuilder("my_agg") + .field("field"); + try (IndexReader reader = w.getReader()) { + IndexSearcher searcher = new IndexSearcher(reader); + InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + + assertEquals("my_agg", result.getName()); + GeoPoint centroid = result.centroid(); + assertNotNull(centroid); + assertEquals(expectedCentroid.getLat(), centroid.getLat(), GEOHASH_TOLERANCE); + assertEquals(expectedCentroid.getLon(), centroid.getLon(), GEOHASH_TOLERANCE); + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java new file mode 100644 index 00000000000..75efa516409 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/InternalTDigestPercentilesTests.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.metrics.percentiles.tdigest; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregationTestCase; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.util.List; +import java.util.Map; + +public class InternalTDigestPercentilesTests extends InternalAggregationTestCase { + + private final double[] percents = randomPercents(); + + @Override + protected InternalTDigestPercentiles createTestInstance(String name, + List pipelineAggregators, + Map metaData) { + boolean keyed = randomBoolean(); + DocValueFormat format = DocValueFormat.RAW; + TDigestState state = new TDigestState(100); + + int numValues = randomInt(10); + for (int i = 0; i < numValues; ++i) { + state.add(randomDouble() * 100); + } + assertEquals(state.centroidCount(), numValues); + return new InternalTDigestPercentiles(name, percents, state, keyed, format, pipelineAggregators, metaData); + } + + @Override + protected void assertReduced(InternalTDigestPercentiles reduced, List inputs) { + final TDigestState expectedState = new TDigestState(reduced.state.compression()); + + long totalCount = 0; + for (InternalTDigestPercentiles input : inputs) { + assertArrayEquals(reduced.keys, input.keys, 0d); + expectedState.add(input.state); + totalCount += input.state.size(); + } + + assertEquals(totalCount, reduced.state.size()); + if (totalCount > 0) { + assertEquals(expectedState.quantile(0), reduced.state.quantile(0), 0d); + assertEquals(expectedState.quantile(1), reduced.state.quantile(1), 0d); + } + } + + @Override + protected Writeable.Reader instanceReader() { + return InternalTDigestPercentiles::new; + } + + private static double[] randomPercents() { + List randomCdfValues = randomSubsetOf(randomIntBetween(1, 7), 0.01d, 0.05d, 0.25d, 0.50d, 0.75d, 0.95d, 0.99d); + double[] percents = new double[randomCdfValues.size()]; + for (int i = 0; i < randomCdfValues.size(); i++) { + percents[i] = randomCdfValues.get(i); + } + return percents; + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java index 8265933bfb6..22479eb4349 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHitsTests.java @@ -155,7 +155,8 @@ public class InternalTopHitsTests extends InternalAggregationTestCase map = parser.mapOrdered(); + int numberOfNewFields = randomIntBetween(2, 10); + for (int i = 0; i < numberOfNewFields; i++) { + if (randomBoolean()) { + map.put("unknown_field" + i, randomAlphaOfLength(20)); + } else { + map.put("unknown_field" + i, Collections.singletonMap("inner", randomAlphaOfLength(20))); + } + } + XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()); + return xContentBuilder.map(map); + } + } + private static TaskResult randomTaskResult() throws IOException { switch (between(0, 2)) { - case 0: - return new TaskResult(randomBoolean(), randomTaskInfo()); - case 1: - return new TaskResult(randomTaskInfo(), new RuntimeException("error")); - case 2: - return new TaskResult(randomTaskInfo(), randomTaskResponse()); - default: - throw new UnsupportedOperationException("Unsupported random TaskResult constructor"); + 
case 0: + return new TaskResult(randomBoolean(), randomTaskInfo()); + case 1: + return new TaskResult(randomTaskInfo(), new RuntimeException("error")); + case 2: + return new TaskResult(randomTaskInfo(), randomTaskResponse()); + default: + throw new UnsupportedOperationException("Unsupported random TaskResult constructor"); } } diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java index cfaf9203fea..23b849b0742 100644 --- a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -325,7 +325,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { ids = new IDSource() { @Override public String next() { - return TestUtil.randomSimpleString(random); + return TestUtil.randomSimpleString(random, 1, 10); } }; break; @@ -335,7 +335,7 @@ public class SimpleVersioningIT extends ESIntegTestCase { ids = new IDSource() { @Override public String next() { - return TestUtil.randomRealisticUnicodeString(random); + return TestUtil.randomRealisticUnicodeString(random, 1, 20); } }; break; diff --git a/distribution/build.gradle b/distribution/build.gradle index 8946d302473..b02c4ed8802 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -127,13 +127,13 @@ configure(distributions) { apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' project.integTest { - dependsOn project.assemble includePackaged project.name == 'integ-test-zip' if (project.name != 'integ-test-zip') { mustRunAfter ':distribution:integ-test-zip:integTest' } } project.integTestCluster { + dependsOn project.assemble distribution = project.name } diff --git a/distribution/bwc-zip/build.gradle b/distribution/bwc-zip/build.gradle index 7bb5cce51b6..13ef6be444c 100644 --- a/distribution/bwc-zip/build.gradle +++ b/distribution/bwc-zip/build.gradle @@ -30,7 +30,7 @@ import org.elasticsearch.gradle.LoggedExec apply plugin: 'distribution' File checkoutDir = file("${buildDir}/bwc/checkout-5.x") -task createClone(type: Exec) { +task createClone(type: LoggedExec) { onlyIf { checkoutDir.exists() == false } commandLine = ['git', 'clone', rootDir, checkoutDir] } diff --git a/distribution/deb/src/main/packaging/init.d/elasticsearch b/distribution/deb/src/main/packaging/init.d/elasticsearch index cf8b5351aec..59fbef6f277 100755 --- a/distribution/deb/src/main/packaging/init.d/elasticsearch +++ b/distribution/deb/src/main/packaging/init.d/elasticsearch @@ -32,10 +32,6 @@ fi # The following variables can be overwritten in $DEFAULT -# Run Elasticsearch as this user ID and group ID -ES_USER=elasticsearch -ES_GROUP=elasticsearch - # Directory where the Elasticsearch binary distribution resides ES_HOME=/usr/share/$NAME @@ -76,6 +72,12 @@ if [ ! -z "$CONF_FILE" ]; then exit 1 fi +# ES_USER and ES_GROUP settings were removed +if [ ! -z "$ES_USER" ] || [ ! -z "$ES_GROUP" ]; then + echo "ES_USER and ES_GROUP settings are no longer supported. To run as a custom user/group use the archive distribution of Elasticsearch." + exit 1 +fi + # Define other required variables PID_FILE="$PID_DIR/$NAME.pid" DAEMON=$ES_HOME/bin/elasticsearch @@ -119,10 +121,10 @@ case "$1" in # Ensure that the PID_DIR exists (it is cleaned at OS startup time) if [ -n "$PID_DIR" ] && [ ! 
-e "$PID_DIR" ]; then - mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR" + mkdir -p "$PID_DIR" && chown elasticsearch:elasticsearch "$PID_DIR" fi if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then - touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE" + touch "$PID_FILE" && chown elasticsearch:elasticsearch "$PID_FILE" fi if [ -n "$MAX_OPEN_FILES" ]; then @@ -138,7 +140,7 @@ case "$1" in fi # Start Daemon - start-stop-daemon -d $ES_HOME --start --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS + start-stop-daemon -d $ES_HOME --start --user elasticsearch -c elasticsearch --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS return=$? if [ $return -eq 0 ]; then i=0 @@ -162,7 +164,7 @@ case "$1" in if [ -f "$PID_FILE" ]; then start-stop-daemon --stop --pidfile "$PID_FILE" \ - --user "$ES_USER" \ + --user elasticsearch \ --quiet \ --retry forever/TERM/20 > /dev/null if [ $? -eq 1 ]; then diff --git a/distribution/rpm/src/main/packaging/init.d/elasticsearch b/distribution/rpm/src/main/packaging/init.d/elasticsearch index f991dc2f928..1eeb3431526 100644 --- a/distribution/rpm/src/main/packaging/init.d/elasticsearch +++ b/distribution/rpm/src/main/packaging/init.d/elasticsearch @@ -32,8 +32,6 @@ if [ -f /etc/rc.d/init.d/functions ]; then fi # Sets the default values for elasticsearch variables used in this script -ES_USER="elasticsearch" -ES_GROUP="elasticsearch" ES_HOME="/usr/share/elasticsearch" MAX_OPEN_FILES=65536 MAX_MAP_COUNT=262144 @@ -55,6 +53,12 @@ if [ ! -z "$CONF_FILE" ]; then exit 1 fi +# ES_USER and ES_GROUP settings were removed +if [ ! -z "$ES_USER" ] || [ ! -z "$ES_GROUP" ]; then + echo "ES_USER and ES_GROUP settings are no longer supported. To run as a custom user/group use the archive distribution of Elasticsearch." + exit 1 +fi + exec="$ES_HOME/bin/elasticsearch" prog="elasticsearch" pidfile="$PID_DIR/${prog}.pid" @@ -67,11 +71,6 @@ export ES_STARTUP_SLEEP_TIME lockfile=/var/lock/subsys/$prog -# backwards compatibility for old config sysconfig files, pre 0.90.1 -if [ -n $USER ] && [ -z $ES_USER ] ; then - ES_USER=$USER -fi - if [ ! -x "$exec" ]; then echo "The elasticsearch startup script does not exists or it is not executable, tried: $exec" exit 1 @@ -106,16 +105,16 @@ start() { # Ensure that the PID_DIR exists (it is cleaned at OS startup time) if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then - mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR" + mkdir -p "$PID_DIR" && chown elasticsearch:elasticsearch "$PID_DIR" fi if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then - touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile" + touch "$pidfile" && chown elasticsearch:elasticsearch "$pidfile" fi cd $ES_HOME echo -n $"Starting $prog: " # if not running, start it up here, usually something like "daemon $exec" - daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR + daemon --user elasticsearch --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR retval=$? 
echo [ $retval -eq 0 ] && touch $lockfile diff --git a/distribution/src/main/packaging/env/elasticsearch b/distribution/src/main/packaging/env/elasticsearch index 8ce1bba934d..11999ffc7b5 100644 --- a/distribution/src/main/packaging/env/elasticsearch +++ b/distribution/src/main/packaging/env/elasticsearch @@ -32,14 +32,6 @@ # SysV init.d # -# When executing the init script, this user will be used to run the elasticsearch service. -# The default value is 'elasticsearch' and is declared in the init.d file. -# Note that this setting is only used by the init script. If changed, make sure that -# the configured user can read and write into the data, work, plugins and log directories. -# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service -#ES_USER=elasticsearch -#ES_GROUP=elasticsearch - # The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process ES_STARTUP_SLEEP_TIME=5 diff --git a/distribution/src/main/packaging/scripts/postinst b/distribution/src/main/packaging/scripts/postinst index d9f7e6ae126..86f2baadb94 100644 --- a/distribution/src/main/packaging/scripts/postinst +++ b/distribution/src/main/packaging/scripts/postinst @@ -10,10 +10,6 @@ -# Sets the default values for elasticsearch variables used in this script -ES_USER="elasticsearch" -ES_GROUP="elasticsearch" - # Source the default env file ES_ENV_FILE="${path.env}" if [ -f "$ES_ENV_FILE" ]; then @@ -110,9 +106,9 @@ elif [ "$RESTART_ON_UPGRADE" = "true" ]; then echo " OK" fi -chown -R $ES_USER:$ES_GROUP /var/lib/elasticsearch -chown -R $ES_USER:$ES_GROUP /var/log/elasticsearch -chown -R root:$ES_GROUP /etc/elasticsearch +chown -R elasticsearch:elasticsearch /var/lib/elasticsearch +chown -R elasticsearch:elasticsearch /var/log/elasticsearch +chown -R root:elasticsearch /etc/elasticsearch chmod 0750 /etc/elasticsearch chmod 0750 /etc/elasticsearch/scripts diff --git a/distribution/src/main/packaging/scripts/postrm b/distribution/src/main/packaging/scripts/postrm index 19c57eafa2b..b86901e2e25 100644 --- a/distribution/src/main/packaging/scripts/postrm +++ b/distribution/src/main/packaging/scripts/postrm @@ -46,8 +46,6 @@ case "$1" in esac # Sets the default values for elasticsearch variables used in this script -ES_USER="elasticsearch" -ES_GROUP="elasticsearch" LOG_DIR="/var/log/elasticsearch" PLUGINS_DIR="/usr/share/elasticsearch/plugins" PID_DIR="/var/run/elasticsearch" @@ -95,12 +93,12 @@ if [ "$REMOVE_DIRS" = "true" ]; then fi if [ "$REMOVE_USER_AND_GROUP" = "true" ]; then - if id "$ES_USER" > /dev/null 2>&1 ; then - userdel "$ES_USER" + if id elasticsearch > /dev/null 2>&1 ; then + userdel elasticsearch fi - if getent group "$ES_GROUP" > /dev/null 2>&1 ; then - groupdel "$ES_GROUP" + if getent group elasticsearch > /dev/null 2>&1 ; then + groupdel elasticsearch fi fi diff --git a/distribution/src/main/packaging/scripts/preinst b/distribution/src/main/packaging/scripts/preinst index 7ffbd3129a0..73bfe3c2468 100644 --- a/distribution/src/main/packaging/scripts/preinst +++ b/distribution/src/main/packaging/scripts/preinst @@ -11,10 +11,6 @@ -# Sets the default values for elasticsearch variables used in this script -ES_USER="elasticsearch" -ES_GROUP="elasticsearch" - # Source the default env file ES_ENV_FILE="${path.env}" if [ -f "$ES_ENV_FILE" ]; then @@ -27,22 +23,22 @@ case "$1" in install|upgrade) # Create elasticsearch group if not existing - if ! 
getent group "$ES_GROUP" > /dev/null 2>&1 ; then - echo -n "Creating $ES_GROUP group..." - addgroup --quiet --system "$ES_GROUP" + if ! getent group elasticsearch > /dev/null 2>&1 ; then + echo -n "Creating elasticsearch group..." + addgroup --quiet --system elasticsearch echo " OK" fi # Create elasticsearch user if not existing - if ! id $ES_USER > /dev/null 2>&1 ; then - echo -n "Creating $ES_USER user..." + if ! id elasticsearch > /dev/null 2>&1 ; then + echo -n "Creating elasticsearch user..." adduser --quiet \ --system \ --no-create-home \ - --ingroup "$ES_GROUP" \ + --ingroup elasticsearch \ --disabled-password \ --shell /bin/false \ - "$ES_USER" + elasticsearch echo " OK" fi ;; @@ -53,21 +49,21 @@ case "$1" in 1|2) # Create elasticsearch group if not existing - if ! getent group "$ES_GROUP" > /dev/null 2>&1 ; then - echo -n "Creating $ES_GROUP group..." - groupadd -r "$ES_GROUP" + if ! getent group elasticsearch > /dev/null 2>&1 ; then + echo -n "Creating elasticsearch group..." + groupadd -r elasticsearch echo " OK" fi # Create elasticsearch user if not existing - if ! id $ES_USER > /dev/null 2>&1 ; then - echo -n "Creating $ES_USER user..." + if ! id elasticsearch > /dev/null 2>&1 ; then + echo -n "Creating elasticsearch user..." useradd -r \ -M \ - --gid "$ES_GROUP" \ + --gid elasticsearch \ --shell /sbin/nologin \ --comment "elasticsearch user" \ - "$ES_USER" + elasticsearch echo " OK" fi ;; diff --git a/docs/java-api/search.asciidoc b/docs/java-api/search.asciidoc index b1eb120486a..cd185212da3 100644 --- a/docs/java-api/search.asciidoc +++ b/docs/java-api/search.asciidoc @@ -161,11 +161,9 @@ For example, if you have a file named `config/scripts/template_gender.mustache` [source,js] -------------------------------------------------- { - "template" : { - "query" : { - "match" : { - "gender" : "{{param_gender}}" - } + "query" : { + "match" : { + "gender" : "{{param_gender}}" } } } @@ -200,11 +198,9 @@ client.admin().cluster().preparePutStoredScript() .setId("template_gender") .setSource(new BytesArray( "{\n" + - " \"template\" : {\n" + - " \"query\" : {\n" + - " \"match\" : {\n" + - " \"gender\" : \"{{param_gender}}\"\n" + - " }\n" + + " \"query\" : {\n" + + " \"match\" : {\n" + + " \"gender\" : \"{{param_gender}}\"\n" + " }\n" + " }\n" + "}")).get(); diff --git a/docs/reference/aggregations/bucket/filter-aggregation.asciidoc b/docs/reference/aggregations/bucket/filter-aggregation.asciidoc index 3f8ff41304a..bff4096e2dd 100644 --- a/docs/reference/aggregations/bucket/filter-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/filter-aggregation.asciidoc @@ -22,7 +22,7 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -In the above example, we calculate the average price of all the products that are red. +In the above example, we calculate the average price of all the products that are of type t-shirt. Response: diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 89aa091bba2..f215857bf8c 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -1,7 +1,7 @@ [[search-aggregations-metrics-geocentroid-aggregation]] === Geo Centroid Aggregation -A metric aggregation that computes the weighted centroid from all coordinate values for a <> field. 
+A metric aggregation that computes the weighted https://en.wikipedia.org/wiki/Centroid[centroid] from all coordinate values for a <> field. Example: diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 2e9e311dda8..0c840071bb9 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -24,10 +24,17 @@ green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] We can tell quickly how many shards make up an index, the number of -docs at the Lucene level, including hidden docs (e.g., from nested types), -deleted docs, primary store size, and total store size (all shards including replicas). +docs, deleted docs, primary store size, and total store size (all shards including replicas). All these exposed metrics come directly from Lucene APIs. +*Notes:* + +1. As the document and deleted document counts shown here are at the Lucene level, +they include all hidden documents (e.g. from nested documents) as well. + +2. To get the actual count of documents at the Elasticsearch level, the recommended way +is to use either the <> or the <> + [float] [[pri-flag]] === Primaries diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index cbf5fc42ce5..648f839e373 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -29,6 +29,7 @@ twitter 0 13ms store done n/a n/a 127.0.0.1 node-0 n // TESTRESPONSE[s/store/empty_store/] // TESTRESPONSE[s/100%/0.0%/] // TESTRESPONSE[s/9928/0/] +// TESTRESPONSE[s/13ms/\\d+m?s/] // TESTRESPONSE[s/13/\\d+/ _cat] In the above case, the source and target nodes are the same because the recovery diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 16d318ee974..025bad63306 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -1,7 +1,7 @@ [[indices-aliases]] == Index Aliases -APIs in elasticsearch accept an index name when working against a +APIs in Elasticsearch accept an index name when working against a specific index, and several indices when applicable. The index aliases API allow to alias an index with a name, with all APIs automatically converting the alias name to the actual index name. An alias can also be @@ -374,7 +374,7 @@ DELETE /logs_20162801/_alias/current_day [[alias-retrieving]] === Retrieving existing aliases -The get index alias api allows to filter by +The get index alias API allows to filter by alias name and index name. This api redirects to the master and fetches the requested index aliases, if available. This api only serialises the found index aliases. diff --git a/docs/reference/indices/rollover-index.asciidoc b/docs/reference/indices/rollover-index.asciidoc index 9ae8c72a93c..9aec8243af3 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -84,7 +84,7 @@ POST /my_alias/_rollover/my_new_index_name // TEST[s/^/PUT my_old_index_name\nPUT my_old_index_name\/_alias\/my_alias\n/] [float] -=== Using date math with the rolllover API +=== Using date math with the rollover API It can be useful to use <> to name the rollover index according to the date that the index rolled over, e.g. 
diff --git a/docs/reference/migration/migrate_6_0.asciidoc b/docs/reference/migration/migrate_6_0.asciidoc index 45e4dbf2759..22a698bd482 100644 --- a/docs/reference/migration/migrate_6_0.asciidoc +++ b/docs/reference/migration/migrate_6_0.asciidoc @@ -39,6 +39,7 @@ way to reindex old indices is to use the `reindex` API. * <> * <> * <> +* <> include::migrate_6_0/cat.asciidoc[] @@ -69,3 +70,5 @@ include::migrate_6_0/ingest.asciidoc[] include::migrate_6_0/percolator.asciidoc[] include::migrate_6_0/java.asciidoc[] + +include::migrate_6_0/packaging.asciidoc[] diff --git a/docs/reference/migration/migrate_6_0/packaging.asciidoc b/docs/reference/migration/migrate_6_0/packaging.asciidoc new file mode 100644 index 00000000000..fd0cd31d0af --- /dev/null +++ b/docs/reference/migration/migrate_6_0/packaging.asciidoc @@ -0,0 +1,11 @@ +[[breaking_60_packaging_changes]] +=== Packaging changes + +==== Configuring custom user and group for package is no longer allowed + +Previously, someone could configure the `$ES_USER` and `$ES_GROUP` variables to +change which user and group Elasticsearch was run as. This is no longer +possible; the DEB and RPM packages now exclusively use the user and group +`elasticsearch`. If a custom user or group is needed, a provisioning system +should use the tarball distribution instead of the provided RPM and DEB +packages. diff --git a/docs/reference/migration/migrate_6_0/plugins.asciidoc b/docs/reference/migration/migrate_6_0/plugins.asciidoc index 0bc0bb0653d..3e2a0a9dd0e 100644 --- a/docs/reference/migration/migrate_6_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_6_0/plugins.asciidoc @@ -23,6 +23,10 @@ the region of the configured bucket. * Specifying s3 signer type has been removed, including `cloud.aws.signer` and `cloud.aws.s3.signer`. +* All `cloud.aws` and `repositories.s3` settings have been removed. Use `s3.client.*` settings instead. + +* All repository level client settings have been removed. Use `s3.client.*` settings instead. + ==== Azure Repository plugin * The container an azure repository is configured with will no longer be created automatically. diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index a4adbc982f6..2f96e17954e 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -130,3 +130,5 @@ include::search/explain.asciidoc[] include::search/profile.asciidoc[] include::search/field-stats.asciidoc[] + +include::search/field-caps.asciidoc[] diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 666375c10b6..5901fe0c9b0 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -401,6 +401,128 @@ GET /_search // CONSOLE // TEST[setup:twitter] +==== Fragmenter + +The `fragmenter` option controls how text is broken up into highlight snippets. +However, this option is applicable only to the Plain Highlighter. +There are two options: + +[horizontal] +`simple`:: Breaks up the text into same-sized fragments. +`span`:: Same as the simple fragmenter, but tries not to break up text between highlighted terms (this is applicable when using phrase-like queries). This is the default. 
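For reference, the `fragmenter` option can also be set through the Java API, roughly as sketched below. This is an editorial illustration rather than part of this change; it assumes a transport `Client` named `client` is already available, and the `FragmenterExample` class name is hypothetical. The REST examples that follow exercise both fragmenters.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;

public class FragmenterExample {
    // Mirrors the first REST example below: a match_phrase query on "message",
    // highlighted with short fragments and the "simple" fragmenter.
    static SearchResponse search(Client client) {
        return client.prepareSearch("twitter")
                .setQuery(QueryBuilders.matchPhraseQuery("message", "number 1"))
                .highlighter(new HighlightBuilder()
                        .field(new HighlightBuilder.Field("message")
                                .fragmentSize(15)
                                .numOfFragments(3)
                                .fragmenter("simple")))
                .get();
    }
}
--------------------------------------------------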
+ +[source,js] +-------------------------------------------------- +GET twitter/tweet/_search +{ + "query" : { + "match_phrase": { "message": "number 1" } + }, + "highlight" : { + "fields" : { + "message" : { + "fragment_size" : 15, + "number_of_fragments" : 3, + "fragmenter": "simple" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +Response: + +[source,js] +-------------------------------------------------- +{ + ... + "hits": { + "total": 1, + "max_score": 1.4818809, + "hits": [ + { + "_index": "twitter", + "_type": "tweet", + "_id": "1", + "_score": 1.4818809, + "_source": { + "user": "test", + "message": "some message with the number 1", + "date": "2009-11-15T14:12:12", + "likes": 1 + }, + "highlight": { + "message": [ + " with the number", + " 1" + ] + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,/] + +[source,js] +-------------------------------------------------- +GET twitter/tweet/_search +{ + "query" : { + "match_phrase": { "message": "number 1" } + }, + "highlight" : { + "fields" : { + "message" : { + "fragment_size" : 15, + "number_of_fragments" : 3, + "fragmenter": "span" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +Response: + +[source,js] +-------------------------------------------------- +{ + ... + "hits": { + "total": 1, + "max_score": 1.4818809, + "hits": [ + { + "_index": "twitter", + "_type": "tweet", + "_id": "1", + "_score": 1.4818809, + "_source": { + "user": "test", + "message": "some message with the number 1", + "date": "2009-11-15T14:12:12", + "likes": 1 + }, + "highlight": { + "message": [ + "some message with the number 1" + ] + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,/] + +If the `number_of_fragments` option is set to `0`, +`NullFragmenter` is used which does not fragment the text at all. +This is useful for highlighting the entire content of a document or field. ==== Highlight query diff --git a/docs/reference/setup/configuration.asciidoc b/docs/reference/setup/configuration.asciidoc index 1c662aef291..375caf5b75a 100644 --- a/docs/reference/setup/configuration.asciidoc +++ b/docs/reference/setup/configuration.asciidoc @@ -89,23 +89,6 @@ Enter value for [node.name]: NOTE: Elasticsearch will not start if `${prompt.text}` or `${prompt.secret}` is used in the settings and the process is run as a service or in the background. -[float] -=== Setting default settings - -New default settings may be specified on the command line using the -`default.` prefix. This will specify a value that will be used by -default unless another value is specified in the config file. - -For instance, if Elasticsearch is started as follows: - -[source,sh] ---------------------------- -./bin/elasticsearch -Edefault.node.name=My_Node ---------------------------- - -the value for `node.name` will be `My_Node`, unless it is overwritten on the -command line with `es.node.name` or in the config file with `node.name`. 
- [float] [[logging]] == Logging configuration diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 8d1b1282cfb..0a41d0f4653 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -227,8 +227,6 @@ The image offers several methods for configuring Elasticsearch settings with the ===== A. Present the parameters via Docker environment variables For example, to define the cluster name with `docker run` you can pass `-e "cluster.name=mynewclustername"`. Double quotes are required. -NOTE: There is a difference between defining <<_setting_default_settings,default settings>> and normal settings. The former are prefixed with `default.` and cannot override normal settings, if defined. - ===== B. Bind-mounted configuration Create your custom config file and mount this over the image's corresponding file. For example, bind-mounting a `custom_elasticsearch.yml` with `docker run` can be accomplished with the parameter: diff --git a/docs/reference/setup/install/sysconfig-file.asciidoc b/docs/reference/setup/install/sysconfig-file.asciidoc index 1ab0057f01e..3070d08d578 100644 --- a/docs/reference/setup/install/sysconfig-file.asciidoc +++ b/docs/reference/setup/install/sysconfig-file.asciidoc @@ -1,12 +1,4 @@ [horizontal] -`ES_USER`:: - - The user to run as, defaults to `elasticsearch`. - -`ES_GROUP`:: - - The group to run as, defaults to `elasticsearch`. - `JAVA_HOME`:: Set a custom Java path to be used. diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java index 16995f60dff..9045a390f2a 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java @@ -1,5 +1,3 @@ -package org.elasticsearch.painless; - /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -19,7 +17,11 @@ package org.elasticsearch.painless; * under the License. */ -import org.apache.lucene.util.Constants; +package org.elasticsearch.painless; + +import org.elasticsearch.test.ESTestCase; + +import static java.util.Collections.singletonMap; // TODO: Figure out a way to test autobox caching properly from methods such as Integer.valueOf(int); public class EqualsTests extends ScriptTestCase { @@ -132,11 +134,13 @@ public class EqualsTests extends ScriptTestCase { } public void testBranchEqualsDefAndPrimitive() { - assumeFalse("test fails on Windows", Constants.WINDOWS); - assertEquals(true, exec("def x = 1000; int y = 1000; return x == y;")); - assertEquals(false, exec("def x = 1000; int y = 1000; return x === y;")); - assertEquals(true, exec("def x = 1000; int y = 1000; return y == x;")); - assertEquals(false, exec("def x = 1000; int y = 1000; return y === x;")); + /* This test needs an Integer that isn't cached by Integer.valueOf so we draw one randomly. We can't use any fixed integer because + * we can never be sure that the JVM hasn't configured itself to cache that Integer. It is sneaky like that. 
*/ + int uncachedAutoboxedInt = randomValueOtherThanMany(i -> Integer.valueOf(i) == Integer.valueOf(i), ESTestCase::randomInt); + assertEquals(true, exec("def x = params.i; int y = params.i; return x == y;", singletonMap("i", uncachedAutoboxedInt), true)); + assertEquals(false, exec("def x = params.i; int y = params.i; return x === y;", singletonMap("i", uncachedAutoboxedInt), true)); + assertEquals(true, exec("def x = params.i; int y = params.i; return y == x;", singletonMap("i", uncachedAutoboxedInt), true)); + assertEquals(false, exec("def x = params.i; int y = params.i; return y === x;", singletonMap("i", uncachedAutoboxedInt), true)); } public void testBranchNotEquals() { @@ -150,11 +154,13 @@ public class EqualsTests extends ScriptTestCase { } public void testBranchNotEqualsDefAndPrimitive() { - assumeFalse("test fails on Windows", Constants.WINDOWS); - assertEquals(false, exec("def x = 1000; int y = 1000; return x != y;")); - assertEquals(true, exec("def x = 1000; int y = 1000; return x !== y;")); - assertEquals(false, exec("def x = 1000; int y = 1000; return y != x;")); - assertEquals(true, exec("def x = 1000; int y = 1000; return y !== x;")); + /* This test needs an Integer that isn't cached by Integer.valueOf so we draw one randomly. We can't use any fixed integer because + * we can never be sure that the JVM hasn't configured itself to cache that Integer. It is sneaky like that. */ + int uncachedAutoboxedInt = randomValueOtherThanMany(i -> Integer.valueOf(i) == Integer.valueOf(i), ESTestCase::randomInt); + assertEquals(false, exec("def x = params.i; int y = params.i; return x != y;", singletonMap("i", uncachedAutoboxedInt), true)); + assertEquals(true, exec("def x = params.i; int y = params.i; return x !== y;", singletonMap("i", uncachedAutoboxedInt), true)); + assertEquals(false, exec("def x = params.i; int y = params.i; return y != x;", singletonMap("i", uncachedAutoboxedInt), true)); + assertEquals(true, exec("def x = params.i; int y = params.i; return y !== x;", singletonMap("i", uncachedAutoboxedInt), true)); } public void testRightHandNull() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java index fdaf0fa83cc..c528de7694b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; @@ -27,7 +28,10 @@ import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery; import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; /** * Index-by-search tests for parent/child. 
@@ -76,12 +80,11 @@ public class ReindexParentChildTests extends ReindexTestCase { createParentChildDocs("source"); ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCity); - try { - copy.get(); - fail("Expected exception"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured")); - } + final BulkByScrollResponse response = copy.get(); + assertThat(response.getBulkFailures().size(), equalTo(1)); + final Exception cause = response.getBulkFailures().get(0).getCause(); + assertThat(cause, instanceOf(IllegalArgumentException.class)); + assertThat(cause, hasToString(containsString("can't specify parent if no parent field has been configured"))); } /** diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index c1ef1142e1d..be22d28daa9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.bulk.byscroll.AbstractBulkByScrollRequestBuilder import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; import org.elasticsearch.action.bulk.byscroll.BulkByScrollTask; import org.elasticsearch.action.bulk.byscroll.BulkIndexByScrollResponseMatcher; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -76,8 +77,9 @@ public class RetryTests extends ESSingleNodeTestCase { for (int i = 0; i < DOC_COUNT; i++) { bulk.add(client().prepareIndex("source", "test").setSource("foo", "bar " + i)); } - Retry retry = Retry.on(EsRejectedExecutionException.class).policy(BackoffPolicy.exponentialBackoff()).using(client().threadPool()); - BulkResponse response = retry.withSyncBackoff(client()::bulk, bulk.request(), client().settings()); + + Retry retry = new Retry(EsRejectedExecutionException.class, BackoffPolicy.exponentialBackoff(), client().threadPool()); + BulkResponse response = retry.withBackoff(client()::bulk, bulk.request(), client().settings()).actionGet(); assertFalse(response.buildFailureMessage(), response.hasFailures()); client().admin().indices().prepareRefresh("source").get(); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index a4259b41fd8..07e91ec50e4 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -85,7 +85,7 @@ final class Netty4HttpChannel extends AbstractRestChannel { } @Override - public BytesStreamOutput newBytesOutput() { + protected BytesStreamOutput newBytesOutput() { return new ReleasableBytesStreamOutput(transport.bigArrays); } @@ -114,7 +114,8 @@ final class Netty4HttpChannel extends AbstractRestChannel { addCustomHeaders(resp, threadContext.getResponseHeaders()); BytesReference content = response.content(); - boolean release = content instanceof Releasable; + boolean releaseContent = content instanceof Releasable; + boolean releaseBytesStreamOutput = bytesOutputOrNull() instanceof ReleasableBytesStreamOutput; try { 
// If our response doesn't specify a content-type header, set one setHeaderField(resp, HttpHeaderNames.CONTENT_TYPE.toString(), response.contentType(), false); @@ -125,10 +126,14 @@ final class Netty4HttpChannel extends AbstractRestChannel { final ChannelPromise promise = channel.newPromise(); - if (release) { + if (releaseContent) { promise.addListener(f -> ((Releasable)content).close()); } + if (releaseBytesStreamOutput) { + promise.addListener(f -> bytesOutputOrNull().close()); + } + if (isCloseConnection()) { promise.addListener(ChannelFutureListener.CLOSE); } @@ -140,11 +145,15 @@ final class Netty4HttpChannel extends AbstractRestChannel { msg = resp; } channel.writeAndFlush(msg, promise); - release = false; + releaseContent = false; + releaseBytesStreamOutput = false; } finally { - if (release) { + if (releaseContent) { ((Releasable) content).close(); } + if (releaseBytesStreamOutput) { + bytesOutputOrNull().close(); + } if (pipelinedRequest != null) { pipelinedRequest.release(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index c075afd463f..7d8101df10e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -43,18 +43,24 @@ import io.netty.util.Attribute; import io.netty.util.AttributeKey; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasablePagedBytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -64,6 +70,7 @@ import org.elasticsearch.transport.netty4.Netty4Utils; import org.junit.After; import org.junit.Before; +import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.SocketAddress; import java.nio.charset.StandardCharsets; @@ -78,6 +85,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -243,6 +251,37 @@ public class Netty4HttpChannelTests extends ESTestCase { } } + public void 
testReleaseOnSendToChannelAfterException() throws IOException { + final Settings settings = Settings.builder().build(); + final NamedXContentRegistry registry = xContentRegistry(); + try (Netty4HttpServerTransport httpServerTransport = + new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, registry, new NullDispatcher())) { + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); + final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); + final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; + final Netty4HttpChannel channel = + new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, randomBoolean(), threadPool.getThreadContext()); + final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, + JsonXContent.contentBuilder().startObject().endObject()); + assertThat(response.content(), not(instanceOf(Releasable.class))); + + // ensure we have reserved bytes + if (randomBoolean()) { + BytesStreamOutput out = channel.bytesOutput(); + assertThat(out, instanceOf(ReleasableBytesStreamOutput.class)); + } else { + try (XContentBuilder builder = channel.newBuilder()) { + // do something builder + builder.startObject().endObject(); + } + } + + channel.sendResponse(response); + // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released + } + } + public void testConnectionClose() throws Exception { final Settings settings = Settings.builder().build(); try (Netty4HttpServerTransport httpServerTransport = @@ -549,7 +588,7 @@ public class Netty4HttpChannelTests extends ESTestCase { } final ByteArray bigArray = bigArrays.newByteArray(bytes.length); bigArray.set(0, bytes, 0, bytes.length); - reference = new ReleasablePagedBytesReference(bigArrays, bigArray, bytes.length); + reference = new ReleasablePagedBytesReference(bigArrays, bigArray, bytes.length, Releasables.releaseOnce(bigArray)); } @Override diff --git a/plugins/discovery-file/build.gradle b/plugins/discovery-file/build.gradle index c6d622f7248..91457924351 100644 --- a/plugins/discovery-file/build.gradle +++ b/plugins/discovery-file/build.gradle @@ -50,12 +50,10 @@ setupSeedNodeAndUnicastHostsFile.doLast { // second cluster, which will connect to the first via the unicast_hosts.txt file integTestCluster { + dependsOn setupSeedNodeAndUnicastHostsFile clusterName = 'discovery-file-test-cluster' extraConfigFile 'discovery-file/unicast_hosts.txt', srcUnicastHostsFile } integTestRunner.finalizedBy ':plugins:discovery-file:initialCluster#stop' -integTest { - dependsOn(setupSeedNodeAndUnicastHostsFile) -} diff --git a/plugins/jvm-example/build.gradle b/plugins/jvm-example/build.gradle index 3753d3ae831..e8a37a144a5 100644 --- a/plugins/jvm-example/build.gradle +++ b/plugins/jvm-example/build.gradle @@ -41,7 +41,7 @@ task exampleFixture(type: org.elasticsearch.gradle.test.Fixture) { baseDir } -integTest { +integTestCluster { dependsOn exampleFixture } integTestRunner { diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 8977b6952ea..82548f3410e 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -88,7 +88,7 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { } if (fixtureSupported) { - integTest.dependsOn hdfsFixture + 
integTestCluster.dependsOn hdfsFixture } else { logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH") // just tests that the plugin loads diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java index 872e713c546..dbffe293a43 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java @@ -19,134 +19,12 @@ package org.elasticsearch.repositories.s3; -import java.util.Locale; -import java.util.function.Function; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; -import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; interface AwsS3Service extends LifecycleComponent { - // Legacy global AWS settings (shared between discovery-ec2 and repository-s3) - // Each setting starting with `cloud.aws` also exists in discovery-ec2 project. Don't forget to update - // the code there if you change anything here. - /** - * cloud.aws.access_key: AWS Access key. Shared with discovery-ec2 plugin - */ - Setting KEY_SETTING = new Setting<>("cloud.aws.access_key", "", SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated, Property.Shared); - /** - * cloud.aws.secret_key: AWS Secret key. Shared with discovery-ec2 plugin - */ - Setting SECRET_SETTING = new Setting<>("cloud.aws.secret_key", "", SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated, Property.Shared); - /** - * cloud.aws.protocol: Protocol for AWS API: http or https. Defaults to https. Shared with discovery-ec2 plugin - */ - Setting PROTOCOL_SETTING = new Setting<>("cloud.aws.protocol", "https", - s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope, Property.Deprecated, Property.Shared); - /** - * cloud.aws.proxy.host: In case of proxy, define its hostname/IP. Shared with discovery-ec2 plugin - */ - Setting PROXY_HOST_SETTING = Setting.simpleString("cloud.aws.proxy.host", - Property.NodeScope, Property.Deprecated, Property.Shared); - /** - * cloud.aws.proxy.port: In case of proxy, define its port. Defaults to 80. Shared with discovery-ec2 plugin - */ - Setting PROXY_PORT_SETTING = Setting.intSetting("cloud.aws.proxy.port", 80, 0, 1<<16, - Property.NodeScope, Property.Deprecated, Property.Shared); - /** - * cloud.aws.proxy.username: In case of proxy with auth, define the username. Shared with discovery-ec2 plugin - */ - Setting PROXY_USERNAME_SETTING = new Setting<>("cloud.aws.proxy.username", "", SecureString::new, - Property.NodeScope, Property.Deprecated, Property.Shared); - /** - * cloud.aws.proxy.password: In case of proxy with auth, define the password. Shared with discovery-ec2 plugin - */ - Setting PROXY_PASSWORD_SETTING = new Setting<>("cloud.aws.proxy.password", "", SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated, Property.Shared); - /** - * cloud.aws.read_timeout: Socket read timeout. 
Shared with discovery-ec2 plugin - */ - Setting READ_TIMEOUT = Setting.timeSetting("cloud.aws.read_timeout", - TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope, Property.Deprecated, Property.Shared); - - /** - * Defines specific s3 settings starting with cloud.aws.s3. - * NOTE: These are legacy settings. Use the named client configs in {@link org.elasticsearch.repositories.s3.S3Repository}. - */ - interface CLOUD_S3 { - /** - * cloud.aws.s3.access_key: AWS Access key specific for S3 API calls. Defaults to cloud.aws.access_key. - * @see AwsS3Service#KEY_SETTING - */ - Setting KEY_SETTING = - new Setting<>("cloud.aws.s3.access_key", AwsS3Service.KEY_SETTING, SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated); - /** - * cloud.aws.s3.secret_key: AWS Secret key specific for S3 API calls. Defaults to cloud.aws.secret_key. - * @see AwsS3Service#SECRET_SETTING - */ - Setting SECRET_SETTING = - new Setting<>("cloud.aws.s3.secret_key", AwsS3Service.SECRET_SETTING, SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated); - /** - * cloud.aws.s3.protocol: Protocol for AWS API specific for S3 API calls: http or https. Defaults to cloud.aws.protocol. - * @see AwsS3Service#PROTOCOL_SETTING - */ - Setting PROTOCOL_SETTING = - new Setting<>("cloud.aws.s3.protocol", AwsS3Service.PROTOCOL_SETTING, s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), - Property.NodeScope, Property.Deprecated); - /** - * cloud.aws.s3.proxy.host: In case of proxy, define its hostname/IP specific for S3 API calls. Defaults to cloud.aws.proxy.host. - * @see AwsS3Service#PROXY_HOST_SETTING - */ - Setting PROXY_HOST_SETTING = - new Setting<>("cloud.aws.s3.proxy.host", AwsS3Service.PROXY_HOST_SETTING, Function.identity(), - Property.NodeScope, Property.Deprecated); - /** - * cloud.aws.s3.proxy.port: In case of proxy, define its port specific for S3 API calls. Defaults to cloud.aws.proxy.port. - * @see AwsS3Service#PROXY_PORT_SETTING - */ - Setting PROXY_PORT_SETTING = - new Setting<>("cloud.aws.s3.proxy.port", AwsS3Service.PROXY_PORT_SETTING, - s -> Setting.parseInt(s, 0, 1<<16, "cloud.aws.s3.proxy.port"), Property.NodeScope, Property.Deprecated); - /** - * cloud.aws.s3.proxy.username: In case of proxy with auth, define the username specific for S3 API calls. - * Defaults to cloud.aws.proxy.username. - * @see AwsS3Service#PROXY_USERNAME_SETTING - */ - Setting PROXY_USERNAME_SETTING = - new Setting<>("cloud.aws.s3.proxy.username", AwsS3Service.PROXY_USERNAME_SETTING, SecureString::new, - Property.NodeScope, Property.Deprecated); - /** - * cloud.aws.s3.proxy.password: In case of proxy with auth, define the password specific for S3 API calls. - * Defaults to cloud.aws.proxy.password. - * @see AwsS3Service#PROXY_PASSWORD_SETTING - */ - Setting PROXY_PASSWORD_SETTING = - new Setting<>("cloud.aws.s3.proxy.password", AwsS3Service.PROXY_PASSWORD_SETTING, SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated); - /** - * cloud.aws.s3.endpoint: Endpoint. - */ - Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.s3.endpoint", Property.NodeScope); - /** - * cloud.aws.s3.read_timeout: Socket read timeout. Defaults to cloud.aws.read_timeout - * @see AwsS3Service#READ_TIMEOUT - */ - Setting READ_TIMEOUT = - Setting.timeSetting("cloud.aws.s3.read_timeout", AwsS3Service.READ_TIMEOUT, Property.NodeScope, Property.Deprecated); - } - /** * Creates an {@code AmazonS3} client from the given repository metadata and node settings. 
*/ diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsSigner.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsSigner.java deleted file mode 100644 index 61cdb2a7b43..00000000000 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsSigner.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.SignerFactory; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; - -class AwsSigner { - - private static final Logger logger = Loggers.getLogger(AwsSigner.class); - - private AwsSigner() { - - } - - protected static void validateSignerType(String signer, String endpoint) { - if (signer == null) { - throw new IllegalArgumentException("[null] signer set"); - } - - // do not block user to any signerType - switch (signer) { - case "S3SignerType": - if (endpoint.equals("s3.cn-north-1.amazonaws.com.cn") || endpoint.equals("s3.eu-central-1.amazonaws.com")) { - throw new IllegalArgumentException("[S3SignerType] may not be supported in aws Beijing and Frankfurt region"); - } - break; - case "AWSS3V4SignerType": - break; - default: - try { - SignerFactory.getSignerByTypeAndService(signer, null); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException("[" + signer + "] may not be supported"); - } - } - } - - /** - * Add a AWS API Signer. 
- * @param signer Signer to use - * @param configuration AWS Client configuration - */ - public static void configureSigner(String signer, ClientConfiguration configuration, String endpoint) { - try { - validateSignerType(signer, endpoint); - } catch (IllegalArgumentException e) { - logger.warn("{}", e.getMessage()); - } - - configuration.setSignerOverride(signer); - } - -} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index eb2f22782f4..1ba0414afe2 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -75,7 +75,6 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); } - String endpoint = findEndpoint(logger, clientSettings, repositorySettings); Integer maxRetries = getValue(repositorySettings, settings, S3Repository.Repository.MAX_RETRIES_SETTING, S3Repository.Repositories.MAX_RETRIES_SETTING); @@ -94,10 +93,10 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se logger.debug("creating S3 client with client_name [{}], endpoint [{}], max_retries [{}], " + "use_throttle_retries [{}], path_style_access [{}]", - clientName, endpoint, maxRetries, useThrottleRetries, pathStyleAccess); + clientName, clientSettings.endpoint, maxRetries, useThrottleRetries, pathStyleAccess); - AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings); - ClientConfiguration configuration = buildConfiguration(logger, clientSettings, repositorySettings, maxRetries, endpoint, useThrottleRetries); + AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + ClientConfiguration configuration = buildConfiguration(clientSettings, maxRetries, useThrottleRetries); client = new AmazonS3Client(credentials, configuration); @@ -105,8 +104,8 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess)); } - if (Strings.hasText(endpoint)) { - client.setEndpoint(endpoint); + if (Strings.hasText(clientSettings.endpoint)) { + client.setEndpoint(clientSettings.endpoint); } clientsCache.put(clientName, client); @@ -114,14 +113,12 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se } // pkg private for tests - static ClientConfiguration buildConfiguration(Logger logger, S3ClientSettings clientSettings, Settings repositorySettings, - Integer maxRetries, String endpoint, boolean useThrottleRetries) { + static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings, Integer maxRetries, boolean useThrottleRetries) { ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. 
clientConfiguration.setResponseMetadataCacheSize(0); - Protocol protocol = getRepoValue(repositorySettings, S3Repository.Repository.PROTOCOL_SETTING, clientSettings.protocol); - clientConfiguration.setProtocol(protocol); + clientConfiguration.setProtocol(clientSettings.protocol); if (Strings.hasText(clientSettings.proxyHost)) { // TODO: remove this leniency, these settings should exist together and be validated @@ -142,52 +139,16 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se } // pkg private for tests - static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, - S3ClientSettings clientSettings, Settings repositorySettings) { - BasicAWSCredentials credentials = clientSettings.credentials; - if (S3Repository.Repository.KEY_SETTING.exists(repositorySettings)) { - if (S3Repository.Repository.SECRET_SETTING.exists(repositorySettings) == false) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.Repository.KEY_SETTING + - " must be accompanied by setting [" + S3Repository.Repository.SECRET_SETTING + "]"); - } - // backcompat for reading keys out of repository settings - deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + - "store these in named clients and the elasticsearch keystore for secure settings."); - try (SecureString key = S3Repository.Repository.KEY_SETTING.get(repositorySettings); - SecureString secret = S3Repository.Repository.SECRET_SETTING.get(repositorySettings)) { - credentials = new BasicAWSCredentials(key.toString(), secret.toString()); - } - } else if (S3Repository.Repository.SECRET_SETTING.exists(repositorySettings)) { - throw new IllegalArgumentException("Repository setting [" + S3Repository.Repository.SECRET_SETTING + - " must be accompanied by setting [" + S3Repository.Repository.KEY_SETTING + "]"); - } - if (credentials == null) { + static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { + if (clientSettings.credentials == null) { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); } else { logger.debug("Using basic key/secret credentials"); - return new StaticCredentialsProvider(credentials); + return new StaticCredentialsProvider(clientSettings.credentials); } } - // pkg private for tests - /** Returns the endpoint the client should use, based on the available endpoint settings found. */ - static String findEndpoint(Logger logger, S3ClientSettings clientSettings, Settings repositorySettings) { - String endpoint = getRepoValue(repositorySettings, S3Repository.Repository.ENDPOINT_SETTING, clientSettings.endpoint); - if (Strings.hasText(endpoint)) { - logger.debug("using repository level endpoint [{}]", endpoint); - } - return endpoint; - } - - /** Returns the value for a given setting from the repository, or returns the fallback value. 
*/ - private static T getRepoValue(Settings repositorySettings, Setting repositorySetting, T fallback) { - if (repositorySetting.exists(repositorySettings)) { - return repositorySetting.get(repositorySettings); - } - return fallback; - } - @Override protected void doStart() throws ElasticsearchException { } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index edaf44289c6..ece4a5d29ec 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.repositories.s3.AwsS3Service.CLOUD_S3; /** * A container for settings used to create an S3 client. @@ -45,15 +44,15 @@ class S3ClientSettings { /** The access key (ie login id) for connecting to s3. */ static final Setting.AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key", - key -> SecureSetting.secureString(key, S3Repository.Repositories.KEY_SETTING)); + key -> SecureSetting.secureString(key, null)); /** The secret key (ie password) for connecting to s3. */ static final Setting.AffixSetting SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key", - key -> SecureSetting.secureString(key, S3Repository.Repositories.SECRET_SETTING)); + key -> SecureSetting.secureString(key, null)); /** An override for the s3 endpoint to connect to. */ static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", - key -> new Setting<>(key, S3Repository.Repositories.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), + key -> new Setting<>(key, "", s -> s.toLowerCase(Locale.ROOT), Setting.Property.NodeScope)); /** The protocol to use to connect to s3. */ @@ -70,11 +69,11 @@ class S3ClientSettings { /** The username of a proxy to connect to s3 through. */ static final Setting.AffixSetting PROXY_USERNAME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.username", - key -> SecureSetting.secureString(key, AwsS3Service.PROXY_USERNAME_SETTING)); + key -> SecureSetting.secureString(key, null)); /** The password of a proxy to connect to s3 through. */ static final Setting.AffixSetting PROXY_PASSWORD_SETTING = Setting.affixKeySetting(PREFIX, "proxy.password", - key -> SecureSetting.secureString(key, AwsS3Service.PROXY_PASSWORD_SETTING)); + key -> SecureSetting.secureString(key, null)); /** The socket timeout for connecting to s3. */ static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", @@ -142,10 +141,10 @@ class S3ClientSettings { // pkg private for tests /** Parse settings for a single client. 
*/ static S3ClientSettings getClientSettings(Settings settings, String clientName) { - try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING, S3Repository.Repositories.KEY_SETTING); - SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING, S3Repository.Repositories.SECRET_SETTING); - SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING, CLOUD_S3.PROXY_USERNAME_SETTING); - SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING, CLOUD_S3.PROXY_PASSWORD_SETTING)) { + try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); + SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); + SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); + SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { BasicAWSCredentials credentials = null; if (accessKey.length() != 0) { if (secretKey.length() != 0) { @@ -158,26 +157,21 @@ class S3ClientSettings { } return new S3ClientSettings( credentials, - getConfigValue(settings, clientName, ENDPOINT_SETTING, S3Repository.Repositories.ENDPOINT_SETTING), - getConfigValue(settings, clientName, PROTOCOL_SETTING, S3Repository.Repositories.PROTOCOL_SETTING), - getConfigValue(settings, clientName, PROXY_HOST_SETTING, AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING), - getConfigValue(settings, clientName, PROXY_PORT_SETTING, AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING), + getConfigValue(settings, clientName, ENDPOINT_SETTING), + getConfigValue(settings, clientName, PROTOCOL_SETTING), + getConfigValue(settings, clientName, PROXY_HOST_SETTING), + getConfigValue(settings, clientName, PROXY_PORT_SETTING), proxyUsername.toString(), proxyPassword.toString(), - (int)getConfigValue(settings, clientName, READ_TIMEOUT_SETTING, AwsS3Service.CLOUD_S3.READ_TIMEOUT).millis() + (int)getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis() ); } } private static T getConfigValue(Settings settings, String clientName, - Setting.AffixSetting clientSetting, - Setting globalSetting) { + Setting.AffixSetting clientSetting) { Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); - if (concreteSetting.exists(settings)) { - return concreteSetting.get(settings); - } else { - return globalSetting.get(settings); - } + return concreteSetting.get(settings); } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 2ce6396465a..c9f37f24ded 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -19,31 +19,24 @@ package org.elasticsearch.repositories.s3; +import java.io.IOException; + import com.amazonaws.ClientConfiguration; -import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; -import org.elasticsearch.repositories.s3.AwsS3Service.CLOUD_S3; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.settings.SecureSetting; -import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; -import 
org.elasticsearch.common.settings.Setting.AffixSetting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; -import java.io.IOException; -import java.util.Locale; - /** * Shared file system implementation of the BlobStoreRepository *

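The S3ClientSettings and InternalAwsS3Service hunks above drop the legacy fallback chain (per-repository credentials, then repositories.s3.*, then cloud.aws.s3.*): each value is now read only from the named client's s3.client.<name>.* namespace, or falls back to the setting's own default. As a rough, standalone sketch of that resolution pattern — plain JDK maps rather than the Elasticsearch Setting/AffixSetting API, with illustrative keys and defaults only:

---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

// Standalone sketch of per-client setting resolution with no legacy fallback:
// every value comes from the s3.client.<name>.* namespace or the setting's default.
public class ClientSettingsSketch {

    private static final String PREFIX = "s3.client.";

    static String get(Map<String, String> settings, String clientName, String key, String defaultValue) {
        // concrete key for this client, e.g. "s3.client.default.endpoint"
        final String concreteKey = PREFIX + clientName + "." + key;
        return settings.getOrDefault(concreteKey, defaultValue);
    }

    public static void main(String[] args) {
        final Map<String, String> settings = new HashMap<>();
        settings.put("s3.client.default.endpoint", "s3.amazonaws.com");
        settings.put("s3.client.other.endpoint", "s3.eu-central-1.amazonaws.com");

        System.out.println(get(settings, "default", "endpoint", ""));    // s3.amazonaws.com
        System.out.println(get(settings, "other", "endpoint", ""));      // s3.eu-central-1.amazonaws.com
        System.out.println(get(settings, "other", "protocol", "https")); // only the setting default applies
    }
}
---------------------------------------------------------------------------

A repository definition then only names which client it uses; credentials stay in the elasticsearch keystore under that client's namespace instead of appearing in repository settings.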
@@ -65,32 +58,6 @@ class S3Repository extends BlobStoreRepository { * NOTE: These are legacy settings. Use the named client config settings above. */ public interface Repositories { - /** - * repositories.s3.access_key: AWS Access key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.access_key. - * @see CLOUD_S3#KEY_SETTING - */ - Setting KEY_SETTING = new Setting<>("repositories.s3.access_key", CLOUD_S3.KEY_SETTING, SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated); - - /** - * repositories.s3.secret_key: AWS Secret key specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.secret_key. - * @see CLOUD_S3#SECRET_SETTING - */ - Setting SECRET_SETTING = new Setting<>("repositories.s3.secret_key", CLOUD_S3.SECRET_SETTING, SecureString::new, - Property.NodeScope, Property.Filtered, Property.Deprecated); - - /** - * repositories.s3.endpoint: Endpoint specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.endpoint. - * @see CLOUD_S3#ENDPOINT_SETTING - */ - Setting ENDPOINT_SETTING = new Setting<>("repositories.s3.endpoint", CLOUD_S3.ENDPOINT_SETTING, - s -> s.toLowerCase(Locale.ROOT), Property.NodeScope, Property.Deprecated); - /** - * repositories.s3.protocol: Protocol specific for all S3 Repositories API calls. Defaults to cloud.aws.s3.protocol. - * @see CLOUD_S3#PROTOCOL_SETTING - */ - Setting PROTOCOL_SETTING = new Setting<>("repositories.s3.protocol", CLOUD_S3.PROTOCOL_SETTING, - s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope, Property.Deprecated); /** * repositories.s3.bucket: The name of the bucket to be used for snapshots. */ @@ -173,25 +140,9 @@ class S3Repository extends BlobStoreRepository { * If undefined, they use the repositories.s3.xxx equivalent setting. 
*/ public interface Repository { - Setting KEY_SETTING = new Setting<>("access_key", "", SecureString::new, - Property.Filtered, Property.Deprecated); - - - Setting SECRET_SETTING = new Setting<>("secret_key", "", SecureString::new, - Property.Filtered, Property.Deprecated); Setting BUCKET_SETTING = Setting.simpleString("bucket"); - /** - * endpoint - * @see Repositories#ENDPOINT_SETTING - */ - Setting ENDPOINT_SETTING = Setting.simpleString("endpoint", Property.Deprecated); - /** - * protocol - * @see Repositories#PROTOCOL_SETTING - */ - Setting PROTOCOL_SETTING = new Setting<>("protocol", "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), - Property.Deprecated); + /** * server_side_encryption * @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 04814b99e88..1ab0ca35441 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -76,13 +76,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { (metadata) -> new S3Repository(metadata, env.settings(), namedXContentRegistry, createStorageService(env.settings()))); } - @Override - public List getSettingsFilter() { - return Arrays.asList( - S3Repository.Repository.KEY_SETTING.getKey(), - S3Repository.Repository.SECRET_SETTING.getKey()); - } - @Override public List> getSettings() { return Arrays.asList( @@ -98,33 +91,8 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { S3ClientSettings.PROXY_PASSWORD_SETTING, S3ClientSettings.READ_TIMEOUT_SETTING, - // Register global cloud aws settings: cloud.aws (might have been registered in ec2 plugin) - AwsS3Service.KEY_SETTING, - AwsS3Service.SECRET_SETTING, - AwsS3Service.PROTOCOL_SETTING, - AwsS3Service.PROXY_HOST_SETTING, - AwsS3Service.PROXY_PORT_SETTING, - AwsS3Service.PROXY_USERNAME_SETTING, - AwsS3Service.PROXY_PASSWORD_SETTING, - AwsS3Service.READ_TIMEOUT, - - // Register S3 specific settings: cloud.aws.s3 - AwsS3Service.CLOUD_S3.KEY_SETTING, - AwsS3Service.CLOUD_S3.SECRET_SETTING, - AwsS3Service.CLOUD_S3.PROTOCOL_SETTING, - AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING, - AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING, - AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING, - AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING, - AwsS3Service.CLOUD_S3.ENDPOINT_SETTING, - AwsS3Service.CLOUD_S3.READ_TIMEOUT, - // Register S3 repositories settings: repositories.s3 - S3Repository.Repositories.KEY_SETTING, - S3Repository.Repositories.SECRET_SETTING, S3Repository.Repositories.BUCKET_SETTING, - S3Repository.Repositories.ENDPOINT_SETTING, - S3Repository.Repositories.PROTOCOL_SETTING, S3Repository.Repositories.SERVER_SIDE_ENCRYPTION_SETTING, S3Repository.Repositories.BUFFER_SIZE_SETTING, S3Repository.Repositories.MAX_RETRIES_SETTING, diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java deleted file mode 100644 index cc33fcc243e..00000000000 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.s3; - -import com.amazonaws.ClientConfiguration; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.repositories.s3.AwsSigner; -import org.elasticsearch.repositories.s3.S3RepositoryPlugin; -import org.elasticsearch.test.ESTestCase; -import org.junit.BeforeClass; - -import static org.hamcrest.CoreMatchers.is; - -public class AWSSignersTests extends ESTestCase { - - /** - * Starts S3RepositoryPlugin. It's a workaround when you run test from IntelliJ. Otherwise it generates - * java.security.AccessControlException: access denied ("java.lang.RuntimePermission" "accessDeclaredMembers") - */ - @BeforeClass - public static void instantiatePlugin() { - new S3RepositoryPlugin(Settings.EMPTY); - } - - public void testSigners() { - assertThat(signerTester(null), is(false)); - assertThat(signerTester("QueryStringSignerType"), is(true)); - assertThat(signerTester("AWS3SignerType"), is(true)); - assertThat(signerTester("AWS4SignerType"), is(true)); - assertThat(signerTester("NoOpSignerType"), is(true)); - assertThat(signerTester("UndefinedSigner"), is(false)); - assertThat(signerTester("S3SignerType"), is(true)); - assertThat(signerTester("AWSS3V4SignerType"), is(true)); - - ClientConfiguration configuration = new ClientConfiguration(); - AwsSigner.configureSigner("AWS4SignerType", configuration, "any"); - assertEquals(configuration.getSignerOverride(), "AWS4SignerType"); - AwsSigner.configureSigner("S3SignerType", configuration, "any"); - assertEquals(configuration.getSignerOverride(), "S3SignerType"); - } - - public void testV2InInvalidRegion() { - try { - AwsSigner.validateSignerType("S3SignerType", "s3.cn-north-1.amazonaws.com.cn"); - fail("S3SignerType should not be available for China region"); - } catch (IllegalArgumentException e) { - assertEquals("[S3SignerType] may not be supported in aws Beijing and Frankfurt region", e.getMessage()); - } - - try { - AwsSigner.validateSignerType("S3SignerType", "s3.eu-central-1.amazonaws.com"); - fail("S3SignerType should not be available for Frankfurt region"); - } catch (IllegalArgumentException e) { - assertEquals("[S3SignerType] may not be supported in aws Beijing and Frankfurt region", e.getMessage()); - } - } - - - /** - * Test a signer configuration - * @param signer signer name - * @return true if successful, false otherwise - */ - private boolean signerTester(String signer) { - try { - AwsSigner.validateSignerType(signer, "s3.amazonaws.com"); - return true; - } catch (IllegalArgumentException e) { - return false; - } - } -} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index 9b94744883a..e3a524c640e 100644 
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -283,8 +283,6 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.builder() .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.Repository.KEY_SETTING.getKey(), bucketSettings.get("access_key")) - .put(S3Repository.Repository.SECRET_SETTING.getKey(), bucketSettings.get("secret_key")) .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); @@ -300,9 +298,6 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") .setType("s3").setSettings(Settings.builder() .put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket")) - .put(S3Repository.Repository.ENDPOINT_SETTING.getKey(), bucketSettings.get("endpoint")) - .put(S3Repository.Repository.KEY_SETTING.getKey(), bucketSettings.get("access_key")) - .put(S3Repository.Repository.SECRET_SETTING.getKey(), bucketSettings.get("secret_key")) .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 77dbfd1dc5c..18608b83627 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -24,7 +24,6 @@ import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -35,7 +34,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, "default"); - AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, Settings.EMPTY); + AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, clientSettings); assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } @@ -58,147 +57,11 @@ public class AwsS3ServiceImplTests extends ESTestCase { launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "aws_key", "aws_secret"); } - public void testAWSCredentialsWithElasticsearchAwsSettingsBackcompat() { - Settings settings = Settings.builder() - .put(AwsS3Service.KEY_SETTING.getKey(), "aws_key") - .put(AwsS3Service.SECRET_SETTING.getKey(), "aws_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "aws_key", "aws_secret"); - 
assertSettingDeprecationsAndWarnings(new Setting[]{AwsS3Service.KEY_SETTING, AwsS3Service.SECRET_SETTING}); - } - - public void testAWSCredentialsWithElasticsearchS3SettingsBackcompat() { - Settings settings = Settings.builder() - .put(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey(), "s3_key") - .put(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey(), "s3_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "s3_key", "s3_secret"); - assertSettingDeprecationsAndWarnings(new Setting[]{AwsS3Service.CLOUD_S3.KEY_SETTING, AwsS3Service.CLOUD_S3.SECRET_SETTING}); - } - - public void testAWSCredentialsWithElasticsearchAwsAndS3SettingsBackcompat() { - Settings settings = Settings.builder() - .put(AwsS3Service.KEY_SETTING.getKey(), "aws_key") - .put(AwsS3Service.SECRET_SETTING.getKey(), "aws_secret") - .put(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey(), "s3_key") - .put(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey(), "s3_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "s3_key", "s3_secret"); - assertSettingDeprecationsAndWarnings(new Setting[]{ - AwsS3Service.KEY_SETTING, - AwsS3Service.SECRET_SETTING, - AwsS3Service.CLOUD_S3.KEY_SETTING, - AwsS3Service.CLOUD_S3.SECRET_SETTING}); - } - - public void testAWSCredentialsWithElasticsearchRepositoriesSettingsBackcompat() { - Settings settings = Settings.builder() - .put(S3Repository.Repositories.KEY_SETTING.getKey(), "repositories_key") - .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "repositories_key", "repositories_secret"); - assertSettingDeprecationsAndWarnings( - new Setting[]{S3Repository.Repositories.KEY_SETTING, S3Repository.Repositories.SECRET_SETTING}); - } - - public void testAWSCredentialsWithElasticsearchAwsAndRepositoriesSettingsBackcompat() { - Settings settings = Settings.builder() - .put(AwsS3Service.KEY_SETTING.getKey(), "aws_key") - .put(AwsS3Service.SECRET_SETTING.getKey(), "aws_secret") - .put(S3Repository.Repositories.KEY_SETTING.getKey(), "repositories_key") - .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "repositories_key", "repositories_secret"); - assertSettingDeprecationsAndWarnings(new Setting[]{ - AwsS3Service.KEY_SETTING, - AwsS3Service.SECRET_SETTING, - S3Repository.Repositories.KEY_SETTING, - S3Repository.Repositories.SECRET_SETTING}); - } - - public void testAWSCredentialsWithElasticsearchAwsAndS3AndRepositoriesSettingsBackcompat() { - Settings settings = Settings.builder() - .put(AwsS3Service.KEY_SETTING.getKey(), "aws_key") - .put(AwsS3Service.SECRET_SETTING.getKey(), "aws_secret") - .put(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey(), "s3_key") - .put(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey(), "s3_secret") - .put(S3Repository.Repositories.KEY_SETTING.getKey(), "repositories_key") - .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(Settings.EMPTY, settings, "repositories_key", "repositories_secret"); - assertSettingDeprecationsAndWarnings(new Setting[]{ - AwsS3Service.KEY_SETTING, - AwsS3Service.SECRET_SETTING, - AwsS3Service.CLOUD_S3.KEY_SETTING, - AwsS3Service.CLOUD_S3.SECRET_SETTING, - S3Repository.Repositories.KEY_SETTING, - S3Repository.Repositories.SECRET_SETTING}); - } - - public void 
testAWSCredentialsWithElasticsearchRepositoriesSettingsAndRepositorySettingsBackcompat() { - Settings repositorySettings = generateRepositorySettings("repository_key", "repository_secret", null, null); - Settings settings = Settings.builder() - .put(S3Repository.Repositories.KEY_SETTING.getKey(), "repositories_key") - .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings(new Setting[]{ - S3Repository.Repositories.KEY_SETTING, - S3Repository.Repositories.SECRET_SETTING, - S3Repository.Repository.KEY_SETTING, - S3Repository.Repository.SECRET_SETTING}, - "Using s3 access/secret key from repository settings. Instead store these in named clients and the elasticsearch keystore for secure settings."); - } - - public void testAWSCredentialsWithElasticsearchAwsAndRepositoriesSettingsAndRepositorySettingsBackcompat() { - Settings repositorySettings = generateRepositorySettings("repository_key", "repository_secret", null, null); - Settings settings = Settings.builder() - .put(AwsS3Service.KEY_SETTING.getKey(), "aws_key") - .put(AwsS3Service.SECRET_SETTING.getKey(), "aws_secret") - .put(S3Repository.Repositories.KEY_SETTING.getKey(), "repositories_key") - .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings(new Setting[]{ - AwsS3Service.KEY_SETTING, - AwsS3Service.SECRET_SETTING, - S3Repository.Repositories.KEY_SETTING, - S3Repository.Repositories.SECRET_SETTING, - S3Repository.Repository.KEY_SETTING, - S3Repository.Repository.SECRET_SETTING}, - "Using s3 access/secret key from repository settings. Instead store these in named clients and the elasticsearch keystore for secure settings."); - } - - public void testAWSCredentialsWithElasticsearchAwsAndS3AndRepositoriesSettingsAndRepositorySettingsBackcompat() { - Settings repositorySettings = generateRepositorySettings("repository_key", "repository_secret", null, null); - Settings settings = Settings.builder() - .put(AwsS3Service.KEY_SETTING.getKey(), "aws_key") - .put(AwsS3Service.SECRET_SETTING.getKey(), "aws_secret") - .put(AwsS3Service.CLOUD_S3.KEY_SETTING.getKey(), "s3_key") - .put(AwsS3Service.CLOUD_S3.SECRET_SETTING.getKey(), "s3_secret") - .put(S3Repository.Repositories.KEY_SETTING.getKey(), "repositories_key") - .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") - .build(); - launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings(new Setting[]{ - AwsS3Service.KEY_SETTING, - AwsS3Service.SECRET_SETTING, - AwsS3Service.CLOUD_S3.KEY_SETTING, - AwsS3Service.CLOUD_S3.SECRET_SETTING, - S3Repository.Repositories.KEY_SETTING, - S3Repository.Repositories.SECRET_SETTING, - S3Repository.Repository.KEY_SETTING, - S3Repository.Repository.SECRET_SETTING}, - "Using s3 access/secret key from repository settings. 
Instead store these in named clients and the elasticsearch keystore for secure settings."); - } - - protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings singleRepositorySettings, Settings settings, + private void launchAWSCredentialsWithElasticsearchSettingsTest(Settings singleRepositorySettings, Settings settings, String expectedKey, String expectedSecret) { String configName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings); S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); - AWSCredentials credentials = InternalAwsS3Service - .buildCredentials(logger, deprecationLogger, clientSettings, singleRepositorySettings) - .getCredentials(); + AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, clientSettings).getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } @@ -223,58 +86,6 @@ public class AwsS3ServiceImplTests extends ESTestCase { "aws_proxy_password", 3, false, 10000); } - public void testAWSConfigurationWithAwsSettingsBackcompat() { - Settings settings = Settings.builder() - .put(AwsS3Service.PROTOCOL_SETTING.getKey(), "http") - .put(AwsS3Service.PROXY_HOST_SETTING.getKey(), "aws_proxy_host") - .put(AwsS3Service.PROXY_PORT_SETTING.getKey(), 8080) - .put(AwsS3Service.PROXY_USERNAME_SETTING.getKey(), "aws_proxy_username") - .put(AwsS3Service.PROXY_PASSWORD_SETTING.getKey(), "aws_proxy_password") - .put(AwsS3Service.READ_TIMEOUT.getKey(), "10s") - .build(); - launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", - "aws_proxy_password", 3, false, 10000); - assertSettingDeprecationsAndWarnings(new Setting[]{ - AwsS3Service.PROXY_USERNAME_SETTING, - AwsS3Service.PROXY_PASSWORD_SETTING, - AwsS3Service.PROTOCOL_SETTING, - AwsS3Service.PROXY_HOST_SETTING, - AwsS3Service.PROXY_PORT_SETTING, - AwsS3Service.READ_TIMEOUT}); - } - - public void testAWSConfigurationWithAwsAndS3SettingsBackcompat() { - Settings settings = Settings.builder() - .put(AwsS3Service.PROTOCOL_SETTING.getKey(), "http") - .put(AwsS3Service.PROXY_HOST_SETTING.getKey(), "aws_proxy_host") - .put(AwsS3Service.PROXY_PORT_SETTING.getKey(), 8080) - .put(AwsS3Service.PROXY_USERNAME_SETTING.getKey(), "aws_proxy_username") - .put(AwsS3Service.PROXY_PASSWORD_SETTING.getKey(), "aws_proxy_password") - .put(AwsS3Service.READ_TIMEOUT.getKey(), "5s") - .put(AwsS3Service.CLOUD_S3.PROTOCOL_SETTING.getKey(), "https") - .put(AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING.getKey(), "s3_proxy_host") - .put(AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING.getKey(), 8081) - .put(AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING.getKey(), "s3_proxy_username") - .put(AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING.getKey(), "s3_proxy_password") - .put(AwsS3Service.CLOUD_S3.READ_TIMEOUT.getKey(), "10s") - .build(); - launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, "s3_proxy_host", 8081, "s3_proxy_username", - "s3_proxy_password", 3, false, 10000); - assertSettingDeprecationsAndWarnings(new Setting[] { - AwsS3Service.PROXY_USERNAME_SETTING, - AwsS3Service.PROXY_PASSWORD_SETTING, - AwsS3Service.PROTOCOL_SETTING, - AwsS3Service.PROXY_HOST_SETTING, - AwsS3Service.PROXY_PORT_SETTING, - AwsS3Service.READ_TIMEOUT, - AwsS3Service.CLOUD_S3.PROXY_USERNAME_SETTING, - AwsS3Service.CLOUD_S3.PROXY_PASSWORD_SETTING, - AwsS3Service.CLOUD_S3.PROTOCOL_SETTING, - AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING, - 
AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING, - AwsS3Service.CLOUD_S3.READ_TIMEOUT}); - } - public void testGlobalMaxRetries() { Settings settings = Settings.builder() .put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10) @@ -284,7 +95,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { } public void testRepositoryMaxRetries() { - Settings repositorySettings = generateRepositorySettings(null, null, null, 20); + Settings repositorySettings = generateRepositorySettings(20); Settings settings = Settings.builder() .put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10) .build(); @@ -292,7 +103,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { null, 20, false, 50000); } - protected void launchAWSConfigurationTest(Settings settings, + private void launchAWSConfigurationTest(Settings settings, Settings singleRepositorySettings, Protocol expectedProtocol, String expectedProxyHost, @@ -308,8 +119,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING); S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); - ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(logger, clientSettings, - singleRepositorySettings, maxRetries, null, useThrottleRetries); + ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings, maxRetries, useThrottleRetries); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -322,52 +132,25 @@ public class AwsS3ServiceImplTests extends ESTestCase { assertThat(configuration.getSocketTimeout(), is(expectedReadTimeout)); } - private static Settings generateRepositorySettings(String key, String secret, String endpoint, Integer maxRetries) { + private static Settings generateRepositorySettings(Integer maxRetries) { Settings.Builder builder = Settings.builder(); - if (endpoint != null) { - builder.put(S3Repository.Repository.ENDPOINT_SETTING.getKey(), endpoint); - } - if (key != null) { - builder.put(S3Repository.Repository.KEY_SETTING.getKey(), key); - } - if (secret != null) { - builder.put(S3Repository.Repository.SECRET_SETTING.getKey(), secret); - } if (maxRetries != null) { builder.put(S3Repository.Repository.MAX_RETRIES_SETTING.getKey(), maxRetries); } return builder.build(); } - public void testDefaultEndpoint() { - assertEndpoint(generateRepositorySettings("repository_key", "repository_secret", null, null), Settings.EMPTY, ""); - } - public void testEndpointSetting() { Settings settings = Settings.builder() .put("s3.client.default.endpoint", "s3.endpoint") .build(); - assertEndpoint(generateRepositorySettings("repository_key", "repository_secret", null, null), settings, "s3.endpoint"); + assertEndpoint(Settings.EMPTY, settings, "s3.endpoint"); } - public void testEndpointSettingBackcompat() { - assertEndpoint(generateRepositorySettings("repository_key", "repository_secret", "repository.endpoint", null), - Settings.EMPTY, "repository.endpoint"); - assertSettingDeprecationsAndWarnings(new Setting[]{S3Repository.Repository.ENDPOINT_SETTING}); - Settings settings = Settings.builder() - .put(S3Repository.Repositories.ENDPOINT_SETTING.getKey(), "repositories.endpoint") - .build(); - assertEndpoint(generateRepositorySettings("repository_key", "repository_secret", null, null), settings, - "repositories.endpoint"); - assertSettingDeprecationsAndWarnings(new 
Setting[]{S3Repository.Repositories.ENDPOINT_SETTING}); - } - - private void assertEndpoint(Settings repositorySettings, Settings settings, - String expectedEndpoint) { + private void assertEndpoint(Settings repositorySettings, Settings settings, String expectedEndpoint) { String configName = InternalAwsS3Service.CLIENT_NAME.get(repositorySettings); S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); - String foundEndpoint = InternalAwsS3Service.findEndpoint(logger, clientSettings, repositorySettings); - assertThat(foundEndpoint, is(expectedEndpoint)); + assertThat(clientSettings.endpoint, is(expectedEndpoint)); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index f1b3ceb28f1..11daf7b18ff 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -65,21 +65,6 @@ public class S3RepositoryTests extends ESTestCase { } } - public void testSettingsResolution() throws Exception { - Settings localSettings = Settings.builder().put(Repository.KEY_SETTING.getKey(), "key1").build(); - Settings globalSettings = Settings.builder().put(Repositories.KEY_SETTING.getKey(), "key2").build(); - - assertEquals(new SecureString("key1".toCharArray()), - getValue(localSettings, globalSettings, Repository.KEY_SETTING, Repositories.KEY_SETTING)); - assertEquals(new SecureString("key1".toCharArray()), - getValue(localSettings, Settings.EMPTY, Repository.KEY_SETTING, Repositories.KEY_SETTING)); - assertEquals(new SecureString("key2".toCharArray()), - getValue(Settings.EMPTY, globalSettings, Repository.KEY_SETTING, Repositories.KEY_SETTING)); - assertEquals(new SecureString("".toCharArray()), - getValue(Settings.EMPTY, Settings.EMPTY, Repository.KEY_SETTING, Repositories.KEY_SETTING)); - assertSettingDeprecationsAndWarnings(new Setting[]{Repository.KEY_SETTING, Repositories.KEY_SETTING}); - } - public void testInvalidChunkBufferSizeSettings() throws IOException { // chunk < buffer should fail assertInvalidBuffer(10, 5, RepositoryException.class, "chunk_size (5mb) can't be lower than buffer_size (10mb)."); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java index eb679df9f6a..3f0709f1a30 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java @@ -33,7 +33,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class EvilJNANativesTests extends ESTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/23640") public void testSetMaximumNumberOfThreads() throws IOException { if (Constants.LINUX) { final List lines = Files.readAllLines(PathUtils.get("/proc/self/limits")); @@ -56,7 +55,6 @@ public class EvilJNANativesTests extends ESTestCase { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/23640") public void testSetMaxSizeVirtualMemory() throws IOException { if (Constants.LINUX) { final List lines = Files.readAllLines(PathUtils.get("/proc/self/limits")); diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 
c6e34963af9..024e306d817 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -36,11 +36,10 @@ remoteClusterTestRunner { systemProperty 'tests.rest.suite', 'remote_cluster' } -task mixedClusterTest(type: RestIntegTestTask) { - dependsOn(remoteClusterTestRunner) -} +task mixedClusterTest(type: RestIntegTestTask) {} mixedClusterTestCluster { + dependsOn remoteClusterTestRunner distribution = 'zip' setting 'search.remote.my_remote_cluster.seeds', "\"${-> remoteClusterTest.nodes.get(0).transportUri()}\"" setting 'search.remote.connections_per_cluster', 1 diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 9b8259eecb5..23835026181 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -39,11 +39,10 @@ oldClusterTestRunner { systemProperty 'tests.rest.suite', 'old_cluster' } -task mixedClusterTest(type: RestIntegTestTask) { - dependsOn(oldClusterTestRunner, 'oldClusterTestCluster#node1.stop') -} +task mixedClusterTest(type: RestIntegTestTask) {} mixedClusterTestCluster { + dependsOn oldClusterTestRunner, 'oldClusterTestCluster#node1.stop' distribution = 'zip' clusterName = 'rolling-upgrade' unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } diff --git a/qa/smoke-test-client/build.gradle b/qa/smoke-test-client/build.gradle index e4d197e7e6a..a575a131d87 100644 --- a/qa/smoke-test-client/build.gradle +++ b/qa/smoke-test-client/build.gradle @@ -37,6 +37,6 @@ singleNodeIntegTestCluster { setting 'discovery.type', 'single-node' } -integTest.dependsOn(singleNodeIntegTestRunner, 'singleNodeIntegTestCluster#stop') +integTestCluster.dependsOn(singleNodeIntegTestRunner, 'singleNodeIntegTestCluster#stop') check.dependsOn(integTest) diff --git a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats index dde2147ea45..2d502084a4a 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/20_tar_package.bats @@ -116,6 +116,10 @@ setup() { chown -R elasticsearch:elasticsearch "$temp" echo "-Xms512m" >> "$temp/jvm.options" echo "-Xmx512m" >> "$temp/jvm.options" + # we have to disable Log4j from using JMX lest it will hit a security + # manager exception before we have configured logging; this will fail + # startup since we detect usages of logging before it is configured + echo "-Dlog4j2.disable.jmx=true" >> "$temp/jvm.options" export ES_JVM_OPTIONS="$temp/jvm.options" export ES_JAVA_OPTS="-XX:-UseCompressedOops" start_elasticsearch_service diff --git a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats index 64f0e977c7d..70db8744456 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats @@ -147,6 +147,10 @@ setup() { chown -R elasticsearch:elasticsearch "$temp" echo "-Xms512m" >> "$temp/jvm.options" echo "-Xmx512m" >> "$temp/jvm.options" + # we have to disable Log4j from using JMX lest it will hit a security + # manager exception before we have configured logging; this will fail + # startup since we detect usages of logging before it is configured + echo "-Dlog4j2.disable.jmx=true" >> "$temp/jvm.options" cp $ESENVFILE "$temp/elasticsearch" echo "ES_JVM_OPTIONS=\"$temp/jvm.options\"" >> $ESENVFILE echo "ES_JAVA_OPTS=\"-XX:-UseCompressedOops\"" 
>> $ESENVFILE diff --git a/qa/vagrant/src/test/resources/packaging/tests/75_bad_data_paths.bats b/qa/vagrant/src/test/resources/packaging/tests/75_bad_data_paths.bats new file mode 100644 index 00000000000..0f802a439b7 --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/tests/75_bad_data_paths.bats @@ -0,0 +1,77 @@ +#!/usr/bin/env bats + +# Tests data.path settings which in the past have misbehaving, leaking the +# default.data.path setting into the data.path even when it doesn't belong. + +# WARNING: This testing file must be executed as root and can +# dramatically change your system. It removes the 'elasticsearch' +# user/group and also many directories. Do not execute this file +# unless you know exactly what you are doing. + +# The test case can be executed with the Bash Automated +# Testing System tool available at https://github.com/sstephenson/bats +# Thanks to Sam Stephenson! + +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Load test utilities +load $BATS_UTILS/packages.bash +load $BATS_UTILS/tar.bash +load $BATS_UTILS/utils.bash + +@test "[BAD data.path] install package" { + clean_before_test + skip_not_dpkg_or_rpm + install_package +} + +@test "[BAD data.path] setup funny path.data in package install" { + skip_not_dpkg_or_rpm + local temp=`mktemp -d` + chown elasticsearch:elasticsearch "$temp" + echo "path.data: [$temp]" > "/etc/elasticsearch/elasticsearch.yml" +} + +@test "[BAD data.path] start installed from package" { + skip_not_dpkg_or_rpm + start_elasticsearch_service green +} + +@test "[BAD data.path] check for bad dir after starting from package" { + skip_not_dpkg_or_rpm + assert_file_not_exist /var/lib/elasticsearch/nodes +} + +@test "[BAD data.path] install tar" { + clean_before_test + install_archive +} + +@test "[BAD data.path] setup funny path.data in tar install" { + local temp=`mktemp -d` + chown elasticsearch:elasticsearch "$temp" + echo "path.data: [$temp]" > "/tmp/elasticsearch/config/elasticsearch.yml" +} + +@test "[BAD data.path] start installed from tar" { + start_elasticsearch_service green "" "-Edefault.path.data=/tmp/elasticsearch/data" +} + +@test "[BAD data.path] check for bad dir after starting from tar" { + assert_file_not_exist "/tmp/elasticsearch/data/nodes" +} diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 705daad6279..64797a33f57 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -142,7 +142,7 @@ fi move_config CONF_DIR="$ESCONFIG" install_jvm_example - CONF_DIR="$ESCONFIG" start_elasticsearch_service + CONF_DIR="$ESCONFIG" 
ES_JVM_OPTIONS="$ESCONFIG/jvm.options" start_elasticsearch_service diff <(curl -s localhost:9200/_cat/configured_example | sed 's/ //g') <(echo "foo") stop_elasticsearch_service CONF_DIR="$ESCONFIG" remove_jvm_example diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index f8abe1b5266..877f49b576d 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -297,8 +297,9 @@ purge_elasticsearch() { start_elasticsearch_service() { local desiredStatus=${1:-green} local index=$2 + local commandLineArgs=$3 - run_elasticsearch_service 0 + run_elasticsearch_service 0 $commandLineArgs wait_for_elasticsearch_status $desiredStatus $index @@ -330,8 +331,10 @@ run_elasticsearch_service() { if [ ! -z "$CONF_DIR" ] ; then if is_dpkg ; then echo "CONF_DIR=$CONF_DIR" >> /etc/default/elasticsearch; + echo "ES_JVM_OPTIONS=$ES_JVM_OPTIONS" >> /etc/default/elasticsearch; elif is_rpm; then echo "CONF_DIR=$CONF_DIR" >> /etc/sysconfig/elasticsearch; + echo "ES_JVM_OPTIONS=$ES_JVM_OPTIONS" >> /etc/sysconfig/elasticsearch fi fi @@ -525,6 +528,7 @@ move_config() { mv "$oldConfig"/* "$ESCONFIG" chown -R elasticsearch:elasticsearch "$ESCONFIG" assert_file_exist "$ESCONFIG/elasticsearch.yml" + assert_file_exist "$ESCONFIG/jvm.options" assert_file_exist "$ESCONFIG/log4j2.properties" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yaml index f8617116dc1..3573f8ba75b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yaml @@ -23,3 +23,38 @@ - match: {count: 2} +--- +"Empty _id": + - skip: + version: " - 5.3.0" + reason: empty IDs were not rejected until 5.3.1 + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: type + _id: '' + - f: 1 + - index: + _index: test + _type: type + _id: id + - f: 2 + - index: + _index: test + _type: type + - f: 3 + - match: { errors: true } + - match: { items.0.index.status: 400 } + - match: { items.0.index.error.type: illegal_argument_exception } + - match: { items.0.index.error.reason: if _id is specified it must not be empty } + - match: { items.1.index.created: true } + - match: { items.2.index.created: true } + + - do: + count: + index: test + + - match: { count: 2 } diff --git a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java index f3fd0e2f9c0..c6551543473 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -432,7 +432,7 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { } public void testSliceArrayOffset() throws IOException { - int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); + int length = randomIntBetween(1, PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = newBytesReference(length); int sliceOffset = randomIntBetween(0, pbr.length() - 1); // an offset to the end would be len 0 int sliceLength = randomIntBetween(1, pbr.length() - sliceOffset); diff --git a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java 
b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java index ae4aff917a9..e99c7b90631 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java +++ b/test/framework/src/main/java/org/elasticsearch/node/NodeTests.java @@ -19,31 +19,41 @@ package org.elasticsearch.node; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.transport.MockTcpTransportPlugin; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; +@LuceneTestCase.SuppressFileSystems(value = "ExtrasFS") public class NodeTests extends ESTestCase { public void testNodeName() throws IOException { @@ -165,14 +175,81 @@ public class NodeTests extends ESTestCase { } } + public void testDefaultPathDataSet() throws IOException { + final Path zero = createTempDir().toAbsolutePath(); + final Path one = createTempDir().toAbsolutePath(); + final Path defaultPathData = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", "/home") + .put("path.data.0", zero) + .put("path.data.1", one) + .put("default.path.data", defaultPathData) + .build(); + try (NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings))) { + final Path defaultPathDataWithNodesAndId = defaultPathData.resolve("nodes/0"); + Files.createDirectories(defaultPathDataWithNodesAndId); + final NodeEnvironment.NodePath defaultNodePath = new NodeEnvironment.NodePath(defaultPathDataWithNodesAndId); + final boolean indexExists = randomBoolean(); + final List indices; + if (indexExists) { + indices = IntStream.range(0, randomIntBetween(1, 3)).mapToObj(i -> UUIDs.randomBase64UUID()).collect(Collectors.toList()); + for (final String index : indices) { + Files.createDirectories(defaultNodePath.indicesPath.resolve(index)); + } + } else { + indices = Collections.emptyList(); + } + final Logger mock = mock(Logger.class); + if (indexExists) { + final IllegalStateException e = expectThrows( + IllegalStateException.class, + () -> Node.checkForIndexDataInDefaultPathData(settings, nodeEnv, mock)); + final String message = String.format( + Locale.ROOT, + "detected index data in default.path.data [%s] where there should not be any; check the logs for details", + defaultPathData); + assertThat(e, hasToString(containsString(message))); + verify(mock) + .error("detected index data in default.path.data [{}] where 
there should not be any", defaultNodePath.indicesPath); + for (final String index : indices) { + verify(mock).info( + "index folder [{}] in default.path.data [{}] must be moved to any of {}", + index, + defaultNodePath.indicesPath, + Arrays.stream(nodeEnv.nodePaths()).map(np -> np.indicesPath).collect(Collectors.toList())); + } + verifyNoMoreInteractions(mock); + } else { + Node.checkForIndexDataInDefaultPathData(settings, nodeEnv, mock); + verifyNoMoreInteractions(mock); + } + } + } + + public void testDefaultPathDataNotSet() throws IOException { + final Path zero = createTempDir().toAbsolutePath(); + final Path one = createTempDir().toAbsolutePath(); + final Settings settings = Settings.builder() + .put("path.home", "/home") + .put("path.data.0", zero) + .put("path.data.1", one) + .build(); + try (NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings))) { + final Logger mock = mock(Logger.class); + Node.checkForIndexDataInDefaultPathData(settings, nodeEnv, mock); + verifyNoMoreInteractions(mock); + } + } + private static Settings.Builder baseSettings() { final Path tempDir = createTempDir(); return Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) - .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) - .put(NetworkModule.HTTP_ENABLED.getKey(), false) - .put("transport.type", "mock-socket-network") - .put(Node.NODE_DATA_SETTING.getKey(), true); + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong())) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) + .put(NetworkModule.HTTP_ENABLED.getKey(), false) + .put("transport.type", "mock-socket-network") + .put(Node.NODE_DATA_SETTING.getKey(), true); } + }