From 414c04eb66fb7619b5280e726bc91aac52dd7fc3 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Fri, 4 Dec 2015 10:08:11 +0100
Subject: [PATCH 01/26] Restore chunk size of 512kb on recovery and remove
 configurability

This commit restores the chunk size of 512kb lost in a previous but
unreleased refactoring. At the same time it removes the configurability of:

 * `indices.recovery.file_chunk_size` - now fixed to 512kb
 * `indices.recovery.translog_ops` - removed without replacement
 * `indices.recovery.translog_size` - now fixed to 512kb
 * `indices.recovery.compress` - removed entirely; compression is now
   applied only where it makes sense. File chunks of the index are sent
   uncompressed, since Lucene already compresses stored fields, while
   translog operations are still compressed on the wire.

Relates to #15161
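A minimal sketch of the buffering idea behind the fixed chunk size (editor's
illustration with assumed names, not code from the patch): small writes
accumulate in a BufferedOutputStream sized to the chunk size, so the
underlying network sink sees at most CHUNK_SIZE bytes per call.

    import java.io.BufferedOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    public class ChunkingSketch {
        // mirrors the constant this patch introduces in RecoverySourceHandler
        private static final int CHUNK_SIZE = 512 * 1000; // 512KB

        public static void main(String[] args) throws IOException {
            // stand-in for the network sink (RecoveryOutputStream in the patch)
            OutputStream sink = new OutputStream() {
                @Override
                public void write(int b) {}
                @Override
                public void write(byte[] b, int off, int len) {
                    System.out.println("sending chunk of " + len + " bytes");
                }
            };
            // small writes accumulate; the sink sees at most CHUNK_SIZE per call
            try (OutputStream out = new BufferedOutputStream(sink, CHUNK_SIZE)) {
                byte[] ioBuffer = new byte[8192]; // typical copy-loop buffer
                for (int i = 0; i < 200; i++) {
                    out.write(ioBuffer, 0, ioBuffer.length);
                }
            } // close() flushes the final partial chunk
        }
    }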
---
 .../elasticsearch/cluster/ClusterModule.java  |  4 --
 .../cluster/metadata/MetaData.java            |  2 -
 .../indices/recovery/RecoverySettings.java    | 64 +------------------
 .../recovery/RecoverySourceHandler.java       | 26 ++++----
 .../indices/recovery/IndexRecoveryIT.java     |  2 -
 .../recovery/RecoverySettingsTests.java       | 25 --------
 .../recovery/TruncatedRecoveryIT.java         |  7 --
 .../test/ESBackcompatTestCase.java            | 17 -----
 .../test/InternalTestCluster.java             |  6 +-
 9 files changed, 16 insertions(+), 137 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
index b2e793ba0ab..2b7786fdb1d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
@@ -164,10 +164,6 @@ public class ClusterModule extends AbstractModule {
         registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
         registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME);
         registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, Validator.POSITIVE_BYTES_SIZE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, Validator.INTEGER);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, Validator.BYTES_SIZE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_COMPRESS, Validator.EMPTY);
         registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER);
         registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER);
         registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 68fa6e45d88..751f8a09ea5 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -746,8 +746,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
     /** All known byte-sized cluster settings. */
     public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
         IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
-        RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE,
-        RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE,
         RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
index 749ba4f3360..2841049eedc 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
@@ -40,10 +40,6 @@ import java.util.concurrent.TimeUnit;
  */
 public class RecoverySettings extends AbstractComponent implements Closeable {
 
-    public static final String INDICES_RECOVERY_FILE_CHUNK_SIZE = "indices.recovery.file_chunk_size";
-    public static final String INDICES_RECOVERY_TRANSLOG_OPS = "indices.recovery.translog_ops";
-    public static final String INDICES_RECOVERY_TRANSLOG_SIZE = "indices.recovery.translog_size";
-    public static final String INDICES_RECOVERY_COMPRESS = "indices.recovery.compress";
     public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
     public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
     public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";
@@ -75,12 +71,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
 
     public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes();
 
-    private volatile ByteSizeValue fileChunkSize;
-
-    private volatile boolean compress;
-    private volatile int translogOps;
-    private volatile ByteSizeValue translogSize;
-
     private volatile int concurrentStreams;
     private volatile int concurrentSmallFileStreams;
     private final ThreadPoolExecutor concurrentStreamPool;
@@ -94,16 +84,10 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
     private volatile TimeValue internalActionTimeout;
     private volatile TimeValue internalActionLongTimeout;
 
-
     @Inject
     public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
         super(settings);
 
-        this.fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
-        this.translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, 1000);
-        this.translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
-        this.compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, true);
-
         this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500));
         // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes)
         // and we want to give the master time to remove a faulty node
@@ -132,8 +116,8 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
             rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
         }
 
-        logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]",
-                maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress);
+        logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}]",
+                maxBytesPerSec, concurrentStreams);
 
         nodeSettingsService.addListener(new ApplySettings());
     }
@@ -144,26 +128,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
         ThreadPool.terminate(concurrentSmallFileStreamPool, 1, TimeUnit.SECONDS);
     }
 
-    public ByteSizeValue fileChunkSize() {
-        return fileChunkSize;
-    }
-
-    public boolean compress() {
-        return compress;
-    }
-
-    public int translogOps() {
-        return translogOps;
-    }
-
-    public ByteSizeValue translogSize() {
-        return translogSize;
-    }
-
-    public int concurrentStreams() {
-        return concurrentStreams;
-    }
-
     public ThreadPoolExecutor concurrentStreamPool() {
         return concurrentStreamPool;
     }
@@ -213,30 +177,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
             }
         }
 
-            ByteSizeValue fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.this.fileChunkSize);
-            if (!fileChunkSize.equals(RecoverySettings.this.fileChunkSize)) {
-                logger.info("updating [indices.recovery.file_chunk_size] from [{}] to [{}]", RecoverySettings.this.fileChunkSize, fileChunkSize);
-                RecoverySettings.this.fileChunkSize = fileChunkSize;
-            }
-
-            int translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, RecoverySettings.this.translogOps);
-            if (translogOps != RecoverySettings.this.translogOps) {
-                logger.info("updating [indices.recovery.translog_ops] from [{}] to [{}]", RecoverySettings.this.translogOps, translogOps);
-                RecoverySettings.this.translogOps = translogOps;
-            }
-
-            ByteSizeValue translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.this.translogSize);
-            if (!translogSize.equals(RecoverySettings.this.translogSize)) {
-                logger.info("updating [indices.recovery.translog_size] from [{}] to [{}]", RecoverySettings.this.translogSize, translogSize);
-                RecoverySettings.this.translogSize = translogSize;
-            }
-
-            boolean compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, RecoverySettings.this.compress);
-            if (compress != RecoverySettings.this.compress) {
-                logger.info("updating [indices.recovery.compress] from [{}] to [{}]", RecoverySettings.this.compress, compress);
-                RecoverySettings.this.compress = compress;
-            }
-
             int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams);
             if (concurrentStreams != RecoverySettings.this.concurrentStreams) {
                 logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams);
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index 73193161d12..138e49e5dae 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -49,6 +49,7 @@ import org.elasticsearch.transport.RemoteTransportException;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.BufferedOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.ArrayList;
@@ -68,6 +69,7 @@ import java.util.stream.StreamSupport;
  */
 public class RecoverySourceHandler {
 
+    private static final int CHUNK_SIZE = 512 * 1000; // 512KB
     protected final ESLogger logger;
     // Shard that is going to be recovered (the "source")
     private final IndexShard shard;
@@ -79,7 +81,6 @@ public class RecoverySourceHandler {
     private final TransportService transportService;
 
     protected final RecoveryResponse response;
-    private final TransportRequestOptions requestOptions;
 
     private final CancellableThreads cancellableThreads = new CancellableThreads() {
         @Override
@@ -108,12 +109,6 @@ public class RecoverySourceHandler {
         this.shardId = this.request.shardId().id();
         this.response = new RecoveryResponse();
 
-        this.requestOptions = TransportRequestOptions.builder()
-                .withCompress(recoverySettings.compress())
-                .withType(TransportRequestOptions.Type.RECOVERY)
-                .withTimeout(recoverySettings.internalActionTimeout())
-                .build();
-
     }
 
     /**
@@ -218,7 +213,7 @@ public class RecoverySourceHandler {
                     totalSize += md.length();
                 }
                 List<StoreFileMetaData> phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size());
-                phase1Files.addAll(diff.different);
+                phase1Files.addAll(diff.different);
                 phase1Files.addAll(diff.missing);
                 for (StoreFileMetaData md : phase1Files) {
                     if (request.metadataSnapshot().asMap().containsKey(md.name())) {
@@ -249,7 +244,7 @@ public class RecoverySourceHandler {
             });
             // How many bytes we've copied since we last called RateLimiter.pause
             final AtomicLong bytesSinceLastPause = new AtomicLong();
-            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new RecoveryOutputStream(md, bytesSinceLastPause, translogView);
+            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), CHUNK_SIZE);
             sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
             cancellableThreads.execute(() -> {
                 // Send the CLEAN_FILES request, which takes all of the files that
@@ -432,7 +427,7 @@ public class RecoverySourceHandler {
         }
 
         final TransportRequestOptions recoveryOptions = TransportRequestOptions.builder()
-                .withCompress(recoverySettings.compress())
+                .withCompress(true)
                 .withType(TransportRequestOptions.Type.RECOVERY)
                 .withTimeout(recoverySettings.internalActionLongTimeout())
                 .build();
@@ -451,9 +446,9 @@ public class RecoverySourceHandler {
             size += operation.estimateSize();
             totalOperations++;
 
-            // Check if this request is past the size or bytes threshold, and
+            // Check if this request is past the byte size threshold, and
             // if so, send it off
-            if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().bytes()) {
+            if (size >= CHUNK_SIZE) {
 
                 // don't throttle translog, since we lock for phase3 indexing,
                 // so we need to move it as fast as possible. Note, since we
@@ -548,6 +543,11 @@ public class RecoverySourceHandler {
         }
 
         private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException {
+            final TransportRequestOptions chunkSendOptions = TransportRequestOptions.builder()
+                    .withCompress(false)
+                    .withType(TransportRequestOptions.Type.RECOVERY)
+                    .withTimeout(recoverySettings.internalActionTimeout())
+                    .build();
             cancellableThreads.execute(() -> {
                 // Pause using the rate limiter, if desired, to throttle the recovery
                 final long throttleTimeInNanos;
@@ -577,7 +577,7 @@ public class RecoverySourceHandler {
                  * see how many translog ops we accumulate while copying files across the network. A future optimization
                  * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up.
                  */
-                throttleTimeInNanos), requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
+                throttleTimeInNanos), chunkSendOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
             });
             if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                 throw new IndexShardClosedException(request.shardId());
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index 5ece413f796..52e8c6a81ba 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -143,7 +143,6 @@ public class IndexRecoveryIT extends ESIntegTestCase {
                 .setTransientSettings(Settings.builder()
                         // one chunk per sec..
                         .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES)
-                        .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, chunkSize, ByteSizeUnit.BYTES)
                 )
                 .get().isAcknowledged());
     }
@@ -152,7 +151,6 @@ public class IndexRecoveryIT extends ESIntegTestCase {
         assertTrue(client().admin().cluster().prepareUpdateSettings()
                 .setTransientSettings(Settings.builder()
                         .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb")
-                        .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, "512kb")
                 )
                 .get().isAcknowledged());
     }
diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java
index b0288042ecc..4bcbb8c8ee7 100644
--- a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java
+++ b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java
@@ -32,24 +32,6 @@ public class RecoverySettingsTests extends ESSingleNodeTestCase {
     }
 
     public void testAllSettingsAreDynamicallyUpdatable() {
-        innerTestSettings(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() {
-            @Override
-            public void validate(RecoverySettings recoverySettings, int expectedValue) {
-                assertEquals(expectedValue, recoverySettings.fileChunkSize().bytesAsInt());
-            }
-        });
-        innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, randomIntBetween(1, 200), new Validator() {
-            @Override
-            public void validate(RecoverySettings recoverySettings, int expectedValue) {
-                assertEquals(expectedValue, recoverySettings.translogOps());
-            }
-        });
-        innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() {
-            @Override
-            public void validate(RecoverySettings recoverySettings, int expectedValue) {
-                assertEquals(expectedValue, recoverySettings.translogSize().bytesAsInt());
-            }
-        });
         innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, randomIntBetween(1, 200), new Validator() {
             @Override
             public void validate(RecoverySettings recoverySettings, int expectedValue) {
@@ -98,13 +80,6 @@ public class RecoverySettingsTests extends ESSingleNodeTestCase {
                 assertEquals(expectedValue, recoverySettings.internalActionLongTimeout().millis());
             }
         });
-
-        innerTestSettings(RecoverySettings.INDICES_RECOVERY_COMPRESS, false, new Validator() {
-            @Override
-            public void validate(RecoverySettings recoverySettings, boolean expectedValue) {
-                assertEquals(expectedValue, recoverySettings.compress());
-            }
-        });
     }
 
     private static class Validator {
diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
index d94f72ea80f..3198c04121f 100644
--- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
@@ -58,13 +58,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 @ESIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST)
 @SuppressCodecs("*") // test relies on exact file extensions
 public class TruncatedRecoveryIT extends ESIntegTestCase {
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        Settings.Builder builder = Settings.builder()
-                .put(super.nodeSettings(nodeOrdinal))
-                .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES));
-        return builder.build();
-    }
 
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
index 47e163a6291..3e5c903a1ba 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
@@ -131,16 +131,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
         return file;
     }
 
-    @Override
-    protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builder builder) {
-        if (globalCompatibilityVersion().before(Version.V_1_3_2)) {
-            // if we test against nodes before 1.3.2 we disable all the compression due to a known bug
-            // see #7210
-            builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false);
-        }
-        return builder;
-    }
-
     /**
      * Returns the tests compatibility version.
     */
@@ -250,13 +240,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
         Settings.Builder builder = Settings.builder().put(requiredSettings());
         builder.put(TransportModule.TRANSPORT_TYPE_KEY, "netty"); // run same transport / disco as external
         builder.put("node.mode", "network");
-
-        if (compatibilityVersion().before(Version.V_1_3_2)) {
-            // if we test against nodes before 1.3.2 we disable all the compression due to a known bug
-            // see #7210
-            builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, false)
-                    .put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false);
-        }
         return builder.build();
     }
diff --git a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index b86c8689699..7ae3226b66a 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -447,10 +447,6 @@ public final class InternalTestCluster extends TestCluster {
             }
         }
 
-        if (random.nextBoolean()) {
-            builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, random.nextBoolean());
-        }
-
         if (random.nextBoolean()) {
             builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms");
         }
@@ -1554,7 +1550,7 @@ public final class InternalTestCluster extends TestCluster {
         for (int i = 0; i < numNodes; i++) {
             asyncs.add(startNodeAsync(settings, version));
         }
-        
+
         return () -> {
             List<String> ids = new ArrayList<>();
             for (Async<String> async : asyncs) {

From 37b60bd76bf86375684560d492fd02899922b196 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Mon, 7 Dec 2015 10:19:04 +0100
Subject: [PATCH 02/26] more accurate chunk size
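The arithmetic behind "more accurate": 512 * 1000 is 512,000 bytes, while a
512kb ByteSizeValue is KiB-based, i.e. 512 * 1024 = 524,288 bytes. A
hypothetical check (editor's illustration, not part of the patch):

    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    public class ChunkSizeCheck {
        public static void main(String[] args) {
            int oldChunk = 512 * 1000;                                           // 512,000 bytes
            int newChunk = new ByteSizeValue(512, ByteSizeUnit.KB).bytesAsInt(); // 524,288 bytes
            System.out.println(newChunk - oldChunk); // 12288: the old constant fell short of a true 512kb
        }
    }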
---
 .../elasticsearch/indices/recovery/RecoverySourceHandler.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index 138e49e5dae..d9f697ec6a1 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -36,7 +36,9 @@ import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
+import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.CancellableThreads;
 import org.elasticsearch.common.util.CancellableThreads.Interruptable;
 import org.elasticsearch.index.engine.RecoveryEngineException;
@@ -69,7 +71,7 @@ import java.util.stream.StreamSupport;
  */
 public class RecoverySourceHandler {
 
-    private static final int CHUNK_SIZE = 512 * 1000; // 512KB
+    private static final int CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB).bytesAsInt();
 
     protected final ESLogger logger;
     // Shard that is going to be recovered (the "source")
     private final IndexShard shard;

From 8b8de5cb4b6398e3b9655a53fcc0b0b9010fc37d Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Tue, 8 Dec 2015 11:54:37 +0100
Subject: [PATCH 03/26] Don't send chunks after a previous send call failed.
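A compact sketch of the pattern this patch introduces (assumed names, not the
patch code): once a send fails, the stream goes quiet, so that a wrapper's
flush-on-close cannot raise a second, less useful exception that buries the
original one.

    import java.io.IOException;
    import java.io.OutputStream;

    abstract class FailureLatchingOutputStream extends OutputStream {
        private boolean failed = false;

        protected abstract void sendChunk(byte[] b, int off, int len) throws IOException;

        @Override
        public void write(int b) throws IOException {
            write(new byte[]{(byte) b}, 0, 1);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            if (failed) {
                return; // a previous send failed; drop follow-up writes
            }
            try {
                sendChunk(b, off, len);
            } catch (IOException | RuntimeException e) {
                failed = true; // latch, so the original exception stays visible
                throw e;
            }
        }
    }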
---
 .../recovery/RecoverySourceHandler.java | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index d9f697ec6a1..b78f8523f1a 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -525,6 +525,7 @@ public class RecoverySourceHandler {
         private final AtomicLong bytesSinceLastPause;
         private final Translog.View translogView;
         private long position = 0;
+        private boolean failed = false;
 
         RecoveryOutputStream(StoreFileMetaData md, AtomicLong bytesSinceLastPause, Translog.View translogView) {
             this.md = md;
@@ -539,9 +540,21 @@ public class RecoverySourceHandler {
 
         @Override
         public final void write(byte[] b, int offset, int length) throws IOException {
-            sendNextChunk(position, new BytesArray(b, offset, length), md.length() == position + length);
-            position += length;
-            assert md.length() >= position : "length: " + md.length() + " but positions was: " + position;
+            if (failed == false) {
+                /* since we are an outputstream a wrapper might get flushed on close after we threw an exception.
+                 * that might cause another exception from the other side of the recovery since we are in a bad state
+                 * due to a corrupted file stream etc. the biggest issue is that we will turn into a loop of exceptions
+                 * and we will always suppress the original one which might cause the recovery to retry over and over again.
+                 * To prevent this we try to not send chunks again after we failed once.*/
+                try {
+                    sendNextChunk(position, new BytesArray(b, offset, length), md.length() == position + length);
+                    position += length;
+                    assert md.length() >= position : "length: " + md.length() + " but positions was: " + position;
+                } catch (Exception e) {
+                    failed = true;
+                    throw e;
+                }
+            }
         }
 
         private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException {

From 449f9e725896260ff62310f8c046b2d62df1ee97 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Tue, 8 Dec 2015 12:41:19 +0100
Subject: [PATCH 04/26] don't allow writing single bytes on recovery
 outputstream
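Why the single-byte path can simply throw (editor's sketch with assumed names):
the recovery stream is always wrapped in a BufferedOutputStream, which hands
buffered bytes to the underlying stream via write(byte[], int, int), so
write(int) is dead code on the intended path and throwing there turns any
accidental unbuffered use into an immediate, obvious failure.

    import java.io.BufferedOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    class NoSingleByteStream extends OutputStream {
        @Override
        public void write(int b) {
            throw new UnsupportedOperationException("single-byte writes never go over the wire");
        }

        @Override
        public void write(byte[] b, int off, int len) {
            // ship `len` bytes as one chunk...
        }

        public static void main(String[] args) throws IOException {
            try (OutputStream out = new BufferedOutputStream(new NoSingleByteStream(), 8192)) {
                out.write(42);            // buffered: does NOT reach write(int) above
                out.write(new byte[100]); // flushed on close as write(byte[], 0, 101)
            }
        }
    }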
---
 .../indices/recovery/RecoverySourceHandler.java | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index b78f8523f1a..0e1efaf812c 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -535,7 +535,7 @@ public class RecoverySourceHandler {
 
         @Override
         public final void write(int b) throws IOException {
-            write(new byte[]{(byte) b}, 0, 1);
+            throw new UnsupportedOperationException("we can't send single bytes over the wire");
         }
 
         @Override
@@ -546,13 +546,16 @@ public class RecoverySourceHandler {
                  * due to a corrupted file stream etc. the biggest issue is that we will turn into a loop of exceptions
                  * and we will always suppress the original one which might cause the recovery to retry over and over again.
                  * To prevent this we try to not send chunks again after we failed once.*/
+                boolean success = false;
                 try {
                     sendNextChunk(position, new BytesArray(b, offset, length), md.length() == position + length);
                     position += length;
                     assert md.length() >= position : "length: " + md.length() + " but positions was: " + position;
-                } catch (Exception e) {
-                    failed = true;
-                    throw e;
+                    success = true;
+                } finally {
+                    if (success == false) {
+                        failed = true;
+                    }
                 }
             }
         }

From 7d6663e6e5f33ff91731e87e77611da7026c7833 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Tue, 8 Dec 2015 14:32:48 +0100
Subject: [PATCH 05/26] use AtomicBoolean instead of a simple boolean
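A small sketch of why the latch becomes an AtomicBoolean (assumed names, not
the patch code): the write path and its failure handling are not guaranteed to
run on the same thread, and an AtomicBoolean gives the flag safe cross-thread
visibility that a plain, non-volatile boolean does not.

    import java.util.concurrent.atomic.AtomicBoolean;

    class FailureLatch {
        private final AtomicBoolean failed = new AtomicBoolean(false);

        boolean shouldSend() {
            // safe to read from any thread
            return failed.get() == false;
        }

        void markFailed() {
            // idempotent: only the first failure flips the latch
            failed.compareAndSet(false, true);
        }
    }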
---
 .../indices/recovery/RecoverySourceHandler.java         | 7 ++++---
 .../elasticsearch/indices/recovery/IndexRecoveryIT.java | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index 0e1efaf812c..ffd2c8edc6b 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -60,6 +60,7 @@ import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
 import java.util.stream.StreamSupport;
@@ -525,7 +526,7 @@ public class RecoverySourceHandler {
         private final AtomicLong bytesSinceLastPause;
         private final Translog.View translogView;
         private long position = 0;
-        private boolean failed = false;
+        private final AtomicBoolean failed = new AtomicBoolean(false);
 
         RecoveryOutputStream(StoreFileMetaData md, AtomicLong bytesSinceLastPause, Translog.View translogView) {
             this.md = md;
@@ -540,7 +541,7 @@ public class RecoverySourceHandler {
 
         @Override
         public final void write(byte[] b, int offset, int length) throws IOException {
-            if (failed == false) {
+            if (failed.get() == false) {
                 /* since we are an outputstream a wrapper might get flushed on close after we threw an exception.
                  * that might cause another exception from the other side of the recovery since we are in a bad state
                  * due to a corrupted file stream etc. the biggest issue is that we will turn into a loop of exceptions
@@ -554,7 +555,7 @@ public class RecoverySourceHandler {
                     success = true;
                 } finally {
                     if (success == false) {
-                        failed = true;
+                        failed.compareAndSet(false, true);
                     }
                 }
             }
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
index 52e8c6a81ba..ae63c8d4b92 100644
--- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java
@@ -138,7 +138,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
     }
 
     private void slowDownRecovery(ByteSizeValue shardSize) {
-        long chunkSize = shardSize.bytes() / 10;
+        long chunkSize = shardSize.bytes() / 5;
         assertTrue(client().admin().cluster().prepareUpdateSettings()
                 .setTransientSettings(Settings.builder()
                         // one chunk per sec..
                         .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES)
                 )
                 .get().isAcknowledged());
     }

From ab44366d939d277043a40bad08caf34709f37502 Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Wed, 9 Dec 2015 19:12:15 +0100
Subject: [PATCH 06/26] Remove back compat for the `_source` compression
 options.

These options have been deprecated before 2.0 so they don't need to be
supported by 3.0.

---
 .../mapper/internal/SourceFieldMapper.java    | 85 +---------
 .../source/CompressSourceMappingTests.java    | 97 -------------------
 .../source/DefaultSourceMappingTests.java     | 26 -----
 3 files changed, 5 insertions(+), 203 deletions(-)
 delete mode 100644 core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java

diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
index da3b8dbc5ab..a844ab223aa 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
@@ -24,7 +24,6 @@ import org.apache.lucene.document.StoredField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -35,7 +34,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -93,10 +91,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
 
         private boolean enabled = Defaults.ENABLED;
 
-        private long compressThreshold = Defaults.COMPRESS_THRESHOLD;
-
-        private Boolean compress = null;
-
         private String format = Defaults.FORMAT;
 
         private String[] includes = null;
@@ -111,16 +105,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
             return this;
         }
 
-        public Builder compress(boolean compress) {
-            this.compress = compress;
-            return this;
-        }
-
-        public Builder compressThreshold(long compressThreshold) {
-            this.compressThreshold = compressThreshold;
-            return this;
-        }
-
         public Builder format(String format) {
             this.format = format;
             return this;
@@ -138,7 +122,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
 
         @Override
         public SourceFieldMapper build(BuilderContext context) {
-            return new SourceFieldMapper(enabled, format, compress, compressThreshold, includes, excludes, context.indexSettings());
+            return new SourceFieldMapper(enabled, format, includes, excludes, context.indexSettings());
         }
     }
 
@@ -154,22 +138,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
                 if (fieldName.equals("enabled")) {
                     builder.enabled(nodeBooleanValue(fieldNode));
                     iterator.remove();
-                } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                    if (fieldNode != null) {
-                        builder.compress(nodeBooleanValue(fieldNode));
-                    }
-                    iterator.remove();
-                } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                    if (fieldNode != null) {
-                        if (fieldNode instanceof Number) {
-                            builder.compressThreshold(((Number) fieldNode).longValue());
-                            builder.compress(true);
-                        } else {
-                            builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString(), "compress_threshold").bytes());
-
builder.compress(true); - } - } - iterator.remove(); } else if ("format".equals(fieldName)) { builder.format(nodeStringValue(fieldNode, null)); iterator.remove(); @@ -242,9 +210,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { /** indicates whether the source will always exist and be complete, for use by features like the update API */ private final boolean complete; - private Boolean compress; - private long compressThreshold; - private final String[] includes; private final String[] excludes; @@ -253,15 +218,13 @@ public class SourceFieldMapper extends MetadataFieldMapper { private XContentType formatContentType; private SourceFieldMapper(Settings indexSettings) { - this(Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null, indexSettings); + this(Defaults.ENABLED, Defaults.FORMAT, null, null, indexSettings); } - private SourceFieldMapper(boolean enabled, String format, Boolean compress, long compressThreshold, + private SourceFieldMapper(boolean enabled, String format, String[] includes, String[] excludes, Settings indexSettings) { super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored. this.enabled = enabled; - this.compress = compress; - this.compressThreshold = compressThreshold; this.includes = includes; this.excludes = excludes; this.format = format; @@ -321,35 +284,14 @@ public class SourceFieldMapper extends MetadataFieldMapper { Tuple> mapTuple = XContentHelper.convertToMap(source, true); Map filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes); BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = bStream; - if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) { - streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - } XContentType contentType = formatContentType; if (contentType == null) { contentType = mapTuple.v1(); } - XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource); + XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); - } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) { - if (compressThreshold == -1 || source.length() > compressThreshold) { - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentType contentType = XContentFactory.xContentType(source); - if (formatContentType != null && formatContentType != contentType) { - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream)); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - } else { - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - source.writeTo(streamOutput); - streamOutput.close(); - } - source = bStream.bytes(); - // update the data in the context, so it can be compressed and stored compressed outside... 
- context.source(source); - } } else if (formatContentType != null) { // see if we need to convert the content type Compressor compressor = CompressorFactory.compressor(source); @@ -403,7 +345,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { boolean includeDefaults = params.paramAsBoolean("include_defaults", false); // all are defaults, no need to write it at all - if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) { + if (!includeDefaults && enabled == Defaults.ENABLED && includes == null && excludes == null) { return builder; } builder.startObject(contentType()); @@ -413,16 +355,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) { builder.field("format", format); } - if (compress != null) { - builder.field("compress", compress); - } else if (includeDefaults) { - builder.field("compress", false); - } - if (compressThreshold != -1) { - builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString()); - } else if (includeDefaults) { - builder.field("compress_threshold", -1); - } if (includes != null) { builder.field("includes", includes); @@ -453,13 +385,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { mergeResult.addConflict("Cannot update excludes setting for [_source]"); } - } else { - if (sourceMergeWith.compress != null) { - this.compress = sourceMergeWith.compress; - } - if (sourceMergeWith.compressThreshold != -1) { - this.compressThreshold = sourceMergeWith.compressThreshold; - } } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java deleted file mode 100644 index 7c1875be550..00000000000 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.mapper.source; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import static org.hamcrest.Matchers.equalTo; - -/** - * - */ -public class CompressSourceMappingTests extends ESSingleNodeTestCase { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - - public void testCompressDisabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", false).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - } - - public void testCompressEnabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", true).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } - - public void testCompressThreshold() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress_threshold", "200b").endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - - doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .endObject().bytes()); - - bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } -} diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
index 4ec0ff5211e..bbc1847dc08 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
@@ -84,32 +84,6 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase {
         assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
     }
 
-    public void testJsonFormatCompressedBackcompat() throws Exception {
-        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
-                .startObject("_source").field("format", "json").field("compress", true).endObject()
-                .endObject().endObject().string();
-
-        Settings backcompatSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
-        DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser();
-        DocumentMapper documentMapper = parser.parse(mapping);
-        ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
-                .field("field", "value")
-                .endObject().bytes());
-
-        assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
-        byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
-        assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
-
-        documentMapper = parser.parse(mapping);
-        doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject()
-                .field("field", "value")
-                .endObject().bytes());
-
-        assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
-        uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
-        assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
-    }
-
     public void testIncludes() throws Exception {
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                 .startObject("_source").field("includes", new String[]{"path1*"}).endObject()

From 437488ae64f863daeda4ee5c9b1a53123eceb9be Mon Sep 17 00:00:00 2001
From: Jim Ferenczi
Date: Thu, 10 Dec 2015 12:55:05 +0100
Subject: [PATCH 07/26] Remove the MissingQueryBuilder which was deprecated in
 2.2.0.

As a replacement use ExistsQueryBuilder inside a mustNot() clause.
So instead of using `new MissingQueryBuilder(name)` now use:
`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`.
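The migration in a runnable form, following the pattern the diff itself
applies in IndexAliasesIT below (editor's example; the field name is made up):

    import org.elasticsearch.index.query.BoolQueryBuilder;
    import org.elasticsearch.index.query.QueryBuilders;

    public class MissingToExistsMigration {
        public static void main(String[] args) {
            // before (removed): QueryBuilders.missingQuery("user")
            // after: match documents that have no value for "user"
            BoolQueryBuilder missing = QueryBuilders.boolQuery()
                    .mustNot(QueryBuilders.existsQuery("user"));
            System.out.println(missing);
        }
    }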
Closes #14112 --- .../classic/MapperQueryParser.java | 1 - .../classic/MissingFieldQueryExtension.java | 42 ---- .../index/query/MissingQueryBuilder.java | 234 ------------------ .../index/query/MissingQueryParser.java | 88 ------- .../index/query/QueryBuilders.java | 21 -- .../elasticsearch/indices/IndicesModule.java | 1 - .../elasticsearch/aliases/IndexAliasesIT.java | 2 +- .../BasicBackwardsCompatibilityIT.java | 17 -- .../OldIndexBackwardsCompatibilityIT.java | 7 - .../index/query/MissingQueryBuilderTests.java | 107 -------- .../query/QueryDSLDocumentationTests.java | 5 - .../template/SimpleIndexTemplateIT.java | 2 +- .../search/highlight/HighlighterSearchIT.java | 8 +- .../{ExistsMissingIT.java => ExistsIT.java} | 122 +++------ .../search/query/SearchQueryIT.java | 27 -- .../java-api/query-dsl/missing-query.asciidoc | 14 -- docs/reference/migration/migrate_3_0.asciidoc | 7 +- .../reference/query-dsl/exists-query.asciidoc | 20 +- .../query-dsl/missing-query.asciidoc | 132 ---------- 19 files changed, 63 insertions(+), 794 deletions(-) delete mode 100644 core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java delete mode 100644 core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java delete mode 100644 core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java delete mode 100644 core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java rename core/src/test/java/org/elasticsearch/search/query/{ExistsMissingIT.java => ExistsIT.java} (54%) delete mode 100644 docs/java-api/query-dsl/missing-query.asciidoc delete mode 100644 docs/reference/query-dsl/missing-query.asciidoc diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index fce58d2f88f..9f2b1b66221 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser { static { Map fieldQueryExtensions = new HashMap<>(); fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension()); - fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension()); FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions); } diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java deleted file mode 100644 index f9fc8c9d5dc..00000000000 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.lucene.queryparser.classic; - -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.index.query.MissingQueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; - -/** - * - */ -public class MissingFieldQueryExtension implements FieldQueryExtension { - - public static final String NAME = "_missing_"; - - @Override - public Query query(QueryShardContext context, String queryText) { - Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE); - if (query != null) { - return new ConstantScoreQuery(query); - } - return null; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java deleted file mode 100644 index 70d0bb9350f..00000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermRangeQuery; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -import java.io.IOException; -import java.util.Collection; -import java.util.Objects; - -/** - * Constructs a filter that have only null values or no value in the original field. - */ -public class MissingQueryBuilder extends AbstractQueryBuilder { - - public static final String NAME = "missing"; - - public static final boolean DEFAULT_NULL_VALUE = false; - - public static final boolean DEFAULT_EXISTENCE_VALUE = true; - - private final String fieldPattern; - - private final boolean nullValue; - - private final boolean existence; - - static final MissingQueryBuilder PROTOTYPE = new MissingQueryBuilder("field", DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - - /** - * Constructs a filter that returns documents with only null values or no value in the original field. 
- * @param fieldPattern the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to false. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to true. - * @throws IllegalArgumentException when both existence and nullValue are set to false - */ - public MissingQueryBuilder(String fieldPattern, boolean nullValue, boolean existence) { - if (Strings.isEmpty(fieldPattern)) { - throw new IllegalArgumentException("missing query must be provided with a [field]"); - } - if (nullValue == false && existence == false) { - throw new IllegalArgumentException("missing query must have either 'existence', or 'null_value', or both set to true"); - } - this.fieldPattern = fieldPattern; - this.nullValue = nullValue; - this.existence = existence; - } - - public MissingQueryBuilder(String fieldPattern) { - this(fieldPattern, DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - } - - public String fieldPattern() { - return this.fieldPattern; - } - - /** - * Returns true if the missing filter will include documents where the field contains a null value, otherwise - * these documents will not be included. - */ - public boolean nullValue() { - return this.nullValue; - } - - /** - * Returns true if the missing filter will include documents where the field has no values, otherwise - * these documents will not be included. - */ - public boolean existence() { - return this.existence; - } - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.field(MissingQueryParser.FIELD_FIELD.getPreferredName(), fieldPattern); - builder.field(MissingQueryParser.NULL_VALUE_FIELD.getPreferredName(), nullValue); - builder.field(MissingQueryParser.EXISTENCE_FIELD.getPreferredName(), existence); - printBoostAndQueryName(builder); - builder.endObject(); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - return newFilter(context, fieldPattern, existence, nullValue); - } - - public static Query newFilter(QueryShardContext context, String fieldPattern, boolean existence, boolean nullValue) { - if (!existence && !nullValue) { - throw new QueryShardException(context, "missing must have either existence, or null_value, or both set to true"); - } - - final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME); - if (fieldNamesFieldType == null) { - // can only happen when no types exist, so no docs exist either - return Queries.newMatchNoDocsQuery(); - } - - ObjectMapper objectMapper = context.getObjectMapper(fieldPattern); - if (objectMapper != null) { - // automatic make the object mapper pattern - fieldPattern = fieldPattern + ".*"; - } - - Collection fields = context.simpleMatchToIndexNames(fieldPattern); - if (fields.isEmpty()) { - if (existence) { - // if we ask for existence of fields, and we found none, then we should match on all - return Queries.newMatchAllQuery(); - } - return null; - } - - Query existenceFilter = null; - Query nullFilter = null; - - if (existence) { - BooleanQuery.Builder boolFilter = new BooleanQuery.Builder(); - for (String field : fields) { - MappedFieldType fieldType = context.fieldMapper(field); - Query filter = null; - if 
(fieldNamesFieldType.isEnabled()) { - final String f; - if (fieldType != null) { - f = fieldType.names().indexName(); - } else { - f = field; - } - filter = fieldNamesFieldType.termQuery(f, context); - } - // if _field_names are not indexed, we need to go the slow way - if (filter == null && fieldType != null) { - filter = fieldType.rangeQuery(null, null, true, true); - } - if (filter == null) { - filter = new TermRangeQuery(field, null, null, true, true); - } - boolFilter.add(filter, BooleanClause.Occur.SHOULD); - } - - existenceFilter = boolFilter.build(); - existenceFilter = Queries.not(existenceFilter);; - } - - if (nullValue) { - for (String field : fields) { - MappedFieldType fieldType = context.fieldMapper(field); - if (fieldType != null) { - nullFilter = fieldType.nullValueQuery(); - } - } - } - - Query filter; - if (nullFilter != null) { - if (existenceFilter != null) { - filter = new BooleanQuery.Builder() - .add(existenceFilter, BooleanClause.Occur.SHOULD) - .add(nullFilter, BooleanClause.Occur.SHOULD) - .build(); - } else { - filter = nullFilter; - } - } else { - filter = existenceFilter; - } - - if (filter == null) { - return null; - } - - return new ConstantScoreQuery(filter); - } - - @Override - protected MissingQueryBuilder doReadFrom(StreamInput in) throws IOException { - return new MissingQueryBuilder(in.readString(), in.readBoolean(), in.readBoolean()); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(fieldPattern); - out.writeBoolean(nullValue); - out.writeBoolean(existence); - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldPattern, nullValue, existence); - } - - @Override - protected boolean doEquals(MissingQueryBuilder other) { - return Objects.equals(fieldPattern, other.fieldPattern) && - Objects.equals(nullValue, other.nullValue) && - Objects.equals(existence, other.existence); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java deleted file mode 100644 index 467971b65ca..00000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -/** - * Parser for missing query - */ -public class MissingQueryParser implements QueryParser { - - public static final ParseField FIELD_FIELD = new ParseField("field"); - public static final ParseField NULL_VALUE_FIELD = new ParseField("null_value"); - public static final ParseField EXISTENCE_FIELD = new ParseField("existence"); - - @Override - public String[] names() { - return new String[]{MissingQueryBuilder.NAME}; - } - - @Override - public MissingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - - String fieldPattern = null; - String queryName = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - boolean nullValue = MissingQueryBuilder.DEFAULT_NULL_VALUE; - boolean existence = MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE; - - XContentParser.Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(currentFieldName, FIELD_FIELD)) { - fieldPattern = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NULL_VALUE_FIELD)) { - nullValue = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, EXISTENCE_FIELD)) { - existence = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); - } - } - - if (fieldPattern == null) { - throw new ParsingException(parser.getTokenLocation(), "missing must be provided with a [field]"); - } - return new MissingQueryBuilder(fieldPattern, nullValue, existence) - .boost(boost) - .queryName(queryName); - } - - @Override - public MissingQueryBuilder getBuilderPrototype() { - return MissingQueryBuilder.PROTOTYPE; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 45f97d68c3f..3fb09679204 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -810,27 +810,6 @@ public abstract class QueryBuilders { return new ExistsQueryBuilder(name); } - /** - * A filter to filter only documents where a field does not exists in them. - * @param name the field to query - */ - public static MissingQueryBuilder missingQuery(String name) { - return missingQuery(name, MissingQueryBuilder.DEFAULT_NULL_VALUE, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE); - } - - /** - * A filter to filter only documents where a field does not exists in them. 
- * @param name the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to false. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to true. - * @throws IllegalArgumentException when both existence and nullValue are set to false - */ - public static MissingQueryBuilder missingQuery(String name, boolean nullValue, boolean existence) { - return new MissingQueryBuilder(name, nullValue, existence); - } - private QueryBuilders() { } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index cdd7f050331..6878002c015 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -120,7 +120,6 @@ public class IndicesModule extends AbstractModule { registerQueryParser(GeohashCellQuery.Parser.class); registerQueryParser(GeoPolygonQueryParser.class); registerQueryParser(ExistsQueryParser.class); - registerQueryParser(MissingQueryParser.class); registerQueryParser(MatchNoneQueryParser.class); if (ShapesAvailability.JTS_AVAILABLE) { diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 81b09c8c8e2..76759394d31 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -865,7 +865,7 @@ public class IndexAliasesIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .addMapping("type", "field", "type=string") .addAlias(new Alias("alias1")) - .addAlias(new Alias("alias2").filter(QueryBuilders.missingQuery("field"))) + .addAlias(new Alias("alias2").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("field")))) .addAlias(new Alias("alias3").indexRouting("index").searchRouting("search"))); checkAliases(); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java index fbc6cc58067..95735b8648f 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java @@ -71,7 +71,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -440,25 +439,9 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase { countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get(); assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - 
countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(missingQuery("field1"))).get(); - assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get(); assertHitCount(countResponse, 2l); - // wildcard check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("x*")).get(); - assertHitCount(countResponse, 2l); - - // object check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("obj1")).get(); - assertHitCount(countResponse, 2l); if (!backwardsCluster().upgradeOneNode()) { break; } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 137c6c5b2c2..eb0d17d22c9 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -341,13 +341,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); assertEquals(numDocs, searchRsp.getHits().getTotalHits()); - - logger.info("--> testing missing filter"); - // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache - searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.missingQuery("long_sort")); - searchRsp = searchReq.get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertEquals(0, searchRsp.getHits().getTotalHits()); } void assertBasicAggregationWorks(String indexName) { diff --git a/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java deleted file mode 100644 index 253509b2fc6..00000000000 --- a/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.search.Query; - -import java.io.IOException; - -import static org.hamcrest.Matchers.containsString; - -public class MissingQueryBuilderTests extends AbstractQueryTestCase { - - @Override - protected MissingQueryBuilder doCreateTestQueryBuilder() { - String fieldName = randomBoolean() ? 
randomFrom(MAPPED_FIELD_NAMES) : randomAsciiOfLengthBetween(1, 10); - Boolean existence = randomBoolean(); - Boolean nullValue = randomBoolean(); - if (existence == false && nullValue == false) { - if (randomBoolean()) { - existence = true; - } else { - nullValue = true; - } - } - return new MissingQueryBuilder(fieldName, nullValue, existence); - } - - @Override - protected void doAssertLuceneQuery(MissingQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { - // too many mapping dependent cases to test, we don't want to end up - // duplication the toQuery method - } - - public void testIllegalArguments() { - try { - if (randomBoolean()) { - new MissingQueryBuilder("", true, true); - } else { - new MissingQueryBuilder(null, true, true); - } - fail("must not be null or empty"); - } catch (IllegalArgumentException e) { - // expected - } - - try { - new MissingQueryBuilder("fieldname", false, false); - fail("existence and nullValue cannot both be false"); - } catch (IllegalArgumentException e) { - // expected - } - - try { - new MissingQueryBuilder("fieldname", MissingQueryBuilder.DEFAULT_NULL_VALUE, false); - fail("existence and nullValue cannot both be false"); - } catch (IllegalArgumentException e) { - // expected - } - } - - public void testBothNullValueAndExistenceFalse() throws IOException { - QueryShardContext context = createShardContext(); - context.setAllowUnmappedFields(true); - try { - MissingQueryBuilder.newFilter(context, "field", false, false); - fail("Expected QueryShardException"); - } catch (QueryShardException e) { - assertThat(e.getMessage(), containsString("missing must have either existence, or null_value")); - } - } - - public void testFromJson() throws IOException { - String json = - "{\n" + - " \"missing\" : {\n" + - " \"field\" : \"user\",\n" + - " \"null_value\" : false,\n" + - " \"existence\" : true,\n" + - " \"boost\" : 1.0\n" + - " }\n" + - "}"; - - MissingQueryBuilder parsed = (MissingQueryBuilder) parseQuery(json); - checkGeneratedJson(json, parsed); - - assertEquals(json, false, parsed.nullValue()); - assertEquals(json, true, parsed.existence()); - assertEquals(json, "user", parsed.fieldPattern()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java index 7ba1d11815a..028987448f0 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.index.query.QueryBuilders.indicesQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; @@ -240,10 +239,6 @@ public class QueryDSLDocumentationTests extends ESTestCase { matchQuery("name", "kimchy elasticsearch"); } - public void testMissing() { - missingQuery("user", true, true); - } - public void testMLT() { String[] fields = {"name.first", "name.last"}; String[] texts = {"text like this one"}; diff --git 
a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index ca2025ced1b..b32cfef76b6 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -560,7 +560,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addAlias(new Alias("alias1")) .addAlias(new Alias("{index}-alias")) - .addAlias(new Alias("alias3").filter(QueryBuilders.missingQuery("test"))) + .addAlias(new Alias("alias3").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("test")))) .addAlias(new Alias("alias4")).get(); client().admin().indices().preparePutTemplate("template2") diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 8a2506ec3ad..310ffff3007 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhrasePrefixQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @@ -67,6 +66,7 @@ import static org.elasticsearch.index.query.QueryBuilders.regexpQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -2471,7 +2471,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(boolQuery() - .should(constantScoreQuery(QueryBuilders.missingQuery("field1"))) + .should(boolQuery().mustNot(constantScoreQuery(QueryBuilders.existsQuery("field1")))) .should(matchQuery("field1", "test")) .should(constantScoreQuery(queryStringQuery("field1:photo*")))) .highlighter(highlight().field("field1")); @@ -2501,7 +2501,9 @@ public class HighlighterSearchIT extends ESIntegTestCase { refresh(); logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(boolQuery().must(queryStringQuery("field1:photo*")).filter(missingQuery("field_null"))) + SearchSourceBuilder source = searchSource().query(boolQuery() + .must(queryStringQuery("field1:photo*")) + .mustNot(existsQuery("field_null"))) .highlighter(highlight().field("field1")); SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); assertHighlight(searchResponse, 0, "field1", 0, 1, 
equalTo("The photography word will get highlighted")); diff --git a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java similarity index 54% rename from core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java rename to core/src/test/java/org/elasticsearch/search/query/ExistsIT.java index 349197d5f48..ae1f7ac5aa4 100644 --- a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java @@ -43,7 +43,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -public class ExistsMissingIT extends ESIntegTestCase { +public class ExistsIT extends ESIntegTestCase { // TODO: move this to a unit test somewhere... public void testEmptyIndex() throws Exception { @@ -51,50 +51,50 @@ public class ExistsMissingIT extends ESIntegTestCase { ensureYellow("test"); SearchResponse resp = client().prepareSearch("test").setQuery(QueryBuilders.existsQuery("foo")).execute().actionGet(); assertSearchResponse(resp); - resp = client().prepareSearch("test").setQuery(QueryBuilders.missingQuery("foo")).execute().actionGet(); + resp = client().prepareSearch("test").setQuery(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("foo"))).execute().actionGet(); assertSearchResponse(resp); } - public void testExistsMissing() throws Exception { + public void testExists() throws Exception { XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent) - .startObject() + .startObject() .startObject("type") - .startObject(FieldNamesFieldMapper.NAME) - .field("enabled", randomBoolean()) - .endObject() - .startObject("properties") - .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") - .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") - .startObject("bar") - .field("type", "string") - .endObject() - .endObject() - .endObject() - .startObject("baz") - .field("type", "long") - .endObject() - .endObject() - .endObject() - .endObject() + .startObject(FieldNamesFieldMapper.NAME) + .field("enabled", randomBoolean()) .endObject() - .endObject(); + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("bar") + .field("type", "string") + .endObject() + .endObject() + .endObject() + .startObject("baz") + .field("type", "long") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", mapping)); @SuppressWarnings("unchecked") Map barObject = new HashMap<>(); barObject.put("foo", "bar"); barObject.put("bar", singletonMap("bar", "foo")); - final Map[] sources = new Map[] { + final Map[] sources = new Map[]{ // simple property singletonMap("foo", "bar"), // object fields @@ -145,62 +145,6 @@ public class ExistsMissingIT extends ESIntegTestCase { } throw e; } - - // missing - resp = 
client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery(fieldName)).execute().actionGet(); - assertSearchResponse(resp); - assertEquals(String.format(Locale.ROOT, "missing(%s, %d) mapping: %s response: %s", fieldName, count, mapping.string(), resp), numDocs - count, resp.getHits().totalHits()); } } - - public void testNullValueUnset() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed")); - indexRandom(true, - client().prepareIndex("idx", "type", "1").setSource("f", "foo"), - client().prepareIndex("idx", "type", "2").setSource("f", null), - client().prepareIndex("idx", "type", "3").setSource("g", "bar"), - client().prepareIndex("idx", "type", "4").setSource("f", "bar")); - - SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get(); - assertSearchHits(resp, "2", "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get(); - assertSearchHits(resp, "2", "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get(); - assertSearchHits(resp); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get(); - fail("both existence and null_value can't be false"); - } catch (IllegalArgumentException e) { - // expected - } - } - - public void testNullValueSet() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed,null_value=bar")); - indexRandom(true, - client().prepareIndex("idx", "type", "1").setSource("f", "foo"), - client().prepareIndex("idx", "type", "2").setSource("f", null), - client().prepareIndex("idx", "type", "3").setSource("g", "bar"), - client().prepareIndex("idx", "type", "4").setSource("f", "bar")); - - SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get(); - assertSearchHits(resp, "2", "3", "4"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get(); - assertSearchHits(resp, "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get(); - assertSearchHits(resp, "2", "4"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get(); - fail("both existence and null_value can't be false"); - } catch (IllegalArgumentException e) { - // expected - } - } - } diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 61a237c1a93..9918d449657 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -70,7 +70,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.index.query.QueryBuilders.indicesQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @@ -805,32 +804,6 @@ public class SearchQueryIT extends 
ESIntegTestCase { searchResponse = client().prepareSearch().setQuery(existsQuery("obj1")).get(); assertHitCount(searchResponse, 2l); assertSearchHits(searchResponse, "1", "2"); - - searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingQuery("field1"))).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(queryStringQuery("_missing_:field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - // wildcard check - searchResponse = client().prepareSearch().setQuery(missingQuery("x*")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - // object check - searchResponse = client().prepareSearch().setQuery(missingQuery("obj1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); } public void testPassQueryOrFilterAsJSONString() throws Exception { diff --git a/docs/java-api/query-dsl/missing-query.asciidoc b/docs/java-api/query-dsl/missing-query.asciidoc deleted file mode 100644 index 00086cf737b..00000000000 --- a/docs/java-api/query-dsl/missing-query.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -[[java-query-dsl-missing-query]] -==== Missing Query - -See {ref}/query-dsl-missing-query.html[Missing Query] - -[source,java] -------------------------------------------------- -QueryBuilder qb = missingQuery("user", <1> - true, <2> - true); <3> -------------------------------------------------- -<1> field -<2> find missing field with an explicit `null` value -<3> find missing field that doesn’t exist diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index b8683bc6fd0..c649c679352 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -411,9 +411,9 @@ Use the `field(String, float)` method instead. ==== MissingQueryBuilder -The two individual setters for existence() and nullValue() were removed in favour of -optional constructor settings in order to better capture and validate their interdependent -settings at construction time. +The MissingQueryBuilder which was deprecated in 2.2.0 is removed. As a replacement use ExistsQueryBuilder +inside a mustNot() clause. So instead of using `new MissingQueryBuilder(name)` now use +`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`. ==== NotQueryBuilder @@ -491,3 +491,4 @@ from `OsStats.Cpu#getPercent`. === Fields option Only stored fields are retrievable with this option. The fields option won't be able to load non stored fields from _source anymore. + diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 0ae3bf28ffa..404dce4a4ae 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -38,7 +38,7 @@ These documents would *not* match the above query: <3> The `user` field is missing completely.
[float] -===== `null_value` mapping +==== `null_value` mapping If the field mapping includes the <> setting then explicit `null` values are replaced with the specified `null_value`. For @@ -70,3 +70,21 @@ no values in the `user` field and thus would not match the `exists` filter: { "foo": "bar" } -------------------------------------------------- +==== `missing` query + +'missing' query has been removed because it can be advantageously replaced by an `exists` query inside a must_not +clause as follows: + +[source,js] +-------------------------------------------------- +"bool": { + "must_not": { + "exists": { + "field": "user" + } + } +} +-------------------------------------------------- + +This query returns documents that have no value in the user field. + diff --git a/docs/reference/query-dsl/missing-query.asciidoc b/docs/reference/query-dsl/missing-query.asciidoc deleted file mode 100644 index 648da068189..00000000000 --- a/docs/reference/query-dsl/missing-query.asciidoc +++ /dev/null @@ -1,132 +0,0 @@ -[[query-dsl-missing-query]] -=== Missing Query - -Returns documents that have only `null` values or no value in the original field: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "missing" : { "field" : "user" } - } - } -} --------------------------------------------------- - -For instance, the following docs would match the above filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [] } <1> -{ "user": [null] } <2> -{ "foo": "bar" } <3> --------------------------------------------------- -<1> This field has no values. -<2> This field has no non-`null` values. -<3> The `user` field is missing completely. - -These documents would *not* match the above filter: - -[source,js] --------------------------------------------------- -{ "user": "jane" } -{ "user": "" } <1> -{ "user": "-" } <2> -{ "user": ["jane"] } -{ "user": ["jane", null ] } <3> --------------------------------------------------- -<1> An empty string is a non-`null` value. -<2> Even though the `standard` analyzer would emit zero tokens, the original field is non-`null`. -<3> This field has one non-`null` value. - -[float] -==== `null_value` mapping - -If the field mapping includes a <> then explicit `null` values -are replaced with the specified `null_value`. 
For instance, if the `user` field were mapped -as follows: - -[source,js] --------------------------------------------------- - "user": { - "type": "string", - "null_value": "_null_" - } --------------------------------------------------- - -then explicit `null` values would be indexed as the string `_null_`, and the -the following docs would *not* match the `missing` filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } --------------------------------------------------- - -However, these docs--without explicit `null` values--would still have -no values in the `user` field and thus would match the `missing` filter: - -[source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- - -[float] -===== `existence` and `null_value` parameters - -When the field being queried has a `null_value` mapping, then the behaviour of -the `missing` filter can be altered with the `existence` and `null_value` -parameters: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "missing" : { - "field" : "user", - "existence" : true, - "null_value" : false - } - } - } -} --------------------------------------------------- - - -`existence`:: -+ --- -When the `existence` parameter is set to `true` (the default), the missing -filter will include documents where the field has *no* values, ie: - -[source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- - -When set to `false`, these documents will not be included. --- - -`null_value`:: -+ --- -When the `null_value` parameter is set to `true`, the missing -filter will include documents where the field contains a `null` value, ie: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } -{ "user": ["jane",null] } <1> --------------------------------------------------- -<1> Matches because the field contains a `null` value, even though it also contains a non-`null` value. - -When set to `false` (the default), these documents will not be included. --- - -NOTE: Either `existence` or `null_value` or both must be set to `true`. 
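For client code migrating off the removed query, a minimal sketch of the replacement pattern described in the migration note above; the field name `user` and the static imports are illustrative, not part of the patch:

[source,java]
--------------------------------------------------
import org.elasticsearch.index.query.QueryBuilder;

import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.existsQuery;

// before (removed): missingQuery("user")
// after: negate an exists query inside a bool query's mustNot clause
QueryBuilder noUserField = boolQuery().mustNot(existsQuery("user"));
--------------------------------------------------

Note that this mirrors only the default `existence` behaviour of the old query; fields indexed through a `null_value` mapping are present in the index, so such documents would need to be matched explicitly (for example with a term query on the configured `null_value`).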
From 176a4fc8dabfc3868597e0040e93beca35b53db7 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 10 Dec 2015 14:19:19 +0100 Subject: [PATCH 08/26] Review ++ --- .../search/highlight/HighlighterSearchIT.java | 2 +- .../elasticsearch/search/query/ExistsIT.java | 60 +++++++++---------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 310ffff3007..63378baa721 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -2471,7 +2471,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(boolQuery() - .should(boolQuery().mustNot(constantScoreQuery(QueryBuilders.existsQuery("field1")))) + .should(boolQuery().mustNot(QueryBuilders.existsQuery("field1"))) .should(matchQuery("field1", "test")) .should(constantScoreQuery(queryStringQuery("field1:photo*")))) .highlighter(highlight().field("field1")); diff --git a/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java index ae1f7ac5aa4..73906b2ed83 100644 --- a/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java @@ -57,44 +57,44 @@ public class ExistsIT extends ESIntegTestCase { public void testExists() throws Exception { XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent) - .startObject() + .startObject() .startObject("type") - .startObject(FieldNamesFieldMapper.NAME) - .field("enabled", randomBoolean()) + .startObject(FieldNamesFieldMapper.NAME) + .field("enabled", randomBoolean()) + .endObject() + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("foo") + .field("type", "string") + .endObject() + .startObject("bar") + .field("type", "object") + .startObject("properties") + .startObject("bar") + .field("type", "string") + .endObject() + .endObject() + .endObject() + .startObject("baz") + .field("type", "long") + .endObject() + .endObject() + .endObject() + .endObject() .endObject() - .startObject("properties") - .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") - .startObject("foo") - .field("type", "string") - .endObject() - .startObject("bar") - .field("type", "object") - .startObject("properties") - .startObject("bar") - .field("type", "string") - .endObject() - .endObject() - .endObject() - .startObject("baz") - .field("type", "long") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); + .endObject(); assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", mapping)); @SuppressWarnings("unchecked") Map barObject = new HashMap<>(); barObject.put("foo", "bar"); barObject.put("bar", singletonMap("bar", "foo")); - final Map[] sources = new Map[]{ + final Map[] sources = new Map[] { // simple property singletonMap("foo", "bar"), // object fields From 12f905a6759ce962ab76e89a1c5b06dc650a12cf Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 11 Dec 2015 09:44:08 +0100 
Subject: [PATCH 09/26] Make chunkSize configurable for tests and use correct close handling for closing streams to not hide original exception --- .../org/elasticsearch/common/io/Streams.java | 32 +++++++--------- .../indices/recovery/RecoverySettings.java | 13 +++++++ .../recovery/RecoverySourceHandler.java | 37 ++++++------------- .../indices/recovery/IndexRecoveryIT.java | 12 +++++- .../recovery/TruncatedRecoveryIT.java | 5 +++ 5 files changed, 54 insertions(+), 45 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/io/Streams.java b/core/src/main/java/org/elasticsearch/common/io/Streams.java index 8adf0919e50..36b1d9445b0 100644 --- a/core/src/main/java/org/elasticsearch/common/io/Streams.java +++ b/core/src/main/java/org/elasticsearch/common/io/Streams.java @@ -20,6 +20,8 @@ package org.elasticsearch.common.io; import java.nio.charset.StandardCharsets; + +import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.util.Callback; import java.io.BufferedReader; @@ -68,6 +70,7 @@ public abstract class Streams { public static long copy(InputStream in, OutputStream out, byte[] buffer) throws IOException { Objects.requireNonNull(in, "No InputStream specified"); Objects.requireNonNull(out, "No OutputStream specified"); + boolean success = false; try { long byteCount = 0; int bytesRead; @@ -76,17 +79,13 @@ public abstract class Streams { byteCount += bytesRead; } out.flush(); + success = true; return byteCount; } finally { - try { - in.close(); - } catch (IOException ex) { - // do nothing - } - try { - out.close(); - } catch (IOException ex) { - // do nothing + if (success) { + IOUtils.close(in, out); + } else { + IOUtils.closeWhileHandlingException(in, out); } } } @@ -130,6 +129,7 @@ public abstract class Streams { public static int copy(Reader in, Writer out) throws IOException { Objects.requireNonNull(in, "No Reader specified"); Objects.requireNonNull(out, "No Writer specified"); + boolean success = false; try { int byteCount = 0; char[] buffer = new char[BUFFER_SIZE]; @@ -139,17 +139,13 @@ public abstract class Streams { byteCount += bytesRead; } out.flush(); + success = true; return byteCount; } finally { - try { - in.close(); - } catch (IOException ex) { - // do nothing - } - try { - out.close(); - } catch (IOException ex) { - // do nothing + if (success) { + IOUtils.close(in, out); + } else { + IOUtils.closeWhileHandlingException(in, out); } } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 2841049eedc..f2f06921680 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -71,6 +71,8 @@ public class RecoverySettings extends AbstractComponent implements Closeable { public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes(); + public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB); + private volatile int concurrentStreams; private volatile int concurrentSmallFileStreams; private final ThreadPoolExecutor concurrentStreamPool; @@ -84,6 +86,8 @@ public class RecoverySettings extends AbstractComponent implements Closeable { private volatile TimeValue internalActionTimeout; private volatile TimeValue internalActionLongTimeout; + private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; + @Inject public
RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) { super(settings); @@ -160,6 +164,15 @@ public class RecoverySettings extends AbstractComponent implements Closeable { return internalActionLongTimeout; } + public ByteSizeValue getChunkSize() { return chunkSize; } + + void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests + if (chunkSize.bytesAsInt() <= 0) { + throw new IllegalArgumentException("chunkSize must be > 0"); + } + this.chunkSize = chunkSize; + } + class ApplySettings implements NodeSettingsService.Listener { @Override diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index ffd2c8edc6b..036260624a6 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -72,7 +72,6 @@ import java.util.stream.StreamSupport; */ public class RecoverySourceHandler { - private static final int CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB).bytesAsInt(); protected final ESLogger logger; // Shard that is going to be recovered (the "source") private final IndexShard shard; @@ -82,6 +81,7 @@ public class RecoverySourceHandler { private final StartRecoveryRequest request; private final RecoverySettings recoverySettings; private final TransportService transportService; + private final int chunkSizeInBytes; protected final RecoveryResponse response; @@ -110,7 +110,7 @@ public class RecoverySourceHandler { this.transportService = transportService; this.indexName = this.request.shardId().index().name(); this.shardId = this.request.shardId().id(); - + this.chunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt(); this.response = new RecoveryResponse(); } @@ -247,7 +247,7 @@ public class RecoverySourceHandler { }); // How many bytes we've copied since we last called RateLimiter.pause final AtomicLong bytesSinceLastPause = new AtomicLong(); - final Function outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), CHUNK_SIZE); + final Function outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), chunkSizeInBytes); sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories); cancellableThreads.execute(() -> { // Send the CLEAN_FILES request, which takes all of the files that @@ -451,7 +451,7 @@ public class RecoverySourceHandler { // Check if this request is past bytes threshold, and // if so, send it off - if (size >= CHUNK_SIZE) { + if (size >= chunkSizeInBytes) { // don't throttle translog, since we lock for phase3 indexing, // so we need to move it as fast as possible. Note, since we @@ -526,7 +526,6 @@ public class RecoverySourceHandler { private final AtomicLong bytesSinceLastPause; private final Translog.View translogView; private long position = 0; - private final AtomicBoolean failed = new AtomicBoolean(false); RecoveryOutputStream(StoreFileMetaData md, AtomicLong bytesSinceLastPause, Translog.View translogView) { this.md = md; @@ -541,24 +540,9 @@ public class RecoverySourceHandler { @Override public final void write(byte[] b, int offset, int length) throws IOException { - if (failed.get() == false) { - /* since we are an outputstream a wrapper might get flushed on close after we threw an exception. 
- * that might cause another exception from the other side of the recovery since we are in a bad state - * due to a corrupted file stream etc. the biggest issue is that we will turn into a loop of exceptions - * and we will always suppress the original one which might cause the recovery to retry over and over again. - * To prevent this we try to not send chunks again after we failed once.*/ - boolean success = false; - try { - sendNextChunk(position, new BytesArray(b, offset, length), md.length() == position + length); - position += length; - assert md.length() >= position : "length: " + md.length() + " but positions was: " + position; - success = true; - } finally { - if (success == false) { - failed.compareAndSet(false, true); - } - } - } + sendNextChunk(position, new BytesArray(b, offset, length), md.length() == position + length); + position += length; + assert md.length() >= position : "length: " + md.length() + " but positions was: " + position; } private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException { @@ -689,9 +673,10 @@ public class RecoverySourceHandler { pool = recoverySettings.concurrentSmallFileStreamPool(); } Future<Void> future = pool.submit(() -> { - try (final OutputStream outputStream = outputStreamFactory.apply(md); - final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) { - Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStream); + try (final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) { + // it's fine that we are only having the indexInput in the try/with block. The copy method handles + // exceptions during close correctly and doesn't hide the original exception. + Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStreamFactory.apply(md)); } return null; }); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index ae63c8d4b92..4cf60289a18 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -138,7 +138,10 @@ public class IndexRecoveryIT extends ESIntegTestCase { } private void slowDownRecovery(ByteSizeValue shardSize) { - long chunkSize = shardSize.bytes() / 5; + long chunkSize = Math.max(1, shardSize.bytes() / 10); + for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) { + setChunkSize(settings, new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES)); + } assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() // one chunk per sec..
@@ -148,6 +151,9 @@ public class IndexRecoveryIT extends ESIntegTestCase { } private void restoreRecoverySpeed() { + for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) { + setChunkSize(settings, RecoverySettings.DEFAULT_CHUNK_SIZE); + } assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb") @@ -629,4 +635,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { transport.sendRequest(node, requestId, action, request, options); } } + + public static void setChunkSize(RecoverySettings recoverySettings, ByteSizeValue chunksSize) { + recoverySettings.setChunkSize(chunksSize); + } } diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 3198c04121f..4d111469505 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.recovery.IndexRecoveryIT; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoveryTarget; @@ -71,6 +72,10 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { * Later we allow full recovery to ensure we can still recover and don't run into corruptions. */ public void testCancelRecoveryAndResume() throws Exception { + for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) { + IndexRecoveryIT.setChunkSize(settings, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)); + } + NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); List dataNodeStats = new ArrayList<>(); for (NodeStats stat : nodeStats.getNodes()) { From 259c6eeb591d0ea4f1b2b164119280f16e54bf8b Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Fri, 11 Dec 2015 14:38:13 +0100 Subject: [PATCH 10/26] Merge pull request #15274 from murnieza/patch-1 [Doc] Redundant indefinite article removed --- docs/reference/search/suggesters/completion-suggest.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 4aa07983fa1..b0be107af1c 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -92,7 +92,7 @@ PUT music/song/1?refresh=true The following parameters are supported: `input`:: - The input to store, this can be a an array of strings or just + The input to store, this can be an array of strings or just a string. This field is mandatory. 
`weight`:: From db15682b3ca33de6c79d0d3a8a397ede5f748e7e Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Fri, 11 Dec 2015 15:16:14 +0100 Subject: [PATCH 11/26] apply review comments from @bleskes --- .../org/elasticsearch/indices/recovery/RecoverySettings.java | 2 +- .../elasticsearch/indices/recovery/RecoverySourceHandler.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index f2f06921680..6db38d59e85 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -120,7 +120,7 @@ public class RecoverySettings extends AbstractComponent implements Closeable { rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac()); } - logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}]", + logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}]", maxBytesPerSec, concurrentStreams); nodeSettingsService.addListener(new ApplySettings()); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 036260624a6..4057af00841 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -547,7 +547,7 @@ public class RecoverySourceHandler { private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException { final TransportRequestOptions chunkSendOptions = TransportRequestOptions.builder() - .withCompress(false) + .withCompress(false) // lucene files are already compressed and therefore compressing this won't really help much so we are saving the cpu for other things .withType(TransportRequestOptions.Type.RECOVERY) .withTimeout(recoverySettings.internalActionTimeout()) .build(); From f5d307e426ae75f24f45949bc4da9aa3197a7ae0 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Fri, 11 Dec 2015 16:22:36 +0100 Subject: [PATCH 12/26] BulkItemResponse returns status code instead of status name In commit fafeb3a, we've refactored REST response handling logic and returned HTTP status names instead of HTTP status codes for bulk item responses. With this commit we restore the original behavior. Checked with @bleskes.
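As a sketch of the behaviour being restored, assuming the XContent layer renders a bare enum via its name; `RestStatus` is the enum behind these responses, and its numeric values are the standard HTTP codes:

[source,java]
--------------------------------------------------
import org.elasticsearch.rest.RestStatus;

// getStatus() yields the numeric code that bulk clients expect,
// while serializing the enum itself would emit its name.
RestStatus status = RestStatus.CREATED;
int code = status.getStatus();   // 201 -> rendered as "status": 201
String name = status.name();     // "CREATED" -> the name the regression rendered
--------------------------------------------------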
--- .../java/org/elasticsearch/action/bulk/BulkItemResponse.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 982700016b7..b8d09583f69 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -51,12 +51,12 @@ public class BulkItemResponse implements Streamable, StatusToXContent { builder.startObject(opType); if (failure == null) { response.toXContent(builder, params); - builder.field(Fields.STATUS, response.status()); + builder.field(Fields.STATUS, response.status().getStatus()); } else { builder.field(Fields._INDEX, failure.getIndex()); builder.field(Fields._TYPE, failure.getType()); builder.field(Fields._ID, failure.getId()); - builder.field(Fields.STATUS, failure.getStatus()); + builder.field(Fields.STATUS, failure.getStatus().getStatus()); builder.startObject(Fields.ERROR); ElasticsearchException.toXContent(builder, params, failure.getCause()); builder.endObject(); From 3383c24be0a137929f7240edd8f984b0d3df5792 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 7 Dec 2015 18:46:26 -0500 Subject: [PATCH 13/26] Remove and forbid use of Collections#shuffle(List) and Random#<init>() This commit removes and now forbids all uses of Collections#shuffle(List) and Random#<init>() across the codebase. The rationale for removing and forbidding these methods is to increase test reproducibility. As these methods use non-reproducible seeds, production code and tests that rely on these methods contribute to non-reproducibility of tests. Instead of Collections#shuffle(List) the method Collections#shuffle(List, Random) can be used. All that is required then is a reproducible source of randomness. Consequently, the utility class Randomness has been added to assist in creating reproducible sources of randomness. Instead of Random#<init>(), Random#<init>(long) with a reproducible seed or the aforementioned Randomness class can be used.
Closes #15287 --- .../resources/forbidden/all-signatures.txt | 3 + .../cluster/routing/RoutingNodes.java | 13 +- .../routing/allocation/AllocationService.java | 7 +- .../org/elasticsearch/common/Randomness.java | 132 ++++++++++++++++++ .../discovery/DiscoveryService.java | 9 +- .../elasticsearch/plugins/PluginManager.java | 7 +- .../lucene/queries/BlendedTermQueryTests.java | 21 +-- .../IndicesShardStoreResponseTests.java | 14 +- .../OldIndexBackwardsCompatibilityIT.java | 19 +-- .../node/DiscoveryNodeFiltersTests.java | 8 +- .../cluster/routing/DelayedAllocationIT.java | 2 +- .../allocation/AddIncrementallyTests.java | 3 +- .../NodeVersionAllocationDeciderTests.java | 11 +- .../common/lucene/LuceneTests.java | 16 +-- .../lucene/index/FreqTermsEnumTests.java | 19 +-- .../common/util/CollectionUtilsTests.java | 13 +- .../concurrent/PrioritizedExecutorsTests.java | 10 +- .../DiscoveryWithServiceDisruptionsIT.java | 6 +- .../zen/ElectMasterServiceTests.java | 8 +- .../zen/NodeJoinControllerTests.java | 6 +- .../discovery/zen/ZenPingTests.java | 2 +- .../PublishClusterStateActionTests.java | 30 +--- .../gateway/MetaDataStateFormatTests.java | 29 +--- .../mapper/all/SimpleAllMapperTests.java | 15 +- .../core/TokenCountFieldMapperTests.java | 8 +- .../mapper/multifield/MultiFieldTests.java | 2 +- .../index/store/CorruptedFileIT.java | 37 ++--- .../indices/recovery/RecoveryStateTests.java | 19 +-- .../recovery/TruncatedRecoveryIT.java | 3 +- .../suggest/CompletionSuggestSearchIT.java | 4 +- .../messy/tests/SimpleSortTests.java | 48 +------ .../cloud/aws/AwsEc2ServiceImpl.java | 3 +- .../elasticsearch/tribe/TribeUnitTests.java | 21 ++- .../org/elasticsearch/tribe/elasticsearch.yml | 4 +- .../test/ESAllocationTestCase.java | 3 +- .../elasticsearch/test/ESIntegTestCase.java | 4 +- .../org/elasticsearch/test/ESTestCase.java | 4 +- 37 files changed, 251 insertions(+), 312 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/common/Randomness.java diff --git a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/all-signatures.txt index 4bc24c9a2af..c1e65cbaf22 100644 --- a/buildSrc/src/main/resources/forbidden/all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/all-signatures.txt @@ -123,3 +123,6 @@ java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a r java.util.Collections#EMPTY_LIST java.util.Collections#EMPTY_MAP java.util.Collections#EMPTY_SET + +java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness +java.util.Random#() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index badf70a191e..5d17a59339a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -27,18 +27,11 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.shard.ShardId; -import java.util.ArrayList; -import java.util.Collections; -import 
java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import java.util.function.Predicate; /** @@ -671,7 +664,7 @@ public class RoutingNodes implements Iterable<RoutingNode> { } public void shuffle() { - Collections.shuffle(unassigned); + Randomness.shuffle(unassigned); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index feafb76a5f2..2268bf1d995 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -39,15 +39,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.Set; +import java.util.*; import java.util.function.Function; import java.util.stream.Collectors; diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java new file mode 100644 index 00000000000..770b6554cf6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import org.elasticsearch.common.settings.Settings; + +import java.lang.reflect.Method; +import java.security.SecureRandom; +import java.util.Collections; +import java.util.List; +import java.util.Random; + +/** + * Provides factory methods for producing reproducible sources of + * randomness. Reproducible sources of randomness contribute to + * reproducible tests. When running the Elasticsearch test suite, the + * test runner will establish a global random seed accessible via the + * system property "tests.seed". By seeding a random number generator + * with this global seed, we ensure that instances of Random produced + * with this class produce reproducible sources of randomness + * when running under the Elasticsearch test suite.
Alternatively, + * a reproducible source of randomness can be produced by providing a + * setting containing a reproducible seed. When running the Elasticsearch server + * process, non-reproducible sources of randomness are provided (unless + * a setting is provided for a module that exposes a seed setting (e.g., + * DiscoveryService#SETTING_DISCOVERY_SEED)). + */ +public final class Randomness { + private static final SecureRandom SR = new SecureRandom(); + private static final Method currentMethod; + private static final Method getRandomMethod; + + static { + Method maybeCurrentMethod; + Method maybeGetRandomMethod; + try { + Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext"); + maybeCurrentMethod = clazz.getMethod("current"); + maybeGetRandomMethod = clazz.getMethod("getRandom"); + } catch (Throwable t) { + maybeCurrentMethod = null; + maybeGetRandomMethod = null; + } + currentMethod = maybeCurrentMethod; + getRandomMethod = maybeGetRandomMethod; + } + + private Randomness() {} + + /** + * Provides a reproducible source of randomness seeded by a long + * seed in the settings with the key setting. + * + * @param settings the settings containing the seed + * @param setting the key to access the seed + * @return a reproducible source of randomness + */ + public static Random get(Settings settings, String setting) { + Long maybeSeed = settings.getAsLong(setting, null); + if (maybeSeed != null) { + return new Random(maybeSeed); + } else { + return get(); + } + } + + /** + * Provides a source of randomness that is reproducible when + * running under the Elasticsearch test suite, and otherwise + * produces a non-reproducible source of randomness. Reproducible + * sources of randomness are created when the system property + * "tests.seed" is set and the security policy allows reading this + * system property. Otherwise, non-reproducible sources of + * randomness are created.
+ * + * @return a source of randomness + * @throws IllegalStateException if running tests but was not able + * to acquire an instance of Random from + * RandomizedContext or tests are + * running but tests.seed is not set + */ + public static Random get() { + if (currentMethod != null && getRandomMethod != null) { + try { + Object randomizedContext = currentMethod.invoke(null); + return (Random) getRandomMethod.invoke(randomizedContext); + } catch (ReflectiveOperationException e) { + // unexpected, bail + throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e); + } + } else { + return getWithoutSeed(); + } + } + + private static ThreadLocal<Random> LOCAL; + + private static Random getWithoutSeed() { + assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; + if (LOCAL == null) { + byte[] bytes = SR.generateSeed(8); + long accumulator = 0; + for (int i = 0; i < bytes.length; i++) { + accumulator = (accumulator << 8) + (bytes[i] & 0xFFL); // fold the seed bytes into a long + } + final long seed = accumulator; + LOCAL = ThreadLocal.withInitial(() -> new Random(seed)); + } + return LOCAL.get(); + } + + public static void shuffle(List<?> list) { + Collections.shuffle(list, get()); + } +} diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index eeba9baa32a..a82099658ea 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -41,7 +42,6 @@ public class DiscoveryService extends AbstractLifecycleComponent MODULES = unmodifiableSet(newHashSet( "lang-expression", "lang-groovy")); @@ -124,7 +125,7 @@ public class PluginManager { checkForForbiddenName(pluginHandle.name); } else { // if we have no name but url, use temporary name that will be overwritten later - pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null); + pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null); } Path pluginFile = download(pluginHandle, terminal); @@ -224,7 +225,7 @@ public class PluginManager { PluginInfo info = PluginInfo.readFromProperties(root); terminal.println(VERBOSE, "%s", info); - // don't let luser install plugin as a module... + // don't let luser install plugin as a module...
// they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 0f29ed5a2f7..a287ec119e7 100644 --- a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -23,19 +23,8 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.DisjunctionMaxQuery; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.QueryUtils; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.index.*; +import org.apache.lucene.search.*; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.DefaultSimilarity; import org.apache.lucene.search.similarities.Similarity; @@ -44,11 +33,7 @@ import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index c862583a3f7..cf197a27faf 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -26,20 +26,12 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.transport.DummyTransportAddress; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.NodeDisconnectedException; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.hamcrest.Matchers.equalTo; @@ -119,7 +111,7 @@ public class IndicesShardStoreResponseTests extends ESTestCase { orderedStoreStatuses.add(new 
IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted"))); List storeStatuses = new ArrayList<>(orderedStoreStatuses); - Collections.shuffle(storeStatuses); + Collections.shuffle(storeStatuses, random()); CollectionUtil.timSort(storeStatuses); assertThat(storeStatuses, equalTo(orderedStoreStatuses)); } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 137c6c5b2c2..297099e45ac 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -32,8 +32,6 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; @@ -62,23 +60,12 @@ import org.junit.Before; import java.io.IOException; import java.io.InputStream; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; +import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; // needs at least 2 nodes since it bumps replicas to 1 @@ -271,7 +258,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public void testOldIndexes() throws Exception { setupCluster(); - Collections.shuffle(indexes, getRandom()); + Collections.shuffle(indexes, random()); for (String index : indexes) { long startTime = System.currentTimeMillis(); logger.info("--> Testing old index " + index); diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java index ef548140192..9a91e1cd562 100644 --- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -29,11 +29,7 @@ import org.junit.BeforeClass; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -246,7 +242,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase { private Settings shuffleSettings(Settings source) { Settings.Builder settings = Settings.settingsBuilder(); List keys = new 
ArrayList<>(source.getAsMap().keySet()); - Collections.shuffle(keys, getRandom()); + Collections.shuffle(keys, random()); for (String o : keys) { settings.put(o, source.getAsMap().get(o)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index c236ea54878..2d704380ae0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -170,7 +170,7 @@ public class DelayedAllocationIT extends ESIntegTestCase { private String findNodeWithShard() { ClusterState state = client().admin().cluster().prepareState().get().getState(); List<ShardRouting> startedShards = state.routingTable().shardsWithState(ShardRoutingState.STARTED); - Collections.shuffle(startedShards, getRandom()); + Collections.shuffle(startedShards, random()); return state.nodes().get(startedShards.get(0).currentNodeId()).getName(); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index d10912e69db..ee8bd067008 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -388,7 +387,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("Removing [{}] nodes", numNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); ArrayList<DiscoveryNode> discoveryNodes = CollectionUtils.iterableAsArrayList(clusterState.nodes()); - Collections.shuffle(discoveryNodes, getRandom()); + Collections.shuffle(discoveryNodes, random()); for (DiscoveryNode node : discoveryNodes) { nodes.remove(node.id()); numNodes--; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 62501cbf9fb..2fe1d85b1f4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -39,15 +39,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.elasticsearch.cluster.routing.ShardRoutingState.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; /** * @@ -199,7 +194,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); int numNodes = between(1, 20); if (nodes.size() > numNodes) { - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); nodes = nodes.subList(0, numNodes); } else { for (int j = nodes.size(); j < numNodes; j++) { diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index fcd2f7d8767..17345fd714f 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -17,20 +17,14 @@ * under the License. */ package org.elasticsearch.common.lucene; + import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.NoDeletionPolicy; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; @@ -41,11 +35,7 @@ import org.apache.lucene.util.Version; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 96715a05b3c..ad811a38aed 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -24,13 +24,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -42,14 +36,7 @@ import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -201,7 +188,7 @@ public class FreqTermsEnumTests extends ESTestCase { for (int i = 0; i < cycles; i++) { List terms = new 
ArrayList<>(Arrays.asList(this.terms)); - Collections.shuffle(terms, getRandom()); + Collections.shuffle(terms, random()); for (String term : terms) { if (!termsEnum.seekExact(new BytesRef(term))) { assertThat("term : " + term, reference.get(term).docFreq, is(0)); diff --git a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index fe9ba6b1fcf..4c3612da8e0 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -25,14 +25,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Counter; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +73,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(tmpList, getRandom()); + Collections.shuffle(tmpList, random()); for (BytesRef ref : tmpList) { array.append(ref); } @@ -111,7 +104,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(values, getRandom()); + Collections.shuffle(values, random()); } int[] indices = new int[array.size()]; for (int i = 0; i < indices.length; i++) { diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java index 1d2d2141166..deac15b50d3 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java @@ -27,13 +27,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; @@ -46,7 +40,7 @@ public class PrioritizedExecutorsTests extends ESTestCase { public void testPriorityQueue() throws Exception { PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); List priorities = Arrays.asList(Priority.values()); - Collections.shuffle(priorities); + Collections.shuffle(priorities, random()); for (Priority priority : priorities) { queue.add(priority); diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b14792a2c33..f9778f6438f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -473,7 +473,7 @@ public class 
DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { docsPerIndexer = 1 + randomInt(5); logger.info("indexing " + docsPerIndexer + " docs per indexer during partition"); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); - Collections.shuffle(semaphores); + Collections.shuffle(semaphores, random()); for (Semaphore semaphore : semaphores) { assertThat(semaphore.availablePermits(), equalTo(0)); semaphore.release(docsPerIndexer); @@ -683,7 +683,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen("test"); nodes = new ArrayList<>(nodes); - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); String isolatedNode = nodes.get(0); String notIsolatedNode = nodes.get(1); @@ -1038,7 +1038,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { new NetworkDisconnectPartition(getRandom()), new SlowClusterStateProcessing(getRandom()) ); - Collections.shuffle(list); + Collections.shuffle(list, random()); setDisruptionScheme(list.get(0)); return list.get(0); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index e7bded344d9..c4955561905 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -26,11 +26,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; public class ElectMasterServiceTests extends ESTestCase { @@ -53,7 +49,7 @@ public class ElectMasterServiceTests extends ESTestCase { nodes.add(node); } - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); return nodes; } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index ea590756a8b..42cb7cf43f4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -244,7 +244,7 @@ public class NodeJoinControllerTests extends ESTestCase { // add - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] unique master nodes. 
Total of [{}] join requests", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -269,7 +269,7 @@ public class NodeJoinControllerTests extends ESTestCase { } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", finalJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -316,7 +316,7 @@ public class NodeJoinControllerTests extends ESTestCase { nodesToJoin.add(node); } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java index 82733d92206..c54489bceba 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java @@ -62,7 +62,7 @@ public class ZenPingTests extends ESTestCase { } // shuffle - Collections.shuffle(pings); + Collections.shuffle(pings, random()); ZenPing.PingCollection collection = new ZenPing.PingCollection(); collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()])); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index d33aadd84aa..8dea09ba093 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -21,17 +21,14 @@ package org.elasticsearch.discovery.zen.publish; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -47,35 +44,20 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportConnectionListener; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; import 
org.elasticsearch.transport.local.LocalTransport; import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.emptyIterable; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; @TestLogging("discovery.zen.publish:TRACE") public class PublishClusterStateActionTests extends ESTestCase { @@ -675,7 +657,7 @@ public class PublishClusterStateActionTests extends ESTestCase { logger.info("--> committing states"); - Collections.shuffle(states, random()); + Randomness.shuffle(states); for (ClusterState state : states) { node.action.handleCommitRequest(new PublishClusterStateAction.CommitClusterStateRequest(state.stateUUID()), channel); assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE)); diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index d7f4c9176d0..441314b1e35 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -19,12 +19,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.ChecksumIndexInput; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.*; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -33,11 +28,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -50,20 +41,10 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.stream.StreamSupport; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static 
org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.*; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS public class MetaDataStateFormatTests extends ESTestCase { @@ -349,7 +330,7 @@ public class MetaDataStateFormatTests extends ESTestCase { } List dirList = Arrays.asList(dirs); - Collections.shuffle(dirList, getRandom()); + Collections.shuffle(dirList, random()); MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0])); MetaData latestMetaData = meta.get(numStates-1); assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_"))); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java index bfb7ead734d..bbba3432b66 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java @@ -49,21 +49,12 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.hamcrest.Matchers; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; public class SimpleAllMapperTests extends ESSingleNodeTestCase { @@ -251,7 +242,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { if (randomBoolean()) { booleanOptionList.add(new Tuple<>("store_term_vector_payloads", tv_payloads = randomBoolean())); } - Collections.shuffle(booleanOptionList, getRandom()); + Collections.shuffle(booleanOptionList, random()); for (Tuple option : booleanOptionList) { mappingBuilder.field(option.v1(), option.v2().booleanValue()); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java index b1224d5c6c7..ba9303e8b58 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java @@ -19,11 +19,7 @@ package org.elasticsearch.index.mapper.core; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.CannedTokenStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.Token; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.*; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; @@ -85,7 +81,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { t2.setPositionIncrement(2); // Count funny tokens with more than one increment int finalTokenIncrement = 4; // Count the final token increment on the rare 
token streams that have them Token[] tokens = new Token[] {t1, t2, t3}; - Collections.shuffle(Arrays.asList(tokens), getRandom()); + Collections.shuffle(Arrays.asList(tokens), random()); final TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens); // TODO: we have no CannedAnalyzer? Analyzer analyzer = new Analyzer() { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index a5a073d147f..58fa8fd69b0 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -479,7 +479,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase { .startObject("my_field").field("type", "string").startObject("fields").startObject(MY_MULTI_FIELD) .field("type", "string").startObject("fielddata"); String[] keys = possibleSettings.keySet().toArray(new String[]{}); - Collections.shuffle(Arrays.asList(keys)); + Collections.shuffle(Arrays.asList(keys), random()); for(int i = randomIntBetween(0, possibleSettings.size()-1); i >= 0; --i) builder.field(keys[i], possibleSettings.get(keys[i])); builder.endObject().endObject().endObject().endObject().endObject().endObject().endObject(); diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 4858e0a6e3c..53d18eaef81 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -19,26 +19,21 @@ package org.elasticsearch.index.store; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexFileNames; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Nullable; @@ -48,7 +43,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.discovery.Discovery; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.shard.*; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; @@ -75,14 +69,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -91,16 +78,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class CorruptedFileIT extends ESIntegTestCase { @@ -320,7 +299,7 @@ public class CorruptedFileIT extends ESIntegTestCase { } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); NodeStats primariesNode = dataNodeStats.get(0); NodeStats unluckyNode = dataNodeStats.get(1); assertAcked(prepareCreate("test").setSettings(Settings.builder() @@ -380,7 +359,7 @@ public class CorruptedFileIT extends ESIntegTestCase { } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); NodeStats primariesNode = dataNodeStats.get(0); NodeStats unluckyNode = dataNodeStats.get(1); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java index fea8f0fdf14..8b23354ebb8 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java @@ -26,13 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.recovery.RecoveryState.File; -import org.elasticsearch.indices.recovery.RecoveryState.Index; -import org.elasticsearch.indices.recovery.RecoveryState.Stage; -import org.elasticsearch.indices.recovery.RecoveryState.Timer; -import org.elasticsearch.indices.recovery.RecoveryState.Translog; -import 
org.elasticsearch.indices.recovery.RecoveryState.Type; -import org.elasticsearch.indices.recovery.RecoveryState.VerifyIndex; +import org.elasticsearch.indices.recovery.RecoveryState.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -43,14 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.*; public class RecoveryStateTests extends ESTestCase { abstract class Streamer extends Thread { @@ -201,7 +188,7 @@ public class RecoveryStateTests extends ESTestCase { } } - Collections.shuffle(Arrays.asList(files)); + Collections.shuffle(Arrays.asList(files), random()); final RecoveryState.Index index = new RecoveryState.Index(); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 4d111469505..60a14abac7c 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.recovery.IndexRecoveryIT; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; @@ -84,7 +83,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { } } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); // we use 2 nodes a lucky and unlucky one // the lucky one holds the primary // the unlucky one gets the replica and the truncated leftovers diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 6f00e9977cf..0f5ac1a522f 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -942,13 +942,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { } public void assertSuggestions(String suggestion, String... suggestions) { - String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10); + String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestion).size(10); assertSuggestions(suggestionName, suggestionBuilder, suggestions); } public void assertSuggestionsNotInOrder(String suggestString, String... 
suggestions) { - String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10); + String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestString).size(10) ).execute().actionGet(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index 5a00bca9fac..a5c5f8aa2fe 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -48,58 +48,24 @@ import org.elasticsearch.script.groovy.GroovyPlugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.GeoDistanceSortBuilder; -import org.elasticsearch.search.sort.ScriptSortBuilder; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.sort.*; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matchers; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; +import java.util.*; import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; import java.util.concurrent.ExecutionException; +import static org.apache.lucene.util.GeoUtils.TOLERANCE; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.index.query.QueryBuilders.*; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static 
org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -import static org.apache.lucene.util.GeoUtils.TOLERANCE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; /** * @@ -530,7 +496,7 @@ public class SimpleSortTests extends ESIntegTestCase { .endObject()); builders.add(builder); } - Collections.shuffle(builders, random); + Randomness.shuffle(builders); for (IndexRequestBuilder builder : builders) { builder.execute().actionGet(); if (random.nextBoolean()) { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java index ec1ffd54a77..b6306e6209c 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java @@ -32,6 +32,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.aws.network.Ec2NameResolver; import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes; import org.elasticsearch.cluster.node.DiscoveryNodeService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; @@ -119,7 +120,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent } // Increase the number of retries in case of 5xx API responses - final Random rand = new Random(); + final Random rand = Randomness.get(); RetryPolicy retryPolicy = new RetryPolicy( RetryPolicy.RetryCondition.NO_RETRY_CONDITION, new RetryPolicy.BackoffStrategy() { diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 66c3d7959f8..01aaad1ca9d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.node.Node; import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.test.ESIntegTestCase; @@ -57,8 +58,20 @@ public class TribeUnitTests extends ESTestCase { .put("node.mode", NODE_MODE) .put("path.home", createTempDir()).build(); - tribe1 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe1").put("name", "tribe1_node").build()).start(); - tribe2 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe2").put("name", "tribe2_node").build()).start(); + tribe1 = new TribeClientNode( + Settings.builder() + .put(baseSettings) + .put("cluster.name", "tribe1") + .put("node.name", "tribe1_node") + .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) + .build()).start(); + tribe2 = new TribeClientNode( + Settings.builder() + .put(baseSettings) + .put("cluster.name", "tribe2") + .put("node.name", 
"tribe2_node") + .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) + .build()).start(); } @AfterClass @@ -73,6 +86,8 @@ public class TribeUnitTests extends ESTestCase { System.setProperty("es.cluster.name", "tribe_node_cluster"); System.setProperty("es.tribe.t1.cluster.name", "tribe1"); System.setProperty("es.tribe.t2.cluster.name", "tribe2"); + System.setProperty("es.tribe.t1.discovery.id.seed", Long.toString(random().nextLong())); + System.setProperty("es.tribe.t2.discovery.id.seed", Long.toString(random().nextLong())); try { assertTribeNodeSuccesfullyCreated(Settings.EMPTY); @@ -80,6 +95,8 @@ public class TribeUnitTests extends ESTestCase { System.clearProperty("es.cluster.name"); System.clearProperty("es.tribe.t1.cluster.name"); System.clearProperty("es.tribe.t2.cluster.name"); + System.clearProperty("es.tribe.t1.discovery.id.seed"); + System.clearProperty("es.tribe.t2.discovery.id.seed"); } } diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml index 89f4922a6af..ad1b9be8c89 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml @@ -1,3 +1,5 @@ cluster.name: tribe_node_cluster tribe.t1.cluster.name: tribe1 -tribe.t2.cluster.name: tribe2 \ No newline at end of file +tribe.t2.cluster.name: tribe2 +tribe.t1.discovery.id.seed: 1 +tribe.t2.discovery.id.seed: 2 diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java index c4f4b196739..e82823ae997 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -111,7 +112,7 @@ public abstract class ESAllocationTestCase extends ESTestCase { for (AllocationDecider d : list) { assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true)); } - Collections.shuffle(list, random); + Randomness.shuffle(list); return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0])); } diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 05dab1c35dc..fafff57c92f 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -533,7 +533,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { - return RandomizedContext.current().runWithPrivateRandomness(new Randomness(seed), new Callable() { + return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new 
Callable() { @Override public TestCluster call() throws Exception { return buildTestCluster(scope, seed); @@ -1388,7 +1388,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } } final String[] indices = indicesSet.toArray(new String[indicesSet.size()]); - Collections.shuffle(builders, random); + Collections.shuffle(builders, random()); final CopyOnWriteArrayList> errors = new CopyOnWriteArrayList<>(); List inFlightAsyncOperations = new ArrayList<>(); // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk. diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java index c59c3ba4d4e..e1443110c0d 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -566,7 +566,7 @@ public abstract class ESTestCase extends LuceneTestCase { throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects"); } List list = arrayAsArrayList(values); - Collections.shuffle(list); + Collections.shuffle(list, random()); return list.subList(0, size); } @@ -615,7 +615,7 @@ public abstract class ESTestCase extends LuceneTestCase { sb.append("]"); assertThat(count + " files exist that should have been cleaned:\n" + sb.toString(), count, equalTo(0)); } - + /** Returns the suite failure marker: internal use only! */ public static TestRuleMarkFailure getSuiteFailureMarker() { return suiteFailureMarker; From 47e922ba85a120a05ea9166a6fd1ce0f40a0cfb5 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 11 Dec 2015 08:26:00 -0800 Subject: [PATCH 14/26] Add excludes and patterns as explicit inputs to forbidden patterns task --- .../gradle/precommit/ForbiddenPatternsTask.groovy | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy index 5fa63956b57..6809adca946 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy @@ -61,11 +61,14 @@ public class ForbiddenPatternsTask extends DefaultTask { // add mandatory rules patterns.put('nocommit', /nocommit/) patterns.put('tab', /\t/) + + inputs.property("excludes", filesFilter.excludes) + inputs.property("rules", patterns) } /** Adds a file glob pattern to be excluded */ public void exclude(String... excludes) { - this.filesFilter.exclude(excludes) + filesFilter.exclude(excludes) } /** Adds a pattern to forbid. 
T */ From 11cbc08e45cb7afa5a236e72d68bf2404fd7728b Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 11 Dec 2015 11:46:50 -0500 Subject: [PATCH 15/26] Fix compilation in o.e.m.t.SimpleSortTests --- .../java/org/elasticsearch/messy/tests/SimpleSortTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index a5c5f8aa2fe..db8a13c5ab8 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -469,7 +469,7 @@ public class SimpleSortTests extends ESIntegTestCase { } public void testSimpleSorts() throws Exception { - Random random = getRandom(); + Random random = random(); assertAcked(prepareCreate("test") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() @@ -496,7 +496,7 @@ public class SimpleSortTests extends ESIntegTestCase { .endObject()); builders.add(builder); } - Randomness.shuffle(builders); + Collections.shuffle(builders, random); for (IndexRequestBuilder builder : builders) { builder.execute().actionGet(); if (random.nextBoolean()) { From 9fd843748e424cac728bb08eff1e71c9ca897eb4 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 11 Dec 2015 18:02:15 +0100 Subject: [PATCH 16/26] Remove the `format` option of the `_source` field. This option allows forcing the xcontent type used to store the `_source` document. The default is to use the same format as the input. This commit makes this option ignored for 2.x indices and rejected for 3.0 indices.
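For illustration only, a hedged sketch of the kind of mapping affected (the index and type names below are hypothetical and not part of this patch): on an index created before 3.0 the `format` setting is now silently ignored, while creating an index on or after 3.0 with this mapping fails with a mapper parsing error for the unsupported parameter.

    # hypothetical request, not part of this patch
    PUT /my_index
    {
      "mappings": {
        "my_type": {
          "_source": { "format": "json" }
        }
      }
    }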
--- .../mapper/internal/SourceFieldMapper.java | 77 ++----------------- .../source/DefaultSourceMappingTests.java | 22 ++---- docs/reference/migration/migrate_3_0.asciidoc | 7 ++ 3 files changed, 21 insertions(+), 85 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index a844ab223aa..f9bcb31b406 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -24,14 +24,13 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -46,17 +45,13 @@ import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; -import java.io.BufferedInputStream; import java.io.IOException; -import java.io.InputStream; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; /** * @@ -70,8 +65,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static class Defaults { public static final String NAME = SourceFieldMapper.NAME; public static final boolean ENABLED = true; - public static final long COMPRESS_THRESHOLD = -1; - public static final String FORMAT = null; // default format is to use the one provided public static final MappedFieldType FIELD_TYPE = new SourceFieldType(); @@ -91,8 +84,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; - private String format = Defaults.FORMAT; - private String[] includes = null; private String[] excludes = null; @@ -105,11 +96,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { return this; } - public Builder format(String format) { - this.format = format; - return this; - } - public Builder includes(String[] includes) { this.includes = includes; return this; @@ -122,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { @Override public SourceFieldMapper build(BuilderContext context) { - return new SourceFieldMapper(enabled, format, includes, excludes, context.indexSettings()); + return new SourceFieldMapper(enabled, includes, excludes, context.indexSettings()); } } @@ -138,8 +124,8 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if ("format".equals(fieldName)) { - 
builder.format(nodeStringValue(fieldNode, null)); + } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) { + // ignore on old indices, reject on and after 3.0 iterator.remove(); } else if (fieldName.equals("includes")) { List values = (List) fieldNode; @@ -213,22 +199,15 @@ public class SourceFieldMapper extends MetadataFieldMapper { private final String[] includes; private final String[] excludes; - private String format; - - private XContentType formatContentType; - private SourceFieldMapper(Settings indexSettings) { - this(Defaults.ENABLED, Defaults.FORMAT, null, null, indexSettings); + this(Defaults.ENABLED, null, null, indexSettings); } - private SourceFieldMapper(boolean enabled, String format, - String[] includes, String[] excludes, Settings indexSettings) { + private SourceFieldMapper(boolean enabled, String[] includes, String[] excludes, Settings indexSettings) { super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored. this.enabled = enabled; this.includes = includes; this.excludes = excludes; - this.format = format; - this.formatContentType = format == null ? null : XContentType.fromRestContentType(format); this.complete = enabled && includes == null && excludes == null; } @@ -284,50 +263,11 @@ public class SourceFieldMapper extends MetadataFieldMapper { Tuple> mapTuple = XContentHelper.convertToMap(source, true); Map filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes); BytesStreamOutput bStream = new BytesStreamOutput(); - XContentType contentType = formatContentType; - if (contentType == null) { - contentType = mapTuple.v1(); - } + XContentType contentType = mapTuple.v1(); XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); - } else if (formatContentType != null) { - // see if we need to convert the content type - Compressor compressor = CompressorFactory.compressor(source); - if (compressor != null) { - InputStream compressedStreamInput = compressor.streamInput(source.streamInput()); - if (compressedStreamInput.markSupported() == false) { - compressedStreamInput = new BufferedInputStream(compressedStreamInput); - } - XContentType contentType = XContentFactory.xContentType(compressedStreamInput); - if (contentType != formatContentType) { - // we need to reread and store back, compressed.... - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } else { - compressedStreamInput.close(); - } - } else { - XContentType contentType = XContentFactory.xContentType(source); - if (contentType != formatContentType) { - // we need to reread and store back - // we need to reread and store back, compressed.... 
- BytesStreamOutput bStream = new BytesStreamOutput(); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } - } } if (!source.hasArray()) { source = source.toBytesArray(); @@ -352,9 +292,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (includeDefaults || enabled != Defaults.ENABLED) { builder.field("enabled", enabled); } - if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) { - builder.field("format", format); - } if (includes != null) { builder.field("includes", includes); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index bbc1847dc08..364e9f2063f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.*; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; @@ -63,25 +64,16 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE)); } - public void testJsonFormat() throws Exception { + public void testFormatBackCompat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").endObject() .endObject().endObject().string(); + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_2_0)) + .build(); - DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse(mapping); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); - - documentMapper = parser.parse(mapping); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); + DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser(); + parser.parse(mapping); // no exception } public void testIncludes() throws Exception { diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index fcee4105160..6588f22a85a 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -213,6 +213,13 @@ float by default instead of a double. The reasoning is that floats should be more than enough for most cases but would decrease storage requirements significantly. 
+==== `_source`'s `format` option + +The `_source` mapping does not support the `format` option anymore. This option +will still be accepted for indices created before the upgrade to 3.0 for backward +compatibility, but it will have no effect. Indices created on or after 3.0 will +reject this option. + [[breaking_30_plugins]] === Plugin changes From a087d557f9920bac8f3d3309677416db292cd926 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 11 Dec 2015 12:10:43 -0500 Subject: [PATCH 17/26] Fix NullPointerException in o.e.c.Randomness --- core/src/main/java/org/elasticsearch/common/Randomness.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java index 770b6554cf6..cc0af86cbba 100644 --- a/core/src/main/java/org/elasticsearch/common/Randomness.java +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -114,7 +114,7 @@ public final class Randomness { private static Random getWithoutSeed() { assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; - if (LOCAL.get() == null) { + if (LOCAL == null) { byte[] bytes = SR.generateSeed(8); long accumulator = 0; for (int i = 0; i < bytes.length; i++) { From 0f51d81410e2e941518caa8a9f077c7250062be1 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 11 Dec 2015 20:00:38 +0100 Subject: [PATCH 18/26] Fix settings for TribeUnitTests. --- .../src/test/java/org/elasticsearch/tribe/TribeUnitTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 01aaad1ca9d..1ad972e10ef 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -62,14 +62,14 @@ public class TribeUnitTests extends ESTestCase { Settings.builder() .put(baseSettings) .put("cluster.name", "tribe1") - .put("node.name", "tribe1_node") + .put("name", "tribe1_node") .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) .build()).start(); tribe2 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") - .put("node.name", "tribe2_node") + .put("name", "tribe2_node") .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong()) .build()).start(); } From 9ab168dbf6b7069022b9a1a2ae6df6d2de42a815 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 11 Dec 2015 20:07:57 +0100 Subject: [PATCH 19/26] Removes all references to the `missing` query in the docs --- docs/java-api/query-dsl/term-level-queries.asciidoc | 10 ---------- .../mapping/fields/field-names-field.asciidoc | 8 +++----- docs/reference/mapping/params/null-value.asciidoc | 4 ---- docs/reference/query-dsl/term-level-queries.asciidoc | 10 ---------- docs/reference/redirects.asciidoc | 9 --------- 5 files changed, 3 insertions(+), 38 deletions(-) diff --git a/docs/java-api/query-dsl/term-level-queries.asciidoc b/docs/java-api/query-dsl/term-level-queries.asciidoc index 44fc3639072..e7d5ad4e52b 100644 --- a/docs/java-api/query-dsl/term-level-queries.asciidoc +++ b/docs/java-api/query-dsl/term-level-queries.asciidoc @@ -30,11 +30,6 @@ The queries in this group are: Find documents where the field specified contains any non-null value.
-<>:: - - Find documents where the field specified does is missing or contains only - `null` values. - <>:: Find documents where the field specified contains terms which being with @@ -75,8 +70,6 @@ include::range-query.asciidoc[] include::exists-query.asciidoc[] -include::missing-query.asciidoc[] - include::prefix-query.asciidoc[] include::wildcard-query.asciidoc[] @@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[] include::type-query.asciidoc[] include::ids-query.asciidoc[] - - - diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index 2c40f72bbea..bafc3e3f7d9 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -3,9 +3,8 @@ The `_field_names` field indexes the names of every field in a document that contains any value other than `null`. This field is used by the -<> and <> -queries to find documents that either have or don't have any non-+null+ value -for a particular field. +<> query to find documents that +either have or don't have any non-+null+ value for a particular field. The value of the `_field_name` field is accessible in queries, aggregations, and scripts: @@ -49,7 +48,6 @@ GET my_index/_search -------------------------- // AUTOSENSE -<1> Querying on the `_field_names` field (also see the <> and <> queries) +<1> Querying on the `_field_names` field (also see the <> query) <2> Aggregating on the `_field_names` field <3> Accessing the `_field_names` field in scripts (inline scripts must be <> for this example to work) - diff --git a/docs/reference/mapping/params/null-value.asciidoc b/docs/reference/mapping/params/null-value.asciidoc index 552ce66ded8..4d70d4a6ac5 100644 --- a/docs/reference/mapping/params/null-value.asciidoc +++ b/docs/reference/mapping/params/null-value.asciidoc @@ -53,7 +53,3 @@ IMPORTANT: The `null_value` needs to be the same datatype as the field. For instance, a `long` field cannot have a string `null_value`. String fields which are `analyzed` will also pass the `null_value` through the configured analyzer. - -Also see the <> for its `null_value` support. - - diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index 7e9f5e5ca3e..9c28a727b33 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -30,11 +30,6 @@ The queries in this group are: Find documents where the field specified contains any non-null value. -<>:: - - Find documents where the field specified does is missing or contains only - `null` values. - <>:: Find documents where the field specified contains terms which being with @@ -75,8 +70,6 @@ include::range-query.asciidoc[] include::exists-query.asciidoc[] -include::missing-query.asciidoc[] - include::prefix-query.asciidoc[] include::wildcard-query.asciidoc[] @@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[] include::type-query.asciidoc[] include::ids-query.asciidoc[] - - - diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 6de6699984e..823bdb70d07 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -96,14 +96,6 @@ The `exists` filter has been replaced by the <>. It beh as a query in ``query context'' and as a filter in ``filter context'' (see <>). -[role="exclude",id="query-dsl-missing-filter"] -=== Missing Filter - -The `missing` filter has been replaced by the <>. 
It behaves -as a query in ``query context'' and as a filter in ``filter context'' (see -<>). - - [role="exclude",id="query-dsl-geo-bounding-box-filter"] === Geo Bounding Box Filter @@ -451,4 +443,3 @@ The `not` query has been replaced by using a `mustNot` clause in a Boolean query === Nested type The docs for the `nested` field datatype have moved to <>. - From 5f3d807f6127fcaad9923e25923f402b0557487f Mon Sep 17 00:00:00 2001 From: Nicholas Knize Date: Wed, 2 Dec 2015 10:09:00 -0600 Subject: [PATCH 20/26] Update geo_shape/query docs, fix TermStrategy defaults This commit adds the following: * SpatialStrategy documentation to the geo-shape reference docs. * Updates relation documentation in the geo-shape-query reference docs. * Updates GeoShapeFieldMapper to set points_only to true if TERM strategy is used (to be consistent with documentation) --- .../index/mapper/geo/GeoShapeFieldMapper.java | 10 ++- .../mapping/types/geo-shape.asciidoc | 64 +++++++++++++++---- .../query-dsl/geo-shape-query.asciidoc | 5 +- 3 files changed, 64 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 7e784324f36..71b6d89610f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -193,7 +193,8 @@ public class GeoShapeFieldMapper extends FieldMapper { } else if (Names.COERCE.equals(fieldName)) { builder.coerce(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) { + } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) + && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode)); iterator.remove(); } @@ -284,6 +285,7 @@ public class GeoShapeFieldMapper extends FieldMapper { termStrategy = new TermQueryPrefixTreeStrategy(prefixTree, names().indexName()); termStrategy.setDistErrPct(distanceErrorPct()); defaultStrategy = resolveStrategy(strategyName); + defaultStrategy.setPointsOnly(pointsOnly); } @Override @@ -347,6 +349,9 @@ public class GeoShapeFieldMapper extends FieldMapper { public void setStrategyName(String strategyName) { checkIfFrozen(); this.strategyName = strategyName; + if (this.strategyName.equals(SpatialStrategy.TERM.getStrategyName())) { + this.pointsOnly = true; + } } public boolean pointsOnly() { @@ -406,7 +411,6 @@ public class GeoShapeFieldMapper extends FieldMapper { public PrefixTreeStrategy resolveStrategy(String strategyName) { if (SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) { - recursiveStrategy.setPointsOnly(pointsOnly()); return recursiveStrategy; } if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) { @@ -446,7 +450,7 @@ public class GeoShapeFieldMapper extends FieldMapper { } shape = shapeBuilder.build(); } - if (fieldType().defaultStrategy() instanceof RecursivePrefixTreeStrategy && fieldType().pointsOnly() && !(shape instanceof Point)) { + if (fieldType().pointsOnly() && !(shape instanceof Point)) { throw new MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " + ((shape instanceof JtsGeometry) ?
((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found"); } diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index d974847a98a..1f0c76e1b93 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -17,13 +17,13 @@ The geo_shape mapping maps geo_json geometry objects to the geo_shape type. To enable it, users must explicitly map fields to the geo_shape type. -[cols="<,<",options="header",] +[cols="<,<,<",options="header",] |======================================================================= -|Option |Description +|Option |Description| Default |`tree` |Name of the PrefixTree implementation to be used: `geohash` for -GeohashPrefixTree and `quadtree` for QuadPrefixTree. Defaults to -`geohash`. +GeohashPrefixTree and `quadtree` for QuadPrefixTree. +| `geohash` |`precision` |This parameter may be used instead of `tree_levels` to set an appropriate value for the `tree_levels` parameter. The value @@ -31,7 +31,8 @@ specifies the desired precision and Elasticsearch will calculate the best tree_levels value to honor this precision. The value should be a number followed by an optional distance unit. Valid distance units include: `in`, `inch`, `yd`, `yard`, `mi`, `miles`, `km`, `kilometers`, -`m`,`meters` (default), `cm`,`centimeters`, `mm`, `millimeters`. +`m`,`meters`, `cm`,`centimeters`, `mm`, `millimeters`. +| `meters` |`tree_levels` |Maximum number of layers to be used by the PrefixTree. This can be used to control the precision of shape representations and @@ -41,27 +42,40 @@ certain level of understanding of the underlying implementation, users may use the `precision` parameter instead. However, Elasticsearch only uses the tree_levels parameter internally and this is what is returned via the mapping API even if you use the precision parameter. +| `50m` + +|`strategy` |The strategy parameter defines the approach for how to +represent shapes at indexing and search time. It also influences the +capabilities available so it is recommended to let Elasticsearch set +this parameter automatically. There are two strategies available: +`recursive` and `term`. Term strategy supports point types only (the +`points_only` parameter will be automatically set to true) while +Recursive strategy supports all shape types. (IMPORTANT: see +<> for more detailed information) +| `recursive` |`distance_error_pct` |Used as a hint to the PrefixTree about how precise it should be. Defaults to 0.025 (2.5%) with 0.5 as the maximum -supported value. PERFORMANCE NOTE: This value will be default to 0 if a `precision` or +supported value. PERFORMANCE NOTE: This value will default to 0 if a `precision` or `tree_level` definition is explicitly defined. This guarantees spatial precision at the level defined in the mapping. This can lead to significant memory usage for high resolution shapes with low error (e.g., large shapes at 1m with < 0.001 error). To improve indexing performance (at the cost of query accuracy) explicitly define `tree_level` or `precision` along with a reasonable `distance_error_pct`, noting that large shapes will have greater false positives. +| `0.025` |`orientation` |Optionally define how to interpret vertex order for polygons / multipolygons. This parameter defines one of two coordinate system rules (Right-hand or Left-hand) each of which can be specified in three -different ways. 1. 
Right-hand rule (default): `right`, `ccw`, `counterclockwise`, +different ways. 1. Right-hand rule: `right`, `ccw`, `counterclockwise`, 2. Left-hand rule: `left`, `cw`, `clockwise`. The default orientation (`counterclockwise`) complies with the OGC standard which defines outer ring vertices in counterclockwise order with inner ring(s) vertices (holes) in clockwise order. Setting this parameter in the geo_shape mapping explicitly sets vertex order for the coordinate list of a geo_shape field but can be overridden in each individual GeoJSON document. +| `ccw` |`points_only` |Setting this option to `true` (defaults to `false`) configures the `geo_shape` field type for point shapes only (NOTE: Multi-Points are not @@ -70,18 +84,21 @@ yet supported). This optimizes index and search performance for the `geohash` and `quadtree` when it is known that only points will be indexed. At present `geo_shape` queries can not be executed on `geo_point` field types. This option bridges the gap by improving point performance on a `geo_shape` field so that `geo_shape` queries are optimal on a point only field. +| `false` |======================================================================= +[[prefix-trees]] [float] ==== Prefix trees To efficiently represent shapes in the index, Shapes are converted into -a series of hashes representing grid squares using implementations of a -PrefixTree. The tree notion comes from the fact that the PrefixTree uses -multiple grid layers, each with an increasing level of precision to -represent the Earth. +a series of hashes representing grid squares (commonly referred to as "rasters") +using implementations of a PrefixTree. The tree notion comes from the fact that +the PrefixTree uses multiple grid layers, each with an increasing level of +precision to represent the Earth. This can be thought of as increasing the level +of detail of a map or image at higher zoom levels. Multiple PrefixTree implementations are provided: @@ -100,6 +117,29 @@ longitude the resulting hash is a bit set. A tree level in a quad tree represents 2 bits in this bit set, one for each coordinate. The maximum amount of levels for the quad trees in Elasticsearch is 50. +[[spatial-strategy]] +[float] +===== Spatial strategies +The PrefixTree implementations rely on a SpatialStrategy for decomposing +the provided Shape(s) into approximated grid squares. Each strategy answers +the following: + +* What type of Shapes can be indexed? +* What types of Query Operations and Shapes can be used? +* Does it support more than one Shape per field? + +The following Strategy implementations (with corresponding capabilities) +are provided: + +[cols="<,<,<,<",options="header",] +|======================================================================= +|Strategy |Supported Shapes |Supported Queries |Multiple Shapes + +|`recursive` |<<input-structure,All>> |`INTERSECTS`, `DISJOINT`, `WITHIN`, `CONTAINS` |Yes +|`term` |<<point,Points>> |`INTERSECTS` |Yes + +|======================================================================= + [float] ===== Accuracy @@ -149,6 +189,7 @@ between index size and a reasonable level of precision of 50m at the equator. This allows for indexing tens of millions of shapes without overly bloating the resulting index too much relative to the input size. +[[input-structure]] [float] ==== Input Structure @@ -189,6 +230,7 @@ differs from many Geospatial APIs (e.g., Google Maps) that generally use the colloquial latitude, longitude (Y, X).
============================================= +[[point]] [float] ===== http://geojson.org/geojson-spec.html#id2[Point] diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index a6d13562bb0..d389380b781 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -104,7 +104,10 @@ shape: ==== Spatial Relations -The Query supports the following spatial relations: +The <<spatial-strategy,strategy>> mapping parameter determines +which spatial relation operators may be used at search time. + +The following is a complete list of spatial relation operators available: * `INTERSECTS` - (default) Return all documents whose `geo_shape` field intersects the query geometry. From a42823ad459eef6d07ea005d9b63074ad79ca56c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 11 Dec 2015 19:03:16 -0500 Subject: [PATCH 21/26] Improve ThreadLocal handling in o.e.c.Randomness This commit improves the handling of ThreadLocal Random instance allocation in o.e.c.Randomness. - the seed per instance is no longer fixed - a non-dangerous race to create the ThreadLocal instance has been removed - encapsulated all state into a static nested class for safe and lazy instantiation --- .../org/elasticsearch/common/Randomness.java | 29 ++++++++++--------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java index cc0af86cbba..e912a9977f1 100644 --- a/core/src/main/java/org/elasticsearch/common/Randomness.java +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -43,7 +43,6 @@ import java.util.Random; * DiscoveryService#SETTING_DISCOVERY_SEED)). */ public final class Randomness { - private static final SecureRandom SR = new SecureRandom(); private static final Method currentMethod; private static final Method getRandomMethod; @@ -110,23 +109,27 @@ public final class Randomness { } } - private static ThreadLocal<Random> LOCAL; - private static Random getWithoutSeed() { assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; - if (LOCAL == null) { - byte[] bytes = SR.generateSeed(8); - long accumulator = 0; - for (int i = 0; i < bytes.length; i++) { - accumulator = (accumulator << 8) + bytes[i] & 0xFFL; - } - final long seed = accumulator; - LOCAL = ThreadLocal.withInitial(() -> new Random(seed)); - } - return LOCAL.get(); + return RandomnessHelper.LOCAL.get(); } public static void shuffle(List<?> list) { Collections.shuffle(list, get()); } + + private static class RandomnessHelper { + private static final SecureRandom SR = new SecureRandom(); + + private static final ThreadLocal<Random> LOCAL = ThreadLocal.withInitial( + () -> { + byte[] bytes = SR.generateSeed(8); + long accumulator = 0; + for (int i = 0; i < bytes.length; i++) { + accumulator = (accumulator << 8) + bytes[i] & 0xFFL; + } + return new Random(accumulator); + } + ); + } } From 5c8a0da1fd12f16ab0cb26f479465059ada68f87 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 11 Dec 2015 17:36:08 -0800 Subject: [PATCH 22/26] Build: Change rest integ tests to not have hardcoded ports This change removes hardcoded ports from cluster formation. It passes port 0 for http and transport, and then uses a special property to have the node log the ports used for http and transport (just for tests). This does not yet work for multi node tests.
This brings us one step closer to working with --parallel. --- .../junit4/RandomizedTestingTask.groovy | 8 ++-- .../gradle/test/ClusterConfiguration.groovy | 6 +-- .../gradle/test/ClusterFormationTasks.groovy | 43 ++++++++++++----- .../elasticsearch/gradle/test/NodeInfo.groovy | 23 +++++++--- .../gradle/test/RestIntegTestTask.groovy | 4 +- .../elasticsearch/gradle/test/RunTask.groovy | 2 +- .../java/org/elasticsearch/node/Node.java | 46 +++++++++++++++++++ .../transport/netty/NettyTransport.java | 6 +++ .../smoketest/ESSmokeClientTestCase.java | 20 ++++---- .../elasticsearch/test/ESIntegTestCase.java | 17 +++---- 10 files changed, 126 insertions(+), 49 deletions(-) diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy index 33e16feb44a..ccb5d5904bf 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy @@ -78,7 +78,7 @@ class RandomizedTestingTask extends DefaultTask { @Input String argLine = null - Map<String, String> systemProperties = new HashMap<>() + Map<String, Object> systemProperties = new HashMap<>() PatternFilterable patternSet = new PatternSet() RandomizedTestingTask() { @@ -100,7 +100,7 @@ class RandomizedTestingTask extends DefaultTask { jvmArgs.add(argument) } - void systemProperty(String property, String value) { + void systemProperty(String property, Object value) { systemProperties.put(property, value) } @@ -245,8 +245,8 @@ class RandomizedTestingTask extends DefaultTask { exclude(name: excludePattern) } } - for (Map.Entry<String, String> prop : systemProperties) { - sysproperty key: prop.getKey(), value: prop.getValue() + for (Map.Entry<String, Object> prop : systemProperties) { + sysproperty key: prop.getKey(), value: prop.getValue().toString() } makeListeners() } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 8bc80da74b5..fa23299cee4 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -33,10 +33,10 @@ class ClusterConfiguration { int numNodes = 1 @Input - int baseHttpPort = 9400 + int httpPort = 0 @Input - int baseTransportPort = 9500 + int transportPort = 0 @Input boolean daemonize = true @@ -55,7 +55,7 @@ @Input Closure waitCondition = { NodeInfo node, AntBuilder ant -> File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://localhost:${node.httpPort()}", + ant.get(src: "http://${node.httpUri()}", dest: tmpFile.toString(), ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task retries: 10) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index e62175b743e..08976dbdb39 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -38,8 +38,10 @@ class ClusterFormationTasks { /** * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
+ * + * Returns an object that will resolve at execution time of the given task to a uri for the cluster. */ - static void setup(Project project, Task task, ClusterConfiguration config) { + static Object setup(Project project, Task task, ClusterConfiguration config) { if (task.getEnabled() == false) { // no need to add cluster formation tasks if the task won't run! return @@ -55,6 +57,9 @@ class ClusterFormationTasks { Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks) task.dependsOn(wait) + + // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests + return "${-> nodes[0].transportUri()}" } /** Adds a dependency on the given distribution */ @@ -200,17 +205,24 @@ class ClusterFormationTasks { /** Adds a task to write elasticsearch.yml for the given node configuration */ static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) { Map esConfig = [ - 'cluster.name' : node.clusterName, - 'http.port' : node.httpPort(), - 'transport.tcp.port' : node.transportPort(), - 'pidfile' : node.pidFile, - 'discovery.zen.ping.unicast.hosts': (0.. logger.error("| ${line}") } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index 337eea3de97..b369d35c03a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -43,6 +43,12 @@ class NodeInfo { /** the pid file the node will use */ File pidFile + /** a file written by elasticsearch containing the ports of each bound address for http */ + File httpPortsFile + + /** a file written by elasticsearch containing the ports of each bound address for transport */ + File transportPortsFile + /** elasticsearch home dir */ File homeDir @@ -92,6 +98,10 @@ class NodeInfo { homeDir = homeDir(baseDir, config.distribution) confDir = confDir(baseDir, config.distribution) configFile = new File(confDir, 'elasticsearch.yml') + // even for rpm/deb, the logs are under home because we dont start with real services + File logsDir = new File(homeDir, 'logs') + httpPortsFile = new File(logsDir, 'http.ports') + transportPortsFile = new File(logsDir, 'transport.ports') cwd = new File(baseDir, "cwd") failedMarker = new File(cwd, 'run.failed') startLog = new File(cwd, 'run.log') @@ -119,6 +129,7 @@ class NodeInfo { 'JAVA_HOME' : project.javaHome, 'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc ] + args.add("-Des.tests.portsfile=true") args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" }) for (Map.Entry property : System.properties.entrySet()) { if (property.getKey().startsWith('es.')) { @@ -159,14 +170,14 @@ class NodeInfo { wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8') } - /** Returns the http port for this node */ - int httpPort() { - return config.baseHttpPort + nodeNum + /** Returns an address and port suitable for a uri to connect to this node over http */ + String httpUri() { + return httpPortsFile.readLines("UTF-8").get(0) } - /** Returns the transport port for this node */ - int transportPort() { - return config.baseTransportPort + nodeNum + /** Returns an address and port suitable for a uri to connect to this node over transport protocol */ + String transportUri() { + return transportPortsFile.readLines("UTF-8").get(0) } /** Returns 
the directory elasticsearch home is contained in for the given distribution */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index cd43cd2ca67..24bd57a3a59 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -57,12 +57,12 @@ public class RestIntegTestTask extends RandomizedTestingTask { RestSpecHack.configureDependencies(project) project.afterEvaluate { dependsOn(RestSpecHack.configureTask(project, includePackaged)) - systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}") } // this must run after all projects have been configured, so we know any project // references can be accessed as a fully configured project.gradle.projectsEvaluated { - ClusterFormationTasks.setup(project, this, clusterConfig) + Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig) + systemProperty('tests.cluster', clusterUri) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy index 32469218a5b..842ef8c35cd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy @@ -8,7 +8,7 @@ import org.gradle.util.ConfigureUtil public class RunTask extends DefaultTask { - ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false) + ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false) public RunTask() { description = "Runs elasticsearch with '${project.path}'" diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index f4bc34a91e2..aac0b0cd9d9 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.common.StopWatch; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; @@ -42,11 +43,14 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryService; @@ -59,6 +63,7 @@ import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; 
import org.elasticsearch.http.HttpServer; import org.elasticsearch.http.HttpServerModule; +import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -97,7 +102,15 @@ import org.elasticsearch.tribe.TribeService; import org.elasticsearch.watcher.ResourceWatcherModule; import org.elasticsearch.watcher.ResourceWatcherService; +import java.io.BufferedWriter; import java.io.IOException; +import java.net.Inet6Address; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.file.CopyOption; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -274,6 +287,15 @@ public class Node implements Releasable { injector.getInstance(ResourceWatcherService.class).start(); injector.getInstance(TribeService.class).start(); + if (System.getProperty("es.tests.portsfile", "false").equals("true")) { + if (settings.getAsBoolean("http.enabled", true)) { + HttpServerTransport http = injector.getInstance(HttpServerTransport.class); + writePortsFile("http", http.boundAddress()); + } + TransportService transport = injector.getInstance(TransportService.class); + writePortsFile("transport", transport.boundAddress()); + } + logger.info("started"); return this; @@ -425,4 +447,28 @@ public class Node implements Releasable { public Injector injector() { return this.injector; } + + @SuppressForbidden(reason = "Need to specify port and pass to formatter") + private void writePortsFile(String type, BoundTransportAddress boundAddress) { + Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); + try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { + for (TransportAddress address : boundAddress.boundAddresses()) { + InetSocketAddress socketAddress = new InetSocketAddress(address.getAddress(), address.getPort()); + if (socketAddress.getAddress() instanceof Inet6Address && + socketAddress.getAddress().isLinkLocalAddress()) { + // no link local, just causes problems + continue; + } + writer.write(NetworkAddress.formatAddress(socketAddress) + "\n"); + } + } catch (IOException e) { + throw new RuntimeException("Failed to write ports file", e); + } + Path portsFile = environment.logsFile().resolve(type + ".ports"); + try { + Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE); + } catch (IOException e) { + throw new RuntimeException("Failed to rename ports file", e); + } + } } diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index ab39a35d224..2f1c52a0ac2 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -87,6 +87,7 @@ import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory; import org.jboss.netty.util.HashedWheelTimer; import java.io.IOException; +import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -763,6 +764,11 @@ public class NettyTransport extends AbstractLifecycleComponent implem // close the channel as safe measure, which will cause a node to be disconnected if relevant ctx.getChannel().close(); 
disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); + } else if (e.getCause() instanceof BindException) { + logger.trace("bind exception caught on transport layer [{}]", e.getCause(), ctx.getChannel()); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + ctx.getChannel().close(); + disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); } else if (e.getCause() instanceof CancelledKeyException) { logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); // close the channel as safe measure, which will cause a node to be disconnected if relevant diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java index 6e912cfab22..45cba5871e0 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java @@ -20,6 +20,7 @@ package org.elasticsearch.smoketest; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.SuppressForbidden; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; @@ -34,7 +35,10 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import java.io.IOException; import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.URL; import java.net.UnknownHostException; import java.nio.file.Path; import java.util.Locale; @@ -103,20 +107,14 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase { return client; } - private static Client startClient() throws UnknownHostException { + @SuppressForbidden(reason = "Must use port when creating address") + private static Client startClient() throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { - String[] split = stringAddress.split(":"); - if (split.length < 2) { - throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid"); - } - try { - transportAddresses[i++] = new InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1])); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]"); - } + URL url = new URL("http://" + stringAddress); + transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort())); } return startClient(createTempDir(), transportAddresses); } @@ -125,7 +123,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase { if (client == null) { try { client = startClient(); - } catch (UnknownHostException e) { + } catch (IOException e) { logger.error("can not start the client", e); } assertThat(client, notNullValue()); diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index fafff57c92f..51d9a5c55ab 100644 --- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ 
-28,6 +28,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.impl.client.HttpClients; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.SuppressForbidden; import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -136,6 +137,8 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.MalformedURLException; +import java.net.URL; import java.net.UnknownHostException; import java.nio.file.DirectoryStream; import java.nio.file.Files; @@ -1727,20 +1730,14 @@ public abstract class ESIntegTestCase extends ESTestCase { return Settings.EMPTY; } - private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws UnknownHostException { + @SuppressForbidden(reason = "Must use port when creating address") + private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { - String[] split = stringAddress.split(":"); - if (split.length < 2) { - throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid"); - } - try { - transportAddresses[i++] = new InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1])); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]"); - } + URL url = new URL("http://" + stringAddress); + transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort())); } return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); } From 7cbb955d19128987798e73b98e16b6ba10caaf72 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 11 Dec 2015 18:08:39 -0800 Subject: [PATCH 23/26] Move forbidden suppression for InetSocketAddress to its own method --- core/src/main/java/org/elasticsearch/node/Node.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index aac0b0cd9d9..5cdd32b3f97 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -448,12 +448,12 @@ public class Node implements Releasable { return this.injector; } - @SuppressForbidden(reason = "Need to specify port and pass to formatter") + /** Writes a file to the logs dir containing the ports for the given transport type */ private void writePortsFile(String type, BoundTransportAddress boundAddress) { Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { for (TransportAddress address : boundAddress.boundAddresses()) { - InetSocketAddress socketAddress = new InetSocketAddress(address.getAddress(), address.getPort()); + InetSocketAddress socketAddress = buildAddress(address.getAddress(), address.getPort()); if (socketAddress.getAddress() instanceof Inet6Address && socketAddress.getAddress().isLinkLocalAddress()) { // no link local, just causes problems @@ 
From 7cbb955d19128987798e73b98e16b6ba10caaf72 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Fri, 11 Dec 2015 18:08:39 -0800
Subject: [PATCH 23/26] Move forbidden suppression for InetSocketAddress to its own method

---
 core/src/main/java/org/elasticsearch/node/Node.java | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java
index aac0b0cd9d9..5cdd32b3f97 100644
--- a/core/src/main/java/org/elasticsearch/node/Node.java
+++ b/core/src/main/java/org/elasticsearch/node/Node.java
@@ -448,12 +448,12 @@ public class Node implements Releasable {
         return this.injector;
     }
 
-    @SuppressForbidden(reason = "Need to specify port and pass to formatter")
+    /** Writes a file to the logs dir containing the ports for the given transport type */
     private void writePortsFile(String type, BoundTransportAddress boundAddress) {
         Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp");
         try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) {
             for (TransportAddress address : boundAddress.boundAddresses()) {
-                InetSocketAddress socketAddress = new InetSocketAddress(address.getAddress(), address.getPort());
+                InetSocketAddress socketAddress = buildAddress(address.getAddress(), address.getPort());
                 if (socketAddress.getAddress() instanceof Inet6Address &&
                     socketAddress.getAddress().isLinkLocalAddress()) {
                     // no link local, just causes problems
@@ -471,4 +471,9 @@ public class Node implements Releasable {
             throw new RuntimeException("Failed to rename ports file", e);
         }
     }
+
+    @SuppressForbidden(reason = "Need to specify address and port")
+    private static InetSocketAddress buildAddress(String address, int port) {
+        return new InetSocketAddress(address, port);
+    }
 }

From a0c69fe7f946c54e06225ae9b447f827c59baee0 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Fri, 11 Dec 2015 18:20:09 -0800
Subject: [PATCH 24/26] Remove forbidden suppressions for InetSocketAddress

---
 core/src/main/java/org/elasticsearch/node/Node.java | 13 ++++---------
 .../smoketest/ESSmokeClientTestCase.java             |  4 ++--
 .../org/elasticsearch/test/ESIntegTestCase.java      |  4 ++--
 3 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java
index 5cdd32b3f97..d3f6367cac0 100644
--- a/core/src/main/java/org/elasticsearch/node/Node.java
+++ b/core/src/main/java/org/elasticsearch/node/Node.java
@@ -105,6 +105,7 @@ import org.elasticsearch.watcher.ResourceWatcherService;
 import java.io.BufferedWriter;
 import java.io.IOException;
 import java.net.Inet6Address;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.nio.charset.Charset;
 import java.nio.file.CopyOption;
@@ -453,13 +454,12 @@ public class Node implements Releasable {
         Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp");
         try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) {
             for (TransportAddress address : boundAddress.boundAddresses()) {
-                InetSocketAddress socketAddress = buildAddress(address.getAddress(), address.getPort());
-                if (socketAddress.getAddress() instanceof Inet6Address &&
-                    socketAddress.getAddress().isLinkLocalAddress()) {
+                InetAddress inetAddress = InetAddress.getByName(address.getAddress());
+                if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) {
                     // no link local, just causes problems
                     continue;
                 }
-                writer.write(NetworkAddress.formatAddress(socketAddress) + "\n");
+                writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n");
             }
         } catch (IOException e) {
             throw new RuntimeException("Failed to write ports file", e);
@@ -471,9 +471,4 @@ public class Node implements Releasable {
             throw new RuntimeException("Failed to rename ports file", e);
         }
     }
-
-    @SuppressForbidden(reason = "Need to specify address and port")
-    private static InetSocketAddress buildAddress(String address, int port) {
-        return new InetSocketAddress(address, port);
-    }
 }
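With the suppression gone, writePortsFile resolves the bound address up front via InetAddress.getByName and filters IPv6 link-local entries before formatting. In isolation the filter reduces to the following (a standalone sketch; the class and method names are illustrative):

    import java.net.Inet6Address;
    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public final class LinkLocalFilterSketch {
        // Resolve the bound address once, then skip IPv6 link-local entries
        // (fe80::/10), which would only confuse consumers of the ports file.
        static boolean shouldWrite(String address) throws UnknownHostException {
            InetAddress inetAddress = InetAddress.getByName(address);
            return !(inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress());
        }
    }

The two test helpers below get the same treatment: resolve the host once, then build the non-resolving InetSocketAddress from the resolved InetAddress.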
diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
index 45cba5871e0..95df2d04458 100644
--- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
+++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
@@ -107,14 +107,14 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase {
         return client;
     }
 
-    @SuppressForbidden(reason = "Must use port when creating address")
     private static Client startClient() throws IOException {
         String[] stringAddresses = clusterAddresses.split(",");
         TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
         int i = 0;
         for (String stringAddress : stringAddresses) {
             URL url = new URL("http://" + stringAddress);
-            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort()));
+            InetAddress inetAddress = InetAddress.getByName(url.getHost());
+            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
         }
         return startClient(createTempDir(), transportAddresses);
     }
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 51d9a5c55ab..37a8fd388b8 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -1730,14 +1730,14 @@ public abstract class ESIntegTestCase extends ESTestCase {
         return Settings.EMPTY;
     }
 
-    @SuppressForbidden(reason = "Must use port when creating address")
     private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException {
         String[] stringAddresses = clusterAddresses.split(",");
         TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
         int i = 0;
         for (String stringAddress : stringAddresses) {
             URL url = new URL("http://" + stringAddress);
-            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(url.getHost(), url.getPort()));
+            InetAddress inetAddress = InetAddress.getByName(url.getHost());
+            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
         }
         return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses);
     }

From 2602439a51f109e059123b232ec60c9d816c60dd Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sat, 12 Dec 2015 17:51:46 -0500
Subject: [PATCH 25/26] Delegate to j.u.c.ThreadLocalRandom for Random instances

---
 .../org/elasticsearch/common/Randomness.java | 19 ++-----------------
 1 file changed, 2 insertions(+), 17 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java
index e912a9977f1..dbfa8034b99 100644
--- a/core/src/main/java/org/elasticsearch/common/Randomness.java
+++ b/core/src/main/java/org/elasticsearch/common/Randomness.java
@@ -22,10 +22,10 @@ package org.elasticsearch.common;
 import org.elasticsearch.common.settings.Settings;
 
 import java.lang.reflect.Method;
-import java.security.SecureRandom;
 import java.util.Collections;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 /**
  * Provides factory methods for producing reproducible sources of
@@ -111,25 +111,10 @@ public final class Randomness {
 
     private static Random getWithoutSeed() {
         assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
-        return RandomnessHelper.LOCAL.get();
+        return ThreadLocalRandom.current();
     }
 
     public static void shuffle(List<?> list) {
         Collections.shuffle(list, get());
     }
-
-    private static class RandomnessHelper {
-        private static final SecureRandom SR = new SecureRandom();
-
-        private static final ThreadLocal<Random> LOCAL = ThreadLocal.withInitial(
-            () -> {
-                byte[] bytes = SR.generateSeed(8);
-                long accumulator = 0;
-                for (int i = 0; i < bytes.length; i++) {
-                    accumulator = (accumulator << 8) + bytes[i] & 0xFFL;
-                }
-                return new Random(accumulator);
-            }
-        );
-    }
 }
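The deleted RandomnessHelper was reimplementing what the JDK already provides: ThreadLocalRandom maintains one independently seeded generator per thread. Since ThreadLocalRandom extends java.util.Random, it drops straight into APIs such as Collections.shuffle. A minimal sketch of the resulting behavior (class name and sample data are illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.ThreadLocalRandom;

    public final class ThreadLocalRandomSketch {
        public static void main(String[] args) {
            // ThreadLocalRandom.current() returns the calling thread's own
            // generator, replacing the hand-rolled SecureRandom-seeded
            // ThreadLocal<Random> removed above.
            List<Integer> list = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5));
            Collections.shuffle(list, ThreadLocalRandom.current());
            System.out.println(list);
        }
    }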
From f520f8bfb97f8bbb9fa6205045facf1c12123cb3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Fri, 11 Dec 2015 14:40:50 +0100
Subject: [PATCH 26/26] Tests: Add test for parsing "_name" field in RangeQueryParser

---
 .../index/query/RangeQueryBuilderTests.java | 40 +++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
index bf580e55f46..4df799e9f37 100644
--- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
@@ -23,7 +23,9 @@ import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermRangeQuery;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.lucene.BytesRefs;
+import org.hamcrest.core.IsEqual;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 
@@ -353,4 +355,42 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase<RangeQueryBuilder>
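The forty added lines are truncated above. Judging from the subject and the new ParseFieldMatcher/IsEqual imports, the test parses a range query whose JSON carries a "_name" entry and asserts the query name survives parsing. A hypothetical sketch of such a test, not the committed code (the JSON, field names, and assertions are guesses):

    // Hypothetical sketch only -- the committed test body is cut off above.
    public void testNamedQueryParsing() throws IOException {
        String json =
                "{\n" +
                "  \"range\" : {\n" +
                "    \"timestamp\" : {\n" +
                "      \"from\" : \"2015-01-01\",\n" +
                "      \"to\" : \"2016-01-01\",\n" +
                "      \"_name\" : \"my_range\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        // parseQuery comes from AbstractQueryTestCase; queryName() should
        // round-trip the "_name" value from the JSON.
        RangeQueryBuilder parsed = (RangeQueryBuilder) parseQuery(json);
        assertThat(parsed.queryName(), IsEqual.equalTo("my_range"));
    }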