diff --git a/Vagrantfile b/Vagrantfile index 4f8ee7164f6..5b51f612eb3 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -42,7 +42,7 @@ Vagrant.configure(2) do |config| # debian and it works fine. config.vm.define "debian-8" do |config| config.vm.box = "elastic/debian-8-x86_64" - deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports' + deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports' end config.vm.define "centos-6" do |config| config.vm.box = "elastic/centos-6-x86_64" diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java index aba7fda1021..97fbda80dc6 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java @@ -31,7 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.gateway.GatewayAllocator; @@ -102,7 +102,7 @@ public final class Allocators { } public static DiscoveryNode newNode(String nodeId, Map attributes) { - return new DiscoveryNode("", nodeId, DummyTransportAddress.INSTANCE, attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER, + return new DiscoveryNode("", nodeId, LocalTransportAddress.buildUnique(), attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA), Version.CURRENT); 
} } diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml index fe726062706..706ef46ffa1 100644 --- a/buildSrc/src/main/resources/checkstyle.xml +++ b/buildSrc/src/main/resources/checkstyle.xml @@ -58,6 +58,8 @@ --> + + adding two nodes with the same host"); clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder() - .put(new DiscoveryNode("node1", "node1", "test1", "test1", DummyTransportAddress.INSTANCE, emptyMap(), + .put(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT)) - .put(new DiscoveryNode("node2", "node2", "test1", "test1", DummyTransportAddress.INSTANCE, emptyMap(), + .put(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -82,7 +82,7 @@ public class SameShardRoutingTests extends ESAllocationTestCase { logger.info("--> add another node, with a different host, replicas will be allocating"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(new DiscoveryNode("node3", "node3", "test2", "test2", DummyTransportAddress.INSTANCE, emptyMap(), + .put(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index be50c5f5331..56ca6381af9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.Index; @@ -110,9 +109,9 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { final Index index = metaData.index("test").getIndex(); ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), null, true, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); - DiscoveryNode node_0 = new DiscoveryNode("node_0", DummyTransportAddress.INSTANCE, Collections.emptyMap(), + DiscoveryNode node_0 = new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); - DiscoveryNode node_1 = new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Collections.emptyMap(), + DiscoveryNode node_1 = new DiscoveryNode("node_1", LocalTransportAddress.buildUnique(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); RoutingTable routingTable = RoutingTable.builder() @@ -149,9 +148,9 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); 
ImmutableOpenMap.Builder shardRoutingMap = ImmutableOpenMap.builder(); - DiscoveryNode node_0 = new DiscoveryNode("node_0", DummyTransportAddress.INSTANCE, Collections.emptyMap(), + DiscoveryNode node_0 = new DiscoveryNode("node_0", LocalTransportAddress.buildUnique(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); - DiscoveryNode node_1 = new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Collections.emptyMap(), + DiscoveryNode node_1 = new DiscoveryNode("node_1", LocalTransportAddress.buildUnique(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT); MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java index 99cde60f086..9957a6d3603 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.test.ESAllocationTestCase; import static java.util.Collections.emptyMap; @@ -50,7 +50,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase { .addAsNew(metaData.index("test_idx")) .build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", DummyTransportAddress.INSTANCE, + DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(), emptyMap(), 
emptySet(), Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes) diff --git a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 7907ad3e63c..23713832edf 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -94,7 +94,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable Exception e) { allNodesAcked.set(true); latch.countDown(); } @@ -127,8 +127,8 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); + public void onFailure(String source, Exception e) { + logger.error("failed to execute callback in test {}", e, source); onFailure.set(true); latch.countDown(); } @@ -165,7 +165,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable Exception e) { allNodesAcked.set(true); latch.countDown(); } @@ -198,8 +198,8 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); + public void onFailure(String source, Exception e) { + logger.error("failed to execute callback in test {}", e, source); onFailure.set(true); latch.countDown(); } @@ -240,7 +240,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable 
Exception e) { allNodesAcked.set(true); latch.countDown(); } @@ -272,8 +272,8 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); + public void onFailure(String source, Exception e) { + logger.error("failed to execute callback in test {}", e, source); onFailure.set(true); latch.countDown(); } @@ -313,7 +313,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onAllNodesAcked(@Nullable Throwable t) { + public void onAllNodesAcked(@Nullable Exception e) { allNodesAcked.set(true); latch.countDown(); } @@ -346,8 +346,8 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { - logger.error("failed to execute callback in test {}", t, source); + public void onFailure(String source, Exception e) { + logger.error("failed to execute callback in test {}", e, source); onFailure.set(true); latch.countDown(); } @@ -388,7 +388,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { invoked1.countDown(); fail(); } @@ -403,7 +403,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } @@ -458,7 +458,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { invoked3.countDown(); fail(); } @@ -473,7 +473,7 @@ public class ClusterServiceIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); diff --git 
a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java index 66f96f8cd3a..54f6233631b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -109,7 +109,7 @@ public class ClusterServiceTests extends ESTestCase { TimedClusterService timedClusterService = new TimedClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); - timedClusterService.setLocalNode(new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, emptyMap(), + timedClusterService.setLocalNode(new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override @@ -149,8 +149,8 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { - throw new RuntimeException(t); + public void onFailure(String source, Exception e) { + throw new RuntimeException(e); } }); @@ -163,7 +163,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { 
timedOut.countDown(); } @@ -183,8 +183,8 @@ public class ClusterServiceTests extends ESTestCase { final CountDownLatch allProcessed = new CountDownLatch(1); clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override - public void onFailure(String source, Throwable t) { - throw new RuntimeException(t); + public void onFailure(String source, Exception e) { + throw new RuntimeException(e); } @Override @@ -212,7 +212,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { taskFailed[0] = true; latch1.countDown(); } @@ -237,7 +237,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { taskFailed[0] = true; latch2.countDown(); } @@ -286,7 +286,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { } } ); @@ -326,9 +326,9 @@ public class ClusterServiceTests extends ESTestCase { ClusterStateTaskListener listener = new ClusterStateTaskListener() { @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure: [{}]", t, source); - failures.add(new Tuple<>(source, t)); + public void onFailure(String source, Exception e) { + logger.error("unexpected failure: [{}]", e, source); + failures.add(new Tuple<>(source, e)); updateLatch.countDown(); } @@ -387,8 +387,8 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { - fail(ExceptionsHelper.detailedMessage(t)); + public void onFailure(String source, Exception e) { + fail(ExceptionsHelper.detailedMessage(e)); } })) ; } @@ -523,8 +523,8 @@ public class ClusterServiceTests extends ESTestCase { final CountDownLatch updateLatch = new 
CountDownLatch(totalTaskCount); final ClusterStateTaskListener listener = new ClusterStateTaskListener() { @Override - public void onFailure(String source, Throwable t) { - fail(ExceptionsHelper.detailedMessage(t)); + public void onFailure(String source, Exception e) { + fail(ExceptionsHelper.detailedMessage(e)); } @Override @@ -647,8 +647,8 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { - fail(ExceptionsHelper.detailedMessage(t)); + public void onFailure(String source, Exception e) { + fail(ExceptionsHelper.detailedMessage(e)); } }; @@ -693,7 +693,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); @@ -710,7 +710,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { latch.countDown(); } }); @@ -727,7 +727,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); @@ -745,7 +745,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); @@ -788,7 +788,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); @@ -807,7 +807,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { latch.countDown(); } }); @@ -824,7 +824,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void 
onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); @@ -841,7 +841,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); @@ -859,7 +859,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } }); @@ -902,7 +902,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { } public void close() { @@ -930,7 +930,7 @@ public class ClusterServiceTests extends ESTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { latch.countDown(); } } diff --git a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java index bb9d23db1cb..f10a0da3029 100644 --- a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java +++ b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java @@ -43,7 +43,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500); final Thread[] threads = new Thread[NUM_THREADS]; final AtomicBoolean tripped = new AtomicBoolean(false); - final AtomicReference lastException = new AtomicReference<>(null); + final AtomicReference lastException = new AtomicReference<>(null); final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue((BYTES_PER_THREAD * NUM_THREADS) - 1), 1.0, logger); @@ -60,8 +60,8 @@ public class MemoryCircuitBreakerTests extends ESTestCase { } else { 
assertThat(tripped.compareAndSet(false, true), equalTo(true)); } - } catch (Throwable e2) { - lastException.set(e2); + } catch (Exception e) { + lastException.set(e); } } } @@ -117,8 +117,8 @@ public class MemoryCircuitBreakerTests extends ESTestCase { } else { assertThat(tripped.compareAndSet(false, true), equalTo(true)); } - } catch (Throwable e2) { - lastException.set(e2); + } catch (Exception e) { + lastException.set(e); } } } @@ -178,8 +178,8 @@ public class MemoryCircuitBreakerTests extends ESTestCase { breaker.addEstimateBytesAndMaybeBreak(1L, "test"); } catch (CircuitBreakingException e) { tripped.incrementAndGet(); - } catch (Throwable e2) { - lastException.set(e2); + } catch (Exception e) { + lastException.set(e); } } } diff --git a/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java index 90922327732..c521314f92d 100644 --- a/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/bytes/AbstractBytesReferenceTestCase.java @@ -42,11 +42,20 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { public void testGet() throws IOException { int length = randomIntBetween(1, PAGE_SIZE * 3); BytesReference pbr = newBytesReference(length); + int sliceOffset = randomIntBetween(0, length / 2); int sliceLength = Math.max(1, length - sliceOffset - 1); BytesReference slice = pbr.slice(sliceOffset, sliceLength); assertEquals(pbr.get(sliceOffset), slice.get(0)); assertEquals(pbr.get(sliceOffset + sliceLength - 1), slice.get(sliceLength - 1)); + final int probes = randomIntBetween(20, 100); + BytesReference copy = new BytesArray(pbr.toBytesRef(), true); + for (int i = 0; i < probes; i++) { + int index = randomIntBetween(0, copy.length() - 1); + assertEquals(pbr.get(index), copy.get(index)); + index = randomIntBetween(sliceOffset, sliceOffset + 
sliceLength - 1); + assertEquals(pbr.get(index), slice.get(index - sliceOffset)); + } } public void testLength() throws IOException { @@ -121,6 +130,26 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { si.readBytes(targetBuf, 0, length * 2)); } + public void testStreamInputMarkAndReset() throws IOException { + int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); + BytesReference pbr = newBytesReference(length); + StreamInput si = pbr.streamInput(); + assertNotNull(si); + + StreamInput wrap = StreamInput.wrap(BytesReference.toBytes(pbr)); + while(wrap.available() > 0) { + if (rarely()) { + wrap.mark(Integer.MAX_VALUE); + si.mark(Integer.MAX_VALUE); + } else if (rarely()) { + wrap.reset(); + si.reset(); + } + assertEquals(si.readByte(), wrap.readByte()); + assertEquals(si.available(), wrap.available()); + } + } + public void testStreamInputBulkReadWithOffset() throws IOException { final int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); BytesReference pbr = newBytesReference(length); @@ -233,6 +262,24 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { out.close(); } + public void testInputStreamSkip() throws IOException { + int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20)); + BytesReference pbr = newBytesReference(length); + final int iters = randomIntBetween(5, 50); + for (int i = 0; i < iters; i++) { + try (StreamInput input = pbr.streamInput()) { + final int offset = randomIntBetween(0, length-1); + assertEquals(offset, input.skip(offset)); + assertEquals(pbr.get(offset), input.readByte()); + final int nextOffset = randomIntBetween(offset, length-2); + assertEquals(nextOffset - offset, input.skip(nextOffset - offset)); + assertEquals(pbr.get(nextOffset+1), input.readByte()); // +1 for the one byte we read above + assertEquals(length - (nextOffset+2), input.skip(Long.MAX_VALUE)); + assertEquals(0, 
input.skip(randomIntBetween(0, Integer.MAX_VALUE))); + } + } + } + public void testSliceWriteToOutputStream() throws IOException { int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5)); BytesReference pbr = newBytesReference(length); @@ -252,6 +299,9 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { BytesReference pbr = newBytesReference(sizes[i]); byte[] bytes = BytesReference.toBytes(pbr); assertEquals(sizes[i], bytes.length); + for (int j = 0; j < bytes.length; j++) { + assertEquals(bytes[j], pbr.get(j)); + } } } @@ -412,9 +462,16 @@ public abstract class AbstractBytesReferenceTestCase extends ESTestCase { // get a BytesRef from a slice int sliceOffset = randomIntBetween(0, pbr.length()); int sliceLength = randomIntBetween(0, pbr.length() - sliceOffset); + BytesRef sliceRef = pbr.slice(sliceOffset, sliceLength).toBytesRef(); - // note that these are only true if we have <= than a page, otherwise offset/length are shifted - assertEquals(sliceOffset, sliceRef.offset); + + if (sliceLength == 0 && sliceOffset != sliceRef.offset) { + // some impls optimize this to an empty instance then the offset will be 0 + assertEquals(0, sliceRef.offset); + } else { + // note that these are only true if we have <= than a page, otherwise offset/length are shifted + assertEquals(sliceOffset, sliceRef.offset); + } assertEquals(sliceLength, sliceRef.length); } diff --git a/core/src/test/java/org/elasticsearch/common/bytes/CompositeBytesReferenceTests.java b/core/src/test/java/org/elasticsearch/common/bytes/CompositeBytesReferenceTests.java new file mode 100644 index 00000000000..aec957aba68 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/bytes/CompositeBytesReferenceTests.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.bytes; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class CompositeBytesReferenceTests extends AbstractBytesReferenceTestCase { + @Override + protected BytesReference newBytesReference(int length) throws IOException { + // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content + List referenceList = newRefList(length); + BytesReference ref = new CompositeBytesReference(referenceList.toArray(new BytesReference[0])); + assertEquals(length, ref.length()); + return ref; + } + + private List newRefList(int length) throws IOException { + List referenceList = new ArrayList<>(); + for (int i = 0; i < length;) { + int remaining = length-i; + int sliceLength = randomIntBetween(1, remaining); + ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(sliceLength, bigarrays); + for (int j = 0; j < sliceLength; j++) { + out.writeByte((byte) random().nextInt(1 << 8)); + } + assertEquals(sliceLength, out.size()); + referenceList.add(out.bytes()); + i+=sliceLength; + } + return 
referenceList; + } + + public void testCompositeBuffer() throws IOException { + List referenceList = newRefList(randomIntBetween(1, PAGE_SIZE * 2)); + BytesReference ref = new CompositeBytesReference(referenceList.toArray(new BytesReference[0])); + BytesRefIterator iterator = ref.iterator(); + BytesRefBuilder builder = new BytesRefBuilder(); + + for (BytesReference reference : referenceList) { + BytesRefIterator innerIter = reference.iterator(); // sometimes we have a paged ref - pull an iter and walk all pages! + BytesRef scratch; + while ((scratch = innerIter.next()) != null) { + BytesRef next = iterator.next(); + assertNotNull(next); + assertEquals(next, scratch); + builder.append(next); + } + + } + assertNull(iterator.next()); + + int offset = 0; + for (BytesReference reference : referenceList) { + assertEquals(reference, ref.slice(offset, reference.length())); + int probes = randomIntBetween(Math.min(10, reference.length()), reference.length()); + for (int i = 0; i < probes; i++) { + int index = randomIntBetween(0, reference.length()-1); + assertEquals(ref.get(offset + index), reference.get(index)); + } + offset += reference.length(); + } + + BytesArray array = new BytesArray(builder.toBytesRef()); + assertEquals(array, ref); + assertEquals(array.hashCode(), ref.hashCode()); + + BytesStreamOutput output = new BytesStreamOutput(); + ref.writeTo(output); + assertEquals(array, output.bytes()); + } + + @Override + public void testToBytesRefSharedPage() throws IOException { + // CompositeBytesReference doesn't share pages + } + + @Override + public void testSliceArrayOffset() throws IOException { + // the assertions in this test only work on no-composite buffers + } + + @Override + public void testSliceToBytesRef() throws IOException { + // CompositeBytesReference shifts offsets + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java b/core/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java deleted file mode 
100644 index 8c7b0c1f255..00000000000 --- a/core/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.settings.bar; - -//used in SettingsTest -public class BarTestClass { -} diff --git a/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java b/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java index a89cb48c37a..729c431d2b2 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTests.java @@ -132,7 +132,7 @@ public class CancellableThreadsTests extends ESTestCase { public void testCancellableThreads() throws InterruptedException { Thread[] threads = new Thread[randomIntBetween(3, 10)]; final TestPlan[] plans = new TestPlan[threads.length]; - final Throwable[] throwables = new Throwable[threads.length]; + final Exception[] exceptions = new Exception[threads.length]; final boolean[] interrupted = new boolean[threads.length]; final CancellableThreads cancellableThreads = new CancellableThreads(); final CountDownLatch readyForCancel = new 
CountDownLatch(threads.length); @@ -153,8 +153,8 @@ public class CancellableThreadsTests extends ESTestCase { } else { cancellableThreads.execute(new TestRunnable(plan, readyForCancel)); } - } catch (Throwable t) { - throwables[plan.id] = t; + } catch (Exception e) { + exceptions[plan.id] = e; } if (plan.exceptBeforeCancel || plan.exitBeforeCancel) { // we have to mark we're ready now (actually done). @@ -176,19 +176,19 @@ public class CancellableThreadsTests extends ESTestCase { TestPlan plan = plans[i]; final Class exceptionClass = plan.ioException ? IOCustomException.class : CustomException.class; if (plan.exceptBeforeCancel) { - assertThat(throwables[i], Matchers.instanceOf(exceptionClass)); + assertThat(exceptions[i], Matchers.instanceOf(exceptionClass)); } else if (plan.exitBeforeCancel) { - assertNull(throwables[i]); + assertNull(exceptions[i]); } else { // in all other cases, we expect a cancellation exception. - assertThat(throwables[i], Matchers.instanceOf(CancellableThreads.ExecutionCancelledException.class)); + assertThat(exceptions[i], Matchers.instanceOf(CancellableThreads.ExecutionCancelledException.class)); if (plan.exceptAfterCancel) { - assertThat(throwables[i].getSuppressed(), + assertThat(exceptions[i].getSuppressed(), Matchers.arrayContaining( Matchers.instanceOf(exceptionClass) )); } else { - assertThat(throwables[i].getSuppressed(), Matchers.emptyArray()); + assertThat(exceptions[i].getSuppressed(), Matchers.emptyArray()); } } assertThat(interrupted[plan.id], Matchers.equalTo(plan.presetInterrupt)); diff --git a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java index 26d6af1cd5f..5302ba8d55c 100644 --- a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -67,7 +67,7 @@ public class IndexFolderUpgraderTests extends 
ESTestCase { public void testUpgradeCustomDataPath() throws IOException { Path customPath = createTempDir(); final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) + .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAsciiOfLength(10), UUIDs.randomBase64UUID()); @@ -96,7 +96,7 @@ public class IndexFolderUpgraderTests extends ESTestCase { public void testPartialUpgradeCustomDataPath() throws IOException { Path customPath = createTempDir(); final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) + .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAsciiOfLength(10), UUIDs.randomBase64UUID()); @@ -136,7 +136,7 @@ public class IndexFolderUpgraderTests extends ESTestCase { public void testUpgrade() throws IOException { final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); + .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAsciiOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() @@ -159,7 +159,7 @@ public class IndexFolderUpgraderTests extends ESTestCase { public void testUpgradeIndices() throws IOException { final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), 
randomBoolean()).build(); + .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { Map> indexSettingsMap = new HashMap<>(); for (int i = 0; i < randomIntBetween(2, 5); i++) { @@ -256,7 +256,7 @@ public class IndexFolderUpgraderTests extends ESTestCase { .numberOfReplicas(0) .build(); try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) { - IndexMetaData.FORMAT.write(indexState, 1, nodeEnvironment.indexPaths(index)); + IndexMetaData.FORMAT.write(indexState, nodeEnvironment.indexPaths(index)); assertFalse(IndexFolderUpgrader.needsUpgrade(index, index.getUUID())); } } @@ -305,7 +305,7 @@ public class IndexFolderUpgraderTests extends ESTestCase { for (int i = 0; i < nodePaths.length; i++) { oldIndexPaths[i] = nodePaths[i].indicesPath.resolve(indexSettings.getIndex().getName()); } - IndexMetaData.FORMAT.write(indexSettings.getIndexMetaData(), 1, oldIndexPaths); + IndexMetaData.FORMAT.write(indexSettings.getIndexMetaData(), oldIndexPaths); for (int id = 0; id < indexSettings.getNumberOfShards(); id++) { Path oldIndexPath = randomFrom(oldIndexPaths); ShardId shardId = new ShardId(indexSettings.getIndex(), id); @@ -316,7 +316,7 @@ public class IndexFolderUpgraderTests extends ESTestCase { writeShard(shardId, oldIndexPath, numIdxFiles, numTranslogFiles); } ShardStateMetaData state = new ShardStateMetaData(true, indexSettings.getUUID(), AllocationId.newInitializing()); - ShardStateMetaData.FORMAT.write(state, 1, oldIndexPath.resolve(String.valueOf(shardId.getId()))); + ShardStateMetaData.FORMAT.write(state, oldIndexPath.resolve(String.valueOf(shardId.getId()))); } } diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java index 4c2e4700943..02adb783197 100644 --- 
a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractLifecycleRunnableTests.java @@ -48,7 +48,7 @@ public class AbstractLifecycleRunnableTests extends ESTestCase { AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { @Override - public void onFailure(Throwable t) { + public void onFailure(Exception e) { fail("It should not fail"); } @@ -77,7 +77,7 @@ public class AbstractLifecycleRunnableTests extends ESTestCase { AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { @Override - public void onFailure(Throwable t) { + public void onFailure(Exception e) { fail("It should not fail"); } @@ -106,7 +106,7 @@ public class AbstractLifecycleRunnableTests extends ESTestCase { AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { @Override - public void onFailure(Throwable t) { + public void onFailure(Exception e) { fail("It should not fail"); } @@ -145,7 +145,7 @@ public class AbstractLifecycleRunnableTests extends ESTestCase { AbstractLifecycleRunnable runnable = new AbstractLifecycleRunnable(lifecycle, logger) { @Override - public void onFailure(Throwable t) { + public void onFailure(Exception e) { fail("It should not fail"); } diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java index 54491aade6f..2373b30e1b2 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/AbstractRunnableTests.java @@ -37,8 +37,8 @@ public class AbstractRunnableTests extends ESTestCase { AbstractRunnable runnable = new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - fail("It should not fail"); + public void 
onFailure(Exception e) { + fail(e.toString()); } @Override @@ -57,8 +57,8 @@ public class AbstractRunnableTests extends ESTestCase { AbstractRunnable runnable = new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - assertSame(exception, t); + public void onFailure(Exception e) { + assertSame(exception, e); } @Override @@ -76,8 +76,8 @@ public class AbstractRunnableTests extends ESTestCase { AbstractRunnable runnable = new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - fail("It should not fail"); + public void onFailure(Exception e) { + fail(e.toString()); } @Override @@ -91,7 +91,7 @@ public class AbstractRunnableTests extends ESTestCase { afterCallable.call(); } catch (Exception e) { - fail("Unexpected for mock."); + fail(e.toString()); } } }; @@ -111,8 +111,8 @@ public class AbstractRunnableTests extends ESTestCase { AbstractRunnable runnable = new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - assertSame(exception, t); + public void onFailure(Exception e) { + assertSame(exception, e); } @Override @@ -126,7 +126,7 @@ public class AbstractRunnableTests extends ESTestCase { afterCallable.call(); } catch (Exception e) { - fail("Unexpected for mock."); + fail(e.toString()); } } }; @@ -142,14 +142,15 @@ public class AbstractRunnableTests extends ESTestCase { AbstractRunnable runnable = new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - assertSame(exception, t); + public void onFailure(Exception e) { + assertSame(exception, e); try { failureCallable.call(); } - catch (Exception e) { - fail("Unexpected for mock."); + catch (Exception inner) { + inner.addSuppressed(e); + fail(inner.toString()); } } @@ -165,8 +166,8 @@ public class AbstractRunnableTests extends ESTestCase { public void testIsForceExecutuonDefaultsFalse() { AbstractRunnable runnable = new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - fail("Not tested"); + public void 
onFailure(Exception e) { + fail(e.toString()); } @Override diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index 57da614e689..72db2911fc0 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -88,8 +88,8 @@ public class EsExecutorsTests extends ESTestCase { } @Override - public void onFailure(Throwable t) { - throw new AssertionError(t); + public void onFailure(Exception e) { + throw new AssertionError(e); } }); @@ -178,7 +178,7 @@ public class EsExecutorsTests extends ESTestCase { try { barrier.await(); barrier.await(); - } catch (Throwable e) { + } catch (Exception e) { barrier.reset(e); } } @@ -214,7 +214,7 @@ public class EsExecutorsTests extends ESTestCase { try { barrier.await(); barrier.await(); - } catch (Throwable e) { + } catch (Exception e) { barrier.reset(e); } } diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java index 9338beccb9a..c5d0ec4257e 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java @@ -88,7 +88,7 @@ public class RefCountedTests extends ESTestCase { final MyRefCounted counted = new MyRefCounted(); Thread[] threads = new Thread[randomIntBetween(2, 5)]; final CountDownLatch latch = new CountDownLatch(1); - final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); + final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread() { @Override @@ -103,7 +103,7 @@ public class RefCountedTests extends ESTestCase { counted.decRef(); } } - } catch (Throwable 
e) { + } catch (Exception e) { exceptions.add(e); } } diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainerTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainerTests.java deleted file mode 100644 index 83db2d4a7c6..00000000000 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/SuspendableRefContainerTests.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.util.concurrent; - -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.ESTestCase; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; - -public class SuspendableRefContainerTests extends ESTestCase { - - public void testBasicAcquire() throws InterruptedException { - SuspendableRefContainer refContainer = new SuspendableRefContainer(); - assertThat(refContainer.activeRefs(), equalTo(0)); - - Releasable lock1 = randomLockingMethod(refContainer); - assertThat(refContainer.activeRefs(), equalTo(1)); - Releasable lock2 = randomLockingMethod(refContainer); - assertThat(refContainer.activeRefs(), equalTo(2)); - lock1.close(); - assertThat(refContainer.activeRefs(), equalTo(1)); - lock1.close(); // check idempotence - assertThat(refContainer.activeRefs(), equalTo(1)); - lock2.close(); - assertThat(refContainer.activeRefs(), equalTo(0)); - } - - public void testAcquisitionBlockingBlocksNewAcquisitions() throws InterruptedException { - SuspendableRefContainer refContainer = new SuspendableRefContainer(); - assertThat(refContainer.activeRefs(), equalTo(0)); - - try (Releasable block = refContainer.blockAcquisition()) { - assertThat(refContainer.activeRefs(), equalTo(0)); - assertThat(refContainer.tryAcquire(), nullValue()); - assertThat(refContainer.activeRefs(), equalTo(0)); - } - try (Releasable lock = refContainer.tryAcquire()) { - assertThat(refContainer.activeRefs(), equalTo(1)); - } - - // same with blocking acquire - AtomicBoolean acquired = new AtomicBoolean(); - Thread t = new Thread(() -> { - try (Releasable lock = randomBoolean() ? 
refContainer.acquire() : refContainer.acquireUninterruptibly()) { - acquired.set(true); - assertThat(refContainer.activeRefs(), equalTo(1)); - } catch (InterruptedException e) { - fail("Interrupted"); - } - }); - try (Releasable block = refContainer.blockAcquisition()) { - assertThat(refContainer.activeRefs(), equalTo(0)); - t.start(); - // check that blocking acquire really blocks - assertThat(acquired.get(), equalTo(false)); - assertThat(refContainer.activeRefs(), equalTo(0)); - } - t.join(); - assertThat(acquired.get(), equalTo(true)); - assertThat(refContainer.activeRefs(), equalTo(0)); - } - - public void testAcquisitionBlockingWaitsOnExistingAcquisitions() throws InterruptedException { - SuspendableRefContainer refContainer = new SuspendableRefContainer(); - - AtomicBoolean acquired = new AtomicBoolean(); - Thread t = new Thread(() -> { - try (Releasable block = refContainer.blockAcquisition()) { - acquired.set(true); - assertThat(refContainer.activeRefs(), equalTo(0)); - } - }); - try (Releasable lock = randomLockingMethod(refContainer)) { - assertThat(refContainer.activeRefs(), equalTo(1)); - t.start(); - assertThat(acquired.get(), equalTo(false)); - assertThat(refContainer.activeRefs(), equalTo(1)); - } - t.join(); - assertThat(acquired.get(), equalTo(true)); - assertThat(refContainer.activeRefs(), equalTo(0)); - } - - private Releasable randomLockingMethod(SuspendableRefContainer refContainer) throws InterruptedException { - switch (randomInt(2)) { - case 0: return refContainer.tryAcquire(); - case 1: return refContainer.acquire(); - case 2: return refContainer.acquireUninterruptibly(); - } - throw new IllegalArgumentException("randomLockingMethod inconsistent"); - } -} diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index d6797d4be26..e6726879513 100644 --- 
a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -294,8 +294,8 @@ public class ThreadContextTests extends ESTestCase { } return new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - throw new RuntimeException(t); + public void onFailure(Exception e) { + throw new RuntimeException(e); } @Override diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java index f3592936765..bef4a047ef5 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java @@ -68,7 +68,7 @@ public class ConstructingObjectParserTests extends ESTestCase { assertEquals(expected.b, parsed.b); assertEquals(expected.c, parsed.c); assertEquals(expected.d, parsed.d); - } catch (Throwable e) { + } catch (Exception e) { // It is convenient to decorate the error message with the json throw new Exception("Error parsing: [" + builder.string() + "]", e); } diff --git a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java index bb38e329103..4ff4c4cd035 100644 --- a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.discovery; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.transport.DummyTransportAddress; +import 
org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.ESTestCase; @@ -58,8 +58,8 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase } @Override - public void onFailure(Throwable t) { - logger.error("unexpected error", t); + public void onFailure(Exception e) { + logger.error("unexpected error", e); } @Override @@ -77,7 +77,7 @@ public class BlockingClusterStatePublishResponseHandlerTests extends ESTestCase int nodeCount = scaledRandomIntBetween(10, 20); DiscoveryNode[] allNodes = new DiscoveryNode[nodeCount]; for (int i = 0; i < nodeCount; i++) { - DiscoveryNode node = new DiscoveryNode("node_" + i, DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode node = new DiscoveryNode("node_" + i, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); allNodes[i] = node; } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 13e19e84978..a1bac928daf 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -503,8 +503,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } } catch (InterruptedException e) { // fine - semaphore interrupt - } catch (Throwable t) { - logger.info("unexpected exception in background thread of [{}]", t, node); + } catch (AssertionError | Exception e) { + logger.info("unexpected exception in background thread of [{}]", e, node); } } }); @@ -690,8 +690,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { - logger.warn("failure 
[{}]", t, source); + public void onFailure(String source, Exception e) { + logger.warn("failure [{}]", e, source); } }); @@ -960,7 +960,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception e) { success.set(false); latch.countDown(); assert false; diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index 0f93e5d460c..b31b0cbaa55 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -22,7 +22,7 @@ package org.elasticsearch.discovery.zen; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; @@ -46,7 +46,7 @@ public class ElectMasterServiceTests extends ESTestCase { if (randomBoolean()) { roles.add(DiscoveryNode.Role.MASTER); } - DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, DummyTransportAddress.INSTANCE, Collections.emptyMap(), + DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles, Version.CURRENT); nodes.add(node); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 15c8d312952..cd2b4eaf2e4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -24,7 +24,6 
@@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; @@ -32,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -41,6 +39,7 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -50,6 +49,8 @@ import org.junit.Before; import org.junit.BeforeClass; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -68,6 +69,7 @@ import static java.util.Collections.emptySet; import static java.util.Collections.shuffle; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -99,10 +101,9 @@ public class NodeJoinControllerTests extends ESTestCase { // make sure we have a master setState(clusterService, ClusterState.builder(clusterService.state()).nodes( DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId()))); - nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY), - new ElectMasterService(Settings.EMPTY), - new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - Settings.EMPTY); + nodeJoinController = new NodeJoinController(clusterService, new NoopAllocationService(Settings.EMPTY), + new ElectMasterService(Settings.EMPTY), new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); } @After @@ -198,17 +199,19 @@ public class NodeJoinControllerTests extends ESTestCase { final SimpleFuture electionFuture = new SimpleFuture("master election"); final Thread masterElection = new Thread(new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - logger.error("unexpected error from waitToBeElectedAsMaster", t); - electionFuture.markAsFailed(t); + public void onFailure(Exception e) { + logger.error("unexpected error from waitToBeElectedAsMaster", e); + electionFuture.markAsFailed(e); } @Override protected void doRun() throws Exception { - nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), new NodeJoinController.ElectionCallback() { + nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), + new NodeJoinController.ElectionCallback() { @Override public void onElectedAsMaster(ClusterState state) { - assertThat("callback called with elected as master, but state disagrees", state.nodes().isLocalNodeElectedMaster(), equalTo(true)); + assertThat("callback called with elected as master, 
but state disagrees", state.nodes().isLocalNodeElectedMaster(), + equalTo(true)); electionFuture.markAsDone(); } @@ -246,17 +249,19 @@ public class NodeJoinControllerTests extends ESTestCase { final SimpleFuture electionFuture = new SimpleFuture("master election"); final Thread masterElection = new Thread(new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - logger.error("unexpected error from waitToBeElectedAsMaster", t); - electionFuture.markAsFailed(t); + public void onFailure(Exception e) { + logger.error("unexpected error from waitToBeElectedAsMaster", e); + electionFuture.markAsFailed(e); } @Override protected void doRun() throws Exception { - nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), new NodeJoinController.ElectionCallback() { + nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), + new NodeJoinController.ElectionCallback() { @Override public void onElectedAsMaster(ClusterState state) { - assertThat("callback called with elected as master, but state disagrees", state.nodes().isLocalNodeElectedMaster(), equalTo(true)); + assertThat("callback called with elected as master, but state disagrees", state.nodes().isLocalNodeElectedMaster(), + equalTo(true)); electionFuture.markAsDone(); } @@ -298,7 +303,8 @@ public class NodeJoinControllerTests extends ESTestCase { } logger.debug("--> asserting master election didn't finish yet"); - assertThat("election finished after [" + initialJoins + "] master nodes but required joins is [" + requiredJoins + "]", electionFuture.isDone(), equalTo(false)); + assertThat("election finished after [" + initialJoins + "] master nodes but required joins is [" + requiredJoins + "]", + electionFuture.isDone(), equalTo(false)); final int finalJoins = requiredJoins - initialJoins + randomInt(5); nodesToJoin.clear(); @@ -374,7 +380,8 @@ public class NodeJoinControllerTests extends ESTestCase { 
nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueMillis(1), new NodeJoinController.ElectionCallback() { @Override public void onElectedAsMaster(ClusterState state) { - assertThat("callback called with elected as master, but state disagrees", state.nodes().isLocalNodeElectedMaster(), equalTo(true)); + assertThat("callback called with elected as master, but state disagrees", state.nodes().isLocalNodeElectedMaster(), + equalTo(true)); latch.countDown(); } @@ -403,7 +410,7 @@ public class NodeJoinControllerTests extends ESTestCase { public void testNewClusterStateOnExistingNodeJoin() throws InterruptedException, ExecutionException { ClusterState state = clusterService.state(); final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); - final DiscoveryNode other_node = new DiscoveryNode("other_node", DummyTransportAddress.INSTANCE, + final DiscoveryNode other_node = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); nodesBuilder.put(other_node); setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder)); @@ -425,9 +432,9 @@ public class NodeJoinControllerTests extends ESTestCase { nodes.add(node); threads[i] = new Thread(new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - logger.error("unexpected error in join thread", t); - backgroundExceptions.add(t); + public void onFailure(Exception e) { + logger.error("unexpected error in join thread", e); + backgroundExceptions.add(e); } @Override @@ -468,9 +475,9 @@ public class NodeJoinControllerTests extends ESTestCase { nodes.add(node); threads[i] = new Thread(new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - logger.error("unexpected error in join thread", t); - backgroundExceptions.add(t); + public void onFailure(Exception e) { + logger.error("unexpected error in join thread", e); + backgroundExceptions.add(e); } @Override @@ -492,7 +499,8 @@ public 
class NodeJoinControllerTests extends ESTestCase { nodeJoinController.waitToBeElectedAsMaster(requiredJoins, TimeValue.timeValueHours(30), new NodeJoinController.ElectionCallback() { @Override public void onElectedAsMaster(ClusterState state) { - assertThat("callback called with elected as master, but state disagrees", state.nodes().isLocalNodeElectedMaster(), equalTo(true)); + assertThat("callback called with elected as master, but state disagrees", state.nodes().isLocalNodeElectedMaster(), + equalTo(true)); latch.countDown(); } @@ -515,17 +523,37 @@ public class NodeJoinControllerTests extends ESTestCase { assertNodesInCurrentState(nodes); } + public void testRejectingJoinWithSameAddressButDifferentId() throws InterruptedException, ExecutionException { + ClusterState state = clusterService.state(); + final DiscoveryNode other_node = new DiscoveryNode("other_node", state.nodes().getLocalNode().getAddress(), + emptyMap(), emptySet(), Version.CURRENT); - static class NoopRoutingService extends RoutingService { + ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); + assertThat(e.getMessage(), containsString("found existing node")); + } - public NoopRoutingService(Settings settings) { - super(settings, null, new NoopAllocationService(settings)); - } + public void testRejectingJoinWithSameIdButDifferentAddress() throws InterruptedException, ExecutionException { + ClusterState state = clusterService.state(); + final DiscoveryNode other_node = new DiscoveryNode(state.nodes().getLocalNode().getId(), + new LocalTransportAddress(randomAsciiOfLength(20)), emptyMap(), emptySet(), Version.CURRENT); - @Override - protected void performReroute(String reason) { + ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); + assertThat(e.getMessage(), containsString("found existing node")); + } - } + public void testJoinWithSameIdSameAddressButDifferentMeta() throws InterruptedException, ExecutionException { + 
ClusterState state = clusterService.state(); + final DiscoveryNode localNode = state.nodes().getLocalNode(); + final DiscoveryNode other_node = new DiscoveryNode( + randomBoolean() ? localNode.getName() : "other_name", + localNode.getId(), localNode.getAddress(), + randomBoolean() ? localNode.getAttributes() : Collections.singletonMap("attr", "other"), + randomBoolean() ? localNode.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), + randomBoolean() ? localNode.getVersion() : VersionUtils.randomVersion(random())); + + joinNode(other_node); + + assertThat(clusterService.localNode(), equalTo(other_node)); } static class NoopAllocationService extends AllocationService { @@ -535,12 +563,14 @@ public class NodeJoinControllerTests extends ESTestCase { } @Override - public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, boolean withReroute) { + public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, + boolean withReroute) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } @Override - public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, List failedShards) { + public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, + List failedShards) { return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); } @@ -596,9 +626,9 @@ public class NodeJoinControllerTests extends ESTestCase { } @Override - public void onFailure(Throwable t) { - logger.error("unexpected error for {}", t, future); - future.markAsFailed(t); + public void onFailure(Exception e) { + logger.error("unexpected error for {}", e, future); + future.markAsFailed(e); } }); return future; @@ -608,8 +638,8 @@ public class NodeJoinControllerTests extends ESTestCase { * creates an object clone of node, so it will be a different object instance */ private DiscoveryNode 
cloneNode(DiscoveryNode node) { - return new DiscoveryNode(node.getName(), node.getId(), node.getHostName(), node.getHostAddress(), node.getAddress(), - node.getAttributes(), node.getRoles(), node.getVersion()); + return new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), node.getHostName(), node.getHostAddress(), + node.getAddress(), node.getAttributes(), node.getRoles(), node.getVersion()); } private void joinNode(final DiscoveryNode node) throws InterruptedException, ExecutionException { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index fd0b11eae01..3d0d9ddd8b1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -261,8 +261,8 @@ public class ZenDiscoveryIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable t) { - holder.set((IllegalStateException) t); + public void onFailure(Exception e) { + holder.set((IllegalStateException) e); } }); @@ -309,8 +309,8 @@ public class ZenDiscoveryIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable t) { - holder.set((IllegalStateException) t); + public void onFailure(Exception e) { + holder.set((IllegalStateException) e); } }); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index a6638eb19cf..9db83f48f0e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -24,8 +24,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import 
org.elasticsearch.common.Strings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.test.ESTestCase; @@ -51,9 +50,9 @@ public class ZenDiscoveryUnitTests extends ESTestCase { ClusterName clusterName = new ClusterName("abc"); DiscoveryNodes.Builder currentNodes = DiscoveryNodes.builder(); - currentNodes.masterNodeId("a").put(new DiscoveryNode("a", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT)); + currentNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); DiscoveryNodes.Builder newNodes = DiscoveryNodes.builder(); - newNodes.masterNodeId("a").put(new DiscoveryNode("a", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT)); + newNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); ClusterState.Builder currentState = ClusterState.builder(clusterName); currentState.nodes(currentNodes); @@ -71,7 +70,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentNodes = DiscoveryNodes.builder(); - currentNodes.masterNodeId("b").put(new DiscoveryNode("b", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT)); + currentNodes.masterNodeId("b").put(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); ; // version isn't taken into account, so randomize it to ensure this. 
if (randomBoolean()) { @@ -109,7 +108,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { ArrayList allNodes = new ArrayList<>(); for (int i = randomIntBetween(10, 20); i >= 0; i--) { Set roles = new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))); - DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, DummyTransportAddress.INSTANCE, Collections.emptyMap(), + DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles, Version.CURRENT); responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomBoolean())); allNodes.add(node); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java index 8aa5114c387..72674f44e3d 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.zen.ping.ZenPing; import org.elasticsearch.test.ESTestCase; @@ -42,7 +42,7 @@ public class ZenPingTests extends ESTestCase { boolean hasJoinedOncePerNode[] = new boolean[nodes.length]; ArrayList pings = new ArrayList<>(); for (int i = 0; i < nodes.length; i++) { - nodes[i] = new DiscoveryNode("" + i, DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + nodes[i] = new DiscoveryNode("" + i, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); } for (int pingCount = scaledRandomIntBetween(10, nodes.length * 10); pingCount > 0; pingCount--) { 
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java index f072c5faf8a..7715749fdf6 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen.ping.unicast; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -31,6 +30,7 @@ import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.PingContextProvider; import org.elasticsearch.discovery.zen.ping.ZenPing; @@ -43,7 +43,6 @@ import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.transport.netty.NettyTransport; -import org.jboss.netty.util.internal.ConcurrentHashMap; import java.net.InetSocketAddress; import java.util.concurrent.ConcurrentMap; @@ -209,7 +208,7 @@ public class UnicastZenPingIT extends ESTestCase { final TransportService transportService = new TransportService(settings, transport, threadPool); transportService.start(); transportService.acceptIncomingRequests(); - ConcurrentMap counters = new ConcurrentHashMap<>(); + ConcurrentMap counters = ConcurrentCollections.newConcurrentMap(); 
transportService.addConnectionListener(new TransportConnectionListener() { @Override public void onNodeConnected(DiscoveryNode node) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java index ab9aed6ba44..42aa792c95f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.zen.publish.PendingClusterStatesQueue.ClusterStateContext; import org.elasticsearch.test.ESTestCase; @@ -237,7 +237,7 @@ public class PendingClusterStatesQueueTests extends ESTestCase { ClusterState state = lastClusterStatePerMaster[masterIndex]; if (state == null) { state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(DiscoveryNodes.builder() - .put(new DiscoveryNode(masters[masterIndex], DummyTransportAddress.INSTANCE, + .put(new DiscoveryNode(masters[masterIndex], LocalTransportAddress.buildUnique(), emptyMap(), emptySet(),Version.CURRENT)).masterNodeId(masters[masterIndex]).build() ).build(); } else { @@ -259,8 +259,8 @@ public class PendingClusterStatesQueueTests extends ESTestCase { } @Override - public void onNewClusterStateFailed(Throwable t) { - failure = t; + public void onNewClusterStateFailed(Exception e) { + failure = e; } } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java 
b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index 61374cc0d8f..0d4274a5a53 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -152,16 +153,17 @@ public class PublishClusterStateActionTests extends ESTestCase { return createMockNode(name, settings, null); } - public MockNode createMockNode(String name, Settings settings, @Nullable ClusterStateListener listener) throws Exception { - settings = Settings.builder() + public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception { + final Settings settings = Settings.builder() .put("name", name) .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") - .put(settings) + .put(basSettings) .build(); MockTransportService service = buildTransportService(settings); DiscoveryNodeService discoveryNodeService = new DiscoveryNodeService(settings); - DiscoveryNode discoveryNode = discoveryNodeService.buildLocalNode(service.boundAddress().publishAddress()); + DiscoveryNode discoveryNode = discoveryNodeService.buildLocalNode(service.boundAddress().publishAddress(), + () -> NodeEnvironment.generateNodeId(settings)); MockNode node = new MockNode(discoveryNode, service, listener, logger); node.action = buildPublishClusterStateAction(settings, service, () -> node.clusterState, 
node); final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1); @@ -797,9 +799,9 @@ public class PublishClusterStateActionTests extends ESTestCase { } @Override - public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { - if (t != null) { - errors.add(new Tuple<>(node, t)); + public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { + if (e != null) { + errors.add(new Tuple<>(node, e)); } countDown.countDown(); } @@ -910,8 +912,8 @@ public class PublishClusterStateActionTests extends ESTestCase { } @Override - public void sendResponse(Throwable error) throws IOException { - this.error.set(error); + public void sendResponse(Exception exception) throws IOException { + this.error.set(exception); assertThat(response.get(), nullValue()); } diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index ad425d8afc9..50e05d97985 100644 --- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; @@ -48,12 +47,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.arrayWithSize; import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.not; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to allow extras public class NodeEnvironmentTests extends ESTestCase { @@ -269,9 +267,9 @@ public class NodeEnvironmentTests extends ESTestCase { if (randomBoolean()) { Thread t = new Thread(new AbstractRunnable() { @Override - public void onFailure(Throwable t) { - logger.error("unexpected error", t); - threadException.set(t); + public void onFailure(Exception e) { + logger.error("unexpected error", e); + threadException.set(e); latch.countDown(); blockLatch.countDown(); } @@ -392,7 +390,7 @@ public class NodeEnvironmentTests extends ESTestCase { env.close(); NodeEnvironment env2 = newNodeEnvironment(dataPaths, "/tmp", - Settings.builder().put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), false).build()); + Settings.builder().put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false).build()); assertThat(env2.availableShardPaths(sid), equalTo(env2.availableShardPaths(sid))); assertThat(env2.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/" + index.getUUID() + "/0"))); @@ -450,6 +448,27 @@ public class NodeEnvironmentTests extends ESTestCase { } } + public void testPersistentNodeId() throws IOException { + String[] paths = tmpPaths(); + NodeEnvironment env = newNodeEnvironment(paths, Settings.builder() + .put("node.local_storage", false) + .put("node.master", false) + .put("node.data", false) + .build()); + String nodeID = env.nodeId(); + env.close(); + env = newNodeEnvironment(paths, Settings.EMPTY); + assertThat("previous node didn't have local storage enabled, id should change", env.nodeId(), not(equalTo(nodeID))); + nodeID = env.nodeId(); + env.close(); + env = newNodeEnvironment(paths, Settings.EMPTY); + assertThat(env.nodeId(), equalTo(nodeID)); + env.close(); + env = newNodeEnvironment(Settings.EMPTY); + assertThat(env.nodeId(), 
not(equalTo(nodeID))); + env.close(); + } + /** Converts an array of Strings to an array of Paths, adding an additional child if specified */ private Path[] stringsToPaths(String[] strings, String additional) { Path[] locations = new Path[strings.length]; diff --git a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java index 948f4820439..092e6eaff8a 100644 --- a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -45,11 +45,11 @@ import static org.hamcrest.Matchers.sameInstance; /** */ public class AsyncShardFetchTests extends ESTestCase { - private final DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Collections.emptyMap(), + private final DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT); private final Response response1 = new Response(node1); private final Throwable failure1 = new Throwable("simulated failure 1"); - private final DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Collections.emptyMap(), + private final DiscoveryNode node2 = new DiscoveryNode("node2", LocalTransportAddress.buildUnique(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), 
Version.CURRENT); private final Response response2 = new Response(node2); private final Throwable failure2 = new Throwable("simulate failure 2"); @@ -292,7 +292,7 @@ public class AsyncShardFetchTests extends ESTestCase { } else { processAsyncFetch(shardId, Collections.singletonList(entry.response), null); } - } catch (Throwable e) { + } catch (Exception e) { logger.error("unexpected failure", e); } finally { if (entry != null) { diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 129495ea15e..f86b56f1052 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -346,7 +346,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { logger.info("--> created temp data path for shadow replicas [{}]", dataPath); logger.info("--> starting a cluster with " + numNodes + " nodes"); final Settings nodeSettings = Settings.builder() - .put("node.add_id_to_custom_path", false) + .put("node.add_lock_id_to_custom_path", false) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath.toString()) .put("index.store.fs.fs_lock", randomFrom("native", "simple")) .build(); @@ -426,7 +426,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { // this one is not validated ahead of time and breaks allocation .put("index.analysis.filter.myCollator.type", "icu_collation") ).build(); - IndexMetaData.FORMAT.write(brokenMeta, brokenMeta.getVersion(), services.indexPaths(brokenMeta.getIndex())); + IndexMetaData.FORMAT.write(brokenMeta, services.indexPaths(brokenMeta.getIndex())); } internalCluster().fullRestart(); // ensureGreen(closedIndex) waits for the index to show up in the metadata @@ -483,7 +483,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { for (NodeEnvironment services : internalCluster().getInstances(NodeEnvironment.class)) { IndexMetaData 
brokenMeta = IndexMetaData.builder(metaData).settings(metaData.getSettings() .filter((s) -> "index.analysis.analyzer.test.tokenizer".equals(s) == false)).build(); - IndexMetaData.FORMAT.write(brokenMeta, brokenMeta.getVersion(), services.indexPaths(brokenMeta.getIndex())); + IndexMetaData.FORMAT.write(brokenMeta, services.indexPaths(brokenMeta.getIndex())); } internalCluster().fullRestart(); // ensureGreen(closedIndex) waits for the index to show up in the metadata @@ -521,7 +521,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { MetaData brokenMeta = MetaData.builder(metaData).persistentSettings(Settings.builder() .put(metaData.persistentSettings()).put("this.is.unknown", true) .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), "broken").build()).build(); - MetaData.FORMAT.write(brokenMeta, metaData.version(), nodeEnv.nodeDataPaths()); + MetaData.FORMAT.write(brokenMeta, nodeEnv.nodeDataPaths()); } internalCluster().fullRestart(); ensureYellow("test"); // wait for state recovery diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 41eba406009..4cf505d839a 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -104,7 +104,7 @@ public class MetaDataStateFormatTests extends ESTestCase { Format format = new Format(randomFrom(XContentType.values()), "foo-"); DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); int version = between(0, Integer.MAX_VALUE/2); - format.write(state, version, dirs); + format.write(state, dirs); for (Path file : dirs) { Path[] list = content("*", file); assertEquals(list.length, 1); @@ -119,7 +119,7 @@ public class MetaDataStateFormatTests extends ESTestCase { } final int 
version2 = between(version, Integer.MAX_VALUE); DummyState state2 = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); - format.write(state2, version2, dirs); + format.write(state2, dirs); for (Path file : dirs) { Path[] list = content("*", file); @@ -146,7 +146,7 @@ public class MetaDataStateFormatTests extends ESTestCase { Format format = new Format(randomFrom(XContentType.values()), "foo-"); DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); int version = between(0, Integer.MAX_VALUE/2); - format.write(state, version, dirs); + format.write(state, dirs); for (Path file : dirs) { Path[] list = content("*", file); assertEquals(list.length, 1); @@ -170,7 +170,7 @@ public class MetaDataStateFormatTests extends ESTestCase { Format format = new Format(randomFrom(XContentType.values()), "foo-"); DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean()); int version = between(0, Integer.MAX_VALUE/2); - format.write(state, version, dirs); + format.write(state, dirs); for (Path file : dirs) { Path[] list = content("*", file); assertEquals(list.length, 1); @@ -261,7 +261,7 @@ public class MetaDataStateFormatTests extends ESTestCase { } } for (int j = numLegacy; j < numStates; j++) { - format.write(meta.get(j), j, dirs[i]); + format.write(meta.get(j), dirs[i]); if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) { // corrupt a file that we do not necessarily need here.... 
Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j + ".st"); corruptedFiles.add(file); diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java index 4999ef5eac5..795046ba10c 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesIT.java @@ -161,8 +161,8 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase { logger.info("checking if meta state exists..."); try { assertTrue("Expecting meta state of index " + indexName + " to be on node " + nodeName, getIndicesMetaDataOnNode(nodeName).containsKey(indexName)); - } catch (Throwable t) { - logger.info("failed to load meta state", t); + } catch (Exception e) { + logger.info("failed to load meta state", e); fail("could not load meta state"); } } diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index 96e360550af..e64c816c4bf 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RestoreSource; @@ -41,6 +40,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardStateMetaData; import org.elasticsearch.snapshots.Snapshot; +import 
org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; @@ -547,7 +547,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { return addData(node, version, allocationId, primary, null); } - public TestAllocator addData(DiscoveryNode node, long version, String allocationId, boolean primary, @Nullable Throwable storeException) { + public TestAllocator addData(DiscoveryNode node, long version, String allocationId, boolean primary, @Nullable Exception storeException) { if (data == null) { data = new HashMap<>(); } diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java index 264876b7963..0aeb00914e2 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java @@ -104,7 +104,7 @@ public class NettyHttpClient implements Closeable { } @SafeVarargs // Safe not because it doesn't do anything with the type parameters but because it won't leak them into other methods. - private final Collection processRequestsWithBody(HttpMethod method, SocketAddress remoteAddress, Tuple processRequestsWithBody(HttpMethod method, SocketAddress remoteAddress, Tuple... 
urisAndBodies) throws InterruptedException { Collection requests = new ArrayList<>(urisAndBodies.length); for (Tuple uriAndBody : urisAndBodies) { diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java index f0e12abeac8..bd78607c617 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java @@ -94,7 +94,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { private Settings nodeSettings(String dataPath) { return Settings.builder() - .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), false) + .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath) .put(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING.getKey(), randomFrom("native", "simple")) .build(); @@ -379,7 +379,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar")); } - public void testPrimaryRelocationWithConcurrentIndexing() throws Throwable { + public void testPrimaryRelocationWithConcurrentIndexing() throws Exception { Path dataPath = createTempDir(); Settings nodeSettings = nodeSettings(dataPath); @@ -408,7 +408,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { final int numPhase2Docs = scaledRandomIntBetween(25, 200); final CountDownLatch phase1finished = new CountDownLatch(1); final CountDownLatch phase2finished = new CountDownLatch(1); - final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); + final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); Thread thread = new Thread() { @Override public void run() { @@ -418,8 +418,8 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", 
Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); assertTrue(indexResponse.isCreated()); - } catch (Throwable t) { - exceptions.add(t); + } catch (Exception e) { + exceptions.add(e); } final int docCount = counter.get(); if (docCount == numPhase1Docs) { @@ -454,7 +454,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { public void testPrimaryRelocationWhereRecoveryFails() throws Exception { Path dataPath = createTempDir(); Settings nodeSettings = Settings.builder() - .put("node.add_id_to_custom_path", false) + .put("node.add_lock_id_to_custom_path", false) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath) .build(); @@ -677,7 +677,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase { client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get(); flushAndRefresh(IDX); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).addFieldDataField("foo").addSort("foo", SortOrder.ASC).get(); + SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).addDocValueField("foo").addSort("foo", SortOrder.ASC).get(); assertHitCount(resp, 4); assertOrderedSearchHits(resp, "2", "3", "4", "1"); SearchHit[] hits = resp.getHits().hits(); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 0ecf8462651..a3fd266f603 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -297,7 +297,7 @@ public class InternalEngineTests extends ESTestCase { } Engine.EventListener listener = new Engine.EventListener() { @Override - public void onFailedEngine(String reason, @Nullable Throwable t) { + public void onFailedEngine(String reason, @Nullable Exception e) { // we don't need to notify anybody in this test } }; @@ -2092,7 +2092,7 @@ public class 
InternalEngineTests extends ESTestCase { } public void testShardNotAvailableExceptionWhenEngineClosedConcurrently() throws IOException, InterruptedException { - AtomicReference throwable = new AtomicReference<>(); + AtomicReference exception = new AtomicReference<>(); String operation = randomFrom("optimize", "refresh", "flush"); Thread mergeThread = new Thread() { @Override @@ -2115,8 +2115,8 @@ public class InternalEngineTests extends ESTestCase { break; } } - } catch (Throwable t) { - throwable.set(t); + } catch (Exception e) { + exception.set(e); stop = true; } } @@ -2125,8 +2125,8 @@ public class InternalEngineTests extends ESTestCase { mergeThread.start(); engine.close(); mergeThread.join(); - logger.info("exception caught: ", throwable.get()); - assertTrue("expected an Exception that signals shard is not available", TransportActions.isShardNotAvailableException(throwable.get())); + logger.info("exception caught: ", exception.get()); + assertTrue("expected an Exception that signals shard is not available", TransportActions.isShardNotAvailableException(exception.get())); } public void testCurrentTranslogIDisCommitted() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 39112ed602e..672686926bd 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -242,7 +242,7 @@ public class ShadowEngineTests extends ESTestCase { } Engine.EventListener eventListener = new Engine.EventListener() { @Override - public void onFailedEngine(String reason, @Nullable Throwable t) { + public void onFailedEngine(String reason, @Nullable Exception e) { // we don't need to notify anybody in this test } }; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java 
b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java index f9fb5e77b70..9a8815e9398 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingDisabledTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; @@ -116,7 +115,7 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { onFailureCalled.set(true); assertThat(e, instanceOf(IndexNotFoundException.class)); assertEquals(e.getMessage(), "no such index"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationIT.java index 8afdea27451..71628c06128 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationIT.java @@ -96,8 +96,8 @@ public class DynamicMappingIntegrationIT extends ESIntegTestCase { try { startLatch.await(); assertTrue(client().prepareIndex("index", "type", id).setSource("field" + id, "bar").get().isCreated()); - } catch (Throwable t) { - error.compareAndSet(null, t); + } catch (Exception e) { + error.compareAndSet(null, e); } } }); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index c93c181f860..2afeb02499d 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -103,7 +103,7 @@ public class 
MapperServiceTests extends ESSingleNodeTestCase { // 2. already existing index IndexService indexService = createIndex("index2"); - expectThrows(ExecutionException.class, () -> { + e = expectThrows(ExecutionException.class, () -> { client().prepareIndex("index1", MapperService.DEFAULT_MAPPING, "2").setSource().execute().get(); }); throwable = ExceptionsHelper.unwrapCause(e.getCause()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java index 31e4e2d0923..da0c3d081af 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java @@ -166,9 +166,9 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase { private SearchRequestBuilder prepareSearch() { SearchRequestBuilder request = client().prepareSearch("test").setTypes("test"); - request.addField("foo.token_count"); + request.addStoredField("foo.token_count"); if (loadCountedFields) { - request.addField("foo"); + request.addStoredField("foo"); } return request; } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index 202afd7a4b1..14dd370fbfd 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -816,7 +816,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .field("lon", -74.0059731).endObject().endObject()).setRefreshPolicy(IMMEDIATE).get(); // match all search with geohash field - SearchResponse searchResponse = client().prepareSearch().addField("location.geohash").setQuery(matchAllQuery()).execute().actionGet(); + 
SearchResponse searchResponse = client().prepareSearch().addStoredField("location.geohash").setQuery(matchAllQuery()).execute().actionGet(); Map m = searchResponse.getHits().getAt(0).getFields(); // ensure single geohash was indexed @@ -841,7 +841,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { .field("lon", -74.0059731).endObject().endObject()).setRefreshPolicy(IMMEDIATE).get(); // match all search with geohash field (includes prefixes) - SearchResponse searchResponse = client().prepareSearch().addField("location.geohash").setQuery(matchAllQuery()).execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch().addStoredField("location.geohash").setQuery(matchAllQuery()).execute().actionGet(); Map m = searchResponse.getHits().getAt(0).getFields(); List hashes = m.get("location.geohash").values(); @@ -872,11 +872,11 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { } // query by geohash subfield - SearchResponse searchResponse = client().prepareSearch().addField("location.geohash").setQuery(matchAllQuery()).execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch().addStoredField("location.geohash").setQuery(matchAllQuery()).execute().actionGet(); assertEquals(numDocs, searchResponse.getHits().totalHits()); // query by latlon subfield - searchResponse = client().prepareSearch().addField("location.latlon").setQuery(matchAllQuery()).execute().actionGet(); + searchResponse = client().prepareSearch().addStoredField("location.latlon").setQuery(matchAllQuery()).execute().actionGet(); assertEquals(numDocs, searchResponse.getHits().totalHits()); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java index 627f268545a..0133d3e5943 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java @@ -159,7 +159,7 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { final AtomicBoolean stopped = new AtomicBoolean(false); final CyclicBarrier barrier = new CyclicBarrier(2); final AtomicReference lastIntroducedFieldName = new AtomicReference<>(); - final AtomicReference error = new AtomicReference<>(); + final AtomicReference error = new AtomicReference<>(); final Thread updater = new Thread() { @Override public void run() { @@ -173,8 +173,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { lastIntroducedFieldName.set(fieldName); mapperService.merge("test", new CompressedXContent(update.toString()), MapperService.MergeReason.MAPPING_UPDATE, false); } - } catch (Throwable t) { - error.set(t); + } catch (Exception e) { + error.set(e); } finally { stopped.set(true); } diff --git a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 090aa906456..da8610f29f6 100644 --- a/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -218,8 +218,8 @@ public class InnerHitBuilderTests extends ESTestCase { innerHits.setExplain(randomBoolean()); innerHits.setVersion(randomBoolean()); innerHits.setTrackScores(randomBoolean()); - innerHits.setFieldNames(randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16))); - innerHits.setFieldDataFields(randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16))); + innerHits.setStoredFieldNames(randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16))); + innerHits.setDocValueFields(randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16))); // Random script fields deduped on their field name. 
Map scriptFields = new HashMap<>(); for (SearchSourceBuilder.ScriptField field: randomListStuff(16, InnerHitBuilderTests::randomScript)) { @@ -294,11 +294,11 @@ public class InnerHitBuilderTests extends ESTestCase { break; case 6: if (randomBoolean()) { - instance.setFieldDataFields(randomValueOtherThan(instance.getFieldDataFields(), () -> { + instance.setDocValueFields(randomValueOtherThan(instance.getDocValueFields(), () -> { return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16)); })); } else { - instance.addFieldDataField(randomAsciiOfLengthBetween(1, 16)); + instance.addDocValueField(randomAsciiOfLengthBetween(1, 16)); } break; case 7: @@ -341,12 +341,12 @@ public class InnerHitBuilderTests extends ESTestCase { HighlightBuilderTests::randomHighlighterBuilder)); break; case 11: - if (instance.getFieldNames() == null || randomBoolean()) { - instance.setFieldNames(randomValueOtherThan(instance.getFieldNames(), () -> { + if (instance.getStoredFieldNames() == null || randomBoolean()) { + instance.setStoredFieldNames(randomValueOtherThan(instance.getStoredFieldNames(), () -> { return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16)); })); } else { - instance.getFieldNames().add(randomAsciiOfLengthBetween(1, 16)); + instance.getStoredFieldNames().add(randomAsciiOfLengthBetween(1, 16)); } break; default: diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 1191806bc8e..2ccae5a287b 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -46,7 +46,7 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.compress.CompressedXContent; import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.env.NodeEnvironment; @@ -198,7 +198,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { } private DiscoveryNode getDiscoveryNode(String id) { - return new DiscoveryNode(id, id, DummyTransportAddress.INSTANCE, Collections.emptyMap(), + return new DiscoveryNode(id, id, LocalTransportAddress.buildUnique(), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT); } @@ -408,7 +408,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { } @Override - public void failShard(String message, Throwable throwable) { + public void failShard(String message, Exception exception) { throw new UnsupportedOperationException(); } @@ -436,14 +436,14 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase { .filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get(); TransportIndexAction.executeIndexRequestOnReplica(request, replica); listener.onResponse(TransportResponse.Empty.INSTANCE); - } catch (Throwable t) { + } catch (Exception t) { listener.onFailure(t); } } @Override - public void failShard(ShardRouting replica, ShardRouting primary, String message, Throwable throwable, Runnable onSuccess, - Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { + public void failShard(ShardRouting replica, ShardRouting primary, String message, Exception exception, Runnable onSuccess, + Consumer onPrimaryDemoted, Consumer onIgnoredFailure) { throw new UnsupportedOperationException(); } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardOperationsLockTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardOperationsLockTests.java new 
file mode 100644 index 00000000000..c9bb9e19866 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardOperationsLockTests.java @@ -0,0 +1,219 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.shard; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class IndexShardOperationsLockTests extends ESTestCase { + + private static ThreadPool threadPool; + + private IndexShardOperationsLock block; + + @BeforeClass + 
public static void setupThreadPool() { + threadPool = new TestThreadPool("IndexShardOperationsLockTests"); + } + + @AfterClass + public static void shutdownThreadPool() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + + @Before + public void createIndexShardOperationsLock() { + block = new IndexShardOperationsLock(new ShardId("blubb", "id", 0), logger, threadPool); + } + + @After + public void checkNoInflightOperations() { + assertThat(block.semaphore.availablePermits(), equalTo(Integer.MAX_VALUE)); + assertThat(block.getActiveOperationsCount(), equalTo(0)); + } + + public void testAllOperationsInvoked() throws InterruptedException, TimeoutException, ExecutionException { + int numThreads = 10; + + List> futures = new ArrayList<>(); + List operationThreads = new ArrayList<>(); + CountDownLatch latch = new CountDownLatch(numThreads / 2); + for (int i = 0; i < numThreads; i++) { + PlainActionFuture future = new PlainActionFuture() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + super.onResponse(releasable); + } + }; + Thread thread = new Thread() { + public void run() { + latch.countDown(); + block.acquire(future, ThreadPool.Names.GENERIC, true); + } + }; + futures.add(future); + operationThreads.add(thread); + } + + CountDownLatch blockFinished = new CountDownLatch(1); + threadPool.generic().execute(() -> { + try { + latch.await(); + blockAndWait().close(); + blockFinished.countDown(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + for (Thread thread : operationThreads) { + thread.start(); + } + + for (PlainActionFuture future : futures) { + assertNotNull(future.get(1, TimeUnit.MINUTES)); + } + + for (Thread thread : operationThreads) { + thread.join(); + } + + blockFinished.await(); + } + + + public void testOperationsInvokedImmediatelyIfNoBlock() throws ExecutionException, InterruptedException { + PlainActionFuture future = new PlainActionFuture<>(); + 
block.acquire(future, ThreadPool.Names.GENERIC, true); + assertTrue(future.isDone()); + future.get().close(); + } + + public void testOperationsIfClosed() throws ExecutionException, InterruptedException { + PlainActionFuture future = new PlainActionFuture<>(); + block.close(); + block.acquire(future, ThreadPool.Names.GENERIC, true); + ExecutionException exception = expectThrows(ExecutionException.class, future::get); + assertThat(exception.getCause(), instanceOf(IndexShardClosedException.class)); + } + + public void testBlockIfClosed() throws ExecutionException, InterruptedException { + block.close(); + expectThrows(IndexShardClosedException.class, () -> block.blockOperations(randomInt(10), TimeUnit.MINUTES, + () -> { throw new IllegalArgumentException("fake error"); })); + } + + public void testOperationsDelayedIfBlock() throws ExecutionException, InterruptedException, TimeoutException { + PlainActionFuture future = new PlainActionFuture<>(); + try (Releasable releasable = blockAndWait()) { + block.acquire(future, ThreadPool.Names.GENERIC, true); + assertFalse(future.isDone()); + } + future.get(1, TimeUnit.MINUTES).close(); + } + + protected Releasable blockAndWait() throws InterruptedException { + CountDownLatch blockAcquired = new CountDownLatch(1); + CountDownLatch releaseBlock = new CountDownLatch(1); + CountDownLatch blockReleased = new CountDownLatch(1); + boolean throwsException = randomBoolean(); + IndexShardClosedException exception = new IndexShardClosedException(new ShardId("blubb", "id", 0)); + threadPool.generic().execute(() -> { + try { + block.blockOperations(1, TimeUnit.MINUTES, () -> { + try { + blockAcquired.countDown(); + releaseBlock.await(); + if (throwsException) { + throw exception; + } + } catch (InterruptedException e) { + throw new RuntimeException(); + } + }); + } catch (Exception e) { + if (e != exception) { + throw new RuntimeException(e); + } + } finally { + blockReleased.countDown(); + } + }); + blockAcquired.await(); + return () -> 
{ + releaseBlock.countDown(); + try { + blockReleased.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }; + } + + public void testActiveOperationsCount() throws ExecutionException, InterruptedException { + PlainActionFuture future1 = new PlainActionFuture<>(); + block.acquire(future1, ThreadPool.Names.GENERIC, true); + assertTrue(future1.isDone()); + assertThat(block.getActiveOperationsCount(), equalTo(1)); + + PlainActionFuture future2 = new PlainActionFuture<>(); + block.acquire(future2, ThreadPool.Names.GENERIC, true); + assertTrue(future2.isDone()); + assertThat(block.getActiveOperationsCount(), equalTo(2)); + + future1.get().close(); + assertThat(block.getActiveOperationsCount(), equalTo(1)); + future1.get().close(); // check idempotence + assertThat(block.getActiveOperationsCount(), equalTo(1)); + future2.get().close(); + assertThat(block.getActiveOperationsCount(), equalTo(0)); + + try (Releasable releasable = blockAndWait()) { + assertThat(block.getActiveOperationsCount(), equalTo(0)); + } + + PlainActionFuture future3 = new PlainActionFuture<>(); + block.acquire(future3, ThreadPool.Names.GENERIC, true); + assertTrue(future3.isDone()); + assertThat(block.getActiveOperationsCount(), equalTo(1)); + future3.get().close(); + assertThat(block.getActiveOperationsCount(), equalTo(0)); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 862be713030..a0813fb572f 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; 
import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.InternalClusterInfoService; @@ -68,7 +69,7 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -105,6 +106,7 @@ import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.file.Files; @@ -121,6 +123,7 @@ import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -295,13 +298,13 @@ public class IndexShardTests extends ESSingleNodeTestCase { client().admin().indices().prepareDelete("test").get(); assertThat(indexShard.getActiveOperationsCount(), equalTo(0)); try { - indexShard.acquirePrimaryOperationLock(); + indexShard.acquirePrimaryOperationLock(null, ThreadPool.Names.INDEX); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected } try { - indexShard.acquireReplicaOperationLock(indexShard.getPrimaryTerm()); + indexShard.acquireReplicaOperationLock(indexShard.getPrimaryTerm(), null, 
ThreadPool.Names.INDEX); fail("we should not be able to increment anymore"); } catch (IndexShardClosedException e) { // expected @@ -339,21 +342,33 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertEquals(0, indexShard.getActiveOperationsCount()); if (newPrimaryShardRouting.isRelocationTarget() == false) { try { - indexShard.acquireReplicaOperationLock(primaryTerm); + indexShard.acquireReplicaOperationLock(primaryTerm, null, ThreadPool.Names.INDEX); fail("shard shouldn't accept operations as replica"); } catch (IllegalStateException ignored) { } } - Releasable operation1 = indexShard.acquirePrimaryOperationLock(); + Releasable operation1 = acquirePrimaryOperationLockBlockingly(indexShard); assertEquals(1, indexShard.getActiveOperationsCount()); - Releasable operation2 = indexShard.acquirePrimaryOperationLock(); + Releasable operation2 = acquirePrimaryOperationLockBlockingly(indexShard); assertEquals(2, indexShard.getActiveOperationsCount()); Releasables.close(operation1, operation2); assertEquals(0, indexShard.getActiveOperationsCount()); } + private Releasable acquirePrimaryOperationLockBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException { + PlainActionFuture fut = new PlainActionFuture<>(); + indexShard.acquirePrimaryOperationLock(fut, ThreadPool.Names.INDEX); + return fut.get(); + } + + private Releasable acquireReplicaOperationLockBlockingly(IndexShard indexShard, long opPrimaryTerm) throws ExecutionException, InterruptedException { + PlainActionFuture fut = new PlainActionFuture<>(); + indexShard.acquireReplicaOperationLock(opPrimaryTerm, fut, ThreadPool.Names.INDEX); + return fut.get(); + } + public void testOperationLocksOnReplicaShards() throws InterruptedException, ExecutionException, IOException { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get()); ensureGreen("test"); @@ -399,20 +414,20 @@ 
public class IndexShardTests extends ESSingleNodeTestCase { assertEquals(0, indexShard.getActiveOperationsCount()); if (newShardRouting.primary() == false) { try { - indexShard.acquirePrimaryOperationLock(); + indexShard.acquirePrimaryOperationLock(null, ThreadPool.Names.INDEX); fail("shard shouldn't accept primary ops"); } catch (IllegalStateException ignored) { } } - Releasable operation1 = indexShard.acquireReplicaOperationLock(primaryTerm); + Releasable operation1 = acquireReplicaOperationLockBlockingly(indexShard, primaryTerm); assertEquals(1, indexShard.getActiveOperationsCount()); - Releasable operation2 = indexShard.acquireReplicaOperationLock(primaryTerm); + Releasable operation2 = acquireReplicaOperationLockBlockingly(indexShard, primaryTerm); assertEquals(2, indexShard.getActiveOperationsCount()); try { - indexShard.acquireReplicaOperationLock(primaryTerm - 1); + indexShard.acquireReplicaOperationLock(primaryTerm - 1, null, ThreadPool.Names.INDEX); fail("you can not increment the operation counter with an older primary term"); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("operation term")); @@ -420,7 +435,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { } // but you can increment with a newer one.. - indexShard.acquireReplicaOperationLock(primaryTerm + 1 + randomInt(20)).close(); + acquireReplicaOperationLockBlockingly(indexShard, primaryTerm + 1 + randomInt(20)).close(); Releasables.close(operation1, operation2); assertEquals(0, indexShard.getActiveOperationsCount()); } @@ -448,7 +463,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { public static void write(ShardStateMetaData shardStateMetaData, Path... 
shardPaths) throws IOException { - ShardStateMetaData.FORMAT.write(shardStateMetaData, shardStateMetaData.legacyVersion, shardPaths); + ShardStateMetaData.FORMAT.write(shardStateMetaData, shardPaths); } public void testDurableFlagHasEffect() { @@ -710,7 +725,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { } @Override - public void postIndex(Engine.Index index, Throwable ex) { + public void postIndex(Engine.Index index, Exception ex) { postIndexException.incrementAndGet(); } @@ -726,7 +741,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { } @Override - public void postDelete(Engine.Delete delete, Throwable ex) { + public void postDelete(Engine.Delete delete, Exception ex) { postDeleteException.incrementAndGet(); } @@ -882,13 +897,18 @@ public class IndexShardTests extends ESSingleNodeTestCase { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); + assertBusy(() -> assertThat(shard.state(), equalTo(IndexShardState.STARTED))); CountDownLatch latch = new CountDownLatch(1); Thread recoveryThread = new Thread(() -> { latch.countDown(); - shard.relocated("simulated recovery"); + try { + shard.relocated("simulated recovery"); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } }); - try (Releasable ignored = shard.acquirePrimaryOperationLock()) { + try (Releasable ignored = acquirePrimaryOperationLockBlockingly(shard)) { // start finalization of recovery recoveryThread.start(); latch.await(); @@ -898,12 +918,50 @@ public class IndexShardTests extends ESSingleNodeTestCase { // recovery can be now finalized recoveryThread.join(); assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); - try (Releasable ignored = shard.acquirePrimaryOperationLock()) { + try (Releasable ignored = acquirePrimaryOperationLockBlockingly(shard)) { // lock can again be acquired 
assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); } } + public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").setSettings( + Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + ).get()); + ensureGreen(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService test = indicesService.indexService(resolveIndex("test")); + final IndexShard shard = test.getShardOrNull(0); + assertBusy(() -> assertThat(shard.state(), equalTo(IndexShardState.STARTED))); + Thread recoveryThread = new Thread(() -> { + try { + shard.relocated("simulated recovery"); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + recoveryThread.start(); + List> onLockAcquiredActions = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + PlainActionFuture onLockAcquired = new PlainActionFuture() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + super.onResponse(releasable); + } + }; + shard.acquirePrimaryOperationLock(onLockAcquired, ThreadPool.Names.INDEX); + onLockAcquiredActions.add(onLockAcquired); + } + + for (PlainActionFuture onLockAcquired : onLockAcquiredActions) { + assertNotNull(onLockAcquired.get(30, TimeUnit.SECONDS)); + } + + recoveryThread.join(); + } + public void testStressRelocated() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setSettings( Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) @@ -920,10 +978,10 @@ public class IndexShardTests extends ESSingleNodeTestCase { indexThreads[i] = new Thread() { @Override public void run() { - try (Releasable operationLock = shard.acquirePrimaryOperationLock()) { + try (Releasable operationLock = acquirePrimaryOperationLockBlockingly(shard)) { allPrimaryOperationLocksAcquired.countDown(); barrier.await(); - } catch (InterruptedException | 
BrokenBarrierException e) { + } catch (InterruptedException | BrokenBarrierException | ExecutionException e) { throw new RuntimeException(e); } } @@ -932,7 +990,11 @@ public class IndexShardTests extends ESSingleNodeTestCase { } AtomicBoolean relocated = new AtomicBoolean(); final Thread recoveryThread = new Thread(() -> { - shard.relocated("simulated recovery"); + try { + shard.relocated("simulated recovery"); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } relocated.set(true); }); // ensure we wait for all primary operation locks to be acquired @@ -974,7 +1036,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { routing = ShardRoutingHelper.reinit(routing); IndexShard newShard = test.createShard(routing); newShard.updateRoutingEntry(routing); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode)); assertTrue(newShard.recoverFromStore()); assertEquals(translogOps, newShard.recoveryState().getTranslog().recoveredOperations()); @@ -1001,7 +1063,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { routing = ShardRoutingHelper.reinit(routing, UnassignedInfo.Reason.INDEX_CREATED); IndexShard newShard = test.createShard(routing); newShard.updateRoutingEntry(routing); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode)); 
assertTrue(newShard.recoverFromStore()); @@ -1018,7 +1080,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); @@ -1114,7 +1176,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { Store targetStore = test_target_shard.store(); test_target_shard.updateRoutingEntry(routing); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); test_target_shard.markAsRecovering("store", new RecoveryState(routing.shardId(), routing.primary(), RecoveryState.Type.SNAPSHOT, routing.restoreSource(), localNode)); assertTrue(test_target_shard.restoreFromRepository(new IndexShardRepository() { @Override @@ -1321,7 +1383,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; shard.close("simon says", false); AtomicReference shardRef = new AtomicReference<>(); - List failures = new ArrayList<>(); + List failures = new ArrayList<>(); IndexingOperationListener listener = new IndexingOperationListener() { @Override @@ -1331,9 +1393,9 @@ public class IndexShardTests extends ESSingleNodeTestCase { // this is all IMC needs to do - check current memory and refresh assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); shardRef.get().refresh("test"); - } catch (Throwable t) { - failures.add(t); - throw t; + } catch 
(Exception e) { + failures.add(e); + throw e; } } @@ -1345,9 +1407,9 @@ public class IndexShardTests extends ESSingleNodeTestCase { // this is all IMC needs to do - check current memory and refresh assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); shardRef.get().refresh("test"); - } catch (Throwable t) { - failures.add(t); - throw t; + } catch (Exception e) { + failures.add(e); + throw e; } } }; @@ -1398,7 +1460,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { } public static final IndexShard recoverShard(IndexShard newShard) throws IOException { - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), newShard.routingEntry().primary(), RecoveryState.Type.STORE, localNode, localNode)); assertTrue(newShard.recoverFromStore()); newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted()); @@ -1410,8 +1472,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, - indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners) - ); + indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners)); return newShard; } @@ -1439,7 +1500,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ShardRouting routing = getInitializingShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); IndexShard newShard = 
test.createShard(routing); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode)); List operations = new ArrayList<>(); operations.add(new Translog.Index("testtype", "1", BytesReference.toBytes(jsonBuilder().startObject().field("foo", "bar").endObject().bytes()))); @@ -1467,7 +1528,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { test.removeShard(0, "b/c britta says so"); IndexShard newShard = test.createShard(routing); newShard.shardRouting = routing; - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode)); // Shard is still inactive since we haven't started recovering yet assertFalse(newShard.isActive()); @@ -1495,7 +1556,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ShardRouting routing = getInitializingShardRouting(shard.routingEntry()); test.removeShard(0, "b/c britta says so"); IndexShard newShard = test.createShard(routing); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode)); // 
Shard is still inactive since we haven't started recovering yet assertFalse(newShard.isActive()); @@ -1530,7 +1591,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { IndexShard shard = test.getShardOrNull(0); ShardRouting routing = ShardRoutingHelper.initWithSameId(shard.routingEntry()); test.removeShard(0, "b/c simon says so"); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); { final IndexShard newShard = test.createShard(routing); newShard.updateRoutingEntry(routing); diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index 8d86e64a391..d1cf8b32f58 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -51,7 +51,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postIndex(Engine.Index index, Throwable ex) { + public void postIndex(Engine.Index index, Exception ex) { postIndexException.incrementAndGet(); } @@ -67,7 +67,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ } @Override - public void postDelete(Engine.Delete delete, Throwable ex) { + public void postDelete(Engine.Delete delete, Exception ex) { postDeleteException.incrementAndGet(); } }; @@ -83,7 +83,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ throw new RuntimeException(); } @Override - public void postIndex(Engine.Index index, Throwable ex) { + public void postIndex(Engine.Index index, Exception ex) { throw new RuntimeException(); } @Override @@ -96,7 +96,7 @@ public class IndexingOperationListenerTests extends ESTestCase{ throw new 
RuntimeException(); } @Override - public void postDelete(Engine.Delete delete, Throwable ex) { + public void postDelete(Engine.Delete delete, Exception ex) { throw new RuntimeException(); } }; diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 4938f686f60..79b0773481e 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -115,7 +115,7 @@ public class RefreshListenersTests extends ESTestCase { BigArrays.NON_RECYCLING_INSTANCE); Engine.EventListener eventListener = new Engine.EventListener() { @Override - public void onFailedEngine(String reason, @Nullable Throwable t) { + public void onFailedEngine(String reason, @Nullable Exception e) { // we don't need to notify anybody in this test } }; @@ -251,7 +251,7 @@ public class RefreshListenersTests extends ESTestCase { getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); assertEquals(Arrays.asList(testFieldValue), visitor.fields().get("test")); } - } catch (Throwable t) { + } catch (Exception t) { throw new RuntimeException("failure on the [" + iteration + "] iteration of thread [" + threadId + "]", t); } } @@ -279,7 +279,7 @@ public class RefreshListenersTests extends ESTestCase { document.add(uidField); document.add(versionField); BytesReference source = new BytesArray(new byte[] { 1 }); - ParsedDocument doc = new ParsedDocument(versionField, id, type, null, -1, -1, Arrays.asList(document), source, null); + ParsedDocument doc = new ParsedDocument(versionField, id, type, null, -1, -1, Arrays.asList(document), source, null); Engine.Index index = new Engine.Index(new Term("_uid", uid), doc); engine.index(index); return index; @@ -290,7 +290,7 @@ public class RefreshListenersTests extends ESTestCase { * When the listener is called this 
captures it's only argument. */ AtomicReference forcedRefresh = new AtomicReference<>(); - private volatile Throwable error; + private volatile Exception error; @Override public void accept(Boolean forcedRefresh) { @@ -298,7 +298,7 @@ public class RefreshListenersTests extends ESTestCase { assertNotNull(forcedRefresh); Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh); assertNull("Listener called twice", oldValue); - } catch (Throwable e) { + } catch (Exception e) { error = e; } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index b2bd7e1f9ff..749b1621e4d 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -45,7 +45,7 @@ public class ShardPathTests extends ESTestCase { ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, "0xDEADBEEF", AllocationId.newInitializing()), 2, path); + ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, "0xDEADBEEF", AllocationId.newInitializing()), path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); assertEquals(path, shardPath.getDataPath()); assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID()); @@ -65,7 +65,7 @@ public class ShardPathTests extends ESTestCase { Path[] paths = env.availableShardPaths(shardId); assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); int id = randomIntBetween(1, 10); - ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, indexUUID, AllocationId.newInitializing()), id, paths); + ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, indexUUID, 
AllocationId.newInitializing()), paths); ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); fail("Expected IllegalStateException"); } catch (IllegalStateException e) { @@ -82,7 +82,7 @@ public class ShardPathTests extends ESTestCase { Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); int id = randomIntBetween(1, 10); - ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), id, path); + ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), path); ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); fail("Expected IllegalStateException"); } catch (IllegalStateException e) { @@ -124,7 +124,7 @@ public class ShardPathTests extends ESTestCase { final boolean includeNodeId = randomBoolean(); indexSettings = indexSettingsBuilder.put(IndexMetaData.SETTING_DATA_PATH, "custom").build(); nodeSettings = Settings.builder().put(Environment.PATH_SHARED_DATA_SETTING.getKey(), path.toAbsolutePath().toAbsolutePath()) - .put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), includeNodeId).build(); + .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), includeNodeId).build(); if (includeNodeId) { customPath = path.resolve("custom").resolve("0"); } else { @@ -139,7 +139,7 @@ public class ShardPathTests extends ESTestCase { ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, indexUUID, AllocationId.newInitializing()), 2, path); + ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, indexUUID, AllocationId.newInitializing()), path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), 
indexSettings)); boolean found = false; for (Path p : env.nodeDataPaths()) { diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 84d50c6620f..a8f8a9f802d 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -193,7 +193,7 @@ public class CorruptedFileIT extends ESIntegTestCase { * run the checkindex. if the corruption is still there we will catch it. */ final CountDownLatch latch = new CountDownLatch(numShards * 3); // primary + 2 replicas - final CopyOnWriteArrayList exception = new CopyOnWriteArrayList<>(); + final CopyOnWriteArrayList exception = new CopyOnWriteArrayList<>(); final IndexEventListener listener = new IndexEventListener() { @Override public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, Settings indexSettings) { @@ -215,8 +215,8 @@ public class CorruptedFileIT extends ESIntegTestCase { throw new IOException("index check failure"); } } - } catch (Throwable t) { - exception.add(t); + } catch (Exception e) { + exception.add(e); } finally { store.decRef(); latch.countDown(); @@ -646,12 +646,12 @@ public class CorruptedFileIT extends ESIntegTestCase { return shardRouting; } - private static final boolean isPerCommitFile(String fileName) { + private static boolean isPerCommitFile(String fileName) { // .liv and segments_N are per commit files and might change after corruption return fileName.startsWith("segments") || fileName.endsWith(".liv"); } - private static final boolean isPerSegmentFile(String fileName) { + private static boolean isPerSegmentFile(String fileName) { return isPerCommitFile(fileName) == false; } diff --git a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java index 163d72f4553..590c5c624f1 100644 --- 
a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryIT.java @@ -113,7 +113,7 @@ public class ExceptionRetryIT extends ESIntegTestCase { } refresh(); - SearchResponse searchResponse = client().prepareSearch("index").setSize(numDocs * 2).addField("_id").get(); + SearchResponse searchResponse = client().prepareSearch("index").setSize(numDocs * 2).addStoredField("_id").get(); Set uniqueIds = new HashSet(); long dupCounter = 0; diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 01eead9c96b..c9b3daa806a 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.Term; import org.apache.lucene.mockfile.FilterFileChannel; @@ -456,7 +455,7 @@ public class TranslogTests extends ESTestCase { final BlockingQueue writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); Thread[] threads = new Thread[threadCount]; - final Throwable[] threadExceptions = new Throwable[threadCount]; + final Exception[] threadExceptions = new Exception[threadCount]; final CountDownLatch downLatch = new CountDownLatch(1); for (int i = 0; i < threadCount; i++) { final int threadId = i; @@ -624,7 +623,7 @@ public class TranslogTests extends ESTestCase { final AtomicBoolean run = new AtomicBoolean(true); // any errors on threads - final List errors = new CopyOnWriteArrayList<>(); + final List errors = new CopyOnWriteArrayList<>(); logger.debug("using [{}] readers. [{}] writers. 
flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps); for (int i = 0; i < writers.length; i++) { final String threadName = "writer_" + i; @@ -663,9 +662,9 @@ public class TranslogTests extends ESTestCase { } @Override - public void onFailure(Throwable t) { - logger.error("--> writer [{}] had an error", t, threadName); - errors.add(t); + public void onFailure(Exception e) { + logger.error("--> writer [{}] had an error", e, threadName); + errors.add(e); } }, threadName); writers[i].start(); @@ -678,14 +677,14 @@ public class TranslogTests extends ESTestCase { Set writtenOpsAtView; @Override - public void onFailure(Throwable t) { - logger.error("--> reader [{}] had an error", t, threadId); - errors.add(t); + public void onFailure(Exception e) { + logger.error("--> reader [{}] had an error", e, threadId); + errors.add(e); try { closeView(); - } catch (IOException e) { - logger.error("unexpected error while closing view, after failure"); - t.addSuppressed(e); + } catch (IOException inner) { + inner.addSuppressed(e); + logger.error("unexpected error while closing view, after failure", inner); } } @@ -1240,7 +1239,7 @@ public class TranslogTests extends ESTestCase { final BlockingQueue writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread); Thread[] threads = new Thread[threadCount]; - final Throwable[] threadExceptions = new Throwable[threadCount]; + final Exception[] threadExceptions = new Exception[threadCount]; final CountDownLatch downLatch = new CountDownLatch(1); for (int i = 0; i < threadCount; i++) { final int threadId = i; @@ -1267,10 +1266,10 @@ public class TranslogTests extends ESTestCase { private final int opsPerThread; private final int threadId; private final Collection writtenOperations; - private final Throwable[] threadExceptions; + private final Exception[] threadExceptions; private final Translog translog; - public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId, 
Collection writtenOperations, Throwable[] threadExceptions) { + public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId, Collection writtenOperations, Exception[] threadExceptions) { this.translog = translog; this.downLatch = downLatch; this.opsPerThread = opsPerThread; @@ -1304,7 +1303,7 @@ public class TranslogTests extends ESTestCase { writtenOperations.add(new LocationOperation(op, loc)); afterAdd(); } - } catch (Throwable t) { + } catch (Exception t) { threadExceptions[threadId] = t; } } @@ -1446,7 +1445,7 @@ public class TranslogTests extends ESTestCase { final int threadCount = randomIntBetween(1, 5); Thread[] threads = new Thread[threadCount]; - final Throwable[] threadExceptions = new Throwable[threadCount]; + final Exception[] threadExceptions = new Exception[threadCount]; final CountDownLatch downLatch = new CountDownLatch(1); final CountDownLatch added = new CountDownLatch(randomIntBetween(10, 100)); List writtenOperations = Collections.synchronizedList(new ArrayList<>()); diff --git a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java index 23925f574ff..60e062c0d1c 100644 --- a/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java +++ b/core/src/test/java/org/elasticsearch/indexing/IndexActionIT.java @@ -50,7 +50,7 @@ public class IndexActionIT extends ESIntegTestCase { public void testAutoGenerateIdNoDuplicates() throws Exception { int numberOfIterations = scaledRandomIntBetween(10, 50); for (int i = 0; i < numberOfIterations; i++) { - Throwable firstError = null; + Exception firstError = null; createIndex("test"); int numOfDocs = randomIntBetween(10, 100); logger.info("indexing [{}] docs", numOfDocs); @@ -66,19 +66,19 @@ public class IndexActionIT extends ESIntegTestCase { try { logger.debug("running search with all types"); assertHitCount(client().prepareSearch("test").get(), numOfDocs); - } catch (Throwable t) { - 
logger.error("search for all docs types failed", t); + } catch (Exception e) { + logger.error("search for all docs types failed", e); if (firstError == null) { - firstError = t; + firstError = e; } } try { logger.debug("running search with a specific type"); assertHitCount(client().prepareSearch("test").setTypes("type").get(), numOfDocs); - } catch (Throwable t) { - logger.error("search for all docs of a specific type failed", t); + } catch (Exception e) { + logger.error("search for all docs of a specific type failed", e); if (firstError == null) { - firstError = t; + firstError = e; } } } diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 995beb1742c..7558fbd66fe 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -22,9 +22,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.IndexService; @@ -448,7 +447,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase { try { assertEquals(0, imc.availableShards().size()); ShardRouting routing = newShard.routingEntry(); - DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + DiscoveryNode localNode = new DiscoveryNode("foo", 
LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode)); assertEquals(1, imc.availableShards().size()); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 92a411a95de..17a4b93c240 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; @@ -103,7 +103,7 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); IndexShard shard = index.createShard(newRouting); shard.updateRoutingEntry(newRouting); - final DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, + final DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); shard.markAsRecovering("store", new RecoveryState(shard.shardId(), newRouting.primary(), RecoveryState.Type.SNAPSHOT, newRouting.restoreSource(), localNode)); shard.recoverFromStore(); diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java 
b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index 4312dd6105e..5e636bed939 100644 --- a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -456,9 +456,9 @@ public class AnalyzeActionIT extends ESIntegTestCase { .setAnalyzer("not_exist_analyzer") .get(); fail("shouldn't get here"); - } catch (Throwable t) { - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), startsWith("failed to find global analyzer")); + } catch (Exception e) { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), startsWith("failed to find global analyzer")); } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 78ef13dde56..ad20cb577bb 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -37,7 +37,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.FailedShard; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.repositories.RepositoriesService; @@ -123,7 +123,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice for (Iterator> it = clusterStateServiceMap.entrySet().iterator(); it.hasNext(); ) { DiscoveryNode 
node = it.next().getKey(); - if (state.nodes().nodeExists(node.getId()) == false) { + if (state.nodes().nodeExists(node) == false) { it.remove(); } } @@ -254,7 +254,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) { roles.add(mustHaveRole); } - return new DiscoveryNode("node_" + randomAsciiOfLength(8), DummyTransportAddress.INSTANCE, Collections.emptyMap(), roles, + return new DiscoveryNode("node_" + randomAsciiOfLength(8), LocalTransportAddress.buildUnique(), Collections.emptyMap(), roles, Version.CURRENT); } diff --git a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 8c724efdfc7..5f7f26cd38c 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -68,14 +68,14 @@ public class FlushIT extends ESIntegTestCase { // don't use assertAllSuccessful it uses a randomized context that belongs to a different thread assertThat("Unexpected ShardFailures: " + Arrays.toString(flushResponse.getShardFailures()), flushResponse.getFailedShards(), equalTo(0)); latch.countDown(); - } catch (Throwable ex) { + } catch (Exception ex) { onFailure(ex); } } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { errors.add(e); latch.countDown(); } diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 5932434438c..ea2a80bada5 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.indices.flush; +import org.elasticsearch.action.support.PlainActionFuture; import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -31,9 +32,11 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; /** */ @@ -103,7 +106,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { assertTrue(response.success()); } - public void testSyncFailsIfOperationIsInFlight() throws InterruptedException { + public void testSyncFailsIfOperationIsInFlight() throws InterruptedException, ExecutionException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); @@ -111,7 +114,9 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); final ShardId shardId = shard.shardId(); - try (Releasable operationLock = shard.acquirePrimaryOperationLock()) { + PlainActionFuture fut = new PlainActionFuture<>(); + shard.acquirePrimaryOperationLock(fut, ThreadPool.Names.INDEX); + try (Releasable operationLock = fut.get()) { SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); flushService.attemptSyncedFlush(shardId, listener); listener.latch.await(); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index 485ec020c3f..b71ba63a157 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ 
-57,7 +57,7 @@ public class SyncedFlushUtil { public static final class LatchedListener implements ActionListener { public volatile T result; - public volatile Throwable error; + public volatile Exception error; public final CountDownLatch latch = new CountDownLatch(1); @Override @@ -67,7 +67,7 @@ public class SyncedFlushUtil { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { error = e; latch.countDown(); } diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java index 2981f2d110c..eeaeb84d9a9 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java @@ -69,7 +69,7 @@ public class ConcurrentDynamicTemplateIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { throwable.add(e); latch.countDown(); } @@ -83,4 +83,4 @@ public class ConcurrentDynamicTemplateIT extends ESIntegTestCase { } } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index 68a176e22c3..91fd7bb972b 100644 --- a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -43,6 +43,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; @@ -258,7 
+259,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { // not all shards are allocated with the initial create index. Wait for it.. ensureYellow(); - final Throwable[] threadException = new Throwable[1]; + final AtomicReference threadException = new AtomicReference<>(); final AtomicBoolean stop = new AtomicBoolean(false); Thread[] threads = new Thread[3]; final CyclicBarrier barrier = new CyclicBarrier(threads.length); @@ -298,8 +299,8 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { assertThat(mappings.containsKey(typeName), equalTo(true)); assertThat(((Map) mappings.get(typeName).getSourceAsMap().get("properties")).keySet(), Matchers.hasItem(fieldName)); } - } catch (Throwable t) { - threadException[0] = t; + } catch (Exception e) { + threadException.set(e); stop.set(true); } } @@ -311,8 +312,8 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase { for (Thread t : threads) t.join(); - if (threadException[0] != null) { - throw threadException[0]; + if (threadException.get() != null) { + throw threadException.get(); } } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index 0ae8f71c742..b448f35c21b 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -28,6 +28,8 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -62,7 +64,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.startsWith; @@ -70,7 +71,7 @@ import static org.hamcrest.Matchers.startsWith; /** * Integration tests for InternalCircuitBreakerService */ -@ClusterScope(scope = TEST, randomDynamicTemplates = false) +@ClusterScope(scope = TEST, randomDynamicTemplates = false, numClientNodes = 0, maxNumDataNodes = 1) public class CircuitBreakerServiceIT extends ESIntegTestCase { /** Reset all breaker settings back to their defaults */ private void reset() { @@ -266,17 +267,26 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Perform a search to load field data for the "test" field try { - client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get(); - fail("should have thrown an exception"); + SearchResponse searchResponse = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get(); + if (searchResponse.getShardFailures().length > 0) { + // each shard must have failed with CircuitBreakingException + for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) { + Throwable cause = ExceptionsHelper.unwrap(shardSearchFailure.getCause(), CircuitBreakingException.class); + assertThat(cause, instanceOf(CircuitBreakingException.class)); + assertEquals(((CircuitBreakingException) cause).getByteLimit(), 500L); + } + } else { + fail("should have thrown a CircuitBreakingException"); + } } catch (Exception e) { - final Throwable cause = 
ExceptionsHelper.unwrap(e, CircuitBreakingException.class); - assertNotNull("CircuitBreakingException is not the cause of " + e, cause); - String errMsg = "would be larger than limit of [500/500b]]"; - assertThat("Exception: [" + cause.toString() + "] should contain a CircuitBreakingException", + Throwable cause = ExceptionsHelper.unwrap(e, CircuitBreakingException.class); + assertThat(cause, instanceOf(CircuitBreakingException.class)); + assertEquals(((CircuitBreakingException) cause).getByteLimit(), 500L); + assertThat("Exception: [" + cause.toString() + "] should be caused by the parent circuit breaker", cause.toString(), startsWith("CircuitBreakingException[[parent] Data too large")); - assertThat("Exception: [" + cause.toString() + "] should contain a CircuitBreakingException", - cause.toString(), endsWith(errMsg)); } + + reset(); } public void testRequestBreaker() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 0d6d5122006..2c52cd33015 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; @@ -69,8 +69,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { put("indices.recovery.concurrent_small_file_streams", 1).build(); final 
RecoverySettings recoverySettings = new RecoverySettings(settings, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), null, RecoveryState.Type.STORE, randomLong()); Store store = newStore(createTempDir()); RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, recoverySettings.getChunkSize().bytesAsInt(), @@ -119,8 +119,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), null, RecoveryState.Type.STORE, randomLong()); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); @@ -182,8 +182,8 @@ public class RecoverySourceHandlerTests extends ESTestCase { put("indices.recovery.concurrent_small_file_streams", 1).build(); final RecoverySettings recoverySettings = new RecoverySettings(settings, service); StartRecoveryRequest request = new StartRecoveryRequest(shardId, - new DiscoveryNode("b", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT), - new DiscoveryNode("b", 
DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), + new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), null, RecoveryState.Type.STORE, randomLong()); Path tempDir = createTempDir(); Store store = newStore(tempDir, false); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java index bcd614121b6..d0401196b95 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTargetTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryState.File; import org.elasticsearch.indices.recovery.RecoveryState.Index; @@ -59,7 +59,7 @@ public class RecoveryTargetTests extends ESTestCase { private T lastRead; private final AtomicBoolean shouldStop; private final T source; - final AtomicReference error = new AtomicReference<>(); + final AtomicReference error = new AtomicReference<>(); final Version streamVersion; Streamer(AtomicBoolean shouldStop, T source) { @@ -73,7 +73,7 @@ public class RecoveryTargetTests extends ESTestCase { } public T lastRead() throws Throwable { - Throwable t = error.get(); + Exception t = error.get(); if (t != null) { throw t; } @@ -105,8 +105,8 @@ public class RecoveryTargetTests extends ESTestCase { serializeDeserialize(); } serializeDeserialize(); - } catch 
(Throwable t) { - error.set(t); + } catch (Exception e) { + error.set(e); } } } @@ -339,7 +339,8 @@ public class RecoveryTargetTests extends ESTestCase { } public void testStageSequenceEnforcement() { - final DiscoveryNode discoveryNode = new DiscoveryNode("1", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + final DiscoveryNode discoveryNode = new DiscoveryNode("1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), + Version.CURRENT); Stage[] stages = Stage.values(); int i = randomIntBetween(0, stages.length - 1); int j; diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 81c50cc4f9c..2ad8ebb52f9 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -42,7 +42,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; @@ -126,7 +126,7 @@ public class RareClusterStateIT extends ESIntegTestCase { // inject a node ClusterState.Builder builder = ClusterState.builder(currentState); builder.nodes(DiscoveryNodes.builder(currentState.nodes()).put(new DiscoveryNode("_non_existent", - DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT))); + LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT))); // open index final IndexMetaData indexMetaData = 
IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN).build(); @@ -145,7 +145,7 @@ public class RareClusterStateIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { } }); @@ -165,7 +165,7 @@ public class RareClusterStateIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { } }); @@ -260,7 +260,7 @@ public class RareClusterStateIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { putMappingResponse.set(e); } }); @@ -292,7 +292,7 @@ public class RareClusterStateIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { docIndexResponse.set(e); } }); @@ -376,7 +376,7 @@ public class RareClusterStateIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { putMappingResponse.set(e); } }); @@ -403,7 +403,7 @@ public class RareClusterStateIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { docIndexResponse.set(e); } }); diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index b4f66c2e17b..ad26ec71226 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -426,7 +426,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { } }); waitNoPendingTasksOnAll(); diff --git 
a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 493f8b74e04..a5ec8e4ecd7 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndexTemplateAlreadyExistsException; import org.elasticsearch.indices.InvalidAliasNameException; -import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESIntegTestCase; @@ -116,7 +115,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { ensureGreen(); SearchResponse searchResponse = client().prepareSearch("test_index") .setQuery(termQuery("field1", "value1")) - .addField("field1").addField("field2") + .addStoredField("field1").addStoredField("field2") .execute().actionGet(); assertHitCount(searchResponse, 1); @@ -130,7 +129,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { // now only match on one template (template_1) searchResponse = client().prepareSearch("text_index") .setQuery(termQuery("field1", "value1")) - .addField("field1").addField("field2") + .addStoredField("field1").addStoredField("field2") .execute().actionGet(); if (searchResponse.getFailedShards() > 0) { logger.warn("failed search {}", Arrays.toString(searchResponse.getShardFailures())); diff --git a/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java b/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java index e7183dd718e..f2d9aaa5170 100644 --- a/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java @@ -27,8 +27,6 @@ 
import java.util.List; import java.util.Map; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -36,7 +34,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; - public class ConfigurationUtilsTests extends ESTestCase { private Map config; diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index fcc6e04c6c1..53964132abe 100644 --- a/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -74,7 +74,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { public void testExecuteIndexPipelineDoesNotExist() { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); try { @@ -83,7 +83,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); } - verify(failureHandler, never()).accept(any(Throwable.class)); + verify(failureHandler, never()).accept(any(Exception.class)); verify(completionHandler, never()).accept(anyBoolean()); } @@ -98,9 +98,9 @@ public class PipelineExecutionServiceTests extends ESTestCase { new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("does_not_exist"); bulkRequest.add(indexRequest2); @SuppressWarnings("unchecked") 
- BiConsumer failureHandler = mock(BiConsumer.class); + BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); + Consumer completionHandler = mock(Consumer.class); executionService.executeBulkRequest(bulkRequest.requests(), failureHandler, completionHandler); verify(failureHandler, times(1)).accept( argThat(new CustomTypeSafeMatcher("failure handler was not called with the expected arguments") { @@ -126,7 +126,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -141,7 +141,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -169,7 +169,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); 
@@ -193,7 +193,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -213,7 +213,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -231,7 +231,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -253,7 +253,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { doThrow(new 
RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -268,7 +268,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -284,7 +284,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); @SuppressWarnings("unchecked") Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ -298,7 +298,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").setPipeline("_id") .source(Collections.emptyMap()) .ttl(1000L); - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); Consumer completionHandler = mock(Consumer.class); executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); @@ 
-336,8 +336,8 @@ public class PipelineExecutionServiceTests extends ESTestCase { doThrow(error).when(processor).execute(any()); when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, processor)); - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); - Consumer completionHandler = mock(Consumer.class); + BiConsumer requestItemErrorHandler = mock(BiConsumer.class); + Consumer completionHandler = mock(Consumer.class); executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); verify(requestItemErrorHandler, times(numIndexRequests)).accept(any(IndexRequest.class), eq(error)); @@ -358,9 +358,9 @@ public class PipelineExecutionServiceTests extends ESTestCase { when(store.get(pipelineId)).thenReturn(new Pipeline(pipelineId, null, new CompoundProcessor())); @SuppressWarnings("unchecked") - BiConsumer requestItemErrorHandler = mock(BiConsumer.class); + BiConsumer requestItemErrorHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); + Consumer completionHandler = mock(Consumer.class); executionService.executeBulkRequest(bulkRequest.requests(), requestItemErrorHandler, completionHandler); verify(requestItemErrorHandler, never()).accept(any(), any()); @@ -383,7 +383,7 @@ public class PipelineExecutionServiceTests extends ESTestCase { configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"))); executionService.updatePipelineStats(new IngestMetadata(configurationMap)); - Consumer failureHandler = mock(Consumer.class); + Consumer failureHandler = mock(Consumer.class); Consumer completionHandler = mock(Consumer.class); IndexRequest indexRequest = new IndexRequest("_index"); diff --git a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java index d4e0878a216..1510d25b695 100644 --- 
a/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ b/core/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java @@ -258,7 +258,7 @@ public class PipelineStoreTests extends ESTestCase { store.validatePipeline(ingestInfos, putRequest); fail("exception expected"); } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Processor type [remove] is not installed on node [{_node_id2}{local}{local[_id]}]")); + assertThat(e.getMessage(), equalTo("Processor type [remove] is not installed on node [" + node2 + "]")); } ingestInfos.put(node2, new IngestInfo(Arrays.asList(new ProcessorInfo("set"), new ProcessorInfo("remove")))); diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java index a1f4d381911..94a56d11933 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java @@ -54,9 +54,9 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { public void testNegativeSetting() throws InterruptedException { String collector = randomAsciiOfLength(5); Settings settings = Settings.builder().put("monitor.jvm.gc.collector." + collector + ".warn", "-" + randomTimeValue()).build(); - execute(settings, (command, interval) -> null, t -> { - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), allOf(containsString("invalid gc_threshold"), containsString("for [monitor.jvm.gc.collector." + collector + "."))); + execute(settings, (command, interval) -> null, e -> { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), allOf(containsString("invalid gc_threshold"), containsString("for [monitor.jvm.gc.collector." 
+ collector + "."))); }, true, null); } @@ -74,9 +74,9 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { } // we should get an exception that a setting is missing - execute(builder.build(), (command, interval) -> null, t -> { - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), containsString("missing gc_threshold for [monitor.jvm.gc.collector." + collector + ".")); + execute(builder.build(), (command, interval) -> null, e -> { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), containsString("missing gc_threshold for [monitor.jvm.gc.collector." + collector + ".")); }, true, null); } @@ -84,18 +84,18 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { for (final String threshold : new String[] { "warn", "info", "debug" }) { final Settings.Builder builder = Settings.builder(); builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(Integer.MIN_VALUE, -1)); - execute(builder.build(), (command, interval) -> null, t -> { - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be >= 0")); + execute(builder.build(), (command, interval) -> null, e -> { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be >= 0")); }, true, null); } for (final String threshold : new String[] { "warn", "info", "debug" }) { final Settings.Builder builder = Settings.builder(); builder.put("monitor.jvm.gc.overhead." + threshold, randomIntBetween(100 + 1, Integer.MAX_VALUE)); - execute(builder.build(), (command, interval) -> null, t -> { - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), containsString("setting [monitor.jvm.gc.overhead." 
+ threshold + "] must be <= 100")); + execute(builder.build(), (command, interval) -> null, e -> { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), containsString("setting [monitor.jvm.gc.overhead." + threshold + "] must be <= 100")); }, true, null); } @@ -104,9 +104,9 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.info", info); final int warn = randomIntBetween(1, info - 1); infoWarnOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", warn); - execute(infoWarnOutOfOrderBuilder.build(), (command, interval) -> null, t -> { - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), containsString("[monitor.jvm.gc.overhead.warn] must be greater than [monitor.jvm.gc.overhead.info] [" + info + "] but was [" + warn + "]")); + execute(infoWarnOutOfOrderBuilder.build(), (command, interval) -> null, e -> { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), containsString("[monitor.jvm.gc.overhead.warn] must be greater than [monitor.jvm.gc.overhead.info] [" + info + "] but was [" + warn + "]")); }, true, null); final Settings.Builder debugInfoOutOfOrderBuilder = Settings.builder(); @@ -114,9 +114,9 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { final int debug = randomIntBetween(info + 1, 99); debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.debug", debug); debugInfoOutOfOrderBuilder.put("monitor.jvm.gc.overhead.warn", randomIntBetween(debug + 1, 100)); // or the test will fail for the wrong reason - execute(debugInfoOutOfOrderBuilder.build(), (command, interval) -> null, t -> { - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), containsString("[monitor.jvm.gc.overhead.info] must be greater than [monitor.jvm.gc.overhead.debug] [" + debug + "] but was [" + info + "]")); + execute(debugInfoOutOfOrderBuilder.build(), 
(command, interval) -> null, e -> { + assertThat(e, instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), containsString("[monitor.jvm.gc.overhead.info] must be greater than [monitor.jvm.gc.overhead.debug] [" + debug + "] but was [" + info + "]")); }, true, null); } @@ -124,7 +124,7 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { execute(settings, scheduler, null, false, asserts); } - private static void execute(Settings settings, BiFunction> scheduler, Consumer consumer, boolean constructionShouldFail, Runnable asserts) throws InterruptedException { + private static void execute(Settings settings, BiFunction> scheduler, Consumer consumer, boolean constructionShouldFail, Runnable asserts) throws InterruptedException { assert constructionShouldFail == (consumer != null); assert constructionShouldFail == (asserts == null); ThreadPool threadPool = null; @@ -143,7 +143,7 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { service.doStart(); asserts.run(); service.doStop(); - } catch (Throwable t) { + } catch (Exception t) { consumer.accept(t); } } finally { diff --git a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java index 91862e9cd18..278a47ed21f 100644 --- a/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/jvm/JvmMonitorTests.java @@ -48,10 +48,10 @@ public class JvmMonitorTests extends ESTestCase { AtomicBoolean invoked = new AtomicBoolean(); JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), IGNORE) { @Override - void onMonitorFailure(Throwable t) { + void onMonitorFailure(Exception e) { invoked.set(true); - assertThat(t, instanceOf(RuntimeException.class)); - assertThat(t, hasToString(containsString("simulated"))); + assertThat(e, instanceOf(RuntimeException.class)); + assertThat(e, 
hasToString(containsString("simulated"))); } @Override @@ -174,7 +174,7 @@ public class JvmMonitorTests extends ESTestCase { JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(gcThresholds, IGNORE) { @Override - void onMonitorFailure(Throwable t) { + void onMonitorFailure(Exception e) { } @Override @@ -284,7 +284,7 @@ public class JvmMonitorTests extends ESTestCase { final JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), IGNORE) { @Override - void onMonitorFailure(Throwable t) { + void onMonitorFailure(Exception e) { } @Override @@ -358,7 +358,7 @@ public class JvmMonitorTests extends ESTestCase { final JvmGcMonitorService.JvmMonitor monitor = new JvmGcMonitorService.JvmMonitor(Collections.emptyMap(), gcOverheadThreshold) { @Override - void onMonitorFailure(final Throwable t) { + void onMonitorFailure(final Exception e) { } @Override diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 739dcd8b2c6..090517adfcd 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.ToXContent; @@ -117,7 +117,7 @@ public class NodeInfoStreamingTests extends ESTestCase { private NodeInfo createNodeInfo() { Build build = Build.CURRENT; - DiscoveryNode node = 
new DiscoveryNode("test_node", DummyTransportAddress.INSTANCE, + DiscoveryNode node = new DiscoveryNode("test_node", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), VersionUtils.randomVersion(random())); Map serviceAttributes = new HashMap<>(); serviceAttributes.put("test", "attribute"); @@ -129,7 +129,7 @@ public class NodeInfoStreamingTests extends ESTestCase { threadPoolInfos.add(new ThreadPool.Info("test_threadpool", ThreadPool.ThreadPoolType.FIXED, 5)); ThreadPoolInfo threadPoolInfo = new ThreadPoolInfo(threadPoolInfos); Map profileAddresses = new HashMap<>(); - BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[]{DummyTransportAddress.INSTANCE}, DummyTransportAddress.INSTANCE); + BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[]{LocalTransportAddress.buildUnique()}, LocalTransportAddress.buildUnique()); profileAddresses.put("test_address", dummyBoundTransportAddress); TransportInfo transport = new TransportInfo(dummyBoundTransportAddress, profileAddresses); HttpInfo htttpInfo = new HttpInfo(dummyBoundTransportAddress, randomLong()); diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index 0cc30f8d569..0916cad60d5 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -24,9 +24,9 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.ESIntegTestCase; import 
java.util.List; diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index 740a027aecb..d56e1341165 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -30,16 +30,13 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.recovery.RecoveriesCollection; import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryState; -import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; -import java.util.ArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Predicate; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -135,7 +132,8 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase { long startRecovery(RecoveriesCollection collection, RecoveryTargetService.RecoveryListener listener, TimeValue timeValue) { IndicesService indexServices = getInstanceFromNode(IndicesService.class); IndexShard indexShard = 
indexServices.indexServiceSafe(resolveIndex("test")).getShardOrNull(0); - final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT); + final DiscoveryNode sourceNode = new DiscoveryNode("id", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), + Version.CURRENT); return collection.startRecovery(indexShard, sourceNode, listener, timeValue); } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index fd5da198ed2..2d778de37c9 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -218,7 +218,7 @@ public class RelocationIT extends ESIntegTestCase { for (int i = 0; i < 10; i++) { try { logger.info("--> START search test round {}", i + 1); - SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).setNoFields().execute().actionGet().getHits(); + SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).setNoStoredFields().execute().actionGet().getHits(); ranOnce = true; if (hits.totalHits() != indexer.totalIndexedDocs()) { int[] hitIds = new int[(int) indexer.totalIndexedDocs()]; @@ -432,22 +432,23 @@ public class RelocationIT extends ESIntegTestCase { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/18553") public void testIndexAndRelocateConcurrently() throws ExecutionException, InterruptedException { + int halfNodes = randomIntBetween(1, 3); Settings blueSetting = Settings.builder().put("node.attr.color", "blue").build(); - InternalTestCluster.Async> blueFuture = internalCluster().startNodesAsync(blueSetting, blueSetting); + InternalTestCluster.Async> blueFuture = internalCluster().startNodesAsync(halfNodes, blueSetting); Settings redSetting = 
Settings.builder().put("node.attr.color", "red").build(); - InternalTestCluster.Async> redFuture = internalCluster().startNodesAsync(redSetting, redSetting); + InternalTestCluster.Async> redFuture = internalCluster().startNodesAsync(halfNodes, redSetting); blueFuture.get(); redFuture.get(); logger.info("blue nodes: {}", blueFuture.get()); logger.info("red nodes: {}", redFuture.get()); - ensureStableCluster(4); + ensureStableCluster(halfNodes * 2); assertAcked(prepareCreate("test").setSettings(Settings.builder() .put("index.routing.allocation.exclude.color", "blue") - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(indexSettings()))); + .put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) // NORELEASE: set to randomInt(halfNodes - 1) once replica data loss is fixed + )); ensureYellow(); assertAllShardsOnNodes("test", redFuture.get().toArray(new String[2])); int numDocs = randomIntBetween(100, 150); @@ -479,9 +480,11 @@ public class RelocationIT extends ESIntegTestCase { numDocs *= 2; logger.info(" --> waiting for relocation to complete"); - ensureGreen("test");// move all shards to the new node (it waits on relocation) + ensureGreen("test"); // move all shards to the new nodes (it waits on relocation) + final int numIters = randomIntBetween(10, 20); for (int i = 0; i < numIters; i++) { + logger.info(" --> checking iteration {}", i); SearchResponse afterRelocation = client().prepareSearch().setSize(ids.size()).get(); assertNoFailures(afterRelocation); assertSearchHits(afterRelocation, ids.toArray(new String[ids.size()])); diff --git a/core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 6bb1716cb0f..051159b448b 100644 --- a/core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -41,11 +41,16 @@ import static org.hamcrest.Matchers.notNullValue; import 
static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -/** - * - */ public class BytesRestResponseTests extends ESTestCase { + class UnknownException extends Exception { + + public UnknownException(final String message, final Throwable cause) { + super(message, cause); + } + + } + public void testWithHeaders() throws Exception { RestRequest request = new FakeRestRequest(); RestChannel channel = randomBoolean() ? new DetailedExceptionRestChannel(request) : new SimpleExceptionRestChannel(request); @@ -61,7 +66,7 @@ public class BytesRestResponseTests extends ESTestCase { RestRequest request = new FakeRestRequest(); RestChannel channel = new SimpleExceptionRestChannel(request); - Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); + Exception t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); BytesRestResponse response = new BytesRestResponse(channel, t); String text = response.content().utf8ToString(); assertThat(text, containsString("ElasticsearchException[an error occurred reading data]")); @@ -74,7 +79,7 @@ public class BytesRestResponseTests extends ESTestCase { RestRequest request = new FakeRestRequest(); RestChannel channel = new DetailedExceptionRestChannel(request); - Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); + Exception t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); BytesRestResponse response = new BytesRestResponse(channel, t); String text = response.content().utf8ToString(); assertThat(text, containsString("{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}")); @@ -85,10 +90,10 @@ public class BytesRestResponseTests extends ESTestCase { RestRequest request = new FakeRestRequest(); RestChannel channel = new SimpleExceptionRestChannel(request); - Throwable t = new 
Throwable("an error occurred reading data", new FileNotFoundException("/foo/bar")); + Exception t = new UnknownException("an error occurred reading data", new FileNotFoundException("/foo/bar")); BytesRestResponse response = new BytesRestResponse(channel, t); String text = response.content().utf8ToString(); - assertThat(text, not(containsString("Throwable[an error occurred reading data]"))); + assertThat(text, not(containsString("UnknownException[an error occurred reading data]"))); assertThat(text, not(containsString("FileNotFoundException[/foo/bar]"))); assertThat(text, not(containsString("error_trace"))); assertThat(text, containsString("\"error\":\"No ElasticsearchException found\"")); @@ -99,10 +104,10 @@ public class BytesRestResponseTests extends ESTestCase { request.params().put("error_trace", "true"); RestChannel channel = new DetailedExceptionRestChannel(request); - Throwable t = new Throwable("an error occurred reading data", new FileNotFoundException("/foo/bar")); + Exception t = new UnknownException("an error occurred reading data", new FileNotFoundException("/foo/bar")); BytesRestResponse response = new BytesRestResponse(channel, t); String text = response.content().utf8ToString(); - assertThat(text, containsString("\"type\":\"throwable\",\"reason\":\"an error occurred reading data\"")); + assertThat(text, containsString("\"type\":\"unknown_exception\",\"reason\":\"an error occurred reading data\"")); assertThat(text, containsString("{\"type\":\"file_not_found_exception\"")); assertThat(text, containsString("\"stack_trace\":\"[an error occurred reading data]")); } @@ -111,14 +116,14 @@ public class BytesRestResponseTests extends ESTestCase { RestRequest request = new FakeRestRequest(); RestChannel channel = new DetailedExceptionRestChannel(request); { - Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); - BytesRestResponse response = new BytesRestResponse(channel, t); + Exception e = new 
ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar")); + BytesRestResponse response = new BytesRestResponse(channel, e); String text = response.content().utf8ToString(); assertThat(text, containsString("{\"root_cause\":[{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}]")); } { - Throwable t = new FileNotFoundException("/foo/bar"); - BytesRestResponse response = new BytesRestResponse(channel, t); + Exception e = new FileNotFoundException("/foo/bar"); + BytesRestResponse response = new BytesRestResponse(channel, e); String text = response.content().utf8ToString(); assertThat(text, containsString("{\"root_cause\":[{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}]")); } diff --git a/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java b/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java index dd6d1dac47b..19b9051dd7e 100644 --- a/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java +++ b/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java @@ -142,7 +142,7 @@ public class RestFilterChainTests extends ESTestCase { assertThat(fakeRestChannel.errors().get(), equalTo(additionalContinueCount)); } - private static enum Operation implements Callback { + private enum Operation implements Callback { CONTINUE_PROCESSING { @Override public void execute(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception { @@ -157,7 +157,7 @@ public class RestFilterChainTests extends ESTestCase { } } - private static interface Callback { + private interface Callback { void execute(RestRequest request, RestChannel channel, NodeClient client, RestFilterChain filterChain) throws Exception; } diff --git a/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java b/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java index 2bb39ad10ea..6542bad5b8a 100644 --- 
a/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java +++ b/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsIT.java @@ -65,7 +65,7 @@ public class SearchWithRejectionsIT extends ESIntegTestCase { for (int i = 0; i < numSearches; i++) { try { responses[i].get(); - } catch (Throwable t) { + } catch (Exception t) { } } awaitBusy(() -> client().admin().indices().prepareStats().execute().actionGet().getTotal().getSearch().getOpenContexts() == 0, 1, TimeUnit.SECONDS); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java index a48facc4d66..0c8b9a22c37 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java @@ -52,9 +52,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; -/** - * - */ @ESIntegTestCase.SuiteScopeTestCase public class FilterIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java index a95df3ff5e6..592861ccce2 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java @@ -58,9 +58,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsNull.notNullValue; -/** - * - */ @ESIntegTestCase.SuiteScopeTestCase public class FiltersIT extends ESIntegTestCase { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java index 
49ef9e1b6b5..5cc6ec58630 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -181,7 +181,7 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase { // Added to debug a test failure where the terms aggregation seems to be reporting two documents with the same value for NUMBER_FIELD_NAME. This will check that after // random indexing each document only has 1 value for NUMBER_FIELD_NAME and it is the correct value. Following this initial change its seems that this call was getting // more that 2000 hits (actual value was 2059) so now it will also check to ensure all hits have the correct index and type - SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME).addField(NUMBER_FIELD_NAME).addSort(SortBuilders.fieldSort(NUMBER_FIELD_NAME) + SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME).addStoredField(NUMBER_FIELD_NAME).addSort(SortBuilders.fieldSort(NUMBER_FIELD_NAME) .order(SortOrder.ASC)).setSize(5000).get(); assertSearchResponse(response); long totalHits = response.getHits().totalHits(); diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 98ae3241dbb..eb6322c151e 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -98,11 +98,11 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase { if (numberOfReplicas == 1 || !ex.getMessage().contains("all shards failed")) { thrownExceptions.add(ex); } - } catch (Throwable t) { + } catch (Exception ex) { if (!criticalException) { - nonCriticalExceptions.add(t); + nonCriticalExceptions.add(ex); } else { - thrownExceptions.add(t); + thrownExceptions.add(ex); } } } diff --git 
a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 8ddfa374ac0..5a8b9fc767f 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -221,12 +221,12 @@ public class SearchSourceBuilderTests extends ESTestCase { for (int i = 0; i < fieldsSize; i++) { fields.add(randomAsciiOfLengthBetween(5, 50)); } - builder.fields(fields); + builder.storedFields(fields); } if (randomBoolean()) { int fieldDataFieldsSize = randomInt(25); for (int i = 0; i < fieldDataFieldsSize; i++) { - builder.fieldDataField(randomAsciiOfLengthBetween(5, 50)); + builder.docValueField(randomAsciiOfLengthBetween(5, 50)); } } if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index f8ca1e1aaf7..68679e89ae6 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -202,7 +202,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { refresh(); // TEST FETCHING _parent from child - SearchResponse searchResponse = client().prepareSearch("test").setQuery(idsQuery("child").addIds("c1")).fields("_parent").execute() + SearchResponse searchResponse = client().prepareSearch("test").setQuery(idsQuery("child").addIds("c1")).storedFields("_parent").execute() .actionGet(); assertNoFailures(searchResponse); assertThat(searchResponse.getHits().totalHits(), equalTo(1L)); @@ -210,7 +210,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1")); // TEST matching on parent - searchResponse = 
client().prepareSearch("test").setQuery(termQuery("_parent#parent", "p1")).fields("_parent").get(); + searchResponse = client().prepareSearch("test").setQuery(termQuery("_parent#parent", "p1")).storedFields("_parent").get(); assertNoFailures(searchResponse); assertThat(searchResponse.getHits().totalHits(), equalTo(2L)); assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2"))); @@ -218,7 +218,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2"))); assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1")); - searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("_parent#parent:p1")).fields("_parent").get(); + searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("_parent#parent:p1")).storedFields("_parent").get(); assertNoFailures(searchResponse); assertThat(searchResponse.getHits().totalHits(), equalTo(2L)); assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2"))); @@ -1394,7 +1394,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { SearchResponse scrollResponse = client().prepareSearch("test") .setScroll(TimeValue.timeValueSeconds(30)) .setSize(1) - .addField("_id") + .addStoredField("_id") .setQuery(query) .execute() .actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index 0debdb263af..985605c4e65 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -415,13 +415,13 @@ public class GeoFilterIT extends ESIntegTestCase { assertThat(hit.getId(), equalTo(key)); } - SearchResponse world = client().prepareSearch().addField("pin").setQuery( + SearchResponse world = client().prepareSearch().addStoredField("pin").setQuery( 
geoBoundingBoxQuery("pin").setCorners(90, -179.99999, -90, 179.99999) ).execute().actionGet(); assertHitCount(world, 53); - SearchResponse distance = client().prepareSearch().addField("pin").setQuery( + SearchResponse distance = client().prepareSearch().addStoredField("pin").setQuery( geoDistanceQuery("pin").distance("425km").point(51.11, 9.851) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java b/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java index 01f98564814..56b33c6007e 100644 --- a/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsIT.java @@ -156,7 +156,7 @@ public class InnerHitsIT extends ESIntegTestCase { .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit( new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message")) .setExplain(true) - .addFieldDataField("comments.message") + .addDocValueField("comments.message") .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap())) .setSize(1) )).get(); @@ -287,7 +287,7 @@ public class InnerHitsIT extends ESIntegTestCase { .setQuery( hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None).innerHit( new InnerHitBuilder() - .addFieldDataField("message") + .addDocValueField("message") .setHighlightBuilder(new HighlightBuilder().field("message")) .setExplain(true).setSize(1) .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java index 875256a0f92..766aff8d274 100644 --- a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryIT.java 
@@ -623,7 +623,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase { assertFirstHit(searchResponse, hasId("ultimate1")); } - private static final void assertEquivalent(String query, SearchResponse left, SearchResponse right) { + private static void assertEquivalent(String query, SearchResponse left, SearchResponse right) { assertNoFailures(left); assertNoFailures(right); SearchHits leftHits = left.getHits(); diff --git a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java index 7f9b9761feb..88af1ddf2b6 100644 --- a/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/searchafter/SearchAfterBuilderTests.java @@ -65,7 +65,7 @@ public class SearchAfterBuilderTests extends ESTestCase { indicesQueriesRegistry = null; } - private final SearchAfterBuilder randomSearchFromBuilder() throws IOException { + private SearchAfterBuilder randomSearchFromBuilder() throws IOException { int numSearchFrom = randomIntBetween(1, 10); SearchAfterBuilder searchAfterBuilder = new SearchAfterBuilder(); Object[] values = new Object[numSearchFrom]; @@ -112,7 +112,7 @@ public class SearchAfterBuilderTests extends ESTestCase { // ensure that every number type remain the same before/after xcontent (de)serialization. // This is not a problem because the final type of each field value is extracted from associated sort field. // This little trick ensure that equals and hashcode are the same when using the xcontent serialization. 
- private final SearchAfterBuilder randomJsonSearchFromBuilder() throws IOException { + private SearchAfterBuilder randomJsonSearchFromBuilder() throws IOException { int numSearchAfter = randomIntBetween(1, 10); XContentBuilder jsonBuilder = XContentFactory.jsonBuilder(); jsonBuilder.startObject(); diff --git a/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index a0ce8f02ea3..668351c6db9 100644 --- a/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -88,7 +88,7 @@ public class SliceBuilderTests extends ESTestCase { indicesQueriesRegistry = null; } - private final SliceBuilder randomSliceBuilder() throws IOException { + private SliceBuilder randomSliceBuilder() throws IOException { int max = randomIntBetween(2, MAX_SLICE); int id = randomInt(max - 1); String field = randomAsciiOfLengthBetween(5, 20); diff --git a/core/src/test/java/org/elasticsearch/search/source/SourceFetchingIT.java b/core/src/test/java/org/elasticsearch/search/source/SourceFetchingIT.java index 33fcb55cada..df147ce106f 100644 --- a/core/src/test/java/org/elasticsearch/search/source/SourceFetchingIT.java +++ b/core/src/test/java/org/elasticsearch/search/source/SourceFetchingIT.java @@ -37,10 +37,10 @@ public class SourceFetchingIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("test").get(); assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); - response = client().prepareSearch("test").addField("bla").get(); + response = client().prepareSearch("test").addStoredField("bla").get(); assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue()); - response = client().prepareSearch("test").addField("_source").get(); + response = client().prepareSearch("test").addStoredField("_source").get(); 
assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue()); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 50fb3f9074b..bbd1bec1d45 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -279,8 +279,8 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { } @Override - public void onFailure(String source, Throwable t) { - logger.warn("failed to execute [{}]", t, source); + public void onFailure(String source, Exception e) { + logger.warn("failed to execute [{}]", e, source); } }); diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index a25c8e0593e..9d571c02c90 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -272,7 +272,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest } @Override - public void onFailure(String source, @Nullable Throwable t) { + public void onFailure(String source, @Nullable Exception e) { countDownLatch.countDown(); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java b/core/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java index 79d1497912a..48a1cc6081e 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -40,6 +40,7 @@ import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -153,7 +154,9 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase { .get(); fail("Shouldn't be here"); } catch (RepositoryException ex) { - assertThat(ex.toString(), containsString("unsupported url protocol [netdoc]")); + assertThat(ex.toString(), + either(containsString("unsupported url protocol [netdoc]")) + .or(containsString("unknown protocol: netdoc"))); // newer versions of JDK 9 } logger.info("--> trying creating url repository with location that is not registered in path.repo setting"); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index e1efbdfaf81..f6049002852 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -2042,7 +2042,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { fail(); } diff --git a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java index 21c13816293..c36082f1475 100644 --- a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java +++ b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java @@ -20,7 +20,7 @@ package org.elasticsearch.test; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingService; +import 
org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.discovery.Discovery; @@ -41,7 +41,7 @@ public class NoopDiscovery implements Discovery { } @Override - public void setRoutingService(RoutingService routingService) { + public void setAllocationService(AllocationService allocationService) { } diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 897fa44b593..20c82e6f518 100644 --- a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -20,16 +20,9 @@ package org.elasticsearch.test.geo; import com.carrotsearch.randomizedtesting.generators.RandomInts; -import org.locationtech.spatial4j.context.jts.JtsSpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Point; -import org.locationtech.spatial4j.shape.Rectangle; -import org.locationtech.spatial4j.shape.impl.Range; import com.vividsolutions.jts.algorithm.ConvexHull; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; - import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.geo.builders.CoordinateCollection; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; @@ -42,6 +35,12 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.search.geo.GeoShapeQueryTests; import org.junit.Assert; +import org.locationtech.spatial4j.context.jts.JtsSpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; +import 
org.locationtech.spatial4j.exception.InvalidShapeException; +import org.locationtech.spatial4j.shape.Point; +import org.locationtech.spatial4j.shape.Rectangle; +import org.locationtech.spatial4j.shape.impl.Range; import java.util.Random; @@ -230,14 +229,10 @@ public class RandomShapeGenerator extends RandomGeoGenerator { // The validate flag will check for these possibilities and bail if an incorrect geometry is created try { pgb.build(); - } catch (Throwable e) { + } catch (AssertionError | InvalidShapeException e) { // jts bug may occasionally misinterpret coordinate order causing an unhelpful ('geom' assertion) // or InvalidShapeException - if (e instanceof InvalidShapeException || e instanceof AssertionError) { - return null; - } - // throw any other exception - throw e; + return null; } } return pgb; diff --git a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 28267e9beb7..974929dddf2 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -19,8 +19,6 @@ package org.elasticsearch.threadpool; -import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -36,7 +34,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.hamcrest.RegexMatcher; -import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.tribe.TribeIT; import java.io.IOException; @@ -46,19 +43,11 @@ import java.lang.management.ThreadMXBean; import java.util.HashSet; import java.util.Map; 
import java.util.Set; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.Executor; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.regex.Pattern; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.sameInstance; /** */ @@ -136,9 +125,9 @@ public class SimpleThreadPoolIT extends ESIntegTestCase { try { new Node(settings); fail("The node startup is supposed to fail"); - } catch(Throwable t) { + } catch(Exception e) { //all good - assertThat(t.getMessage(), containsString("mandatory plugins [non_existing]")); + assertThat(e.getMessage(), containsString("mandatory plugins [non_existing]")); } } diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 9ae029a4aa4..c33db33b1f8 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -153,7 +153,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { }); TransportFuture res = serviceB.submitRequest(nodeA, "sayHello", - new StringMessageRequest("moshe"), new BaseTransportResponseHandler() { + new StringMessageRequest("moshe"), new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -184,7 +184,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } res = serviceB.submitRequest(nodeA, "sayHello", new StringMessageRequest("moshe"), 
- TransportRequestOptions.builder().withCompress(true).build(), new BaseTransportResponseHandler() { + TransportRequestOptions.builder().withCompress(true).build(), new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -233,7 +233,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { }); final Object context = new Object(); final String executor = randomFrom(ThreadPool.THREAD_POOL_TYPES.keySet().toArray(new String[0])); - TransportResponseHandler responseHandler = new BaseTransportResponseHandler() { + TransportResponseHandler responseHandler = new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -336,7 +336,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { TransportFuture res = serviceB.submitRequest(nodeA, "sayHello", TransportRequest.Empty.INSTANCE, TransportRequestOptions.builder().withCompress(true).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public TransportResponse.Empty newInstance() { return TransportResponse.Empty.INSTANCE; @@ -386,7 +386,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { TransportFuture res = serviceB.submitRequest(nodeA, "sayHello", new StringMessageRequest("moshe"), TransportRequestOptions.builder().withCompress(true).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -430,7 +430,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { }); TransportFuture res = serviceB.submitRequest(nodeA, "sayHelloException", - new StringMessageRequest("moshe"), new BaseTransportResponseHandler() { + new StringMessageRequest("moshe"), new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return 
new StringMessageResponse(); @@ -520,7 +520,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { TransportFuture res = serviceB.submitRequest(nodeA, "sayHelloTimeoutNoResponse", new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -575,7 +575,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { final CountDownLatch latch = new CountDownLatch(1); TransportFuture res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse", new StringMessageRequest("2m"), TransportRequestOptions.builder().withTimeout(100).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -612,7 +612,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { // now, try and send another request, this times, with a short timeout res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse", new StringMessageRequest(counter + "ms"), TransportRequestOptions.builder().withTimeout(3000).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -664,7 +664,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { }; final Semaphore requestCompleted = new Semaphore(0); - TransportResponseHandler noopResponseHandler = new BaseTransportResponseHandler() { + TransportResponseHandler noopResponseHandler = new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { @@ -967,7 +967,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Version0Request version0Request = new Version0Request(); 
version0Request.value1 = 1; Version0Response version0Response = serviceA.submitRequest(nodeB, "/version", version0Request, - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public Version0Response newInstance() { return new Version0Response(); @@ -1009,7 +1009,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { version1Request.value1 = 1; version1Request.value2 = 2; Version1Response version1Response = serviceB.submitRequest(nodeA, "/version", version1Request, - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public Version1Response newInstance() { return new Version1Response(); @@ -1055,7 +1055,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { version1Request.value1 = 1; version1Request.value2 = 2; Version1Response version1Response = serviceB.submitRequest(nodeB, "/version", version1Request, - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public Version1Response newInstance() { return new Version1Response(); @@ -1098,7 +1098,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { Version0Request version0Request = new Version0Request(); version0Request.value1 = 1; Version0Response version0Response = serviceA.submitRequest(nodeA, "/version", version0Request, - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public Version0Response newInstance() { return new Version0Response(); @@ -1137,7 +1137,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { serviceB.addFailToSendNoConnectRule(serviceA); TransportFuture res = serviceB.submitRequest(nodeA, "sayHello", - new StringMessageRequest("moshe"), new BaseTransportResponseHandler() { + new StringMessageRequest("moshe"), new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); @@ -1197,7 +1197,7 @@ public abstract 
class AbstractSimpleTransportTestCase extends ESTestCase { TransportFuture res = serviceB.submitRequest(nodeA, "sayHello", new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public StringMessageResponse newInstance() { return new StringMessageResponse(); diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java index c69f56c2cbd..f089e9068be 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java @@ -31,7 +31,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; @@ -114,7 +114,7 @@ public class NettyScheduledPingTests extends ESTestCase { for (int i = 0; i < rounds; i++) { serviceB.submitRequest(nodeA, "sayHello", TransportRequest.Empty.INSTANCE, TransportRequestOptions.builder().withCompress(randomBoolean()).build(), - new BaseTransportResponseHandler() { + new TransportResponseHandler() { @Override public TransportResponse.Empty newInstance() { return TransportResponse.Empty.INSTANCE; diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java index 2bbedd8784b..f361496c537 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java +++ 
b/core/src/test/java/org/elasticsearch/tribe/TribeServiceTests.java @@ -27,7 +27,7 @@ public class TribeServiceTests extends ESTestCase { Settings globalSettings = Settings.builder() .put("node.name", "nodename") .put("path.home", "some/path").build(); - Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + Settings clientSettings = TribeService.buildClientSettings("tribe1", "parent_id", globalSettings, Settings.EMPTY); assertEquals("some/path", clientSettings.get("path.home")); assertEquals("nodename/tribe1", clientSettings.get("node.name")); assertEquals("tribe1", clientSettings.get("tribe.name")); @@ -35,7 +35,9 @@ public class TribeServiceTests extends ESTestCase { assertEquals("false", clientSettings.get("node.master")); assertEquals("false", clientSettings.get("node.data")); assertEquals("false", clientSettings.get("node.ingest")); - assertEquals(7, clientSettings.getAsMap().size()); + assertEquals("false", clientSettings.get("node.local_storage")); + assertEquals("3707202549613653169", clientSettings.get("node.id.seed")); // should be fixed by the parent id and tribe name + assertEquals(9, clientSettings.getAsMap().size()); } public void testEnvironmentSettings() { @@ -45,7 +47,7 @@ public class TribeServiceTests extends ESTestCase { .put("path.conf", "conf/path") .put("path.scripts", "scripts/path") .put("path.logs", "logs/path").build(); - Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + Settings clientSettings = TribeService.buildClientSettings("tribe1", "parent_id", globalSettings, Settings.EMPTY); assertEquals("some/path", clientSettings.get("path.home")); assertEquals("conf/path", clientSettings.get("path.conf")); assertEquals("scripts/path", clientSettings.get("path.scripts")); @@ -54,7 +56,7 @@ public class TribeServiceTests extends ESTestCase { Settings tribeSettings = Settings.builder() .put("path.home", "alternate/path").build(); 
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - TribeService.buildClientSettings("tribe1", globalSettings, tribeSettings); + TribeService.buildClientSettings("tribe1", "parent_id", globalSettings, tribeSettings); }); assertTrue(e.getMessage(), e.getMessage().contains("Setting [path.home] not allowed in tribe client")); } @@ -69,7 +71,7 @@ public class TribeServiceTests extends ESTestCase { .put("transport.host", "3.3.3.3") .put("transport.bind_host", "4.4.4.4") .put("transport.publish_host", "5.5.5.5").build(); - Settings clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, Settings.EMPTY); + Settings clientSettings = TribeService.buildClientSettings("tribe1", "parent_id", globalSettings, Settings.EMPTY); assertEquals("0.0.0.0", clientSettings.get("network.host")); assertEquals("1.1.1.1", clientSettings.get("network.bind_host")); assertEquals("2.2.2.2", clientSettings.get("network.publish_host")); @@ -85,7 +87,7 @@ public class TribeServiceTests extends ESTestCase { .put("transport.host", "6.6.6.6") .put("transport.bind_host", "7.7.7.7") .put("transport.publish_host", "8.8.8.8").build(); - clientSettings = TribeService.buildClientSettings("tribe1", globalSettings, tribeSettings); + clientSettings = TribeService.buildClientSettings("tribe1", "parent_id", globalSettings, tribeSettings); assertEquals("3.3.3.3", clientSettings.get("network.host")); assertEquals("4.4.4.4", clientSettings.get("network.bind_host")); assertEquals("5.5.5.5", clientSettings.get("network.publish_host")); diff --git a/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java b/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java index 3573089fcaa..e81b4decb2d 100644 --- a/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java +++ b/core/src/test/java/org/elasticsearch/update/TimestampTTLBWIT.java @@ -87,7 +87,7 @@ public class TimestampTTLBWIT extends ESIntegTestCase { .setQuery(matchAllQuery()) 
.setSize(randomIntBetween(1, numDocs + 5)) .addSort("_timestamp", order) - .addField("_timestamp") + .addStoredField("_timestamp") .execute().actionGet(); assertNoFailures(searchResponse); SearchHit[] hits = searchResponse.getHits().hits(); diff --git a/core/src/test/java/org/elasticsearch/update/UpdateIT.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java index 55834c181b0..c8cacbc36c3 100644 --- a/core/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -65,11 +65,8 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; -import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -800,7 +797,7 @@ public class UpdateIT extends ESIntegTestCase { final CountDownLatch latch = new CountDownLatch(numberOfThreads); final CountDownLatch startLatch = new CountDownLatch(1); final int numberOfUpdatesPerThread = scaledRandomIntBetween(100, 500); - final List failures = new CopyOnWriteArrayList<>(); + final List failures = new CopyOnWriteArrayList<>(); for (int i = 0; i < numberOfThreads; i++) { Runnable r = new Runnable() { @@ -832,7 +829,7 @@ public class UpdateIT extends ESIntegTestCase { logger.warn("Test was forcefully stopped. 
Client [{}] may still have outstanding requests.", Thread.currentThread().getName()); failures.add(e); Thread.currentThread().interrupt(); - } catch (Throwable e) { + } catch (Exception e) { failures.add(e); } finally { latch.countDown(); @@ -900,7 +897,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { synchronized (failedMap) { incrementMapValue(id, failedMap); } @@ -922,7 +919,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { synchronized (failedMap) { incrementMapValue(id, failedMap); } @@ -976,7 +973,7 @@ public class UpdateIT extends ESIntegTestCase { } } } - } catch (Throwable e) { + } catch (Exception e) { logger.error("Something went wrong", e); failures.add(e); } finally { diff --git a/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java index b2cc794ac6c..e2c572f783a 100644 --- a/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java +++ b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationIT.java @@ -53,7 +53,7 @@ public class ConcurrentDocumentOperationIT extends ESIntegTestCase { } @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { logger.error("Unexpected exception while indexing", e); failure.set(e); latch.countDown(); diff --git a/distribution/licenses/netty-3.10.5.Final.jar.sha1 b/distribution/licenses/netty-3.10.5.Final.jar.sha1 deleted file mode 100644 index 6f190752e9e..00000000000 --- a/distribution/licenses/netty-3.10.5.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ca7d55d246092bddd29b867706e2f6c7db701a0 diff --git a/distribution/licenses/netty-3.10.6.Final.jar.sha1 b/distribution/licenses/netty-3.10.6.Final.jar.sha1 new file mode 100644 index 
00000000000..35872846658 --- /dev/null +++ b/distribution/licenses/netty-3.10.6.Final.jar.sha1 @@ -0,0 +1 @@ +18ed04a0e502896552854926e908509db2987a00 \ No newline at end of file diff --git a/docs/build.gradle b/docs/build.gradle index 660755a1c65..41e3e352b8d 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -30,6 +30,8 @@ integTest { configFile 'scripts/my_script.py' configFile 'userdict_ja.txt' configFile 'KeywordTokenizer.rbbi' + // Whitelist reindexing from the local node so we can test it. + setting 'reindex.remote.whitelist', 'myself' } } @@ -81,3 +83,15 @@ Closure setupTwitter = { String name, int count -> } setupTwitter('twitter', 5) setupTwitter('big_twitter', 120) + +buildRestTests.setups['host'] = ''' + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} +''' diff --git a/docs/plugins/mapper-attachments.asciidoc b/docs/plugins/mapper-attachments.asciidoc index b287decd4cf..f2c034a317e 100644 --- a/docs/plugins/mapper-attachments.asciidoc +++ b/docs/plugins/mapper-attachments.asciidoc @@ -246,7 +246,7 @@ PUT /test/person/1?refresh=true } GET /test/person/_search { - "fields": [ "file.content_type" ], + "stored_fields": [ "file.content_type" ], "query": { "match": { "file.content_type": "text plain" @@ -367,7 +367,7 @@ PUT /test/person/1?refresh=true } GET /test/person/_search { - "fields": [], + "stored_fields": [], "query": { "match": { "file.content": "king queen" diff --git a/docs/plugins/mapper-size.asciidoc b/docs/plugins/mapper-size.asciidoc index 800a640890a..df16d7eb857 100644 --- a/docs/plugins/mapper-size.asciidoc +++ b/docs/plugins/mapper-size.asciidoc @@ -52,7 +52,8 @@ PUT my_index -------------------------- // CONSOLE -The value of the `_size` field is accessible in queries: +The value of 
the `_size` field is accessible in queries, aggregations, scripts, +and when sorting: [source,js] -------------------------- @@ -75,6 +76,26 @@ GET my_index/_search "gt": 10 } } + }, + "aggs": { + "sizes": { + "terms": { + "field": "_size", <2> + "size": 10 + } + } + }, + "sort": [ + { + "_size": { <3> + "order": "desc" + } + } + ], + "script_fields": { + "size": { + "script": "doc['_size']" <4> + } } } -------------------------- @@ -82,3 +103,7 @@ GET my_index/_search // TEST[continued] <1> Querying on the `_size` field +<2> Aggregating on the `_size` field +<3> Sorting on the `_size` field +<4> Accessing the `_size` field in scripts (inline scripts must be modules-security-scripting.html#enable-dynamic-scripting[enabled] for this example to work) + diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index 83855a8aae5..1b955d2a898 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -22,7 +22,7 @@ The top_hits aggregation returns regular search hits, because of this many per h * <> * <> * <> -* <> +* <> * <> ==== Example diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc index dd829e19bc3..90c5d4e3afe 100644 --- a/docs/reference/docs/refresh.asciidoc +++ b/docs/reference/docs/refresh.asciidoc @@ -17,11 +17,12 @@ indexing and a search standpoint. Wait for the changes made by the request to be made visible by a refresh before replying. This doesn't force an immediate refresh, rather, it waits for a -refresh happen. Elasticsearch automatically refreshes shards that have changed +refresh to happen. Elasticsearch automatically refreshes shards that have changed every `index.refresh_interval` which defaults to one second. That setting is -<>. 
The <> API will also -cause the request to return, as will setting `refresh` to `true` on any of the -APIs that support it. +<>. Calling the <> API or +setting `refresh` to `true` on any of the APIs that support it will also +cause a refresh, in turn causing already running requests with `refresh=wait_for` +to return. `false` (the default):: @@ -36,7 +37,7 @@ use `refresh=false`, or, because that is the default, just leave the `refresh` parameter out of the URL. That is the simplest and fastest choice. If you absolutely must have the changes made by a request visible synchronously -with the request then you must get to pick between putting more load on +with the request then you must pick between putting more load on Elasticsearch (`true`) and waiting longer for the response (`wait_for`). Here are a few points that should inform that decision: @@ -97,7 +98,7 @@ search: -------------------------------------------------- PUT /test/test/3 {"test": "test"} -PUT /test/test/4?refresh=true +PUT /test/test/4?refresh=false {"test": "test"} -------------------------------------------------- // CONSOLE diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index aac722e96a3..2afc8e86bb1 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -361,6 +361,60 @@ POST _reindex // CONSOLE // TEST[s/^/PUT source\nGET _cluster\/health?wait_for_status=yellow\n/] +[float] +=== Reindex from Remote + +Reindex supports reindexing from a remote Elasticsearch cluster: + +[source,js] +-------------------------------------------------- +POST _reindex +{ + "source": { + "remote": { + "host": "http://otherhost:9200", + "username": "user", + "password": "pass" + }, + "index": "source", + "query": { + "match": { + "test": "data" + } + } + }, + "dest": { + "index": "dest" + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:host] +// TEST[s/^/PUT source\nGET 
_cluster\/health?wait_for_status=yellow\n/] +// TEST[s/otherhost:9200",/\${host}"/] +// TEST[s/"username": "user",//] +// TEST[s/"password": "pass"//] + +The `host` parameter must contain a scheme, host, and port (e.g. +`https://otherhost:9200`). The `username` and `password` parameters are +optional and when they are present reindex will connect to the remote +Elasticsearch node using basic auth. Be sure to use `https` when using +basic auth or the password will be sent in plain text. + +Remote hosts have to be explicitly whitelisted in elasticsearch.yml using the +`reindex.remote.whitelist` property. It can be set to a comma delimited list +of allowed remote `host` and `port` combinations (e.g. +`otherhost:9200, another:9200`). Scheme is ignored by the whitelist - only host +and port are used. + +This feature should work with remote clusters of any version of Elasticsearch +you are likely to find. This should allow you to upgrade from any version of +Elasticsearch to the current version by reindexing from a cluster of the old +version. + +To enable queries sent to older versions of Elasticsearch the `query` parameter +is sent directly to the remote host without validation or modification. + [float] === URL Parameters diff --git a/docs/reference/indices/shadow-replicas.asciidoc b/docs/reference/indices/shadow-replicas.asciidoc index 60360c147b5..3a0b23852b0 100644 --- a/docs/reference/indices/shadow-replicas.asciidoc +++ b/docs/reference/indices/shadow-replicas.asciidoc @@ -10,12 +10,12 @@ index. 
In order to fully utilize the `index.data_path` and `index.shadow_replicas` settings, you need to allow Elasticsearch to use the same data directory for -multiple instances by setting `node.add_id_to_custom_path` to false in +multiple instances by setting `node.add_lock_id_to_custom_path` to false in elasticsearch.yml: [source,yaml] -------------------------------------------------- -node.add_id_to_custom_path: false +node.add_lock_id_to_custom_path: false -------------------------------------------------- You will also need to indicate to the security manager where the custom indices @@ -114,7 +114,7 @@ settings API: These are non-dynamic settings that need to be configured in `elasticsearch.yml` -`node.add_id_to_custom_path`:: +`node.add_lock_id_to_custom_path`:: Boolean setting indicating whether Elasticsearch should append the node's ordinal to the custom data path. For example, if this is enabled and a path of "/tmp/foo" is used, the first locally-running node will use "/tmp/foo/0", diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 39c5134e23c..2975ef903f3 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -76,8 +76,8 @@ Indices can only be shrunk if they satisfy the following requirements: * The index must have more primary shards than the target index. * The number of primary shards in the target index must be a factor of the - number of primary shards in the source index. must have more primary shards - than the target index. + number of primary shards in the source index. The source index must have + more primary shards than the target index. 
* The index must not contain more than `2,147,483,519` documents in total across all shards that will be shrunk into a single shard on the target index diff --git a/docs/reference/mapping/params/fielddata.asciidoc b/docs/reference/mapping/params/fielddata.asciidoc index 92151c56d49..2e6b63698c7 100644 --- a/docs/reference/mapping/params/fielddata.asciidoc +++ b/docs/reference/mapping/params/fielddata.asciidoc @@ -81,7 +81,7 @@ can move the loading time from the first search request, to the refresh itself. Fielddata filtering can be used to reduce the number of terms loaded into memory, and thus reduce memory usage. Terms can be filtered by _frequency_: -The frequency filter allows you to only load terms whose term frequency falls +The frequency filter allows you to only load terms whose document frequency falls between a `min` and `max` value, which can be expressed an absolute number (when the number is bigger than 1.0) or as a percentage (eg `0.01` is `1%` and `1.0` is `100%`). Frequency is calculated diff --git a/docs/reference/mapping/params/store.asciidoc b/docs/reference/mapping/params/store.asciidoc index d34d1016546..53cac7493ff 100644 --- a/docs/reference/mapping/params/store.asciidoc +++ b/docs/reference/mapping/params/store.asciidoc @@ -48,7 +48,7 @@ PUT my_index/my_type/1 GET my_index/_search { - "fields": [ "title", "date" ] <2> + "stored_fields": [ "title", "date" ] <2> } -------------------------------------------------- // CONSOLE diff --git a/docs/reference/migration/migrate_5_0/fs.asciidoc b/docs/reference/migration/migrate_5_0/fs.asciidoc index 859f3092823..42c8b4ddcea 100644 --- a/docs/reference/migration/migrate_5_0/fs.asciidoc +++ b/docs/reference/migration/migrate_5_0/fs.asciidoc @@ -23,3 +23,9 @@ behavior will be removed. If you are using a multi-cluster setup with both instances of Elasticsearch pointing to the same data path, you will need to add the cluster name to the data path so that different clusters do not overwrite data. 
+ +==== Local files + +Prior to 5.0, nodes that were marked with both `node.data: false` and `node.master: false` (or the now removed `node.client: true`) +didn't write any files or folders to disk. 5.x added persistent node ids, requiring nodes to store that information. As such, all +node types will write a small state file to their data folders. \ No newline at end of file diff --git a/docs/reference/migration/migrate_5_0/plugins.asciidoc b/docs/reference/migration/migrate_5_0/plugins.asciidoc index e1ff497a8f3..79583c6b925 100644 --- a/docs/reference/migration/migrate_5_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_5_0/plugins.asciidoc @@ -140,3 +140,11 @@ remove their `onModule(ActionModule)` implementation. Plugins that register custom `RestHandler`s should implement `ActionPlugin` and remove their `onModule(NetworkModule)` implemnetation. + +==== Mapper-Size plugin + +The metadata field `_size` is not accessible in aggregations, scripts and when +sorting for indices created in 2.x even if the index has been upgraded using the <> API. +If these features are needed in your application it is required to reindex the data with Elasticsearch 5.x. +The easiest way to reindex old indices is to use the `reindex` API, or the reindex UI provided by +the <>. diff --git a/docs/reference/migration/migrate_5_0/search.asciidoc b/docs/reference/migration/migrate_5_0/search.asciidoc index 72b29e8a9e7..09478ee3251 100644 --- a/docs/reference/migration/migrate_5_0/search.asciidoc +++ b/docs/reference/migration/migrate_5_0/search.asciidoc @@ -64,11 +64,15 @@ characteristics as the former `scan` search type. ==== `fields` parameter -The `fields` parameter used to try to retrieve field values from stored -fields, and fall back to extracting from the `_source` if a field is not -marked as stored. Now, the `fields` parameter will only return stored fields +The `fields` parameter has been replaced by `stored_fields`. 
+The `stored_fields` parameter will only return stored fields -- it will no longer extract values from the `_source`. +==== `fielddata_fields` parameter + +The `fielddata_fields` parameter has been deprecated, use the `docvalue_fields` parameter instead. + + ==== search-exists API removed The search exists api has been removed in favour of using the search api with diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc index ffe69aa3cfb..7bfa9dc875c 100644 --- a/docs/reference/migration/migrate_5_0/settings.asciidoc +++ b/docs/reference/migration/migrate_5_0/settings.asciidoc @@ -26,6 +26,8 @@ should be used instead. The `name` setting has been removed and is replaced by `node.name`. Usage of `-Dname=some_node_name` is not supported anymore. +The `node.add_id_to_custom_path` was renamed to `node.add_lock_id_to_custom_path`. + ==== Node attribute settings Node level attributes used for allocation filtering, forced awareness or other node identification / grouping diff --git a/docs/reference/modules/scripting/painless.asciidoc b/docs/reference/modules/scripting/painless.asciidoc index 93fb136913b..ad36cdd6df4 100644 --- a/docs/reference/modules/scripting/painless.asciidoc +++ b/docs/reference/modules/scripting/painless.asciidoc @@ -143,7 +143,7 @@ First, let's look at the source data for a player by submitting the following re ---------------------------------------------------------------- GET hockey/_search { - "fields": [ + "stored_fields": [ "_id", "_source" ], diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 958320ea110..a9adc157bd3 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -143,11 +143,11 @@ include::request/sort.asciidoc[] include::request/source-filtering.asciidoc[] -include::request/fields.asciidoc[] +include::request/stored-fields.asciidoc[] 
include::request/script-fields.asciidoc[] -include::request/fielddata-fields.asciidoc[] +include::request/docvalue-fields.asciidoc[] include::request/post-filter.asciidoc[] diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc new file mode 100644 index 00000000000..b4d2493d853 --- /dev/null +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -0,0 +1,23 @@ +[[search-request-docvalue-fields]] +=== Doc value Fields + +Allows to return the <> representation of a field for each hit, for +example: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query" : { + "match_all": {} + }, + "docvalue_fields" : ["test1", "test2"] +} +-------------------------------------------------- +// CONSOLE + +Doc value fields can work on fields that are not stored. + +Note that if the `docvalue_fields` parameter specifies fields without docvalues it will try to load the value from the fielddata cache +causing the terms for that field to be loaded to memory (cached), which will result in more memory consumption. + diff --git a/docs/reference/search/request/fielddata-fields.asciidoc b/docs/reference/search/request/fielddata-fields.asciidoc deleted file mode 100644 index f3a3508b144..00000000000 --- a/docs/reference/search/request/fielddata-fields.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -[[search-request-fielddata-fields]] -=== Field Data Fields - -Allows to return the <> representation of a field for each hit, for -example: - -[source,js] --------------------------------------------------- -GET /_search -{ - "query" : { - "match_all": {} - }, - "fielddata_fields" : ["test1", "test2"] -} --------------------------------------------------- -// CONSOLE - -Field data fields can work on fields that are not stored. 
- -It's important to understand that using the `fielddata_fields` parameter will -cause the terms for that field to be loaded to memory (cached), which will -result in more memory consumption. diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index c235c37b338..efb7053c179 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -372,7 +372,7 @@ query and the rescore query in `highlight_query`. -------------------------------------------------- GET /_search { - "fields": [ "_id" ], + "stored_fields": [ "_id" ], "query" : { "match": { "content": { diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 345bc9abde2..3c8e0e9f00e 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -72,7 +72,7 @@ Inner hits also supports the following per document features: * <> * <> * <> -* <> +* <> * <> [[nested-inner-hits]] diff --git a/docs/reference/search/request/fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc similarity index 91% rename from docs/reference/search/request/fields.asciidoc rename to docs/reference/search/request/stored-fields.asciidoc index 3483d470ee2..3d5b8c01b47 100644 --- a/docs/reference/search/request/fields.asciidoc +++ b/docs/reference/search/request/stored-fields.asciidoc @@ -1,7 +1,7 @@ [[search-request-fields]] === Fields -WARNING: The `fields` parameter is about fields that are explicitly marked as +WARNING: The `stored_fields` parameter is about fields that are explicitly marked as stored in the mapping, which is off by default and generally not recommended. Use <> instead to select subsets of the original source document to be returned. @@ -13,7 +13,7 @@ by a search hit. 
-------------------------------------------------- GET /_search { - "fields" : ["user", "postDate"], + "stored_fields" : ["user", "postDate"], "query" : { "term" : { "user" : "kimchy" } } @@ -30,7 +30,7 @@ returned, for example: -------------------------------------------------- GET /_search { - "fields" : [], + "stored_fields" : [], "query" : { "term" : { "user" : "kimchy" } } diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index 496f04ea4af..ba36992f6fb 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -83,7 +83,7 @@ hits was computed. part of the document by using `_source_include` & `_source_exclude` (see the <> documentation for more details) -|`fields` |The selective stored fields of the document to return for each hit, +|`stored_fields` |The selective stored fields of the document to return for each hit, comma delimited. Not specifying any value will cause no fields to return. |`sort` |Sorting to perform. 
Can either be in the form of `fieldName`, or diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java index dd2b69696f3..0de5e13c058 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceParser.java @@ -165,8 +165,8 @@ public abstract class MultiValuesSourceParser implement return factory; } - private final void parseMissingAndAdd(final String aggregationName, final String currentFieldName, - XContentParser parser, final Map missing) throws IOException { + private void parseMissingAndAdd(final String aggregationName, final String currentFieldName, + XContentParser parser, final Map missing) throws IOException { XContentParser.Token token = parser.currentToken(); if (token == null) { token = parser.nextToken(); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java index 22f931f4eed..c85e8d17a00 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ConvertProcessor.java @@ -93,7 +93,7 @@ public final class ConvertProcessor extends AbstractProcessor { }; @Override - public final String toString() { + public String toString() { return name().toLowerCase(Locale.ROOT); } diff --git a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 105d42c8c86..a83dd93a17e 100644 --- 
a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -155,7 +155,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri GroovyClassLoader groovyClassLoader = new GroovyClassLoader(loader, configuration); return groovyClassLoader.parseClass(codeSource); - } catch (Throwable e) { + } catch (Exception e) { if (logger.isTraceEnabled()) { logger.trace("Exception compiling Groovy script:", e); } @@ -293,7 +293,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri // NOTE: we truncate the stack because IndyInterface has security issue (needs getClassLoader) // we don't do a security check just as a tradeoff, it cannot really escalate to anything. return AccessController.doPrivileged((PrivilegedAction) script::run); - } catch (Throwable e) { + } catch (Exception e) { if (logger.isTraceEnabled()) { logger.trace("failed to run {}", e, compiledScript); } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoDistanceTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoDistanceTests.java index e9c4bf6e359..0d7dd4e12e2 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoDistanceTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/GeoDistanceTests.java @@ -72,28 +72,28 @@ public class GeoDistanceTests extends ESIntegTestCase { refresh(); - SearchResponse searchResponse1 = client().prepareSearch().addField("_source") + SearchResponse searchResponse1 = client().prepareSearch().addStoredField("_source") .addScriptField("distance", new Script("doc['location'].arcDistance(" + target_lat + "," + target_long + ")")).execute() .actionGet(); Double resultDistance1 = searchResponse1.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultDistance1, 
closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.01d)); - SearchResponse searchResponse2 = client().prepareSearch().addField("_source") + SearchResponse searchResponse2 = client().prepareSearch().addStoredField("_source") .addScriptField("distance", new Script("doc['location'].distance(" + target_lat + "," + target_long + ")")).execute() .actionGet(); Double resultDistance2 = searchResponse2.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultDistance2, closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.01d)); - SearchResponse searchResponse3 = client().prepareSearch().addField("_source") + SearchResponse searchResponse3 = client().prepareSearch().addStoredField("_source") .addScriptField("distance", new Script("doc['location'].arcDistanceInKm(" + target_lat + "," + target_long + ")")) .execute().actionGet(); Double resultArcDistance3 = searchResponse3.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultArcDistance3, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.01d)); - SearchResponse searchResponse4 = client().prepareSearch().addField("_source") + SearchResponse searchResponse4 = client().prepareSearch().addStoredField("_source") .addScriptField("distance", new Script("doc['location'].distanceInKm(" + target_lat + "," + target_long + ")")).execute() .actionGet(); Double resultDistance4 = searchResponse4.getHits().getHits()[0].getFields().get("distance").getValue(); @@ -102,7 +102,7 @@ public class GeoDistanceTests extends ESIntegTestCase { SearchResponse searchResponse5 = client() .prepareSearch() - .addField("_source") + .addStoredField("_source") .addScriptField("distance", new Script("doc['location'].arcDistanceInKm(" + (target_lat) + "," + (target_long + 360) + ")")) .execute().actionGet(); Double resultArcDistance5 = 
searchResponse5.getHits().getHits()[0].getFields().get("distance").getValue(); @@ -111,21 +111,21 @@ public class GeoDistanceTests extends ESIntegTestCase { SearchResponse searchResponse6 = client() .prepareSearch() - .addField("_source") + .addStoredField("_source") .addScriptField("distance", new Script("doc['location'].arcDistanceInKm(" + (target_lat + 360) + "," + (target_long) + ")")) .execute().actionGet(); Double resultArcDistance6 = searchResponse6.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultArcDistance6, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.01d)); - SearchResponse searchResponse7 = client().prepareSearch().addField("_source") + SearchResponse searchResponse7 = client().prepareSearch().addStoredField("_source") .addScriptField("distance", new Script("doc['location'].arcDistanceInMiles(" + target_lat + "," + target_long + ")")) .execute().actionGet(); Double resultDistance7 = searchResponse7.getHits().getHits()[0].getFields().get("distance").getValue(); assertThat(resultDistance7, closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.01d)); - SearchResponse searchResponse8 = client().prepareSearch().addField("_source") + SearchResponse searchResponse8 = client().prepareSearch().addStoredField("_source") .addScriptField("distance", new Script("doc['location'].distanceInMiles(" + target_lat + "," + target_long + ")")) .execute().actionGet(); Double resultDistance8 = searchResponse8.getHits().getHits()[0].getFields().get("distance").getValue(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java index a6349db0d07..b4260cf5530 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java +++ 
b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/HistogramTests.java @@ -858,7 +858,7 @@ public class HistogramTests extends ESIntegTestCase { // constructing the newly expected bucket list int bucketsCount = (int) ((boundsMaxKey - boundsMinKey) / interval) + 1; - long[] extendedValueCounts = new long[bucketsCount]; + long[] extendedValueCounts = new long[valueCounts.length + addedBucketsLeft + addedBucketsRight]; System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length); SearchResponse response = null; diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java index 662d4d2f30c..640c00b291d 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/MinDocCountTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.messy.tests; import com.carrotsearch.hppc.LongHashSet; import com.carrotsearch.hppc.LongSet; import com.carrotsearch.randomizedtesting.generators.RandomStrings; - import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -316,7 +315,8 @@ public class MinDocCountTests extends AbstractTermsTestCase { Thread.sleep(60000); logger.debug("1m passed. 
retrying."); testMinDocCountOnTerms(field, script, order, include, false); - } catch (Throwable secondFailure) { + } catch (Exception secondFailure) { + secondFailure.addSuppressed(ae); logger.error("exception on retry (will re-throw the original in a sec)", secondFailure); } throw ae; diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java index d3871e90510..b7be9693210 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java @@ -102,33 +102,33 @@ public class SearchFieldsTests extends ESIntegTestCase { client().admin().indices().prepareRefresh().execute().actionGet(); - SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field1").execute().actionGet(); + SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field1").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1")); // field2 is not stored, check that it is not extracted from source. 
- searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field2").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field2").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(0)); assertThat(searchResponse.getHits().getAt(0).fields().get("field2"), nullValue()); - searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field3").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field3").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); - searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*3").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); - searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*3").addField("field1").addField("field2").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("*3").addStoredField("field1").addStoredField("field2").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); 
assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2)); @@ -136,20 +136,20 @@ public class SearchFieldsTests extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1")); - searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field*").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("field*").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2)); assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1")); - searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("f*3").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("f*3").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1)); assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); - searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("*").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).source(), nullValue()); @@ -157,7 +157,7 @@ public class SearchFieldsTests extends 
ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1")); assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3")); - searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").addField("_source").execute().actionGet(); + searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addStoredField("*").addStoredField("_source").execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); assertThat(searchResponse.getHits().hits().length, equalTo(1)); assertThat(searchResponse.getHits().getAt(0).source(), notNullValue()); @@ -437,15 +437,15 @@ public class SearchFieldsTests extends ESIntegTestCase { client().admin().indices().prepareRefresh().execute().actionGet(); SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()) - .addField("byte_field") - .addField("short_field") - .addField("integer_field") - .addField("long_field") - .addField("float_field") - .addField("double_field") - .addField("date_field") - .addField("boolean_field") - .addField("binary_field") + .addStoredField("byte_field") + .addStoredField("short_field") + .addStoredField("integer_field") + .addStoredField("long_field") + .addStoredField("float_field") + .addStoredField("double_field") + .addStoredField("date_field") + .addStoredField("boolean_field") + .addStoredField("binary_field") .execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); @@ -478,7 +478,7 @@ public class SearchFieldsTests extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch("my-index") .setTypes("my-type1") - .addField("field1").addField("_routing") + .addStoredField("field1").addStoredField("_routing") .get(); assertThat(searchResponse.getHits().totalHits(), equalTo(1L)); @@ -493,7 +493,7 @@ public class SearchFieldsTests extends ESIntegTestCase { 
.setRefreshPolicy(IMMEDIATE) .get(); - assertFailures(client().prepareSearch("my-index").setTypes("my-type1").addField("field1"), + assertFailures(client().prepareSearch("my-index").setTypes("my-type1").addStoredField("field1"), RestStatus.BAD_REQUEST, containsString("field [field1] isn't a leaf field")); } @@ -557,14 +557,14 @@ public class SearchFieldsTests extends ESIntegTestCase { String field = "field1.field2.field3.field4"; - SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addField(field).get(); + SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addStoredField(field).get(); assertThat(searchResponse.getHits().totalHits(), equalTo(1L)); assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false)); assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1")); assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2")); - searchResponse = client().prepareSearch("my-index").setTypes("my-type2").addField(field).get(); + searchResponse = client().prepareSearch("my-index").setTypes("my-type2").addStoredField(field).get(); assertThat(searchResponse.getHits().totalHits(), equalTo(1L)); assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false)); assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2)); @@ -621,16 +621,16 @@ public class SearchFieldsTests extends ESIntegTestCase { client().admin().indices().prepareRefresh().execute().actionGet(); SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery()) - .addFieldDataField("text_field") - .addFieldDataField("keyword_field") - .addFieldDataField("byte_field") - .addFieldDataField("short_field") - .addFieldDataField("integer_field") - 
.addFieldDataField("long_field") - .addFieldDataField("float_field") - .addFieldDataField("double_field") - .addFieldDataField("date_field") - .addFieldDataField("boolean_field"); + .addDocValueField("text_field") + .addDocValueField("keyword_field") + .addDocValueField("byte_field") + .addDocValueField("short_field") + .addDocValueField("integer_field") + .addDocValueField("long_field") + .addDocValueField("float_field") + .addDocValueField("double_field") + .addDocValueField("date_field") + .addDocValueField("boolean_field"); SearchResponse searchResponse = builder.execute().actionGet(); assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L)); @@ -704,7 +704,7 @@ public class SearchFieldsTests extends ESIntegTestCase { .setParent("parent_1") .setSource(jsonBuilder().startObject().field("field1", "value").endObject())); - SearchResponse response = client().prepareSearch("test").addField("field1").get(); + SearchResponse response = client().prepareSearch("test").addStoredField("field1").get(); assertSearchResponse(response); assertHitCount(response, 1); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java index 4a7b4350d23..f2eee2bb408 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovyScriptTests.java @@ -99,15 +99,15 @@ public class GroovyScriptTests extends ESIntegTestCase { try { client().prepareSearch("test") - .setQuery(constantScoreQuery(scriptQuery(new Script("assert false", ScriptType.INLINE, "groovy", null)))).get(); + .setQuery(constantScoreQuery(scriptQuery(new Script("null.foo", ScriptType.INLINE, "groovy", null)))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { assertThat(e.toString() + "should not contained 
NotSerializableTransportException", e.toString().contains("NotSerializableTransportException"), equalTo(false)); assertThat(e.toString() + "should have contained ScriptException", e.toString().contains("ScriptException"), equalTo(true)); - assertThat(e.toString()+ "should have contained an assert error", - e.toString().contains("AssertionError[assert false"), equalTo(true)); + assertThat(e.toString()+ "should have contained a NullPointerException", + e.toString().contains("NullPointerException[Cannot get property 'foo' on null object]"), equalTo(true)); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/MultiSearchTemplateResponse.java index c779757f61b..f9e99ffc7e3 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/MultiSearchTemplateResponse.java @@ -40,21 +40,21 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera */ public static class Item implements Streamable { private SearchTemplateResponse response; - private Throwable throwable; + private Exception exception; Item() { } - public Item(SearchTemplateResponse response, Throwable throwable) { + public Item(SearchTemplateResponse response, Exception exception) { this.response = response; - this.throwable = throwable; + this.exception = exception; } /** * Is it a failed search? */ public boolean isFailure() { - return throwable != null; + return exception != null; } /** @@ -62,7 +62,7 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera */ @Nullable public String getFailureMessage() { - return throwable == null ? null : throwable.getMessage(); + return exception == null ? 
null : exception.getMessage(); } /** @@ -85,7 +85,7 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera this.response = new SearchTemplateResponse(); response.readFrom(in); } else { - throwable = in.readThrowable(); + exception = in.readException(); } } @@ -96,12 +96,12 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera response.writeTo(out); } else { out.writeBoolean(false); - out.writeThrowable(throwable); + out.writeException(exception); } } - public Throwable getFailure() { - return throwable; + public Exception getFailure() { + return exception; } } @@ -150,7 +150,7 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera for (Item item : items) { builder.startObject(); if (item.isFailure()) { - ElasticsearchException.renderThrowable(builder, params, item.getFailure()); + ElasticsearchException.renderException(builder, params, item.getFailure()); } else { item.getResponse().toXContent(builder, params); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/TransportMultiSearchTemplateAction.java index 1ffb19b5fc4..642fe7648da 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/action/search/template/TransportMultiSearchTemplateAction.java @@ -61,7 +61,7 @@ public class TransportMultiSearchTemplateAction extends HandledTransportAction clazz) { + private void addStruct(final String name, final Class clazz) { if (!name.matches("^[_a-zA-Z][\\.,_a-zA-Z0-9]*$")) { throw new IllegalArgumentException("Invalid struct name [" + name + "]."); } @@ -661,7 +661,7 @@ public final class Definition { simpleTypesMap.put(name, getTypeInternal(name)); } - private final void 
addConstructorInternal(final String struct, final String name, final Type[] args) { + private void addConstructorInternal(final String struct, final String name, final Type[] args) { final Struct owner = structsMap.get(struct); if (owner == null) { @@ -734,7 +734,7 @@ public final class Definition { * * no spaces allowed. */ - private final void addSignature(String className, String signature) { + private void addSignature(String className, String signature) { String elements[] = signature.split("\u0020"); if (elements.length != 2) { throw new IllegalArgumentException("Malformed signature: " + signature); @@ -774,8 +774,8 @@ public final class Definition { } } - private final void addMethodInternal(String struct, String name, boolean augmentation, - Type rtn, Type[] args) { + private void addMethodInternal(String struct, String name, boolean augmentation, + Type rtn, Type[] args) { final Struct owner = structsMap.get(struct); if (owner == null) { @@ -858,7 +858,7 @@ public final class Definition { } } - private final void addFieldInternal(String struct, String name, Type type) { + private void addFieldInternal(String struct, String name, Type type) { final Struct owner = structsMap.get(struct); if (owner == null) { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateResponse.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateResponse.java index c05c0097c90..afc5b7ab6c7 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateResponse.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/MultiPercolateResponse.java @@ -77,7 +77,7 @@ public class MultiPercolateResponse extends ActionResponse implements Iterablefalse is returned. 
*/ public boolean isFailure() { - return throwable != null; + return exception != null; } - public Throwable getFailure() { - return throwable; + public Exception getFailure() { + return exception; } @Override @@ -161,7 +161,7 @@ public class MultiPercolateResponse extends ActionResponse implements Iterable +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/modules/reindex/licenses/httpclient-NOTICE.txt b/modules/reindex/licenses/httpclient-NOTICE.txt new file mode 100644 index 00000000000..4f6058178b2 --- /dev/null +++ b/modules/reindex/licenses/httpclient-NOTICE.txt @@ -0,0 +1,5 @@ +Apache HttpComponents Client +Copyright 1999-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/modules/reindex/licenses/httpcore-4.4.4.jar.sha1 b/modules/reindex/licenses/httpcore-4.4.4.jar.sha1 new file mode 100644 index 00000000000..ef0c257e012 --- /dev/null +++ b/modules/reindex/licenses/httpcore-4.4.4.jar.sha1 @@ -0,0 +1 @@ +b31526a230871fbe285fbcbe2813f9c0839ae9b0 \ No newline at end of file diff --git a/modules/reindex/licenses/httpcore-LICENSE.txt b/modules/reindex/licenses/httpcore-LICENSE.txt new file mode 100644 index 00000000000..72819a9f06f --- /dev/null +++ b/modules/reindex/licenses/httpcore-LICENSE.txt @@ -0,0 +1,241 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project contains annotations in the package org.apache.http.annotation +which are derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. +See http://www.jcip.net and the Creative Commons Attribution License +(http://creativecommons.org/licenses/by/2.5) +Full text: http://creativecommons.org/licenses/by/2.5/legalcode + +License + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). 
THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. + +1. Definitions + + "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. + "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. + "Licensor" means the individual or entity that offers the Work under the terms of this License. + "Original Author" means the individual or entity who created the Work. + "Work" means the copyrightable work of authorship offered under the terms of this License. 
+ "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. + +2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + + to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; + to create and reproduce Derivative Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. + + For the avoidance of doubt, where the work is a musical composition: + Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. + Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. 
Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). + Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). + +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. + +4. Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + + You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. 
The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. + If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. + +5. 
Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + + This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. + Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. + +8. 
Miscellaneous + + Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. + Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. + If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. + This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/modules/reindex/licenses/httpcore-NOTICE.txt b/modules/reindex/licenses/httpcore-NOTICE.txt new file mode 100644 index 00000000000..c0be50a505e --- /dev/null +++ b/modules/reindex/licenses/httpcore-NOTICE.txt @@ -0,0 +1,8 @@ +Apache HttpComponents Core +Copyright 2005-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This project contains annotations derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 3403a8077b9..584dd022932 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; @@ -30,34 +29,24 @@ import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.Retry; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.ParentTaskAssigningClient; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.search.SearchHit; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; +import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; -import 
java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; import static java.lang.Math.max; import static java.lang.Math.min; @@ -74,46 +63,57 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort; * their tests can use them. Most methods run in the listener thread pool because the are meant to be fast and don't expect to block. */ public abstract class AbstractAsyncBulkByScrollAction> { + protected final ESLogger logger; + protected final BulkByScrollTask task; + protected final ThreadPool threadPool; /** * The request for this action. Named mainRequest because we create lots of request variables all representing child * requests of this mainRequest. */ protected final Request mainRequest; - protected final BulkByScrollTask task; private final AtomicLong startTime = new AtomicLong(-1); - private final AtomicReference scroll = new AtomicReference<>(); private final Set destinationIndices = Collections.newSetFromMap(new ConcurrentHashMap<>()); - private final ESLogger logger; private final ParentTaskAssigningClient client; - private final ThreadPool threadPool; - private final SearchRequest firstSearchRequest; private final ActionListener listener; - private final BackoffPolicy backoffPolicy; private final Retry bulkRetry; + private final ScrollableHitSource scrollSource; public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, - ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest, - ActionListener listener) { + ThreadPool threadPool, Request mainRequest, ActionListener listener) { this.task = task; this.logger = logger; this.client = client; this.threadPool = threadPool; 
this.mainRequest = mainRequest; - this.firstSearchRequest = firstSearchRequest; this.listener = listener; - backoffPolicy = buildBackoffPolicy(); - bulkRetry = Retry.on(EsRejectedExecutionException.class).policy(wrapBackoffPolicy(backoffPolicy)); + BackoffPolicy backoffPolicy = buildBackoffPolicy(); + bulkRetry = Retry.on(EsRejectedExecutionException.class).policy(BackoffPolicy.wrap(backoffPolicy, task::countBulkRetry)); + scrollSource = buildScrollableResultSource(backoffPolicy); + /* + * Default to sorting by doc. We can't do this in the request itself because it is normal to *add* to the sorts rather than replace + * them and if we add _doc as the first sort by default then sorts will never work.... So we add it here, only if there isn't + * another sort. + */ + List> sorts = mainRequest.getSearchRequest().source().sorts(); + if (sorts == null || sorts.isEmpty()) { + mainRequest.getSearchRequest().source().sort(fieldSort("_doc")); + } } - protected abstract BulkRequest buildBulk(Iterable docs); + protected abstract BulkRequest buildBulk(Iterable docs); + + protected ScrollableHitSource buildScrollableResultSource(BackoffPolicy backoffPolicy) { + return new ClientScrollableHitSource(logger, backoffPolicy, threadPool, task::countSearchRetry, this::finishHim, client, + mainRequest.getSearchRequest()); + } /** * Build the response for reindex actions. 
*/ protected BulkIndexByScrollResponse buildResponse(TimeValue took, List indexingFailures, - List searchFailures, boolean timedOut) { + List searchFailures, boolean timedOut) { return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); } @@ -126,50 +126,33 @@ public abstract class AbstractAsyncBulkByScrollAction onScrollResponse(timeValueNanos(System.nanoTime()), 0, response)); + } catch (Exception e) { + finishHim(e); } - searchWithRetry(listener -> client.search(firstSearchRequest, listener), (SearchResponse response) -> { - logger.debug("[{}] documents match query", response.getHits().getTotalHits()); - onScrollResponse(timeValueNanos(System.nanoTime()), 0, response); - }); } /** * Process a scroll response. * @param lastBatchStartTime the time when the last batch started. Used to calculate the throttling delay. * @param lastBatchSize the size of the last batch. Used to calculate the throttling delay. - * @param searchResponse the scroll response to process + * @param response the scroll response to process */ - void onScrollResponse(TimeValue lastBatchStartTime, int lastBatchSize, SearchResponse searchResponse) { + void onScrollResponse(TimeValue lastBatchStartTime, int lastBatchSize, ScrollableHitSource.Response response) { if (task.isCancelled()) { finishHim(null); return; } - setScroll(searchResponse.getScrollId()); if ( // If any of the shards failed that should abort the request. - (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) + (response.getFailures().size() > 0) // Timeouts aren't shard failures but we still need to pass them back to the user. 
- || searchResponse.isTimedOut() + || response.isTimedOut() ) { - startNormalTermination(emptyList(), unmodifiableList(Arrays.asList(searchResponse.getShardFailures())), - searchResponse.isTimedOut()); + refreshAndFinish(emptyList(), response.getFailures(), response.isTimedOut()); return; } - long total = searchResponse.getHits().totalHits(); + long total = response.getTotalHits(); if (mainRequest.getSize() > 0) { total = min(total, mainRequest.getSize()); } @@ -181,12 +164,12 @@ public abstract class AbstractAsyncBulkByScrollAction docsIterable = Arrays.asList(docs); + List hits = response.getHits(); if (mainRequest.getSize() != SIZE_ALL_MATCHES) { - // Truncate the docs if we have more than the request size + // Truncate the hits if we have more than the request size long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed()); - if (remaining < docs.length) { - docsIterable = docsIterable.subList(0, (int) remaining); + if (remaining < hits.size()) { + hits = hits.subList(0, (int) remaining); } } - BulkRequest request = buildBulk(docsIterable); + BulkRequest request = buildBulk(hits); if (request.requests().isEmpty()) { /* * If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation. @@ -250,7 +231,7 @@ public abstract class AbstractAsyncBulkByScrollAction= mainRequest.getSize()) { // We've processed all the requested docs. 
- startNormalTermination(emptyList(), emptyList(), false); + refreshAndFinish(emptyList(), emptyList(), false); return; } startNextScroll(thisBatchStartTime, response.getItems().length); - } catch (Throwable t) { + } catch (Exception t) { finishHim(t); } } @@ -324,11 +305,8 @@ public abstract class AbstractAsyncBulkByScrollAction client.searchScroll(request, listener), (SearchResponse response) -> { + TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, lastBatchSize); + scrollSource.startNextScroll(extraKeepAlive, response -> { onScrollResponse(lastBatchStartTime, lastBatchSize, response); }); } @@ -344,9 +322,10 @@ public abstract class AbstractAsyncBulkByScrollAction indexingFailures, List searchFailures, boolean timedOut) { + void refreshAndFinish(List indexingFailures, List searchFailures, boolean timedOut) { if (task.isCancelled() || false == mainRequest.isRefresh() || destinationIndices.isEmpty()) { finishHim(null, indexingFailures, searchFailures, timedOut); return; @@ -360,7 +339,7 @@ public abstract class AbstractAsyncBulkByScrollAction indexingFailures, List searchFailures, boolean timedOut) { - String scrollId = scroll.get(); - if (Strings.hasLength(scrollId)) { - /* - * Fire off the clear scroll but don't wait for it it return before - * we send the use their response. - */ - ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); - clearScrollRequest.addScrollId(scrollId); - /* - * Unwrap the client so we don't set our task as the parent. If we *did* set our ID then the clear scroll would be cancelled as - * if this task is cancelled. But we want to clear the scroll regardless of whether or not the main request was cancelled. 
- */ - client.unwrap().clearScroll(clearScrollRequest, new ActionListener() { - @Override - public void onResponse(ClearScrollResponse response) { - logger.debug("Freed [{}] contexts", response.getNumFreed()); - } - - @Override - public void onFailure(Throwable e) { - logger.warn("Failed to clear scroll [{}]", e, scrollId); - } - }); - } + void finishHim(Exception failure, List indexingFailures, List searchFailures, boolean timedOut) { + scrollSource.close(); if (failure == null) { listener.onResponse( buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures, timedOut)); @@ -435,75 +390,6 @@ public abstract class AbstractAsyncBulkByScrollAction iterator() { - return new Iterator() { - private final Iterator delegate = backoffPolicy.iterator(); - @Override - public boolean hasNext() { - return delegate.hasNext(); - } - - @Override - public TimeValue next() { - if (false == delegate.hasNext()) { - return null; - } - task.countBulkRetry(); - return delegate.next(); - } - }; - } - }; - } - - /** - * Run a search action and call onResponse when a the response comes in, retrying if the action fails with an exception caused by - * rejected execution. - * - * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure. 
- * @param onResponse consumes the response from the action - */ - private void searchWithRetry(Consumer> action, Consumer onResponse) { - class RetryHelper extends AbstractRunnable implements ActionListener { - private final Iterator retries = backoffPolicy.iterator(); - - @Override - public void onResponse(T response) { - onResponse.accept(response); - } - - @Override - protected void doRun() throws Exception { - action.accept(this); - } - - @Override - public void onFailure(Throwable e) { - if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) { - if (retries.hasNext()) { - TimeValue delay = retries.next(); - logger.trace("retrying rejected search after [{}]", e, delay); - threadPool.schedule(delay, ThreadPool.Names.SAME, this); - task.countSearchRetry(); - } else { - logger.warn("giving up on search because we retried {} times without success", e, retries); - finishHim(e); - } - } else { - logger.warn("giving up on search because it failed with a non-retryable exception", e); - finishHim(e); - } - } - } - new RetryHelper().run(); + scrollSource.setScroll(scroll); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java index 60ab088d76c..4b87df46312 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.ParentTaskAssigningClient; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.common.logging.ESLogger; @@ -44,8 +43,6 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHitField; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; @@ -72,13 +69,13 @@ public abstract class AbstractAsyncBulkIndexByScrollAction, SearchHit, RequestWrapper> scriptApplier; + private final BiFunction, ScrollableHitSource.Hit, RequestWrapper> scriptApplier; public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, - ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest, + ThreadPool threadPool, Request mainRequest, ActionListener listener, ScriptService scriptService, ClusterState clusterState) { - super(task, logger, client, threadPool, mainRequest, firstSearchRequest, listener); + super(task, logger, client, threadPool, mainRequest, listener); this.scriptService = scriptService; this.clusterState = clusterState; this.scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null"); @@ -87,15 +84,15 @@ public abstract class AbstractAsyncBulkIndexByScrollAction, SearchHit, RequestWrapper> buildScriptApplier() { + protected BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { // The default script applier executes a no-op return (request, searchHit) -> request; } @Override - protected BulkRequest buildBulk(Iterable docs) { + protected BulkRequest buildBulk(Iterable docs) { BulkRequest bulkRequest = new BulkRequest(); - for (SearchHit doc : docs) { + for (ScrollableHitSource.Hit doc : docs) { if (accept(doc)) { RequestWrapper request = scriptApplier.apply(copyMetadata(buildRequest(doc), doc), doc); if (request != null) { @@ -111,14 +108,14 @@ public abstract class 
AbstractAsyncBulkIndexByScrollAction buildRequest(SearchHit doc); + protected abstract RequestWrapper buildRequest(ScrollableHitSource.Hit doc); /** * Copies the metadata from a hit to the request. */ - protected RequestWrapper copyMetadata(RequestWrapper request, SearchHit doc) { - copyParent(request, fieldValue(doc, ParentFieldMapper.NAME)); - copyRouting(request, fieldValue(doc, RoutingFieldMapper.NAME)); + protected RequestWrapper copyMetadata(RequestWrapper request, ScrollableHitSource.Hit doc) { + request.setParent(doc.getParent()); + copyRouting(request, doc.getRouting()); // Comes back as a Long but needs to be a string - Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME); + Long timestamp = doc.getTimestamp(); if (timestamp != null) { request.setTimestamp(timestamp.toString()); } - Long ttl = fieldValue(doc, TTLFieldMapper.NAME); + Long ttl = doc.getTTL(); if (ttl != null) { request.setTtl(ttl); } return request; } - /** - * Copy the parent from a search hit to the request. - */ - protected void copyParent(RequestWrapper request, String parent) { - request.setParent(parent); - } - /** * Copy the routing from a search hit to the request. */ @@ -163,11 +153,6 @@ public abstract class AbstractAsyncBulkIndexByScrollAction T fieldValue(SearchHit doc, String fieldName) { - SearchHitField field = doc.field(fieldName); - return field == null ? null : field.value(); - } - /** * Wrapper for the {@link ActionRequest} that are used in this action class. 
*/ @@ -435,7 +420,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction, SearchHit, RequestWrapper> { + public abstract class ScriptApplier implements BiFunction, ScrollableHitSource.Hit, RequestWrapper> { private final BulkByScrollTask task; private final ScriptService scriptService; @@ -455,7 +440,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction apply(RequestWrapper request, SearchHit doc) { + public RequestWrapper apply(RequestWrapper request, ScrollableHitSource.Hit doc) { if (script == null) { return request; } @@ -467,18 +452,18 @@ public abstract class AbstractAsyncBulkIndexByScrollAction(); } - context.put(IndexFieldMapper.NAME, doc.index()); - context.put(TypeFieldMapper.NAME, doc.type()); - context.put(IdFieldMapper.NAME, doc.id()); + context.put(IndexFieldMapper.NAME, doc.getIndex()); + context.put(TypeFieldMapper.NAME, doc.getType()); + context.put(IdFieldMapper.NAME, doc.getId()); Long oldVersion = doc.getVersion(); context.put(VersionFieldMapper.NAME, oldVersion); - String oldParent = fieldValue(doc, ParentFieldMapper.NAME); + String oldParent = doc.getParent(); context.put(ParentFieldMapper.NAME, oldParent); - String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME); + String oldRouting = doc.getRouting(); context.put(RoutingFieldMapper.NAME, oldRouting); - Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME); + Long oldTimestamp = doc.getTimestamp(); context.put(TimestampFieldMapper.NAME, oldTimestamp); - Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME); + Long oldTTL = doc.getTTL(); context.put(TTLFieldMapper.NAME, oldTTL); context.put(SourceFieldMapper.NAME, request.getSource()); @@ -501,15 +486,15 @@ public abstract class AbstractAsyncBulkIndexByScrollAction) resultCtx.remove(SourceFieldMapper.NAME)); Object newValue = context.remove(IndexFieldMapper.NAME); - if (false == doc.index().equals(newValue)) { + if (false == doc.getIndex().equals(newValue)) { scriptChangedIndex(request, newValue); } newValue = 
context.remove(TypeFieldMapper.NAME); - if (false == doc.type().equals(newValue)) { + if (false == doc.getType().equals(newValue)) { scriptChangedType(request, newValue); } newValue = context.remove(IdFieldMapper.NAME); - if (false == doc.id().equals(newValue)) { + if (false == doc.getId().equals(newValue)) { scriptChangedId(request, newValue); } newValue = context.remove(VersionFieldMapper.NAME); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 80a6ff891da..7725ee7f519 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -123,8 +123,8 @@ public abstract class AbstractBulkByScrollRequest indexingFailures; - private List searchFailures; + private List bulkFailures; + private List searchFailures; private boolean timedOut; public BulkIndexByScrollResponse() { } - public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List indexingFailures, - List searchFailures, boolean timedOut) { + public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List bulkFailures, + List searchFailures, boolean timedOut) { this.took = took; this.status = requireNonNull(status, "Null status not supported"); - this.indexingFailures = indexingFailures; + this.bulkFailures = bulkFailures; this.searchFailures = searchFailures; this.timedOut = timedOut; } @@ -113,17 +110,16 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont } /** - * All of the indexing failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the - * default). + * All of the bulk failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the default). 
*/ - public List getIndexingFailures() { - return indexingFailures; + public List getBulkFailures() { + return bulkFailures; } /** * All search failures. */ - public List getSearchFailures() { + public List getSearchFailures() { return searchFailures; } @@ -139,14 +135,8 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont super.writeTo(out); took.writeTo(out); status.writeTo(out); - out.writeVInt(indexingFailures.size()); - for (Failure failure: indexingFailures) { - failure.writeTo(out); - } - out.writeVInt(searchFailures.size()); - for (ShardSearchFailure failure: searchFailures) { - failure.writeTo(out); - } + out.writeList(bulkFailures); + out.writeList(searchFailures); out.writeBoolean(timedOut); } @@ -155,19 +145,9 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont super.readFrom(in); took = new TimeValue(in); status = new BulkByScrollTask.Status(in); - int indexingFailuresCount = in.readVInt(); - List indexingFailures = new ArrayList<>(indexingFailuresCount); - for (int i = 0; i < indexingFailuresCount; i++) { - indexingFailures.add(new Failure(in)); - } - this.indexingFailures = unmodifiableList(indexingFailures); - int searchFailuresCount = in.readVInt(); - List searchFailures = new ArrayList<>(searchFailuresCount); - for (int i = 0; i < searchFailuresCount; i++) { - searchFailures.add(readShardSearchFailure(in)); - } - this.searchFailures = unmodifiableList(searchFailures); - this.timedOut = in.readBoolean(); + bulkFailures = in.readList(Failure::new); + searchFailures = in.readList(SearchFailure::new); + timedOut = in.readBoolean(); } @Override @@ -176,15 +156,13 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont builder.field("timed_out", timedOut); status.innerXContent(builder, params); builder.startArray("failures"); - for (Failure failure: indexingFailures) { + for (Failure failure: bulkFailures) { builder.startObject(); failure.toXContent(builder, 
params); builder.endObject(); } - for (ShardSearchFailure failure: searchFailures) { - builder.startObject(); + for (SearchFailure failure: searchFailures) { failure.toXContent(builder, params); - builder.endObject(); } builder.endArray(); return builder; @@ -197,7 +175,7 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont builder.append("took=").append(took).append(','); builder.append("timed_out=").append(timedOut).append(','); status.innerToString(builder); - builder.append(",indexing_failures=").append(getIndexingFailures().subList(0, min(3, getIndexingFailures().size()))); + builder.append(",bulk_failures=").append(getBulkFailures().subList(0, min(3, getBulkFailures().size()))); builder.append(",search_failures=").append(getSearchFailures().subList(0, min(3, getSearchFailures().size()))); return builder.append(']').toString(); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java index 72bf6957e12..6cfba3a302d 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseContentListener.java @@ -21,9 +21,9 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; -import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; @@ -61,13 +61,13 @@ public class BulkIndexByScrollResponseContentListener 
status.getStatus()) { status = failure.getStatus(); } } - for (ShardSearchFailure failure: response.getSearchFailures()) { - RestStatus failureStatus = ExceptionsHelper.status(failure.getCause()); + for (SearchFailure failure: response.getSearchFailures()) { + RestStatus failureStatus = ExceptionsHelper.status(failure.getReason()); if (failureStatus.getStatus() > status.getStatus()) { status = failureStatus; } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java new file mode 100644 index 00000000000..5e694e2cf26 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -0,0 +1,251 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BackoffPolicy; +import org.elasticsearch.action.search.ClearScrollRequest; +import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.ParentTaskAssigningClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; +import org.elasticsearch.index.mapper.internal.TTLFieldMapper; +import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHitField; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.function.Consumer; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; +import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; +import static org.elasticsearch.common.util.CollectionUtils.isEmpty; + +/** + * A scrollable source of hits from a {@linkplain Client} instance. 
+ */ +public class ClientScrollableHitSource extends ScrollableHitSource { + private final ParentTaskAssigningClient client; + private final SearchRequest firstSearchRequest; + + public ClientScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, + Consumer fail, ParentTaskAssigningClient client, SearchRequest firstSearchRequest) { + super(logger, backoffPolicy, threadPool, countSearchRetry, fail); + this.client = client; + this.firstSearchRequest = firstSearchRequest; + } + + @Override + public void doStart(Consumer onResponse) { + if (logger.isDebugEnabled()) { + logger.debug("executing initial scroll against {}{}", + isEmpty(firstSearchRequest.indices()) ? "all indices" : firstSearchRequest.indices(), + isEmpty(firstSearchRequest.types()) ? "" : firstSearchRequest.types()); + } + searchWithRetry(listener -> client.search(firstSearchRequest, listener), r -> consume(r, onResponse)); + } + + @Override + protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer onResponse) { + SearchScrollRequest request = new SearchScrollRequest(); + // Add the wait time into the scroll timeout so it won't timeout while we wait for throttling + request.scrollId(scrollId).scroll(timeValueNanos(firstSearchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos())); + searchWithRetry(listener -> client.searchScroll(request, listener), r -> consume(r, onResponse)); + } + + @Override + public void clearScroll(String scrollId) { + /* + * Fire off the clear scroll but don't wait for it to return before + * we send the user their response. + */ + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.addScrollId(scrollId); + /* + * Unwrap the client so we don't set our task as the parent. If we *did* set our ID then the clear scroll would be cancelled as + * if this task is cancelled.
 But we want to clear the scroll regardless of whether or not the main request was cancelled. + */ + client.unwrap().clearScroll(clearScrollRequest, new ActionListener() { + @Override + public void onResponse(ClearScrollResponse response) { + logger.debug("Freed [{}] contexts", response.getNumFreed()); + } + + @Override + public void onFailure(Exception e) { + logger.warn("Failed to clear scroll [{}]", e, scrollId); + } + }); + } + + /** + * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by + * rejected execution. + * + * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure. + * @param onResponse consumes the response from the action + */ + private void searchWithRetry(Consumer> action, Consumer onResponse) { + /* + * RetryHelper is both an AbstractRunnable and an ActionListener - meaning that it both starts the search and + * handles reacting to the results. The complexity is all in onFailure which either adapts the failure to the "fail" listener or + * retries the search. Since both AbstractRunnable and ActionListener define the onFailure method it is called for either failure + * to run the action (either while running or before starting) and for failure on the response from the action.
+ */ + class RetryHelper extends AbstractRunnable implements ActionListener { + private final Iterator retries = backoffPolicy.iterator(); + private volatile int retryCount = 0; + + @Override + protected void doRun() throws Exception { + action.accept(this); + } + + @Override + public void onResponse(SearchResponse response) { + onResponse.accept(response); + } + + @Override + public void onFailure(Exception e) { + if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) { + if (retries.hasNext()) { + retryCount += 1; + TimeValue delay = retries.next(); + logger.trace("retrying rejected search after [{}]", e, delay); + countSearchRetry.run(); + threadPool.schedule(delay, ThreadPool.Names.SAME, this); + } else { + logger.warn("giving up on search because we retried [{}] times without success", e, retryCount); + fail.accept(e); + } + } else { + logger.warn("giving up on search because it failed with a non-retryable exception", e); + fail.accept(e); + } + } + } + new RetryHelper().run(); + } + + private void consume(SearchResponse response, Consumer onResponse) { + onResponse.accept(wrap(response)); + } + + private Response wrap(SearchResponse response) { + List failures; + if (response.getShardFailures() == null) { + failures = emptyList(); + } else { + failures = new ArrayList<>(response.getShardFailures().length); + for (ShardSearchFailure failure: response.getShardFailures()) { + String nodeId = failure.shard() == null ? 
null : failure.shard().nodeId(); + failures.add(new SearchFailure(failure.getCause(), failure.index(), failure.shardId(), nodeId)); + } + } + List hits; + if (response.getHits().getHits() == null || response.getHits().getHits().length == 0) { + hits = emptyList(); + } else { + hits = new ArrayList<>(response.getHits().getHits().length); + for (SearchHit hit: response.getHits().getHits()) { + hits.add(new ClientHit(hit)); + } + hits = unmodifiableList(hits); + } + return new Response(response.isTimedOut(), failures, response.getHits().getTotalHits(), + hits, response.getScrollId()); + } + + private static class ClientHit implements Hit { + private final SearchHit delegate; + private final BytesReference source; + + public ClientHit(SearchHit delegate) { + this.delegate = delegate; + source = delegate.hasSource() ? null : delegate.getSourceRef(); + } + + @Override + public String getIndex() { + return delegate.getIndex(); + } + + @Override + public String getType() { + return delegate.getType(); + } + + @Override + public String getId() { + return delegate.getId(); + } + + @Override + public BytesReference getSource() { + return source; + } + + @Override + public long getVersion() { + return delegate.getVersion(); + } + + @Override + public String getParent() { + return fieldValue(ParentFieldMapper.NAME); + } + + @Override + public String getRouting() { + return fieldValue(RoutingFieldMapper.NAME); + } + + @Override + public Long getTimestamp() { + return fieldValue(TimestampFieldMapper.NAME); + } + + @Override + public Long getTTL() { + return fieldValue(TTLFieldMapper.NAME); + } + + private T fieldValue(String fieldName) { + SearchHitField field = delegate.field(fieldName); + return field == null ? 
null : field.value(); + } + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java index 5a7b81a7124..4f2cb2578ac 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexPlugin.java @@ -23,12 +23,15 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestHandler; import java.util.Arrays; import java.util.List; +import static java.util.Collections.singletonList; + public class ReindexPlugin extends Plugin implements ActionPlugin { public static final String NAME = "reindex"; @@ -49,4 +52,9 @@ public class ReindexPlugin extends Plugin implements ActionPlugin { public void onModule(NetworkModule networkModule) { networkModule.registerTaskStatus(BulkByScrollTask.Status.NAME, BulkByScrollTask.Status::new); } + + @Override + public List> getSettings() { + return singletonList(TransportReindexAction.REMOTE_CLUSTER_WHITELIST); + } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java index 660815bbf52..8c11cd3430f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequest.java @@ -27,11 +27,13 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; +import 
org.elasticsearch.index.reindex.remote.RemoteInfo; import java.io.IOException; import java.util.Arrays; import java.util.List; +import static java.util.Collections.singletonList; import static java.util.Collections.unmodifiableList; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.VersionType.INTERNAL; @@ -48,6 +50,8 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest subRequests() { assert getSearchRequest() != null; assert getDestination() != null; + if (remoteInfo != null) { + return singletonList(getDestination()); + } return unmodifiableList(Arrays.asList(getSearchRequest(), getDestination())); } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java index 4f814dbc49d..1eadf2c15bc 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ReindexRequestBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.index.reindex.remote.RemoteInfo; public class ReindexRequestBuilder extends AbstractBulkIndexByScrollRequestBuilder { @@ -67,4 +68,12 @@ public class ReindexRequestBuilder extends destination.setIndex(index).setType(type); return this; } + + /** + * Setup reindexing from a remote cluster. 
+ */ + public ReindexRequestBuilder setRemoteInfo(RemoteInfo remoteInfo) { + request().setRemoteInfo(remoteInfo); + return this; + } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index af0bd168ff0..3bbfebf68a7 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -27,16 +27,21 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcherSupplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; @@ -48,36 +53,39 @@ import org.elasticsearch.search.suggest.Suggesters; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import static java.util.Objects.requireNonNull; import 
static org.elasticsearch.common.unit.TimeValue.parseTimeValue; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.rest.RestRequest.Method.POST; /** - * Expose IndexBySearchRequest over rest. + * Expose reindex over rest. */ public class RestReindexAction extends AbstractBaseReindexRestHandler { - - private static final ObjectParser PARSER = new ObjectParser<>("reindex"); + static final ObjectParser PARSER = new ObjectParser<>("reindex"); + private static final Pattern HOST_PATTERN = Pattern.compile("(?[^:]+)://(?[^:]+):(?\\d+)"); static { - ObjectParser.Parser sourceParser = (parser, search, context) -> { - /* - * Extract the parameters that we need from the source sent to the parser. We could do away with this hack when search source - * has an ObjectParser. - */ + ObjectParser.Parser sourceParser = (parser, request, context) -> { + // Funky hack to work around Search not having a proper ObjectParser and us wanting to extract query if using remote. 
Map source = parser.map(); String[] indices = extractStringArray(source, "index"); if (indices != null) { - search.indices(indices); + request.getSearchRequest().indices(indices); } String[] types = extractStringArray(source, "type"); if (types != null) { - search.types(types); + request.getSearchRequest().types(types); } + request.setRemoteInfo(buildRemoteInfo(source)); XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); builder.map(source); try (XContentParser innerParser = parser.contentType().xContent().createParser(builder.bytes())) { - search.source().parseXContent(context.queryParseContext(innerParser), context.aggParsers, context.suggesters); + request.getSearchRequest().source().parseXContent(context.queryParseContext(innerParser), context.aggParsers, + context.suggesters); } }; @@ -94,7 +102,7 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler i.ttl(parseTimeValue(ttl, TimeValue.timeValueMillis(-1), "ttl").millis()), new ParseField("ttl")); - PARSER.declareField((p, v, c) -> sourceParser.parse(p, v.getSearchRequest(), c), new ParseField("source"), ValueType.OBJECT); + PARSER.declareField((p, v, c) -> sourceParser.parse(p, v, c), new ParseField("source"), ValueType.OBJECT); PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), c), new ParseField("dest"), ValueType.OBJECT); PARSER.declareInt(ReindexRequest::setSize, new ParseField("size")); PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p, c.getParseFieldMatcher())), new ParseField("script"), @@ -127,6 +135,29 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler source) throws IOException { + @SuppressWarnings("unchecked") + Map remote = (Map) source.remove("remote"); + if (remote == null) { + return null; + } + String username = extractString(remote, "username"); + String password = extractString(remote, "password"); + String hostInRequest = requireNonNull(extractString(remote, "host"), "[host] must be 
specified to reindex from a remote cluster"); + Matcher hostMatcher = HOST_PATTERN.matcher(hostInRequest); + if (false == hostMatcher.matches()) { + throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port] but was [" + hostInRequest + "]"); + } + String scheme = hostMatcher.group("scheme"); + String host = hostMatcher.group("host"); + int port = Integer.parseInt(hostMatcher.group("port")); + if (false == remote.isEmpty()) { + throw new IllegalArgumentException( + "Unsupported fields in [remote]: [" + Strings.collectionToCommaDelimitedString(remote.keySet()) + "]"); + } + return new RemoteInfo(scheme, host, port, queryForRemote(source), username, password); + } + /** * Yank a string array from a map. Emulates XContent's permissive String to * String array conversions. @@ -147,7 +178,32 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler source, String name) { + Object value = source.remove(name); + if (value == null) { + return null; + } + if (value instanceof String) { + return (String) value; + } + throw new IllegalArgumentException("Expected [" + name + "] to be a string but was [" + value + "]"); + } + + private static BytesReference queryForRemote(Map source) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + Object query = source.remove("query"); + if (query == null) { + return matchAllQuery().toXContent(builder, ToXContent.EMPTY_PARAMS).bytes(); + } + if (!(query instanceof Map)) { + throw new IllegalArgumentException("Expected [query] to be an object but was [" + query + "]"); + } + @SuppressWarnings("unchecked") + Map map = (Map) query; + return builder.map(map).bytes(); + } + + static class ReindexParseContext implements ParseFieldMatcherSupplier { private final IndicesQueriesRegistry indicesQueryRegistry; private final ParseFieldMatcher parseFieldMatcher; private final AggregatorParsers aggParsers; diff --git 
a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java new file mode 100644 index 00000000000..b03496df7a7 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java @@ -0,0 +1,357 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.bulk.BackoffPolicy; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static java.util.Objects.requireNonNull; + +/** + * A scrollable source of results. 
+ */ +public abstract class ScrollableHitSource implements Closeable { + private final AtomicReference scrollId = new AtomicReference<>(); + + protected final ESLogger logger; + protected final BackoffPolicy backoffPolicy; + protected final ThreadPool threadPool; + protected final Runnable countSearchRetry; + protected final Consumer fail; + + public ScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, + Consumer fail) { + this.logger = logger; + this.backoffPolicy = backoffPolicy; + this.threadPool = threadPool; + this.countSearchRetry = countSearchRetry; + this.fail = fail; + } + + public final void start(Consumer onResponse) { + doStart(response -> { + setScroll(response.getScrollId()); + logger.debug("scroll returned [{}] documents with a scroll id of [{}]", response.getHits().size(), response.getScrollId()); + onResponse.accept(response); + }); + } + protected abstract void doStart(Consumer onResponse); + + public final void startNextScroll(TimeValue extraKeepAlive, Consumer onResponse) { + doStartNextScroll(scrollId.get(), extraKeepAlive, response -> { + setScroll(response.getScrollId()); + onResponse.accept(response); + }); + } + protected abstract void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer onResponse); + + @Override + public void close() { + String scrollId = this.scrollId.get(); + if (Strings.hasLength(scrollId)) { + clearScroll(scrollId); + } + } + protected abstract void clearScroll(String scrollId); + + /** + * Set the id of the last scroll. Used for debugging. + */ + final void setScroll(String scrollId) { + this.scrollId.set(scrollId); + } + + /** + * Response from each scroll batch. 
+ */ + public static class Response { + private final boolean timedOut; + private final List failures; + private final long totalHits; + private final List hits; + private final String scrollId; + + public Response(boolean timedOut, List failures, long totalHits, List hits, String scrollId) { + this.timedOut = timedOut; + this.failures = failures; + this.totalHits = totalHits; + this.hits = hits; + this.scrollId = scrollId; + } + + /** + * Did this batch time out? + */ + public boolean isTimedOut() { + return timedOut; + } + + /** + * Were there any search failures? + */ + public final List getFailures() { + return failures; + } + + /** + * What was the total number of documents matching the search? + */ + public long getTotalHits() { + return totalHits; + } + + /** + * The documents returned in this batch. + */ + public List getHits() { + return hits; + } + + /** + * The scroll id used to fetch the next set of documents. + */ + public String getScrollId() { + return scrollId; + } + } + + /** + * A document returned as part of the response. Think of it like {@link SearchHit} but with all the things reindex needs in convenient + * methods. + */ + public interface Hit { + String getIndex(); + String getType(); + String getId(); + long getVersion(); + /** + * The source of the hit. Returns null if the source didn't come back from the search, usually because its source wasn't stored at + * all. + */ + @Nullable BytesReference getSource(); + @Nullable String getParent(); + @Nullable String getRouting(); + @Nullable Long getTimestamp(); + @Nullable Long getTTL(); + } + + /** + * An implementation of {@linkplain Hit} that uses getters and setters. Primarily used for testing and {@link RemoteScrollableHitSource} + * . 
+ */ + public static class BasicHit implements Hit { + private final String index; + private final String type; + private final String id; + private final long version; + + private BytesReference source; + private String parent; + private String routing; + private Long timestamp; + private Long ttl; + + public BasicHit(String index, String type, String id, long version) { + this.index = index; + this.type = type; + this.id = id; + this.version = version; + } + + @Override + public String getIndex() { + return index; + } + + @Override + public String getType() { + return type; + } + + @Override + public String getId() { + return id; + } + + @Override + public long getVersion() { + return version; + } + + @Override + public BytesReference getSource() { + return source; + } + + public BasicHit setSource(BytesReference source) { + this.source = source; + return this; + } + + @Override + public String getParent() { + return parent; + } + + public BasicHit setParent(String parent) { + this.parent = parent; + return this; + } + + @Override + public String getRouting() { + return routing; + } + + public BasicHit setRouting(String routing) { + this.routing = routing; + return this; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + public BasicHit setTimestamp(Long timestamp) { + this.timestamp = timestamp; + return this; + } + + @Override + public Long getTTL() { + return ttl; + } + + public BasicHit setTTL(Long ttl) { + this.ttl = ttl; + return this; + } + } + + /** + * A failure during search. Like {@link ShardSearchFailure} but useful for reindex from remote as well. 
+ */ + public static class SearchFailure implements Writeable, ToXContent { + private final Throwable reason; + @Nullable + private final String index; + @Nullable + private final Integer shardId; + @Nullable + private final String nodeId; + + public SearchFailure(Throwable reason, @Nullable String index, @Nullable Integer shardId, @Nullable String nodeId) { + this.index = index; + this.shardId = shardId; + this.reason = requireNonNull(reason, "reason cannot be null"); + this.nodeId = nodeId; + } + + /** + * Build a search failure that doesn't have shard information available. + */ + public SearchFailure(Throwable reason) { + this(reason, null, null, null); + } + + /** + * Read from a stream. + */ + public SearchFailure(StreamInput in) throws IOException { + reason = in.readException(); + index = in.readOptionalString(); + shardId = in.readOptionalVInt(); + nodeId = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeException(reason); + out.writeOptionalString(index); + out.writeOptionalVInt(shardId); + out.writeOptionalString(nodeId); + } + + public String getIndex() { + return index; + } + + public Integer getShardId() { + return shardId; + } + + public Throwable getReason() { + return reason; + } + + @Nullable + public String getNodeId() { + return nodeId; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (index != null) { + builder.field("index", index); + } + if (shardId != null) { + builder.field("shard", shardId); + } + if (nodeId != null) { + builder.field("node", nodeId); + } + builder.field("reason"); + { + builder.startObject(); + ElasticsearchException.toXContent(builder, params, reason); + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this); + } + } +} diff --git 
a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java index 471bd066f94..c3847ab2125 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportDeleteByQueryAction.java @@ -31,10 +31,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.internal.ParentFieldMapper; -import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -74,35 +71,35 @@ public class TransportDeleteByQueryAction extends HandledTransportAction listener, ScriptService scriptService, ClusterState clusterState) { - super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState); + super(task, logger, client, threadPool, request, listener, scriptService, clusterState); } @Override - protected boolean accept(SearchHit doc) { + protected boolean accept(ScrollableHitSource.Hit doc) { // Delete-by-query does not require the source to delete a document // and the default implementation checks for it return true; } @Override - protected RequestWrapper buildRequest(SearchHit doc) { + protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) { DeleteRequest delete = new DeleteRequest(); - delete.index(doc.index()); - delete.type(doc.type()); - delete.id(doc.id()); - delete.version(doc.version()); + delete.index(doc.getIndex()); + delete.type(doc.getType()); + 
delete.id(doc.getId()); + delete.version(doc.getVersion()); return wrap(delete); } /** - * Overrides the parent {@link AbstractAsyncBulkIndexByScrollAction#copyMetadata(RequestWrapper, SearchHit)} + * Overrides the parent {@link AbstractAsyncBulkIndexByScrollAction#copyMetadata(RequestWrapper, ScrollableHitSource.Hit)} * method that is much more Update/Reindex oriented and so also copies things like timestamp/ttl which we * don't care for a deletion. */ @Override - protected RequestWrapper copyMetadata(RequestWrapper request, SearchHit doc) { - copyParent(request, fieldValue(doc, ParentFieldMapper.NAME)); - copyRouting(request, fieldValue(doc, RoutingFieldMapper.NAME)); + protected RequestWrapper copyMetadata(RequestWrapper request, ScrollableHitSource.Hit doc) { + request.setParent(doc.getParent()); + request.setRouting(doc.getRouting()); return request; } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index e98c45595c7..04ccfa1ba49 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -19,8 +19,10 @@ package org.elasticsearch.index.reindex; +import org.apache.http.HttpHost; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ActionFilters; @@ -28,51 +30,72 @@ import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; import org.elasticsearch.client.ParentTaskAssigningClient; +import 
org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.mapper.internal.TTLFieldMapper; import org.elasticsearch.index.mapper.internal.VersionFieldMapper; +import org.elasticsearch.index.reindex.remote.RemoteInfo; +import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource; +import org.elasticsearch.node.service.NodeService; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.BiFunction; +import java.util.function.Function; +import static java.util.Collections.emptyList; import static java.util.Objects.requireNonNull; import static org.elasticsearch.index.VersionType.INTERNAL; public class TransportReindexAction extends HandledTransportAction { + public static final Setting> REMOTE_CLUSTER_WHITELIST = + Setting.listSetting("reindex.remote.whitelist", emptyList(), Function.identity(), Property.NodeScope); + private final ClusterService clusterService; private final ScriptService scriptService; private final AutoCreateIndex autoCreateIndex; private final Client client; + private final Set remoteWhitelist; + 
private final NodeService nodeService; @Inject public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService, - AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) { + AutoCreateIndex autoCreateIndex, Client client, TransportService transportService, NodeService nodeService) { super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ReindexRequest::new); this.clusterService = clusterService; this.scriptService = scriptService; this.autoCreateIndex = autoCreateIndex; this.client = client; + remoteWhitelist = new HashSet<>(REMOTE_CLUSTER_WHITELIST.get(settings)); + this.nodeService = nodeService; } @Override protected void doExecute(Task task, ReindexRequest request, ActionListener listener) { + checkRemoteWhitelist(request.getRemoteInfo()); ClusterState state = clusterService.state(); - validateAgainstAliases(request.getSearchRequest(), request.getDestination(), indexNameExpressionResolver, autoCreateIndex, state); + validateAgainstAliases(request.getSearchRequest(), request.getDestination(), request.getRemoteInfo(), indexNameExpressionResolver, + autoCreateIndex, state); ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task); new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, state).start(); } @@ -82,15 +105,43 @@ public class TransportReindexAction extends HandledTransportAction whitelist, RemoteInfo remoteInfo, TransportAddress publishAddress) { + if (remoteInfo == null) return; + String check = remoteInfo.getHost() + ':' + remoteInfo.getPort(); + if (whitelist.contains(check)) return; + /* + * For testing we support the key "myself" to allow connecting to the local node. 
We can't just change the setting to include the + * local node because it is intentionally not a dynamic setting for security purposes. We can't use something like "localhost:9200" + * because we don't know up front which port we'll get because the tests bind to port 0. Instead we try to resolve it here, taking + * "myself" to mean "my published http address". + */ + if (whitelist.contains("myself") && publishAddress != null && publishAddress.toString().equals(check)) { + return; + } + throw new IllegalArgumentException('[' + check + "] not whitelisted in " + REMOTE_CLUSTER_WHITELIST.getKey()); + } + /** * Throws an ActionRequestValidationException if the request tries to index * back into the same index or into an index that points to two indexes. * This cannot be done during request validation because the cluster state * isn't available then. Package private for testing. */ - static String validateAgainstAliases(SearchRequest source, IndexRequest destination, + static void validateAgainstAliases(SearchRequest source, IndexRequest destination, RemoteInfo remoteInfo, IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex, ClusterState clusterState) { + if (remoteInfo != null) { + return; + } String target = destination.index(); if (false == autoCreateIndex.shouldAutoCreate(target, clusterState)) { /* @@ -107,7 +158,6 @@ public class TransportReindexAction extends HandledTransportAction listener, ScriptService scriptService, ClusterState clusterState) { - super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState); + super(task, logger, client, threadPool, request, listener, scriptService, clusterState); } @Override - protected BiFunction, SearchHit, RequestWrapper> buildScriptApplier() { + protected ScrollableHitSource buildScrollableResultSource(BackoffPolicy backoffPolicy) { + if (mainRequest.getRemoteInfo() != null) { + // NORELEASE track 500-level retries that are builtin 
to the client + RemoteInfo remoteInfo = mainRequest.getRemoteInfo(); + if (remoteInfo.getUsername() != null) { + // NORELEASE support auth + throw new UnsupportedOperationException("Auth is unsupported"); + } + RestClient restClient = RestClient.builder(new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme())) + .build(); + RemoteScrollableHitSource.AsyncClient client = new RemoteScrollableHitSource.AsynchronizingRestClient(threadPool, + restClient); + return new RemoteScrollableHitSource(logger, backoffPolicy, threadPool, task::countSearchRetry, this::finishHim, client, + remoteInfo.getQuery(), mainRequest.getSearchRequest()); + } + return super.buildScrollableResultSource(backoffPolicy); + } + + @Override + protected BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { Script script = mainRequest.getScript(); if (script != null) { return new ReindexScriptApplier(task, scriptService, script, script.getParams()); @@ -134,7 +203,7 @@ public class TransportReindexAction extends HandledTransportAction buildRequest(SearchHit doc) { + protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) { IndexRequest index = new IndexRequest(); // Copy the index from the request so we always write where it asked to write @@ -142,7 +211,7 @@ public class TransportReindexAction extends HandledTransportAction listener, ScriptService scriptService, ClusterState clusterState) { - super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState); + super(task, logger, client, threadPool, request, listener, scriptService, clusterState); } @Override - protected BiFunction, SearchHit, RequestWrapper> buildScriptApplier() { + protected BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { Script script = mainRequest.getScript(); if (script != null) { return new UpdateByQueryScriptApplier(task, scriptService, script, script.getParams()); @@ -98,14 +97,14 @@ public 
class TransportUpdateByQueryAction extends HandledTransportAction buildRequest(SearchHit doc) { + protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) { IndexRequest index = new IndexRequest(); - index.index(doc.index()); - index.type(doc.type()); - index.id(doc.id()); - index.source(doc.sourceRef()); + index.index(doc.getIndex()); + index.type(doc.getType()); + index.id(doc.getId()); + index.source(doc.getSource()); index.versionType(VersionType.INTERNAL); - index.version(doc.version()); + index.version(doc.getVersion()); index.setPipeline(mainRequest.getPipeline()); return wrap(index); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteInfo.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteInfo.java new file mode 100644 index 00000000000..89d6cb18401 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteInfo.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex.remote; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + +import static java.util.Objects.requireNonNull; + +public class RemoteInfo implements Writeable { + private final String scheme; + private final String host; + private final int port; + private final BytesReference query; + private final String username; + private final String password; + + public RemoteInfo(String scheme, String host, int port, BytesReference query, String username, String password) { + this.scheme = requireNonNull(scheme, "[scheme] must be specified to reindex from a remote cluster"); + this.host = requireNonNull(host, "[host] must be specified to reindex from a remote cluster"); + this.port = port; + this.query = requireNonNull(query, "[query] must be specified to reindex from a remote cluster"); + this.username = username; + this.password = password; + } + + /** + * Read from a stream. 
+ */ + public RemoteInfo(StreamInput in) throws IOException { + scheme = in.readString(); + host = in.readString(); + port = in.readVInt(); + query = in.readBytesReference(); + username = in.readOptionalString(); + password = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(scheme); + out.writeString(host); + out.writeVInt(port); + out.writeBytesReference(query); + out.writeOptionalString(username); + out.writeOptionalString(password); + } + + public String getScheme() { + return scheme; + } + + public String getHost() { + return host; + } + + public int getPort() { + return port; + } + + public BytesReference getQuery() { + return query; + } + + @Nullable + public String getUsername() { + return username; + } + + @Nullable + public String getPassword() { + return password; + } + + @Override + public String toString() { + StringBuilder b = new StringBuilder(); + if (false == "http".equals(scheme)) { + // http is the default so it isn't worth taking up space if it is the scheme + b.append("scheme=").append(scheme).append(' '); + } + b.append("host=").append(host).append(" port=").append(port).append(" query=").append(query.utf8ToString()); + if (username != null) { + b.append(" username=").append(username); + } + if (password != null) { + b.append(" password=<<>>"); + } + return b.toString(); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java new file mode 100644 index 00000000000..00c9f0ae509 --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -0,0 +1,163 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex.remote; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.search.sort.SortBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.singletonMap; + +final class RemoteRequestBuilders { + private RemoteRequestBuilders() {} + + static String initialSearchPath(SearchRequest searchRequest) { + // It is nasty to build paths with StringBuilder but we'll be careful.... 
+ StringBuilder path = new StringBuilder("/"); + addIndexesOrTypes(path, "Index", searchRequest.indices()); + addIndexesOrTypes(path, "Type", searchRequest.types()); + path.append("_search"); + return path.toString(); + } + + static Map initialSearchParams(SearchRequest searchRequest, Version remoteVersion) { + Map params = new HashMap<>(); + if (searchRequest.scroll() != null) { + params.put("scroll", searchRequest.scroll().keepAlive().toString()); + } + params.put("size", Integer.toString(searchRequest.source().size())); + if (searchRequest.source().version() == null || searchRequest.source().version() == true) { + // false is the only value that makes it false. Null defaults to true.... + params.put("version", null); + } + if (searchRequest.source().sorts() != null) { + boolean useScan = false; + // Detect if we should use search_type=scan rather than a sort + if (remoteVersion.before(Version.V_2_1_0)) { + for (SortBuilder sort : searchRequest.source().sorts()) { + if (sort instanceof FieldSortBuilder) { + FieldSortBuilder f = (FieldSortBuilder) sort; + if (f.getFieldName().equals(FieldSortBuilder.DOC_FIELD_NAME)) { + useScan = true; + break; + } + } + } + } + if (useScan) { + params.put("search_type", "scan"); + } else { + StringBuilder sorts = new StringBuilder(sortToUri(searchRequest.source().sorts().get(0))); + for (int i = 1; i < searchRequest.source().sorts().size(); i++) { + sorts.append(',').append(sortToUri(searchRequest.source().sorts().get(i))); + } + params.put("sorts", sorts.toString()); + } + } + if (searchRequest.source().storedFields() != null && false == searchRequest.source().storedFields().isEmpty()) { + StringBuilder fields = new StringBuilder(searchRequest.source().storedFields().get(0)); + for (int i = 1; i < searchRequest.source().storedFields().size(); i++) { + fields.append(',').append(searchRequest.source().storedFields().get(i)); + } + String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? 
"fields" : "stored_fields"; + params.put(storedFieldsParamName, fields.toString()); + } + return params; + } + + static HttpEntity initialSearchEntity(BytesReference query) { + try (XContentBuilder entity = JsonXContent.contentBuilder(); XContentParser queryParser = XContentHelper.createParser(query)) { + entity.startObject(); + entity.field("query"); + /* + * We're intentionally a bit paranoid here - copying the query as xcontent rather than writing a raw field. We don't want poorly + * written queries to escape. Ever. + */ + entity.copyCurrentStructure(queryParser); + XContentParser.Token shouldBeEof = queryParser.nextToken(); + if (shouldBeEof != null) { + throw new ElasticsearchException( + "query was more than a single object. This first token after the object is [" + shouldBeEof + "]"); + } + entity.endObject(); + BytesRef bytes = entity.bytes().toBytesRef(); + return new ByteArrayEntity(bytes.bytes, bytes.offset, bytes.length, ContentType.APPLICATION_JSON); + } catch (IOException e) { + throw new ElasticsearchException("unexpected error building entity", e); + } + } + + private static void addIndexesOrTypes(StringBuilder path, String name, String[] indicesOrTypes) { + if (indicesOrTypes == null || indicesOrTypes.length == 0) { + return; + } + for (String indexOrType : indicesOrTypes) { + checkIndexOrType(name, indexOrType); + } + path.append(Strings.arrayToCommaDelimitedString(indicesOrTypes)).append('/'); + } + + private static void checkIndexOrType(String name, String indexOrType) { + if (indexOrType.indexOf(',') >= 0) { + throw new IllegalArgumentException(name + " containing [,] not supported but got [" + indexOrType + "]"); + } + if (indexOrType.indexOf('/') >= 0) { + throw new IllegalArgumentException(name + " containing [/] not supported but got [" + indexOrType + "]"); + } + } + + private static String sortToUri(SortBuilder sort) { + if (sort instanceof FieldSortBuilder) { + FieldSortBuilder f = (FieldSortBuilder) sort; + return f.getFieldName() + 
":" + f.order(); + } + throw new IllegalArgumentException("Unsupported sort [" + sort + "]"); + } + + static String scrollPath() { + return "/_search/scroll"; + } + + static Map scrollParams(TimeValue keepAlive) { + return singletonMap("scroll", keepAlive.toString()); + } + + static HttpEntity scrollEntity(String scroll) { + return new StringEntity(scroll, ContentType.TEXT_PLAIN); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java new file mode 100644 index 00000000000..0a467593a2c --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java @@ -0,0 +1,301 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex.remote; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParseFieldMatcherSupplier; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.reindex.ScrollableHitSource.BasicHit; +import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; +import org.elasticsearch.index.reindex.ScrollableHitSource.Response; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; + +import java.io.IOException; +import java.util.List; +import java.util.function.BiFunction; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.Objects.requireNonNull; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Parsers to convert the response from the remote host into objects useful for {@link RemoteScrollableHitSource}. Lots of data is + * intentionally thrown on the floor because we don't need it but ObjectParser and friends are strict about blowing up when they see + * elements they don't understand. So you'll see a lot of BiConsumers that look like "(b, v) -> {}". That means "I don't care about the + * value here, just throw it away and don't blow up. 
+ */ +final class RemoteResponseParsers { + private RemoteResponseParsers() {} + + /** + * Parser for an individual {@code hit} element. + */ + public static final ConstructingObjectParser HIT_PARSER = new ConstructingObjectParser<>("hit", + a -> { + int i = 0; + String index = (String) a[i++]; + String type = (String) a[i++]; + String id = (String) a[i++]; + long version = (long) a[i++]; + return new BasicHit(index, type, id, version); + }); + static { + HIT_PARSER.declareString(constructorArg(), new ParseField("_index")); + HIT_PARSER.declareString(constructorArg(), new ParseField("_type")); + HIT_PARSER.declareString(constructorArg(), new ParseField("_id")); + HIT_PARSER.declareLong(constructorArg(), new ParseField("_version")); + HIT_PARSER.declareObject(BasicHit::setSource, (p, s) -> { + try { + /* + * We spool the data from the remote back into xcontent so we can get bytes to send. There ought to be a better way but for + * now this should do. + */ + try (XContentBuilder b = JsonXContent.contentBuilder()) { + b.copyCurrentStructure(p); + return b.bytes(); + } + } catch (IOException e) { + throw new ParsingException(p.getTokenLocation(), "[hit] failed to parse [_source]", e); + } + }, new ParseField("_source")); + HIT_PARSER.declareString(BasicHit::setRouting, new ParseField("_routing")); + HIT_PARSER.declareString(BasicHit::setParent, new ParseField("_parent")); + HIT_PARSER.declareLong(BasicHit::setTTL, new ParseField("_ttl")); + HIT_PARSER.declareLong(BasicHit::setTimestamp, new ParseField("_timestamp")); + HIT_PARSER.declareField((b, v) -> {}, p -> null, new ParseField("_score"), ValueType.FLOAT_OR_NULL); + HIT_PARSER.declareStringArray((b, v) -> {}, new ParseField("sort")); + } + + /** + * Parser for the {@code hits} element. Parsed to an array of {@code [total (Long), hits (List)]}. 
+ */ + public static final ConstructingObjectParser HITS_PARSER = new ConstructingObjectParser<>("hits", + a -> a); + static { + HITS_PARSER.declareLong(constructorArg(), new ParseField("total")); + HITS_PARSER.declareObjectArray(constructorArg(), HIT_PARSER, new ParseField("hits")); + HITS_PARSER.declareField((b, v) -> {}, p -> null, new ParseField("max_score"), ValueType.FLOAT_OR_NULL); + } + + /** + * Parser for {@code failed} shards in the {@code _shards} elements. + */ + public static final ConstructingObjectParser SEARCH_FAILURE_PARSER = + new ConstructingObjectParser<>("failure", a -> { + int i = 0; + String index = (String) a[i++]; + Integer shardId = (Integer) a[i++]; + String nodeId = (String) a[i++]; + Object reason = a[i++]; + + Throwable reasonThrowable; + if (reason instanceof String) { + reasonThrowable = new RuntimeException("Unknown remote exception with reason=[" + (String) reason + "]"); + } else { + reasonThrowable = (Throwable) reason; + } + return new SearchFailure(reasonThrowable, index, shardId, nodeId); + }); + static { + SEARCH_FAILURE_PARSER.declareString(optionalConstructorArg(), new ParseField("index")); + SEARCH_FAILURE_PARSER.declareInt(optionalConstructorArg(), new ParseField("shard")); + SEARCH_FAILURE_PARSER.declareString(optionalConstructorArg(), new ParseField("node")); + SEARCH_FAILURE_PARSER.declareField(constructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.START_OBJECT) { + return ThrowableBuilder.PARSER.apply(p, c); + } else { + return p.text(); + } + }, new ParseField("reason"), ValueType.OBJECT_OR_STRING); + SEARCH_FAILURE_PARSER.declareInt((b, v) -> {}, new ParseField("status")); + } + + /** + * Parser for the {@code _shards} element. Throws everything out except the errors array if there is one. If there isn't one then it + * parses to an empty list. 
+ */ + public static final ConstructingObjectParser, ParseFieldMatcherSupplier> SHARDS_PARSER = + new ConstructingObjectParser<>("_shards", a -> { + @SuppressWarnings("unchecked") + List failures = (List) a[0]; + failures = failures == null ? emptyList() : failures; + return failures; + }); + static { + SHARDS_PARSER.declareObjectArray(optionalConstructorArg(), SEARCH_FAILURE_PARSER, new ParseField("failures")); + SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("total")); + SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("successful")); + SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("failed")); + } + + public static final ConstructingObjectParser RESPONSE_PARSER = + new ConstructingObjectParser<>("search_response", a -> { + int i = 0; + Throwable catastrophicFailure = (Throwable) a[i++]; + if (catastrophicFailure != null) { + return new Response(false, singletonList(new SearchFailure(catastrophicFailure)), 0, emptyList(), null); + } + boolean timedOut = (boolean) a[i++]; + String scroll = (String) a[i++]; + Object[] hitsElement = (Object[]) a[i++]; + @SuppressWarnings("unchecked") + List failures = (List) a[i++]; + + long totalHits = 0; + List hits = emptyList(); + + // Pull apart the hits element if we got it + if (hitsElement != null) { + i = 0; + totalHits = (long) hitsElement[i++]; + @SuppressWarnings("unchecked") + List h = (List) hitsElement[i++]; + hits = h; + } + + return new Response(timedOut, failures, totalHits, hits, scroll); + }); + static { + RESPONSE_PARSER.declareObject(optionalConstructorArg(), ThrowableBuilder.PARSER, new ParseField("error")); + RESPONSE_PARSER.declareBoolean(optionalConstructorArg(), new ParseField("timed_out")); + RESPONSE_PARSER.declareString(optionalConstructorArg(), new ParseField("_scroll_id")); + RESPONSE_PARSER.declareObject(optionalConstructorArg(), HITS_PARSER, new ParseField("hits")); + RESPONSE_PARSER.declareObject(optionalConstructorArg(), SHARDS_PARSER, new ParseField("_shards")); + 
RESPONSE_PARSER.declareInt((b, v) -> {}, new ParseField("took")); + RESPONSE_PARSER.declareBoolean((b, v) -> {}, new ParseField("terminated_early")); + RESPONSE_PARSER.declareInt((b, v) -> {}, new ParseField("status")); + } + + /** + * Collects stuff about Throwables and attempts to rebuild them. + */ + public static class ThrowableBuilder { + public static final BiFunction PARSER; + static { + ObjectParser parser = new ObjectParser<>("reason", ThrowableBuilder::new); + PARSER = parser.andThen(ThrowableBuilder::build); + parser.declareString(ThrowableBuilder::setType, new ParseField("type")); + parser.declareString(ThrowableBuilder::setReason, new ParseField("reason")); + parser.declareObject(ThrowableBuilder::setCausedBy, PARSER, new ParseField("caused_by")); + + // So we can give a nice error for parsing exceptions + parser.declareInt(ThrowableBuilder::setLine, new ParseField("line")); + parser.declareInt(ThrowableBuilder::setColumn, new ParseField("col")); + + // So we don't blow up on search exceptions + parser.declareString((b, v) -> {}, new ParseField("phase")); + parser.declareBoolean((b, v) -> {}, new ParseField("grouped")); + parser.declareField((p, v, c) -> p.skipChildren(), new ParseField("failed_shards"), ValueType.OBJECT_ARRAY); + + // Just throw away the root_cause + parser.declareField((p, v, c) -> p.skipChildren(), new ParseField("root_cause"), ValueType.OBJECT_ARRAY); + } + + private String type; + private String reason; + private Integer line; + private Integer column; + private Throwable causedBy; + + public Throwable build() { + Throwable t = buildWithoutCause(); + if (causedBy != null) { + t.initCause(causedBy); + } + return t; + } + + private Throwable buildWithoutCause() { + requireNonNull(type, "[type] is required"); + requireNonNull(reason, "[reason] is required"); + switch (type) { + // Make some effort to use the right exceptions + case "es_rejected_execution_exception": + return new EsRejectedExecutionException(reason); + case 
"parsing_exception": + XContentLocation location = null; + if (line != null && column != null) { + location = new XContentLocation(line, column); + } + return new ParsingException(location, reason); + // But it isn't worth trying to get it perfect.... + default: + return new RuntimeException(type + ": " + reason); + } + } + + public void setType(String type) { + this.type = type; + } + public void setReason(String reason) { + this.reason = reason; + } + public void setLine(Integer line) { + this.line = line; + } + public void setColumn(Integer column) { + this.column = column; + } + public void setCausedBy(Throwable causedBy) { + this.causedBy = causedBy; + } + } + + /** + * Parses the {@code version} field of the main action. There are a surprising number of fields in this that we don't need! + */ + public static final ConstructingObjectParser VERSION_PARSER = new ConstructingObjectParser<>( + "version", a -> Version.fromString((String) a[0])); + static { + VERSION_PARSER.declareString(constructorArg(), new ParseField("number")); + VERSION_PARSER.declareBoolean((p, v) -> {}, new ParseField("snapshot_build")); + VERSION_PARSER.declareBoolean((p, v) -> {}, new ParseField("build_snapshot")); + VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_hash")); + VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_date")); + VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_timestamp")); + VERSION_PARSER.declareString((p, v) -> {}, new ParseField("lucene_version")); + } + + /** + * Parses the main action to return just the {@linkplain Version} that it returns. We throw everything else out. 
+ */ + public static final ConstructingObjectParser MAIN_ACTION_PARSER = new ConstructingObjectParser<>( + "/", a -> (Version) a[0]); + static { + MAIN_ACTION_PARSER.declareBoolean((p, v) -> {}, new ParseField("ok")); + MAIN_ACTION_PARSER.declareInt((p, v) -> {}, new ParseField("status")); + MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name")); + MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("cluster_name")); + MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name")); + MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("tagline")); + MAIN_ACTION_PARSER.declareObject(constructorArg(), VERSION_PARSER, new ParseField("version")); + } +} diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java new file mode 100644 index 00000000000..62dbd59f80a --- /dev/null +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -0,0 +1,242 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex.remote; + +import org.apache.http.HttpEntity; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BackoffPolicy; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParseFieldMatcherSupplier; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.xcontent.XContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.BufferedInputStream; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.util.Iterator; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Consumer; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchEntity; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchParams; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchPath; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollEntity; +import static 
org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollParams; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollPath; +import static org.elasticsearch.index.reindex.remote.RemoteResponseParsers.MAIN_ACTION_PARSER; +import static org.elasticsearch.index.reindex.remote.RemoteResponseParsers.RESPONSE_PARSER; + +public class RemoteScrollableHitSource extends ScrollableHitSource { + private final AsyncClient client; + private final BytesReference query; + private final SearchRequest searchRequest; + Version remoteVersion; + + public RemoteScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry, + Consumer fail, AsyncClient client, BytesReference query, SearchRequest searchRequest) { + super(logger, backoffPolicy, threadPool, countSearchRetry, fail); + this.query = query; + this.searchRequest = searchRequest; + this.client = client; + } + + @Override + public void close() { + try { + client.close(); + } catch (IOException e) { + fail.accept(new IOException("couldn't close the remote connection", e)); + } + } + + @Override + protected void doStart(Consumer onResponse) { + lookupRemoteVersion(version -> { + remoteVersion = version; + execute("POST", initialSearchPath(searchRequest), initialSearchParams(searchRequest, version), + initialSearchEntity(query), RESPONSE_PARSER, r -> onStartResponse(onResponse, r)); + }); + } + + void lookupRemoteVersion(Consumer onVersion) { + execute("GET", "", emptyMap(), null, MAIN_ACTION_PARSER, onVersion); + + } + + void onStartResponse(Consumer onResponse, Response response) { + if (Strings.hasLength(response.getScrollId()) && response.getHits().isEmpty()) { + logger.debug("First response looks like a scan response. Jumping right to the second. 
scroll=[{}]", response.getScrollId()); + doStartNextScroll(response.getScrollId(), timeValueMillis(0), onResponse); + } else { + onResponse.accept(response); + } + } + + @Override + protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer onResponse) { + execute("POST", scrollPath(), scrollParams(timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos())), + scrollEntity(scrollId), RESPONSE_PARSER, onResponse); + } + + @Override + protected void clearScroll(String scrollId) { + // Need to throw out response.... + client.performRequest("DELETE", scrollPath(), emptyMap(), scrollEntity(scrollId), new ResponseListener() { + @Override + public void onResponse(InputStream response) { + logger.debug("Successfully cleared [{}]", scrollId); + } + + @Override + public void onRetryableFailure(Exception t) { + onFailure(t); + } + + @Override + public void onFailure(Exception t) { + logger.warn("Failed to clear scroll [{}]", t, scrollId); + } + }); + } + + void execute(String method, String uri, Map params, HttpEntity entity, + BiFunction parser, Consumer listener) { + class RetryHelper extends AbstractRunnable { + private final Iterator retries = backoffPolicy.iterator(); + + @Override + protected void doRun() throws Exception { + client.performRequest(method, uri, params, entity, new ResponseListener() { + @Override + public void onResponse(InputStream content) { + T response; + try { + XContent xContent = XContentFactory.xContentType(content).xContent(); + try(XContentParser xContentParser = xContent.createParser(content)) { + response = parser.apply(xContentParser, () -> ParseFieldMatcher.STRICT); + } + } catch (IOException e) { + throw new ElasticsearchException("Error deserializing response", e); + } + listener.accept(response); + } + + @Override + public void onFailure(Exception e) { + fail.accept(e); + } + + @Override + public void onRetryableFailure(Exception t) { + if (retries.hasNext()) { + TimeValue delay = 
retries.next(); + logger.trace("retrying rejected search after [{}]", t, delay); + countSearchRetry.run(); + threadPool.schedule(delay, ThreadPool.Names.SAME, RetryHelper.this); + } else { + fail.accept(t); + } + } + }); + } + + @Override + public void onFailure(Exception t) { + fail.accept(t); + } + } + new RetryHelper().run(); + } + + public interface AsyncClient extends Closeable { + void performRequest(String method, String uri, Map params, HttpEntity entity, ResponseListener listener); + } + + public interface ResponseListener extends ActionListener { + void onRetryableFailure(Exception t); + } + + public static class AsynchronizingRestClient implements AsyncClient { + private final ThreadPool threadPool; + private final RestClient restClient; + + public AsynchronizingRestClient(ThreadPool threadPool, RestClient restClient) { + this.threadPool = threadPool; + this.restClient = restClient; + } + + @Override + public void performRequest(String method, String uri, Map params, HttpEntity entity, + ResponseListener listener) { + /* + * We use the generic thread pool here because this client is blocking the generic thread pool is sized appropriately for some + * of the threads on it to be blocked, waiting on IO. It'd be a disaster if this ran on the listener thread pool, eating + * valuable threads needed to handle responses. Most other thread pool would probably not mind running this either, but the + * generic thread pool is the "most right" place for it to run. We could make our own thread pool for this but the generic + * thread pool already has plenty of capacity. 
+ */ + threadPool.generic().execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + try (org.elasticsearch.client.Response response = restClient.performRequest(method, uri, params, entity)) { + InputStream markSupportedInputStream = new BufferedInputStream(response.getEntity().getContent()); + listener.onResponse(markSupportedInputStream); + } + } + + @Override + public void onFailure(Exception t) { + if (t instanceof ResponseException) { + ResponseException re = (ResponseException) t; + if (RestStatus.TOO_MANY_REQUESTS.getStatus() == re.getResponse().getStatusLine().getStatusCode()) { + listener.onRetryableFailure(t); + return; + } + } + listener.onFailure(t); + } + }); + } + + @Override + public void close() throws IOException { + restClient.close(); + } + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java index 0318e4ddb01..f8351b262fc 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexByScrollActionScriptTestCase.java @@ -22,21 +22,15 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.index.Index; import org.elasticsearch.index.reindex.AbstractAsyncBulkIndexByScrollAction.OpType; import org.elasticsearch.index.reindex.AbstractAsyncBulkIndexByScrollAction.RequestWrapper; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import 
org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchHitField; -import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchHit; import org.junit.Before; import org.mockito.Matchers; -import java.util.HashMap; import java.util.Map; import java.util.function.Consumer; @@ -63,9 +57,7 @@ public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase< @SuppressWarnings("unchecked") protected > T applyScript(Consumer> scriptBody) { IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar")); - Map fields = new HashMap<>(); - InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), fields); - doc.shardTarget(new SearchShardTarget("nodeid", new Index("index", "uuid"), 1)); + ScrollableHitSource.Hit doc = new ScrollableHitSource.BasicHit("test", "type", "id", 0); ExecutableScript executableScript = new SimpleExecutableScript(scriptBody); when(scriptService.executable(any(CompiledScript.class), Matchers.>any())) diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java index 5a9976fc005..4cc10334223 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AbstractAsyncBulkIndexbyScrollActionMetadataTestCase.java @@ -20,16 +20,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.common.text.Text; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.mapper.internal.TTLFieldMapper; -import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; -import org.elasticsearch.search.SearchShardTarget; -import 
org.elasticsearch.search.internal.InternalSearchHit; -import org.elasticsearch.search.internal.InternalSearchHitField; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase< @@ -37,25 +28,19 @@ public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase< Response extends BulkIndexByScrollResponse> extends AbstractAsyncBulkIndexByScrollActionTestCase { - /** - * Create a doc with some metadata. - */ - protected InternalSearchHit doc(String field, Object value) { - InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), singletonMap(field, - new InternalSearchHitField(field, singletonList(value)))); - doc.shardTarget(new SearchShardTarget("node", new Index("index", "uuid"), 0)); - return doc; + protected ScrollableHitSource.BasicHit doc() { + return new ScrollableHitSource.BasicHit("index", "type", "id", 0); } public void testTimestampIsCopied() { IndexRequest index = new IndexRequest(); - action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(TimestampFieldMapper.NAME, 10L)); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setTimestamp(10L)); assertEquals("10", index.timestamp()); } public void testTTL() throws Exception { IndexRequest index = new IndexRequest(); - action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(TTLFieldMapper.NAME, 10L)); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setTTL(10L)); assertEquals(timeValueMillis(10), index.ttl()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 9b094a9e2d3..77e792b8333 100644 --- 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -62,9 +62,10 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.InternalSearchResponse; @@ -95,6 +96,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; import static org.apache.lucene.util.TestUtil.randomSimpleString; import static org.elasticsearch.action.bulk.BackoffPolicy.constantBackoff; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; @@ -103,7 +105,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.emptyCollectionOf; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; @@ -155,7 +157,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { * random scroll id so it is checked instead. 
*/ private String scrollId() { - scrollId = randomSimpleString(random(), 1, 1000); // Empty strings get special behavior we don't want + scrollId = randomSimpleString(random(), 1, 10); // Empty strings get special behavior we don't want return scrollId; } @@ -216,10 +218,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { assertEquals(0, testTask.getStatus().getTotal()); long total = randomIntBetween(0, Integer.MAX_VALUE); - InternalSearchHits hits = new InternalSearchHits(null, total, 0); - InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false); - new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueSeconds(0), 0, - new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null)); + ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), total, emptyList(), null); + simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueSeconds(0), 0, response); assertEquals(total, testTask.getStatus().getTotal()); } @@ -229,12 +229,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { public void testScrollResponseBatchingBehavior() throws Exception { int maxBatches = randomIntBetween(0, 100); for (int batches = 1; batches < maxBatches; batches++) { - InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap()); - InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] { hit }, 0, 0); - InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false); + Hit hit = new ScrollableHitSource.BasicHit("index", "type", "id", 0); + ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 1, singletonList(hit), null); DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction(); - action.onScrollResponse(timeValueNanos(System.nanoTime()), 0, - new SearchResponse(searchResponse, 
scrollId(), 5, 4, randomLong(), null)); + simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 0, response); // Use assert busy because the update happens on another thread final int expectedBatches = batches; @@ -314,16 +312,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { return null; } }; - InternalSearchHits hits = new InternalSearchHits(null, 0, 0); - InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false); - new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueNanos(System.nanoTime()), 10, - new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null)); - try { - listener.get(); - fail("Expected a failure"); - } catch (ExecutionException e) { - assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]")); - } + ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null); + simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 10, response); + ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get()); + assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]")); assertThat(client.scrollsCleared, contains(scrollId)); // When the task is rejected we don't increment the throttled timer @@ -335,12 +327,12 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { * scroll request going down. 
*/ public void testShardFailuresAbortRequest() throws Exception { - ShardSearchFailure shardFailure = new ShardSearchFailure(new RuntimeException("test")); - InternalSearchResponse internalResponse = new InternalSearchResponse(null, null, null, null, false, null); - new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueNanos(System.nanoTime()), 0, - new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[] { shardFailure })); + SearchFailure shardFailure = new SearchFailure(new RuntimeException("test")); + ScrollableHitSource.Response scrollResponse = new ScrollableHitSource.Response(false, singletonList(shardFailure), 0, + emptyList(), null); + simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 0, scrollResponse); BulkIndexByScrollResponse response = listener.get(); - assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class)); + assertThat(response.getBulkFailures(), empty()); assertThat(response.getSearchFailures(), contains(shardFailure)); assertFalse(response.isTimedOut()); assertNull(response.getReasonCancelled()); @@ -351,12 +343,11 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { * Mimicks search timeouts. 
*/ public void testSearchTimeoutsAbortRequest() throws Exception { - InternalSearchResponse internalResponse = new InternalSearchResponse(null, null, null, null, true, null); - new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueNanos(System.nanoTime()), 0, - new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[0])); + ScrollableHitSource.Response scrollResponse = new ScrollableHitSource.Response(true, emptyList(), 0, emptyList(), null); + simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 0, scrollResponse); BulkIndexByScrollResponse response = listener.get(); - assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class)); - assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class)); + assertThat(response.getBulkFailures(), empty()); + assertThat(response.getSearchFailures(), empty()); assertTrue(response.isTimedOut()); assertNull(response.getReasonCancelled()); assertThat(client.scrollsCleared, contains(scrollId)); @@ -371,8 +362,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong()); action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse); BulkIndexByScrollResponse response = listener.get(); - assertThat(response.getIndexingFailures(), contains(failure)); - assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class)); + assertThat(response.getBulkFailures(), contains(failure)); + assertThat(response.getSearchFailures(), empty()); assertNull(response.getReasonCancelled()); } @@ -382,15 +373,13 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { public void testListenerReceiveBuildBulkExceptions() throws Exception { DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction() { @Override - protected 
BulkRequest buildBulk(Iterable docs) { + protected BulkRequest buildBulk(Iterable docs) { throw new RuntimeException("surprise"); } }; - InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap()); - InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] {hit}, 0, 0); - InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false); - SearchResponse searchResponse = new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), null); - action.onScrollResponse(timeValueNanos(System.nanoTime()), 0, searchResponse); + Hit hit = new ScrollableHitSource.BasicHit("index", "type", "id", 0); + ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 1, singletonList(hit), null); + simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 0, response); ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get()); assertThat(e.getCause(), instanceOf(RuntimeException.class)); assertThat(e.getCause().getMessage(), equalTo("surprise")); @@ -499,9 +488,9 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { action.sendBulkRequest(timeValueNanos(System.nanoTime()), request); if (failWithRejection) { BulkIndexByScrollResponse response = listener.get(); - assertThat(response.getIndexingFailures(), hasSize(1)); - assertEquals(response.getIndexingFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS); - assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class)); + assertThat(response.getBulkFailures(), hasSize(1)); + assertEquals(response.getBulkFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS); + assertThat(response.getSearchFailures(), empty()); assertNull(response.getReasonCancelled()); } else { successLatch.await(10, TimeUnit.SECONDS); @@ -549,7 +538,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { if (addDestinationIndexes) { 
action.addDestinationIndices(singleton("foo")); } - action.startNormalTermination(emptyList(), emptyList(), false); + action.refreshAndFinish(emptyList(), emptyList(), false); if (shouldRefresh) { assertArrayEquals(new String[] {"foo"}, client.lastRefreshRequest.get().indices()); } else { @@ -563,7 +552,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { public void testCancelBeforeScrollResponse() throws Exception { // We bail so early we don't need to pass in a half way valid response. - cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.onScrollResponse(timeValueNanos(System.nanoTime()), 1, + cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 1, null)); } @@ -582,10 +571,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNextScroll(timeValueNanos(System.nanoTime()), 0)); } - public void testCancelBeforeStartNormalTermination() throws Exception { + public void testCancelBeforeRefreshAndFinish() throws Exception { // Refresh or not doesn't matter - we don't try to refresh. 
testRequest.setRefresh(usually()); - cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNormalTermination(emptyList(), emptyList(), false)); + cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.refreshAndFinish(emptyList(), emptyList(), false)); assertNull("No refresh was attempted", client.lastRefreshRequest.get()); } @@ -625,12 +614,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { action.setScroll(scrollId()); } long total = randomIntBetween(0, Integer.MAX_VALUE); - InternalSearchHits hits = new InternalSearchHits(null, total, 0); - InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false); + ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), total, emptyList(), null); // Use a long delay here so the test will time out if the cancellation doesn't reschedule the throttled task - SearchResponse scrollResponse = new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null); testTask.rethrottle(1); - action.onScrollResponse(timeValueNanos(System.nanoTime()), 1000, scrollResponse); + simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 1000, response); // Now that we've got our cancel we'll just verify that it all came through all right assertEquals(reason, listener.get(10, TimeUnit.SECONDS).getReasonCancelled()); @@ -656,23 +643,26 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } } + /** + * Simulate a scroll response by setting the scroll id and firing the onScrollResponse method. 
+ */ + private void simulateScrollResponse(DummyAbstractAsyncBulkByScrollAction action, TimeValue lastBatchTime, int lastBatchSize, + ScrollableHitSource.Response response) { + action.setScroll(scrollId()); + action.onScrollResponse(lastBatchTime, lastBatchSize, response); + } + private class DummyAbstractAsyncBulkByScrollAction extends AbstractAsyncBulkByScrollAction { public DummyAbstractAsyncBulkByScrollAction() { - super(testTask, logger, new ParentTaskAssigningClient(client, localNode, testTask), threadPool, testRequest, firstSearchRequest, - listener); + super(testTask, AsyncBulkByScrollActionTests.this.logger, new ParentTaskAssigningClient(client, localNode, testTask), + AsyncBulkByScrollActionTests.this.threadPool, testRequest, listener); } @Override - protected BulkRequest buildBulk(Iterable docs) { + protected BulkRequest buildBulk(Iterable docs) { return new BulkRequest(); } - - @Override - protected BulkIndexByScrollResponse buildResponse(TimeValue took, List indexingFailures, - List searchFailures, boolean timedOut) { - return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut); - } } /** @@ -805,7 +795,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { super.doExecute(action, request, listener); } - private Throwable wrappedRejectedException() { + private Exception wrappedRejectedException() { Exception e = new EsRejectedExecutionException(); int wraps = randomIntBetween(0, 4); for (int i = 0; i < wraps; i++) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java index 05699c6f7af..735c3aa5b64 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskTests.java @@ -190,8 +190,8 @@ public class BulkByScrollTaskTests extends 
ESTestCase { } @Override - public void onFailure(Throwable t) { - errors.add(t); + public void onFailure(Exception e) { + errors.add(e); } }); @@ -271,7 +271,7 @@ public class BulkByScrollTaskTests extends ESTestCase { protected void doRun() throws Exception { } @Override - public void onFailure(Throwable t) { + public void onFailure(Exception e) { throw new UnsupportedOperationException(); } }); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java index 4ef16c59141..c0c06b14d55 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/BulkIndexByScrollResponseMatcher.java @@ -124,7 +124,7 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher whitelist = randomWhitelist(); + String[] inList = whitelist.iterator().next().split(":"); + String host = inList[0]; + int port = Integer.valueOf(inList[1]); + checkRemoteWhitelist(whitelist, new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null), + localhostOrNone()); + } + + public void testMyselfInWhitelistRemote() throws UnknownHostException { + Set whitelist = randomWhitelist(); + whitelist.add("myself"); + TransportAddress publishAddress = new InetSocketTransportAddress(InetAddress.getByAddress(new byte[] {0x7f,0x00,0x00,0x01}), 9200); + checkRemoteWhitelist(whitelist, new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null), + publishAddress); + } + + public void testUnwhitelistedRemote() { + int port = between(1, Integer.MAX_VALUE); + Exception e = expectThrows(IllegalArgumentException.class, () -> checkRemoteWhitelist(randomWhitelist(), + new RemoteInfo(randomAsciiOfLength(5), "not in list", port, new BytesArray("test"), null, null), 
localhostOrNone())); + assertEquals("[not in list:" + port + "] not whitelisted in reindex.remote.whitelist", e.getMessage()); + } + + private Set randomWhitelist() { + int size = between(1, 100); + Set set = new HashSet<>(size); + while (set.size() < size) { + set.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE)); + } + return set; + } + + private TransportAddress localhostOrNone() { + return randomFrom(random(), null, localhost); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java index 670fcefbf55..dab0cab8d8a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexMetadataTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; /** * Index-by-search test for ttl, timestamp, and routing. 
@@ -29,7 +28,7 @@ import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase { public void testRoutingCopiedByDefault() throws Exception { IndexRequest index = new IndexRequest(); - action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo")); assertEquals("foo", index.routing()); } @@ -37,7 +36,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("keep"); IndexRequest index = new IndexRequest(); - action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo")); assertEquals("foo", index.routing()); } @@ -45,7 +44,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("discard"); IndexRequest index = new IndexRequest(); - action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo")); assertEquals(null, index.routing()); } @@ -53,7 +52,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("=cat"); IndexRequest index = new IndexRequest(); - action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), 
doc().setRouting("foo")); assertEquals("cat", index.routing()); } @@ -61,7 +60,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe TransportReindexAction.AsyncIndexBySearchAction action = action(); action.mainRequest.getDestination().routing("==]"); IndexRequest index = new IndexRequest(); - action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); + action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo")); assertEquals("=]", index.routing()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java new file mode 100644 index 00000000000..efaf5e627ad --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRequestTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.index.reindex.remote.RemoteInfo; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; + +/** + * Tests some of the validation of {@linkplain ReindexRequest}. See reindex's rest tests for much more. + */ +public class ReindexRequestTests extends ESTestCase { + public void testTimestampAndTtlNotAllowed() { + ReindexRequest reindex = request(); + reindex.getDestination().ttl("1s").timestamp("now"); + ActionRequestValidationException e = reindex.validate(); + assertEquals("Validation Failed: 1: setting ttl on destination isn't supported. use scripts instead.;" + + "2: setting timestamp on destination isn't supported. use scripts instead.;", + e.getMessage()); + } + + public void testReindexFromRemoteDoesNotSupportSearchQuery() { + ReindexRequest reindex = request(); + reindex.setRemoteInfo(new RemoteInfo(randomAsciiOfLength(5), randomAsciiOfLength(5), between(1, Integer.MAX_VALUE), + new BytesArray("real_query"), null, null)); + reindex.getSearchRequest().source().query(matchAllQuery()); // Unsupported place to put query + ActionRequestValidationException e = reindex.validate(); + assertEquals("Validation Failed: 1: reindex from remote sources should use RemoteInfo's query instead of source's query;", + e.getMessage()); + } + + private ReindexRequest request() { + ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest()); + reindex.getSearchRequest().indices("source"); + reindex.getDestination().index("dest"); + return reindex; + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSameIndexTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java similarity index 62% rename from modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSameIndexTests.java rename to modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java index f1218414af7..66896406c66 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSameIndexTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexSourceTargetValidationTests.java @@ -30,15 +30,20 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.containsString; /** - * Tests that indexing from an index back into itself fails the request. + * Tests source and target index validation of reindex. Mostly that means testing that indexing from an index back into itself fails the + * request. Note that we can't catch you trying to remotely reindex from yourself into yourself. We actually assert here that reindexes + * from remote don't need to come from existing indexes. It'd be silly to fail requests if the source index didn't exist on the target + * cluster.... 
*/ -public class ReindexSameIndexTests extends ESTestCase { +public class ReindexSourceTargetValidationTests extends ESTestCase { private static final ClusterState STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder() .put(index("target", "target_alias", "target_multi"), true) .put(index("target2", "target_multi"), true) @@ -50,7 +55,7 @@ public class ReindexSameIndexTests extends ESTestCase { private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY); private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER); - public void testObviousCases() throws Exception { + public void testObviousCases() { fails("target", "target"); fails("target", "foo", "bar", "target", "baz"); fails("target", "foo", "bar", "target", "baz", "target"); @@ -58,7 +63,7 @@ public class ReindexSameIndexTests extends ESTestCase { succeeds("target", "source", "source2"); } - public void testAliasesContainTarget() throws Exception { + public void testAliasesContainTarget() { fails("target", "target_alias"); fails("target_alias", "target"); fails("target", "foo", "bar", "target_alias", "baz"); @@ -71,31 +76,33 @@ public class ReindexSameIndexTests extends ESTestCase { succeeds("target", "source", "source2", "source_multi"); } - public void testTargetIsAlias() throws Exception { - try { - succeeds("target_multi", "foo"); - fail("Expected failure"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [[")); - // The index names can come in either order - assertThat(e.getMessage(), containsString("target")); - assertThat(e.getMessage(), containsString("target2")); - } + public void testTargetIsAlias() { + Exception e = expectThrows(IllegalArgumentException.class, () -> succeeds("target_multi", "foo")); + assertThat(e.getMessage(), 
containsString("Alias [target_multi] has more than one indices associated with it [[")); + // The index names can come in either order + assertThat(e.getMessage(), containsString("target")); + assertThat(e.getMessage(), containsString("target2")); } - private void fails(String target, String... sources) throws Exception { - try { - succeeds(target, sources); - fail("Expected an exception"); - } catch (ActionRequestValidationException e) { - assertThat(e.getMessage(), - containsString("reindex cannot write into an index its reading from [target]")); - } + public void testRemoteInfoSkipsValidation() { + // The index doesn't have to exist + succeeds(new RemoteInfo(randomAsciiOfLength(5), "test", 9200, new BytesArray("test"), null, null), "does_not_exist", "target"); + // And it doesn't matter if they are the same index. They are considered to be different because the remote one is, well, remote. + succeeds(new RemoteInfo(randomAsciiOfLength(5), "test", 9200, new BytesArray("test"), null, null), "target", "target"); } - private void succeeds(String target, String... sources) throws Exception { - TransportReindexAction.validateAgainstAliases(new SearchRequest(sources), new IndexRequest(target), INDEX_NAME_EXPRESSION_RESOLVER, - AUTO_CREATE_INDEX, STATE); + private void fails(String target, String... sources) { + Exception e = expectThrows(ActionRequestValidationException.class, () -> succeeds(target, sources)); + assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from [target]")); + } + + private void succeeds(String target, String... sources) { + succeeds(null, target, sources); + } + + private void succeeds(RemoteInfo remoteInfo, String target, String... sources) { + TransportReindexAction.validateAgainstAliases(new SearchRequest(sources), new IndexRequest(target), remoteInfo, + INDEX_NAME_EXPRESSION_RESOLVER, AUTO_CREATE_INDEX, STATE); } private static IndexMetaData index(String name, String... 
aliases) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java new file mode 100644 index 00000000000..1cbec59c49d --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -0,0 +1,121 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.reindex.RestReindexAction.ReindexParseContext; +import org.elasticsearch.index.reindex.remote.RemoteInfo; +import org.elasticsearch.indices.query.IndicesQueriesRegistry; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class RestReindexActionTests extends ESTestCase { + public void testBuildRemoteInfoNoRemote() throws IOException { + assertNull(RestReindexAction.buildRemoteInfo(new HashMap<>())); + } + + public void testBuildRemoteInfoFullyLoaded() throws IOException { + Map remote = new HashMap<>(); + remote.put("host", "https://example.com:9200"); + remote.put("username", "testuser"); + remote.put("password", "testpass"); + + Map query = new HashMap<>(); + query.put("a", "b"); + + Map source = new HashMap<>(); + source.put("remote", remote); + source.put("query", query); + + RemoteInfo remoteInfo = RestReindexAction.buildRemoteInfo(source); + assertEquals("https", remoteInfo.getScheme()); + assertEquals("example.com", remoteInfo.getHost()); + assertEquals(9200, remoteInfo.getPort()); + assertEquals("{\n \"a\" : \"b\"\n}", remoteInfo.getQuery().utf8ToString()); + assertEquals("testuser", remoteInfo.getUsername()); + assertEquals("testpass", remoteInfo.getPassword()); + } + + public void testBuildRemoteInfoWithoutAllParts() throws IOException { + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com")); + expectThrows(IllegalArgumentException.class, () -> 
buildRemoteInfoHostTestCase("example.com:9200")); + expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://example.com")); + } + + public void testBuildRemoteInfoWithAllParts() throws IOException { + RemoteInfo info = buildRemoteInfoHostTestCase("http://example.com:9200"); + assertEquals("http", info.getScheme()); + assertEquals("example.com", info.getHost()); + assertEquals(9200, info.getPort()); + + info = buildRemoteInfoHostTestCase("https://other.example.com:9201"); + assertEquals("https", info.getScheme()); + assertEquals("other.example.com", info.getHost()); + assertEquals(9201, info.getPort()); + } + + public void testReindexFromRemoteRequestParsing() throws IOException { + BytesReference request; + try (XContentBuilder b = JsonXContent.contentBuilder()) { + b.startObject(); { + b.startObject("source"); { + b.startObject("remote"); { + b.field("host", "http://localhost:9200"); + } + b.endObject(); + b.field("index", "source"); + } + b.endObject(); + b.startObject("dest"); { + b.field("index", "dest"); + } + b.endObject(); + } + b.endObject(); + request = b.bytes(); + } + try (XContentParser p = JsonXContent.jsonXContent.createParser(request)) { + ReindexRequest r = new ReindexRequest(new SearchRequest(), new IndexRequest()); + RestReindexAction.PARSER.parse(p, r, + new ReindexParseContext(new IndicesQueriesRegistry(), null, null, ParseFieldMatcher.STRICT)); + assertEquals("localhost", r.getRemoteInfo().getHost()); + assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices()); + } + } + + private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOException { + Map remote = new HashMap<>(); + remote.put("host", hostInRest); + + Map source = new HashMap<>(); + source.put("remote", remote); + + return RestReindexAction.buildRemoteInfo(source); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index 09945c9372b..fd5ddaedd69 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -20,13 +20,18 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.ListenableActionFuture; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.Retry; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -39,6 +44,7 @@ import java.util.List; import java.util.concurrent.CyclicBarrier; import static org.elasticsearch.index.reindex.ReindexTestCase.matcher; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; @@ -68,6 +74,10 @@ public class RetryTests extends ESSingleNodeTestCase { // Use queues of size 1 because size 0 is broken and because search requests need the queue to function settings.put("thread_pool.bulk.queue_size", 1); settings.put("thread_pool.search.queue_size", 1); + // Enable http so we can test retries on reindex from remote. In this case the "remote" cluster is just this cluster. 
+ settings.put(NetworkModule.HTTP_ENABLED.getKey(), true); + // Whitelist reindexing from the http host we're going to use + settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself"); return settings.build(); } @@ -97,6 +107,15 @@ public class RetryTests extends ESSingleNodeTestCase { matcher().created(DOC_COUNT)); } + public void testReindexFromRemote() throws Exception { + NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0); + TransportAddress address = nodeInfo.getHttp().getAddress().publishAddress(); + RemoteInfo remote = new RemoteInfo("http", address.getHost(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, null); + ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest") + .setRemoteInfo(remote); + testCase(ReindexAction.NAME, request, matcher().created(DOC_COUNT)); + } + public void testUpdateByQuery() throws Exception { testCase(UpdateByQueryAction.NAME, UpdateByQueryAction.INSTANCE.newRequestBuilder(client()).source("source"), matcher().updated(DOC_COUNT)); @@ -118,34 +137,41 @@ public class RetryTests extends ESSingleNodeTestCase { logger.info("Starting request"); ListenableActionFuture responseListener = request.execute(); - logger.info("Waiting for search rejections on the initial search"); - assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(0L))); + try { + logger.info("Waiting for search rejections on the initial search"); + assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(0L))); - logger.info("Blocking bulk and unblocking search so we start to get bulk rejections"); - CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.BULK); - initialSearchBlock.await(); + logger.info("Blocking bulk and unblocking search so we start to get bulk rejections"); + CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.BULK); + initialSearchBlock.await(); - 
logger.info("Waiting for bulk rejections"); - assertBusy(() -> assertThat(taskStatus(action).getBulkRetries(), greaterThan(0L))); + logger.info("Waiting for bulk rejections"); + assertBusy(() -> assertThat(taskStatus(action).getBulkRetries(), greaterThan(0L))); - // Keep a copy of the current number of search rejections so we can assert that we get more when we block the scroll - long initialSearchRejections = taskStatus(action).getSearchRetries(); + // Keep a copy of the current number of search rejections so we can assert that we get more when we block the scroll + long initialSearchRejections = taskStatus(action).getSearchRetries(); - logger.info("Blocking search and unblocking bulk so we should get search rejections for the scroll"); - CyclicBarrier scrollBlock = blockExecutor(ThreadPool.Names.SEARCH); - bulkBlock.await(); + logger.info("Blocking search and unblocking bulk so we should get search rejections for the scroll"); + CyclicBarrier scrollBlock = blockExecutor(ThreadPool.Names.SEARCH); + bulkBlock.await(); - logger.info("Waiting for search rejections for the scroll"); - assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(initialSearchRejections))); + logger.info("Waiting for search rejections for the scroll"); + assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(initialSearchRejections))); - logger.info("Unblocking the scroll"); - scrollBlock.await(); + logger.info("Unblocking the scroll"); + scrollBlock.await(); - logger.info("Waiting for the request to finish"); - BulkIndexByScrollResponse response = responseListener.get(); - assertThat(response, matcher); - assertThat(response.getBulkRetries(), greaterThan(0L)); - assertThat(response.getSearchRetries(), greaterThan(initialSearchRejections)); + logger.info("Waiting for the request to finish"); + BulkIndexByScrollResponse response = responseListener.get(); + assertThat(response, matcher); + assertThat(response.getBulkRetries(), greaterThan(0L)); + 
assertThat(response.getSearchRetries(), greaterThan(initialSearchRejections)); + } finally { + // Fetch the response just in case we blew up half way through. This will make sure the failure is thrown up to the top level. + BulkIndexByScrollResponse response = responseListener.get(); + assertThat(response.getSearchFailures(), empty()); + assertThat(response.getBulkFailures(), empty()); + } } /** diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index d1cb77361bb..3e3b3a63d62 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -19,20 +19,21 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.Index; -import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; +import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; -import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; @@ -56,11 +57,28 @@ public class 
RoundTripTests extends ESTestCase { randomRequest(reindex); reindex.getDestination().version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, 12L, 1L, 123124L, 12L)); reindex.getDestination().index("test"); + if (randomBoolean()) { + int port = between(1, Integer.MAX_VALUE); + BytesReference query = new BytesArray(randomAsciiOfLength(5)); + String username = randomBoolean() ? randomAsciiOfLength(5) : null; + String password = username != null && randomBoolean() ? randomAsciiOfLength(5) : null; + reindex.setRemoteInfo(new RemoteInfo(randomAsciiOfLength(5), randomAsciiOfLength(5), port, query, username, password)); + } ReindexRequest tripped = new ReindexRequest(); roundTrip(reindex, tripped); assertRequestEquals(reindex, tripped); assertEquals(reindex.getDestination().version(), tripped.getDestination().version()); assertEquals(reindex.getDestination().index(), tripped.getDestination().index()); + if (reindex.getRemoteInfo() == null) { + assertNull(tripped.getRemoteInfo()); + } else { + assertNotNull(tripped.getRemoteInfo()); + assertEquals(reindex.getRemoteInfo().getScheme(), tripped.getRemoteInfo().getScheme()); + assertEquals(reindex.getRemoteInfo().getHost(), tripped.getRemoteInfo().getHost()); + assertEquals(reindex.getRemoteInfo().getQuery(), tripped.getRemoteInfo().getQuery()); + assertEquals(reindex.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername()); + assertEquals(reindex.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword()); + } } public void testUpdateByQueryRequest() throws IOException { @@ -149,13 +167,19 @@ public class RoundTripTests extends ESTestCase { randomSimpleString(random()), new IllegalArgumentException("test"))); } - private List randomSearchFailures() { - if (usually()) { + private List randomSearchFailures() { + if (randomBoolean()) { return emptyList(); } - Index index = new Index(randomSimpleString(random()), "uuid"); - return singletonList(new ShardSearchFailure(randomSimpleString(random()), - 
new SearchShardTarget(randomSimpleString(random()), index, randomInt()), randomFrom(RestStatus.values()))); + String index = null; + Integer shardId = null; + String nodeId = null; + if (randomBoolean()) { + index = randomAsciiOfLength(5); + shardId = randomInt(); + nodeId = usually() ? randomAsciiOfLength(5) : null; + } + return singletonList(new SearchFailure(new ElasticsearchException("foo"), index, shardId, nodeId)); } private void roundTrip(Streamable example, Streamable empty) throws IOException { @@ -182,10 +206,10 @@ public class RoundTripTests extends ESTestCase { private void assertResponseEquals(BulkIndexByScrollResponse expected, BulkIndexByScrollResponse actual) { assertEquals(expected.getTook(), actual.getTook()); assertTaskStatusEquals(expected.getStatus(), actual.getStatus()); - assertEquals(expected.getIndexingFailures().size(), actual.getIndexingFailures().size()); - for (int i = 0; i < expected.getIndexingFailures().size(); i++) { - Failure expectedFailure = expected.getIndexingFailures().get(i); - Failure actualFailure = actual.getIndexingFailures().get(i); + assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size()); + for (int i = 0; i < expected.getBulkFailures().size(); i++) { + Failure expectedFailure = expected.getBulkFailures().get(i); + Failure actualFailure = actual.getBulkFailures().get(i); assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); assertEquals(expectedFailure.getType(), actualFailure.getType()); assertEquals(expectedFailure.getId(), actualFailure.getId()); @@ -194,13 +218,15 @@ public class RoundTripTests extends ESTestCase { } assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size()); for (int i = 0; i < expected.getSearchFailures().size(); i++) { - ShardSearchFailure expectedFailure = expected.getSearchFailures().get(i); - ShardSearchFailure actualFailure = actual.getSearchFailures().get(i); - assertEquals(expectedFailure.shard(), actualFailure.shard()); - 
assertEquals(expectedFailure.status(), actualFailure.status()); - // We can't use getCause because throwable doesn't implement equals - assertEquals(expectedFailure.reason(), actualFailure.reason()); + SearchFailure expectedFailure = expected.getSearchFailures().get(i); + SearchFailure actualFailure = actual.getSearchFailures().get(i); + assertEquals(expectedFailure.getIndex(), actualFailure.getIndex()); + assertEquals(expectedFailure.getShardId(), actualFailure.getShardId()); + assertEquals(expectedFailure.getNodeId(), actualFailure.getNodeId()); + assertEquals(expectedFailure.getReason().getClass(), actualFailure.getReason().getClass()); + assertEquals(expectedFailure.getReason().getMessage(), actualFailure.getReason().getMessage()); } + } private void assertTaskStatusEquals(BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java index bb6a33b593a..6ebb0749792 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java @@ -21,13 +21,12 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; public class UpdateByQueryMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase { public void testRoutingIsCopied() throws Exception { IndexRequest index = new IndexRequest(); - action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo")); + action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo")); assertEquals("foo", index.routing()); } diff --git 
a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWhileModifyingTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWhileModifyingTests.java index faea69b870f..6bbcbd6e643 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWhileModifyingTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWhileModifyingTests.java @@ -44,7 +44,7 @@ public class UpdateByQueryWhileModifyingTests extends ReindexTestCase { AtomicReference value = new AtomicReference<>(randomSimpleString(random())); indexRandom(true, client().prepareIndex("test", "test", "test").setSource("test", value.get())); - AtomicReference failure = new AtomicReference<>(); + AtomicReference failure = new AtomicReference<>(); AtomicBoolean keepUpdating = new AtomicBoolean(true); Thread updater = new Thread(() -> { while (keepUpdating.get()) { @@ -52,8 +52,8 @@ public class UpdateByQueryWhileModifyingTests extends ReindexTestCase { BulkIndexByScrollResponse response = updateByQuery().source("test").refresh(true).abortOnVersionConflict(false).get(); assertThat(response, matcher().updated(either(equalTo(0L)).or(equalTo(1L))) .versionConflicts(either(equalTo(0L)).or(equalTo(1L)))); - } catch (Throwable t) { - failure.set(t); + } catch (Exception e) { + failure.set(e); } } }); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java index 1c57c202766..c5b9d4da64f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.script.ScriptService; import 
java.util.Date; @@ -48,7 +49,7 @@ public class UpdateByQueryWithScriptTests @Override protected UpdateByQueryRequest request() { - return new UpdateByQueryRequest(); + return new UpdateByQueryRequest(new SearchRequest()); } @Override diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteInfoTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteInfoTests.java new file mode 100644 index 00000000000..5492a05986c --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteInfoTests.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex.remote; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.test.ESTestCase; + +public class RemoteInfoTests extends ESTestCase { + public void testToString() { + RemoteInfo info = new RemoteInfo("http", "testhost", 12344, new BytesArray("testquery"), null, null); + assertEquals("host=testhost port=12344 query=testquery", info.toString()); + info = new RemoteInfo("http", "testhost", 12344, new BytesArray("testquery"), "testuser", null); + assertEquals("host=testhost port=12344 query=testquery username=testuser", info.toString()); + info = new RemoteInfo("http", "testhost", 12344, new BytesArray("testquery"), "testuser", "testpass"); + assertEquals("host=testhost port=12344 query=testquery username=testuser password=<<>>", info.toString()); + info = new RemoteInfo("https", "testhost", 12344, new BytesArray("testquery"), "testuser", "testpass"); + assertEquals("scheme=https host=testhost port=12344 query=testquery username=testuser password=<<>>", info.toString()); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java new file mode 100644 index 00000000000..9bbfd175a79 --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -0,0 +1,181 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.reindex.remote; + +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.elasticsearch.Version; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchEntity; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchParams; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchPath; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollEntity; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollParams; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.not; + +public class RemoteRequestBuildersTests extends ESTestCase { + public void testIntialSearchPath() { + SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); + + assertEquals("/_search", initialSearchPath(searchRequest)); + 
searchRequest.indices("a"); + searchRequest.types("b"); + assertEquals("/a/b/_search", initialSearchPath(searchRequest)); + searchRequest.indices("a", "b"); + searchRequest.types("c", "d"); + assertEquals("/a,b/c,d/_search", initialSearchPath(searchRequest)); + + searchRequest.indices("cat,"); + expectBadStartRequest(searchRequest, "Index", ",", "cat,"); + searchRequest.indices("cat,", "dog"); + expectBadStartRequest(searchRequest, "Index", ",", "cat,"); + searchRequest.indices("dog", "cat,"); + expectBadStartRequest(searchRequest, "Index", ",", "cat,"); + searchRequest.indices("cat/"); + expectBadStartRequest(searchRequest, "Index", "/", "cat/"); + searchRequest.indices("cat/", "dog"); + expectBadStartRequest(searchRequest, "Index", "/", "cat/"); + searchRequest.indices("dog", "cat/"); + expectBadStartRequest(searchRequest, "Index", "/", "cat/"); + + searchRequest.indices("ok"); + searchRequest.types("cat,"); + expectBadStartRequest(searchRequest, "Type", ",", "cat,"); + searchRequest.types("cat,", "dog"); + expectBadStartRequest(searchRequest, "Type", ",", "cat,"); + searchRequest.types("dog", "cat,"); + expectBadStartRequest(searchRequest, "Type", ",", "cat,"); + searchRequest.types("cat/"); + expectBadStartRequest(searchRequest, "Type", "/", "cat/"); + searchRequest.types("cat/", "dog"); + expectBadStartRequest(searchRequest, "Type", "/", "cat/"); + searchRequest.types("dog", "cat/"); + expectBadStartRequest(searchRequest, "Type", "/", "cat/"); + } + + private void expectBadStartRequest(SearchRequest searchRequest, String type, String bad, String failed) { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> initialSearchPath(searchRequest)); + assertEquals(type + " containing [" + bad + "] not supported but got [" + failed + "]", e.getMessage()); + } + + public void testInitialSearchParamsSort() { + SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); + + // Test sort:_doc for versions that support 
it. + Version remoteVersion = Version.fromId(between(Version.V_2_1_0_ID, Version.CURRENT.id)); + searchRequest.source().sort("_doc"); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sorts", "_doc:asc")); + + // Test search_type scan for versions that don't support sort:_doc. + remoteVersion = Version.fromId(between(0, Version.V_2_1_0_ID - 1)); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("search_type", "scan")); + + // Test sorting by some field. Version doesn't matter. + remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); + searchRequest.source().sorts().clear(); + searchRequest.source().sort("foo"); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sorts", "foo:asc")); + } + + public void testInitialSearchParamsFields() { + SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); + + // Test request without any fields + Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); + assertThat(initialSearchParams(searchRequest, remoteVersion), + not(either(hasKey("stored_fields")).or(hasKey("fields")))); + + // Setup some fields for the next two tests + searchRequest.source().storedField("_source").storedField("_id"); + + // Test stored_fields for versions that support it + remoteVersion = Version.fromId(between(Version.V_5_0_0_alpha4_ID, Version.CURRENT.id)); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("stored_fields", "_source,_id")); + + // Test fields for versions that support it + remoteVersion = Version.fromId(between(0, Version.V_5_0_0_alpha4_ID - 1)); + assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("fields", "_source,_id")); + } + + public void testInitialSearchParamsMisc() { + SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); + Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id)); + + TimeValue scroll = null; + if 
(randomBoolean()) { + scroll = TimeValue.parseTimeValue(randomPositiveTimeValue(), "test"); + searchRequest.scroll(scroll); + } + int size = between(0, Integer.MAX_VALUE); + searchRequest.source().size(size); + Boolean fetchVersion = null; + if (randomBoolean()) { + fetchVersion = randomBoolean(); + searchRequest.source().version(fetchVersion); + } + + Map params = initialSearchParams(searchRequest, remoteVersion); + + assertThat(params, scroll == null ? not(hasKey("scroll")) : hasEntry("scroll", scroll.toString())); + assertThat(params, hasEntry("size", Integer.toString(size))); + assertThat(params, fetchVersion == null || fetchVersion == true ? hasEntry("version", null) : not(hasEntry("version", null))); + } + + public void testInitialSearchEntity() throws IOException { + String query = "{\"match_all\":{}}"; + HttpEntity entity = initialSearchEntity(new BytesArray(query)); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals("{\"query\":" + query + "}", + Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + + // Invalid XContent fails + RuntimeException e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray("{}, \"trailing\": {}"))); + assertThat(e.getCause().getMessage(), containsString("Unexpected character (',' (code 44))")); + e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray("{"))); + assertThat(e.getCause().getMessage(), containsString("Unexpected end-of-input")); + } + + public void testScrollParams() { + TimeValue scroll = TimeValue.parseTimeValue(randomPositiveTimeValue(), "test"); + assertThat(scrollParams(scroll), hasEntry("scroll", scroll.toString())); + } + + public void testScrollEntity() throws IOException { + String scroll = randomAsciiOfLength(30); + HttpEntity entity = scrollEntity(scroll); + assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue()); + 
assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + } +} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java new file mode 100644 index 00000000000..f8f3e82b4bb --- /dev/null +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -0,0 +1,381 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.reindex.remote; + +import org.apache.http.HttpEntity; +import org.elasticsearch.Version; +import org.elasticsearch.action.bulk.BackoffPolicy; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.reindex.ScrollableHitSource.Response; +import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource.ResponseListener; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Map; +import java.util.concurrent.Executor; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; +import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; + +public class RemoteScrollableHitSourceTests extends ESTestCase { + private final String FAKE_SCROLL_ID = "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll"; + private int retries; + private ThreadPool threadPool; + private SearchRequest searchRequest; + private int retriesAllowed; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getTestName()) { + @Override + public 
Executor executor(String name) { + return r -> r.run(); + } + + @Override + public ScheduledFuture schedule(TimeValue delay, String name, Runnable command) { + command.run(); + return null; + } + }; + retries = 0; + searchRequest = new SearchRequest(); + searchRequest.scroll(timeValueMinutes(5)); + searchRequest.source(new SearchSourceBuilder().size(10).version(true).sort("_doc").size(123)); + retriesAllowed = 0; + } + + @After + @Override + public void tearDown() throws Exception { + super.tearDown(); + terminate(threadPool); + } + + public void testLookupRemoteVersion() throws Exception { + sourceWithMockedRemoteCall(false, "main/0_20_5.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("0.20.5"), v)); + sourceWithMockedRemoteCall(false, "main/0_90_13.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("0.90.13"), v)); + sourceWithMockedRemoteCall(false, "main/1_7_5.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("1.7.5"), v)); + sourceWithMockedRemoteCall(false, "main/2_3_3.json").lookupRemoteVersion(v -> assertEquals(Version.V_2_3_3, v)); + sourceWithMockedRemoteCall(false, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> assertEquals(Version.V_5_0_0_alpha3, v)); + } + + public void testParseStartOk() throws Exception { + AtomicBoolean called = new AtomicBoolean(); + sourceWithMockedRemoteCall("start_ok.json").doStart(r -> { + assertFalse(r.isTimedOut()); + assertEquals(FAKE_SCROLL_ID, r.getScrollId()); + assertEquals(4, r.getTotalHits()); + assertThat(r.getFailures(), empty()); + assertThat(r.getHits(), hasSize(1)); + assertEquals("test", r.getHits().get(0).getIndex()); + assertEquals("test", r.getHits().get(0).getType()); + assertEquals("AVToMiC250DjIiBO3yJ_", r.getHits().get(0).getId()); + assertEquals("{\"test\":\"test2\"}", r.getHits().get(0).getSource().utf8ToString()); + assertNull(r.getHits().get(0).getTTL()); + assertNull(r.getHits().get(0).getTimestamp()); + assertNull(r.getHits().get(0).getRouting()); + 
called.set(true); + }); + assertTrue(called.get()); + } + + public void testParseScrollOk() throws Exception { + AtomicBoolean called = new AtomicBoolean(); + sourceWithMockedRemoteCall("scroll_ok.json").doStartNextScroll("", timeValueMillis(0), r -> { + assertFalse(r.isTimedOut()); + assertEquals(FAKE_SCROLL_ID, r.getScrollId()); + assertEquals(4, r.getTotalHits()); + assertThat(r.getFailures(), empty()); + assertThat(r.getHits(), hasSize(1)); + assertEquals("test", r.getHits().get(0).getIndex()); + assertEquals("test", r.getHits().get(0).getType()); + assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); + assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); + assertNull(r.getHits().get(0).getTTL()); + assertNull(r.getHits().get(0).getTimestamp()); + assertNull(r.getHits().get(0).getRouting()); + called.set(true); + }); + assertTrue(called.get()); + } + + /** + * Test for parsing _ttl, _timestamp, and _routing. + */ + public void testParseScrollFullyLoaded() throws Exception { + AtomicBoolean called = new AtomicBoolean(); + sourceWithMockedRemoteCall("scroll_fully_loaded.json").doStartNextScroll("", timeValueMillis(0), r -> { + assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); + assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); + assertEquals((Long) 1234L, r.getHits().get(0).getTTL()); + assertEquals((Long) 123444L, r.getHits().get(0).getTimestamp()); + assertEquals("testrouting", r.getHits().get(0).getRouting()); + assertEquals("testparent", r.getHits().get(0).getParent()); + called.set(true); + }); + assertTrue(called.get()); + } + + /** + * Versions of Elasticsearch before 2.1.0 don't support sort:_doc and instead need to use search_type=scan. Scan doesn't return + * documents the first iteration but reindex doesn't like that. So we jump start straight to the next iteration.
+ */ + public void testScanJumpStart() throws Exception { + AtomicBoolean called = new AtomicBoolean(); + sourceWithMockedRemoteCall("start_scan.json", "scroll_ok.json").doStart(r -> { + assertFalse(r.isTimedOut()); + assertEquals(FAKE_SCROLL_ID, r.getScrollId()); + assertEquals(4, r.getTotalHits()); + assertThat(r.getFailures(), empty()); + assertThat(r.getHits(), hasSize(1)); + assertEquals("test", r.getHits().get(0).getIndex()); + assertEquals("test", r.getHits().get(0).getType()); + assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); + assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); + assertNull(r.getHits().get(0).getTTL()); + assertNull(r.getHits().get(0).getTimestamp()); + assertNull(r.getHits().get(0).getRouting()); + called.set(true); + }); + assertTrue(called.get()); + } + + public void testParseRejection() throws Exception { + // The rejection comes through in the handler because the mocked http response isn't marked as an error + AtomicBoolean called = new AtomicBoolean(); + // Handling a scroll rejection is the same as handling a search rejection so we reuse the verification code + Consumer checkResponse = r -> { + assertFalse(r.isTimedOut()); + assertEquals(FAKE_SCROLL_ID, r.getScrollId()); + assertEquals(4, r.getTotalHits()); + assertThat(r.getFailures(), hasSize(1)); + assertEquals("test", r.getFailures().get(0).getIndex()); + assertEquals((Integer) 0, r.getFailures().get(0).getShardId()); + assertEquals("87A7NvevQxSrEwMbtRCecg", r.getFailures().get(0).getNodeId()); + assertThat(r.getFailures().get(0).getReason(), instanceOf(EsRejectedExecutionException.class)); + assertEquals("rejected execution of org.elasticsearch.transport.TransportService$5@52d06af2 on " + + "EsThreadPoolExecutor[search, queue capacity = 1000, org.elasticsearch.common.util.concurrent." 
+ + "EsThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, " + + "completed tasks = 4182]]", r.getFailures().get(0).getReason().getMessage()); + assertThat(r.getHits(), hasSize(1)); + assertEquals("test", r.getHits().get(0).getIndex()); + assertEquals("test", r.getHits().get(0).getType()); + assertEquals("AVToMiC250DjIiBO3yJ_", r.getHits().get(0).getId()); + assertEquals("{\"test\":\"test1\"}", r.getHits().get(0).getSource().utf8ToString()); + called.set(true); + }; + sourceWithMockedRemoteCall("rejection.json").doStart(checkResponse); + assertTrue(called.get()); + called.set(false); + sourceWithMockedRemoteCall("rejection.json").doStartNextScroll("scroll", timeValueMillis(0), checkResponse); + assertTrue(called.get()); + } + + public void testParseFailureWithStatus() throws Exception { + // The failure comes through in the handler because the mocked http response isn't marked as an error + AtomicBoolean called = new AtomicBoolean(); + // Handling a scroll failure is the same as handling a search failure so we reuse the verification code + Consumer checkResponse = r -> { + assertFalse(r.isTimedOut()); + assertEquals(FAKE_SCROLL_ID, r.getScrollId()); + assertEquals(10000, r.getTotalHits()); + assertThat(r.getFailures(), hasSize(1)); + assertEquals(null, r.getFailures().get(0).getIndex()); + assertEquals(null, r.getFailures().get(0).getShardId()); + assertEquals(null, r.getFailures().get(0).getNodeId()); + assertThat(r.getFailures().get(0).getReason(), instanceOf(RuntimeException.class)); + assertEquals("Unknown remote exception with reason=[SearchContextMissingException[No search context found for id [82]]]", + r.getFailures().get(0).getReason().getMessage()); + assertThat(r.getHits(), hasSize(1)); + assertEquals("test", r.getHits().get(0).getIndex()); + assertEquals("test", r.getHits().get(0).getType()); + assertEquals("10000", r.getHits().get(0).getId()); + assertEquals("{\"test\":\"test10000\"}",
r.getHits().get(0).getSource().utf8ToString()); + called.set(true); + }; + sourceWithMockedRemoteCall("failure_with_status.json").doStart(checkResponse); + assertTrue(called.get()); + called.set(false); + sourceWithMockedRemoteCall("failure_with_status.json").doStartNextScroll("scroll", timeValueMillis(0), checkResponse); + assertTrue(called.get()); + } + + public void testParseRequestFailure() throws Exception { + AtomicBoolean called = new AtomicBoolean(); + Consumer checkResponse = r -> { + assertFalse(r.isTimedOut()); + assertNull(r.getScrollId()); + assertEquals(0, r.getTotalHits()); + assertThat(r.getFailures(), hasSize(1)); + assertThat(r.getFailures().get(0).getReason(), instanceOf(ParsingException.class)); + ParsingException failure = (ParsingException) r.getFailures().get(0).getReason(); + assertEquals("Unknown key for a VALUE_STRING in [invalid].", failure.getMessage()); + assertEquals(2, failure.getLineNumber()); + assertEquals(14, failure.getColumnNumber()); + called.set(true); + }; + sourceWithMockedRemoteCall("request_failure.json").doStart(checkResponse); + assertTrue(called.get()); + called.set(false); + sourceWithMockedRemoteCall("request_failure.json").doStartNextScroll("scroll", timeValueMillis(0), checkResponse); + assertTrue(called.get()); + } + + public void testRetryAndSucceed() throws Exception { + AtomicBoolean called = new AtomicBoolean(); + Consumer checkResponse = r -> { + assertThat(r.getFailures(), hasSize(0)); + called.set(true); + }; + retriesAllowed = between(1, Integer.MAX_VALUE); + sourceWithMockedRemoteCall("fail:rejection.json", "start_ok.json").doStart(checkResponse); + assertTrue(called.get()); + assertEquals(1, retries); + retries = 0; + called.set(false); + sourceWithMockedRemoteCall("fail:rejection.json", "scroll_ok.json").doStartNextScroll("scroll", timeValueMillis(0), + checkResponse); + assertTrue(called.get()); + assertEquals(1, retries); + } + + public void testRetryUntilYouRunOutOfTries() throws Exception { + 
AtomicBoolean called = new AtomicBoolean(); + Consumer checkResponse = r -> called.set(true); + retriesAllowed = between(0, 10); + String[] paths = new String[retriesAllowed + 2]; + for (int i = 0; i < retriesAllowed + 2; i++) { + paths[i] = "fail:rejection.json"; + } + RuntimeException e = expectThrows(RuntimeException.class, () -> sourceWithMockedRemoteCall(paths).doStart(checkResponse)); + assertEquals("failed", e.getMessage()); + assertFalse(called.get()); + assertEquals(retriesAllowed, retries); + retries = 0; + e = expectThrows(RuntimeException.class, + () -> sourceWithMockedRemoteCall(paths).doStartNextScroll("scroll", timeValueMillis(0), checkResponse)); + assertEquals("failed", e.getMessage()); + assertFalse(called.get()); + assertEquals(retriesAllowed, retries); + } + + private RemoteScrollableHitSource sourceWithMockedRemoteCall(String... paths) throws Exception { + return sourceWithMockedRemoteCall(true, paths); + } + + /** + * Creates a hit source that doesn't make the remote request and instead returns data from some files. Also requests are always returned + * synchronously rather than asynchronously. + */ + private RemoteScrollableHitSource sourceWithMockedRemoteCall(boolean mockRemoteVersion, String... 
paths) throws Exception { + URL[] resources = new URL[paths.length]; + for (int i = 0; i < paths.length; i++) { + resources[i] = Thread.currentThread().getContextClassLoader().getResource("responses/" + paths[i].replace("fail:", "")); + if (resources[i] == null) { + throw new IllegalArgumentException("Couldn't find [" + paths[i] + "]"); + } + } + RemoteScrollableHitSource.AsyncClient client = new RemoteScrollableHitSource.AsyncClient() { + int responseCount = 0; + @Override + public void performRequest(String method, String uri, Map params, HttpEntity entity, + ResponseListener listener) { + try { + URL resource = resources[responseCount]; + String path = paths[responseCount++]; + InputStream stream = resource.openStream(); + if (path.startsWith("fail:")) { + String body = Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)); + if (path.equals("fail:rejection.json")) { + listener.onRetryableFailure(new RuntimeException(body)); + } else { + listener.onFailure(new RuntimeException(body)); + } + } else { + listener.onResponse(stream); + } + } catch (IOException e) { + listener.onFailure(e); + } + } + + @Override + public void close() throws IOException { + } + }; + TestRemoteScrollableHitSource hitSource = new TestRemoteScrollableHitSource(client) { + @Override + void lookupRemoteVersion(Consumer onVersion) { + if (mockRemoteVersion) { + onVersion.accept(Version.CURRENT); + } else { + super.lookupRemoteVersion(onVersion); + } + } + }; + if (mockRemoteVersion) { + hitSource.remoteVersion = Version.CURRENT; + } + return hitSource; + } + + private BackoffPolicy backoff() { + return BackoffPolicy.constantBackoff(timeValueMillis(0), retriesAllowed); + } + + private void countRetry() { + retries += 1; + } + + private void failRequest(Throwable t) { + throw new RuntimeException("failed", t); + } + + private class TestRemoteScrollableHitSource extends RemoteScrollableHitSource { + public 
TestRemoteScrollableHitSource(RemoteScrollableHitSource.AsyncClient client) { + super(RemoteScrollableHitSourceTests.this.logger, backoff(), RemoteScrollableHitSourceTests.this.threadPool, + RemoteScrollableHitSourceTests.this::countRetry, RemoteScrollableHitSourceTests.this::failRequest, client, + new BytesArray("{}"), RemoteScrollableHitSourceTests.this.searchRequest); + } + } +} diff --git a/modules/reindex/src/test/resources/responses/failure_with_status.json b/modules/reindex/src/test/resources/responses/failure_with_status.json new file mode 100644 index 00000000000..314de37a679 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/failure_with_status.json @@ -0,0 +1,28 @@ +{ + "_scroll_id": "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll", + "took": 3, + "timed_out": false, + "_shards": { + "total": 5, + "successful": 4, + "failed": 1, + "failures": [ { + "status": 404, + "reason": "SearchContextMissingException[No search context found for id [82]]" + } ] + }, + "hits": { + "total": 10000, + "max_score": 0.0, + "hits": [ { + "_index": "test", + "_type": "test", + "_id": "10000", + "_version": 1, + "_score": 0.0, + "_source": { + "test": "test10000" + } + } ] + } +} diff --git a/modules/reindex/src/test/resources/responses/main/0_20_5.json b/modules/reindex/src/test/resources/responses/main/0_20_5.json new file mode 100644 index 00000000000..58ca8e9428f --- /dev/null +++ b/modules/reindex/src/test/resources/responses/main/0_20_5.json @@ -0,0 +1,10 @@ +{ + "ok" : true, + "status" : 200, + "name" : "Techno", + "version" : { + "number" : "0.20.5", + "snapshot_build" : false + }, + "tagline" : "You Know, for Search" +} diff --git a/modules/reindex/src/test/resources/responses/main/0_90_13.json b/modules/reindex/src/test/resources/responses/main/0_90_13.json new file mode 100644 index 00000000000..1b104e068d9 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/main/0_90_13.json @@ -0,0 +1,13 @@ +{ + "ok" : true, + "status" : 200, + "name" : "Mogul of 
the Mystic Mountain", + "version" : { + "number" : "0.90.13", + "build_hash" : "249c9c5e06765c9e929e92b1d235e1ba4dc679fa", + "build_timestamp" : "2014-03-25T15:27:12Z", + "build_snapshot" : false, + "lucene_version" : "4.6" + }, + "tagline" : "You Know, for Search" +} diff --git a/modules/reindex/src/test/resources/responses/main/1_7_5.json b/modules/reindex/src/test/resources/responses/main/1_7_5.json new file mode 100644 index 00000000000..0fe721defee --- /dev/null +++ b/modules/reindex/src/test/resources/responses/main/1_7_5.json @@ -0,0 +1,13 @@ +{ + "status" : 200, + "name" : "Robert Kelly", + "cluster_name" : "elasticsearch", + "version" : { + "number" : "1.7.5", + "build_hash" : "00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4", + "build_timestamp" : "2016-02-02T09:55:30Z", + "build_snapshot" : false, + "lucene_version" : "4.10.4" + }, + "tagline" : "You Know, for Search" +} diff --git a/modules/reindex/src/test/resources/responses/main/2_3_3.json b/modules/reindex/src/test/resources/responses/main/2_3_3.json new file mode 100644 index 00000000000..8cd90b3b637 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/main/2_3_3.json @@ -0,0 +1,12 @@ +{ + "name" : "Ezekiel Stane", + "cluster_name" : "elasticsearch", + "version" : { + "number" : "2.3.3", + "build_hash" : "218bdf10790eef486ff2c41a3df5cfa32dadcfde", + "build_timestamp" : "2016-05-17T15:40:04Z", + "build_snapshot" : false, + "lucene_version" : "5.5.0" + }, + "tagline" : "You Know, for Search" +} diff --git a/modules/reindex/src/test/resources/responses/main/5_0_0_alpha_3.json b/modules/reindex/src/test/resources/responses/main/5_0_0_alpha_3.json new file mode 100644 index 00000000000..6911f61c3e9 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/main/5_0_0_alpha_3.json @@ -0,0 +1,12 @@ +{ + "name" : "Paibo", + "cluster_name" : "distribution_run", + "version" : { + "number" : "5.0.0-alpha3", + "build_hash" : "42e092f", + "build_date" : "2016-05-26T16:55:45.405Z", + "build_snapshot" 
: true, + "lucene_version" : "6.0.0" + }, + "tagline" : "You Know, for Search" +} diff --git a/modules/reindex/src/test/resources/responses/rejection.json b/modules/reindex/src/test/resources/responses/rejection.json new file mode 100644 index 00000000000..36120fbf888 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/rejection.json @@ -0,0 +1,34 @@ +{ + "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll", + "took" : 6, + "timed_out" : false, + "_shards" : { + "total" : 5, + "successful" : 4, + "failed" : 1, + "failures" : [ { + "shard" : 0, + "index" : "test", + "node" : "87A7NvevQxSrEwMbtRCecg", + "reason" : { + "type" : "es_rejected_execution_exception", + "reason" : "rejected execution of org.elasticsearch.transport.TransportService$5@52d06af2 on EsThreadPoolExecutor[search, queue capacity = 1000, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, completed tasks = 4182]]" + } + } ] + }, + "hits" : { + "total" : 4, + "max_score" : null, + "hits" : [ { + "_index" : "test", + "_type" : "test", + "_id" : "AVToMiC250DjIiBO3yJ_", + "_version" : 1, + "_score" : null, + "_source" : { + "test" : "test1" + }, + "sort" : [ 0 ] + } ] + } +} diff --git a/modules/reindex/src/test/resources/responses/request_failure.json b/modules/reindex/src/test/resources/responses/request_failure.json new file mode 100644 index 00000000000..6f6de78c060 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/request_failure.json @@ -0,0 +1,15 @@ +{ + "error" : { + "root_cause" : [ { + "type" : "parsing_exception", + "reason" : "Unknown key for a VALUE_STRING in [invalid].", + "line" : 2, + "col" : 14 + } ], + "type" : "parsing_exception", + "reason" : "Unknown key for a VALUE_STRING in [invalid].", + "line" : 2, + "col" : 14 + }, + "status" : 400 +} diff --git a/modules/reindex/src/test/resources/responses/scroll_fully_loaded.json 
b/modules/reindex/src/test/resources/responses/scroll_fully_loaded.json new file mode 100644 index 00000000000..a2c1be34e5c --- /dev/null +++ b/modules/reindex/src/test/resources/responses/scroll_fully_loaded.json @@ -0,0 +1,30 @@ +{ + "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll", + "took" : 3, + "timed_out" : false, + "terminated_early" : true, + "_shards" : { + "total" : 5, + "successful" : 5, + "failed" : 0 + }, + "hits" : { + "total" : 4, + "max_score" : null, + "hits" : [ { + "_index" : "test", + "_type" : "test", + "_id" : "AVToMiDL50DjIiBO3yKA", + "_version" : 1, + "_score" : null, + "_source" : { + "test" : "test3" + }, + "sort" : [ 0 ], + "_routing": "testrouting", + "_parent": "testparent", + "_ttl" : 1234, + "_timestamp": 123444 + } ] + } +} diff --git a/modules/reindex/src/test/resources/responses/scroll_ok.json b/modules/reindex/src/test/resources/responses/scroll_ok.json new file mode 100644 index 00000000000..5cdc4a400cb --- /dev/null +++ b/modules/reindex/src/test/resources/responses/scroll_ok.json @@ -0,0 +1,26 @@ +{ + "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll", + "took" : 3, + "timed_out" : false, + "terminated_early" : true, + "_shards" : { + "total" : 5, + "successful" : 5, + "failed" : 0 + }, + "hits" : { + "total" : 4, + "max_score" : null, + "hits" : [ { + "_index" : "test", + "_type" : "test", + "_id" : "AVToMiDL50DjIiBO3yKA", + "_version" : 1, + "_score" : null, + "_source" : { + "test" : "test3" + }, + "sort" : [ 0 ] + } ] + } +} diff --git a/modules/reindex/src/test/resources/responses/start_ok.json b/modules/reindex/src/test/resources/responses/start_ok.json new file mode 100644 index 00000000000..a2988341f8c --- /dev/null +++ b/modules/reindex/src/test/resources/responses/start_ok.json @@ -0,0 +1,25 @@ +{ + "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll", + "took" : 6, + "timed_out" : false, + "_shards" : { + "total" : 5, + "successful" : 5, + "failed" : 0 + }, + "hits" : { + "total" : 4, + "max_score" : null, + 
"hits" : [ { + "_index" : "test", + "_type" : "test", + "_id" : "AVToMiC250DjIiBO3yJ_", + "_version" : 1, + "_score" : null, + "_source" : { + "test" : "test2" + }, + "sort" : [ 0 ] + } ] + } +} diff --git a/modules/reindex/src/test/resources/responses/start_scan.json b/modules/reindex/src/test/resources/responses/start_scan.json new file mode 100644 index 00000000000..5576d708b30 --- /dev/null +++ b/modules/reindex/src/test/resources/responses/start_scan.json @@ -0,0 +1,15 @@ +{ + "_scroll_id" : "c2Nhbjs1OzQ0Ojd5aUZoUm5hU2lDZ3ZvUHMzMXdGQ1E7NDU6N3lpRmhSbmFTaUNndm9QczMxd0ZDUTs0Mjo3eWlGaFJuYVNpQ2d2b1BzMzF3RkNROzQzOjd5aUZoUm5hU2lDZ3ZvUHMzMXdGQ1E7NDE6N3lpRmhSbmFTaUNndm9QczMxd0ZDUTsxO3RvdGFsX2hpdHM6MTAwMDA7", + "took" : 13, + "timed_out" : false, + "_shards" : { + "total" : 5, + "successful" : 5, + "failed" : 0 + }, + "hits" : { + "total" : 10000, + "max_score" : 0.0, + "hits" : [ ] + } +} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml index 8648c9034ee..7f17befe909 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml @@ -59,11 +59,11 @@ --- "source fields may not be modified": - do: - catch: /fields is not supported in this context/ + catch: /stored_fields is not supported in this context/ delete_by_query: index: test body: - fields: [_id] + stored_fields: [_id] --- "requests_per_second cannot be an empty string": diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml index a7a5198e430..e31c8f84cec 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml +++ 
b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/20_validation.yaml @@ -216,11 +216,54 @@ --- "source fields may not be modified": - do: - catch: /fields is not supported in this context/ + catch: /stored_fields is not supported in this context/ reindex: body: source: index: test - fields: [_id] + stored_fields: [_id] + dest: + index: dest + +--- +"unwhitelisted remote host fails": + - do: + catch: /\[badremote:9200\] not whitelisted in reindex.remote.whitelist/ + reindex: + body: + source: + remote: + host: http://badremote:9200 + index: test + dest: + index: dest + +--- +"badly formatted remote host fails": + - do: + catch: /\[host\] must be of the form \[scheme\].//\[host\].\[port\]/ + reindex: + body: + source: + remote: + host: badremote + weird: stuff + badkey: is bad + index: test + dest: + index: dest + +--- +"junk in remote fails": + - do: + catch: /Unsupported fields in \[remote\]. \[weird,badkey\]/ + reindex: + body: + source: + remote: + host: http://okremote:9200 + weird: stuff + badkey: is bad + index: test dest: index: dest diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml new file mode 100644 index 00000000000..6adac98ad77 --- /dev/null +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/90_remote.yaml @@ -0,0 +1,207 @@ +--- +"Basic reindex from remote": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + refresh: true + + # Fetch the http host. We use the host of the master because we know there will always be a master. 
+ - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + index: source + dest: + index: dest + - match: {created: 1} + - match: {updated: 0} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + - is_false: task + - is_false: deleted + + - do: + search: + index: dest + body: + query: + match: + text: test + - match: {hits.total: 1} + +--- +"Reindex from remote with query": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: foo + id: 2 + body: { "text": "test2" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. + - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + index: source + query: + match: + text: test2 + dest: + index: dest + - match: {created: 1} + + - do: + search: + index: dest + body: + query: + match_all: {} + - match: {hits.total: 1} + +--- +"Reindex from remote with routing": + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + routing: foo + refresh: true + + # Fetch the http host. We use the host of the master because we know there will always be a master. 
+ - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + index: source + dest: + index: dest + - match: {created: 1} + + - do: + search: + index: dest + routing: foo + body: + query: + match: + text: test + - match: {hits.total: 1} + +--- +"Reindex from remote with parent/child": + - do: + indices.create: + index: source + body: + mappings: + foo: {} + bar: + _parent: + type: foo + - do: + indices.create: + index: dest + body: + mappings: + foo: {} + bar: + _parent: + type: foo + - do: + index: + index: source + type: foo + id: 1 + body: { "text": "test" } + - do: + index: + index: source + type: bar + id: 1 + parent: 1 + body: { "text": "test2" } + - do: + indices.refresh: {} + + # Fetch the http host. We use the host of the master because we know there will always be a master. 
+ - do: + cluster.state: {} + - set: { master_node: master } + - do: + nodes.info: + metric: [ http ] + - is_true: nodes.$master.http.publish_address + - set: {nodes.$master.http.publish_address: host} + - do: + reindex: + refresh: true + body: + source: + remote: + host: http://${host} + index: source + dest: + index: dest + - match: {created: 2} + + - do: + search: + index: dest + body: + query: + has_parent: + parent_type: foo + query: + match: + text: test + - match: {hits.total: 1} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml index ea487eb54e0..08ab3cbcc10 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/20_validation.yaml @@ -67,11 +67,11 @@ --- "source fields may not be modified": - do: - catch: /fields is not supported in this context/ + catch: /stored_fields is not supported in this context/ update_by_query: index: test body: - fields: [_id] + stored_fields: [_id] --- "requests_per_second cannot be an empty string": diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java index e04724ee370..eac3ceebc16 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuTokenizerFactory.java @@ -40,9 +40,6 @@ import java.util.HashMap; import java.util.Map; import java.util.stream.Collectors; - -/** - */ public class IcuTokenizerFactory extends AbstractTokenizerFactory { private final ICUTokenizerConfig config; @@ -101,8 +98,8 @@ public class IcuTokenizerFactory extends AbstractTokenizerFactory { }; return config; } - } catch 
(Throwable t) { - throw new ElasticsearchException("failed to load ICU rule files", t); + } catch (Exception e) { + throw new ElasticsearchException("failed to load ICU rule files", e); } } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index c9dd2263245..be3d737b919 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -249,8 +249,8 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas } } - } catch (Throwable e) { - logger.warn("Exception caught during discovery: {}", e, e.getMessage()); + } catch (Exception e) { + logger.warn("exception caught during discovery", e); } logger.debug("{} node(s) added", cachedDiscoNodes.size()); diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index a3d2dffda55..4036fb0d688 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -122,7 +122,7 @@ public final class AttachmentProcessor extends AbstractProcessor { String length = Strings.hasLength(contentLength) ? 
contentLength : String.valueOf(parsedContent.length()); additionalFields.put(Property.CONTENT_LENGTH.toLowerCase(), length); } - } catch (Throwable e) { + } catch (Exception e) { throw new ElasticsearchParseException("Error parsing document in field [{}]", e, field); } diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java index 0c63f65c247..4b9a40dd8a9 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/TikaDocTests.java @@ -58,7 +58,7 @@ public class TikaDocTests extends ESTestCase { assertNotNull(parsedContent); assertFalse(parsedContent.isEmpty()); logger.debug("extracted content: {}", parsedContent); - } catch (Throwable e) { + } catch (Exception e) { throw new RuntimeException("parsing of filename: " + fileName.getFileName() + " failed", e); } } diff --git a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java index 8bcc0ca1fc8..b27c3fad2b8 100644 --- a/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java +++ b/plugins/jvm-example/src/main/java/org/elasticsearch/plugin/example/ExampleCatAction.java @@ -52,11 +52,12 @@ public class ExampleCatAction extends AbstractCatAction { table.endRow(); try { channel.sendResponse(RestTable.buildResponse(table, channel)); - } catch (Throwable e) { + } catch (Exception e) { try { channel.sendResponse(new BytesRestResponse(channel, e)); - } catch (Throwable e1) { - logger.error("failed to send failure response", e1); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.error("failed to send failure response", inner); } } } diff --git 
a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java index c2f1214e72b..634a4ca6dfa 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java @@ -35,9 +35,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void testExecutableNoRuntimeParams() throws Exception { final JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); @@ -64,9 +61,9 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { long result = ((Number) script.run()).longValue(); assertThat(result, equalTo(addition)); } - } catch (Throwable t) { + } catch (Exception e) { failed.set(true); - logger.error("failed", t); + logger.error("failed", e); } finally { latch.countDown(); } @@ -106,9 +103,9 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { long result = ((Number) script.run()).longValue(); assertThat(result, equalTo(addition)); } - } catch (Throwable t) { + } catch (Exception e) { failed.set(true); - logger.error("failed", t); + logger.error("failed", e); } finally { latch.countDown(); } @@ -147,9 +144,9 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { long result = ((Number) se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled), runtimeVars).run()).longValue(); assertThat(result, equalTo(addition)); } - } catch (Throwable t) { + } catch (Exception e) { failed.set(true); - logger.error("failed", t); + logger.error("failed", e); } 
finally { latch.countDown(); } diff --git a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java index b642b7b0a73..5a16c06d4dc 100644 --- a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java +++ b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java @@ -261,7 +261,7 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri } /** Evaluates with reduced privileges */ - private final PyObject evalRestricted(final PyCode code) { + private PyObject evalRestricted(final PyCode code) { // eval the script with reduced privileges return AccessController.doPrivileged(new PrivilegedAction() { @Override diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java index abf9f661a6d..0a887bc9a7e 100644 --- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java +++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java @@ -66,9 +66,9 @@ public class PythonScriptMultiThreadedTests extends ESTestCase { long result = ((Number) script.run()).longValue(); assertThat(result, equalTo(addition)); } - } catch (Throwable t) { + } catch (Exception e) { failed.set(true); - logger.error("failed", t); + logger.error("failed", e); } finally { latch.countDown(); } @@ -109,9 +109,9 @@ public class PythonScriptMultiThreadedTests extends ESTestCase { // long result = ((Number) script.run(runtimeVars)).longValue(); // assertThat(result, equalTo(addition)); // } -// } catch (Throwable t) { +// } catch (Exception e) { // failed.set(true); -// logger.error("failed", t); +// logger.error("failed", 
e); // } finally { // latch.countDown(); // } @@ -151,9 +151,9 @@ public class PythonScriptMultiThreadedTests extends ESTestCase { long result = ((Number) se.executable(compiledScript, runtimeVars).run()).longValue(); assertThat(result, equalTo(addition)); } - } catch (Throwable t) { + } catch (Exception e) { failed.set(true); - logger.error("failed", t); + logger.error("failed", e); } finally { latch.countDown(); } diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java index 79174e54c62..06e51686823 100644 --- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java +++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java @@ -482,7 +482,7 @@ public class AttachmentMapper extends FieldMapper { String parsedContent; try { parsedContent = TikaImpl.parse(content, metadata, indexedChars); - } catch (Throwable e) { + } catch (Exception e) { // #18: we could ignore errors when Tika does not parse data if (!ignoreErrors) { logger.trace("exception caught", e); @@ -508,8 +508,8 @@ public class AttachmentMapper extends FieldMapper { } context = context.createExternalValueContext(language); languageMapper.parse(context); - } catch(Throwable t) { - logger.debug("Cannot detect language: [{}]", t.getMessage()); + } catch(Exception e) { + logger.debug("Cannot detect language: [{}]", e.getMessage()); } } diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java index fbbdeb83a7d..b32a6ab79a0 100644 --- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java +++ 
b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaDocTests.java @@ -58,7 +58,7 @@ public class TikaDocTests extends ESTestCase { assertNotNull(parsedContent); assertFalse(parsedContent.isEmpty()); logger.debug("extracted content: {}", parsedContent); - } catch (Throwable e) { + } catch (Exception e) { throw new RuntimeException("parsing of filename: " + fileName.getFileName() + " failed", e); } } diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml index 458990cc90c..f180f51dfc5 100644 --- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml +++ b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/30_mapping.yaml @@ -54,7 +54,7 @@ search: index: test body: - fields: [file.content_type,file.name] + stored_fields: [file.content_type,file.name] - match: { hits.total: 1 } - match: { hits.hits.0.fields: { file.content_type: ["text/my-dummy-content-type"], file.name: ["my-dummy-name-txt"] }} diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml index dc6f800c078..c2d42be3b9a 100644 --- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml +++ b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/40_highlight.yaml @@ -57,7 +57,7 @@ setup: query: match: file.content: "apache tika" - fields: [] + stored_fields: [] highlight: fields: file.content: {} diff --git a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml 
b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml index 69991b9d0c0..99f427c1ab9 100644 --- a/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml +++ b/plugins/mapper-attachments/src/test/resources/rest-api-spec/test/mapper_attachments/50_files_supported.yaml @@ -38,7 +38,7 @@ setup: search: index: test body: - fields: [file.content, file.author, file.date, file.content_length, file.content_type] + stored_fields: [file.content, file.author, file.date, file.content_length, file.content_type] - match: { hits.total: 1 } - match: { hits.hits.0.fields: { file.content: ["Test elasticsearch\n"], @@ -65,7 +65,7 @@ setup: search: index: test body: - fields: [file.content, file.author, file.date, file.content_length, file.content_type] + stored_fields: [file.content, file.author, file.date, file.content_length, file.content_type] - match: { hits.total: 1 } - match: { hits.hits.0.fields: { file.content: ["Test elasticsearch\n"], diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index 9c07c7b3eb3..c9c8972c62d 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -42,15 +42,15 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue; public class SizeFieldMapper extends MetadataFieldMapper { - public static final String NAME = "_size"; - public static final String CONTENT_TYPE = "_size"; public static class Defaults { public static final EnabledAttributeMapper ENABLED_STATE = EnabledAttributeMapper.UNSET_DISABLED; - public static final MappedFieldType SIZE_FIELD_TYPE = new 
NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); - public static final MappedFieldType LEGACY_SIZE_FIELD_TYPE = LegacyIntegerFieldMapper.Defaults.FIELD_TYPE.clone(); + public static final MappedFieldType SIZE_FIELD_TYPE = + new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER); + public static final MappedFieldType LEGACY_SIZE_FIELD_TYPE = + LegacyIntegerFieldMapper.Defaults.FIELD_TYPE.clone(); static { SIZE_FIELD_TYPE.setStored(true); @@ -68,14 +68,31 @@ public class SizeFieldMapper extends MetadataFieldMapper { } } + private static MappedFieldType defaultFieldType(Version indexCreated) { + MappedFieldType defaultFieldType; + if (indexCreated.before(Version.V_5_0_0_alpha2)) { + defaultFieldType = Defaults.LEGACY_SIZE_FIELD_TYPE.clone(); + // doc_values are disabled for bwc with indices created before V_5_0_0_alpha4 + defaultFieldType.setHasDocValues(false); + } else { + defaultFieldType = Defaults.SIZE_FIELD_TYPE.clone(); + if (indexCreated.onOrBefore(Version.V_5_0_0_alpha4)) { + // doc_values are disabled for bwc with indices created before V_5_0_0_alpha4 + defaultFieldType.setHasDocValues(false); + } else { + defaultFieldType.setHasDocValues(true); + } + } + return defaultFieldType; + } + public static class Builder extends MetadataFieldMapper.Builder { protected EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED; private Builder(MappedFieldType existing, Version indexCreated) { - super(NAME, existing == null - ? indexCreated.before(Version.V_5_0_0_alpha2) ? Defaults.LEGACY_SIZE_FIELD_TYPE : Defaults.SIZE_FIELD_TYPE - : existing, Defaults.LEGACY_SIZE_FIELD_TYPE); + super(NAME, existing == null ? 
defaultFieldType(indexCreated) : existing.clone(), + defaultFieldType(indexCreated)); builder = this; } @@ -87,21 +104,27 @@ public class SizeFieldMapper extends MetadataFieldMapper { @Override public SizeFieldMapper build(BuilderContext context) { setupFieldType(context); - fieldType.setHasDocValues(false); + if (context.indexCreatedVersion().onOrBefore(Version.V_5_0_0_alpha4)) { + // Make sure that the doc_values are disabled on indices created before V_5_0_0_alpha4 + fieldType.setHasDocValues(false); + } return new SizeFieldMapper(enabledState, fieldType, context.indexSettings()); } } public static class TypeParser implements MetadataFieldMapper.TypeParser { @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - Builder builder = new Builder(parserContext.mapperService().fullName(NAME), parserContext.indexVersionCreated()); + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { + Builder builder = new Builder(parserContext.mapperService().fullName(NAME), + parserContext.indexVersionCreated()); for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { Map.Entry entry = iterator.next(); String fieldName = entry.getKey(); Object fieldNode = entry.getValue(); if (fieldName.equals("enabled")) { - builder.enabled(lenientNodeBooleanValue(fieldNode) ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED); + builder.enabled(lenientNodeBooleanValue(fieldNode) ? + EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED); iterator.remove(); } } @@ -116,14 +139,15 @@ public class SizeFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabledState; - private SizeFieldMapper(Settings indexSettings, MappedFieldType mappedFieldType) { - this(Defaults.ENABLED_STATE, mappedFieldType == null ? 
Defaults.LEGACY_SIZE_FIELD_TYPE : mappedFieldType, indexSettings); + private SizeFieldMapper(Settings indexSettings, MappedFieldType existing) { + this(Defaults.ENABLED_STATE, + existing == null ? defaultFieldType(Version.indexCreated(indexSettings)) : existing.clone(), + indexSettings); } private SizeFieldMapper(EnabledAttributeMapper enabled, MappedFieldType fieldType, Settings indexSettings) { - super(NAME, fieldType, Defaults.LEGACY_SIZE_FIELD_TYPE, indexSettings); + super(NAME, fieldType, defaultFieldType(Version.indexCreated(indexSettings)), indexSettings); this.enabledState = enabled; - } @Override diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java index 761fb5fd144..7cbce102c57 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeFieldMapperUpgradeTests.java @@ -67,7 +67,8 @@ public class SizeFieldMapperUpgradeTests extends ESIntegTestCase { Settings settings = Settings.builder() .put(Environment.PATH_DATA_SETTING.getKey(), dataPath) .build(); - final String node = internalCluster().startDataOnlyNode(settings); // workaround for dangling index loading issue when node is master + // workaround for dangling index loading issue when node is master + final String node = internalCluster().startDataOnlyNode(settings); Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, node).nodeDataPaths(); assertEquals(1, nodePaths.length); dataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER); @@ -83,8 +84,8 @@ public class SizeFieldMapperUpgradeTests extends ESIntegTestCase { ElasticsearchAssertions.assertHitCount(countResponse, 3L); final SearchResponse sizeResponse = client().prepareSearch(indexName) - .addField("_source") - 
.addField("_size") + .addStoredField("_source") + .addStoredField("_size") .get(); ElasticsearchAssertions.assertHitCount(sizeResponse, 3L); for (SearchHit hit : sizeResponse.getHits().getHits()) { diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java index b6c341c0601..279c5c96091 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIT.java @@ -49,15 +49,19 @@ public class SizeMappingIT extends ESIntegTestCase { String index = "foo"; String type = "mytype"; - XContentBuilder builder = jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); + XContentBuilder builder = + jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder)); // check mapping again assertSizeMappingEnabled(index, type, true); // update some field in the mapping - XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("properties").startObject("otherField").field("type", "text").endObject().endObject().endObject(); - PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get(); + XContentBuilder updateMappingBuilder = + jsonBuilder().startObject().startObject("properties").startObject("otherField").field("type", "text") + .endObject().endObject().endObject(); + PutMappingResponse putMappingResponse = + client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get(); assertAcked(putMappingResponse); // make sure size field is still in mapping @@ -68,15 +72,18 @@ public class SizeMappingIT extends ESIntegTestCase { String 
index = "foo"; String type = "mytype"; - XContentBuilder builder = jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); + XContentBuilder builder = + jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject(); assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder)); // check mapping again assertSizeMappingEnabled(index, type, true); // update some field in the mapping - XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("_size").field("enabled", false).endObject().endObject(); - PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get(); + XContentBuilder updateMappingBuilder = + jsonBuilder().startObject().startObject("_size").field("enabled", false).endObject().endObject(); + PutMappingResponse putMappingResponse = + client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get(); assertAcked(putMappingResponse); // make sure size field is still in mapping @@ -84,8 +91,10 @@ public class SizeMappingIT extends ESIntegTestCase { } private void assertSizeMappingEnabled(String index, String type, boolean enabled) throws IOException { - String errMsg = String.format(Locale.ROOT, "Expected size field mapping to be " + (enabled ? "enabled" : "disabled") + " for %s/%s", index, type); - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes(type).get(); + String errMsg = String.format(Locale.ROOT, + "Expected size field mapping to be " + (enabled ? 
"enabled" : "disabled") + " for %s/%s", index, type); + GetMappingsResponse getMappingsResponse = + client().admin().indices().prepareGetMappings(index).addTypes(type).get(); Map mappingSource = getMappingsResponse.getMappings().get(index).get(type).getSourceAsMap(); assertThat(errMsg, mappingSource, hasKey("_size")); String sizeAsString = mappingSource.get("_size").toString(); diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index b0802a955df..8cc01aba4bb 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -19,58 +19,48 @@ package org.elasticsearch.index.mapper.size; -import java.util.Collections; -import java.util.Map; +import java.util.Collection; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper; 
+import org.elasticsearch.index.mapper.core.NumberFieldMapper; +import org.elasticsearch.plugin.mapper.MapperSizePlugin; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.junit.Before; - +import org.elasticsearch.test.InternalSettingsPlugin; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.instanceOf; import org.apache.lucene.index.IndexableField; public class SizeMappingTests extends ESSingleNodeTestCase { - - IndexService indexService; - MapperService mapperService; - DocumentMapperParser parser; - - @Before - public void before() { - indexService = createIndex("test"); - IndicesModule indices = newTestIndicesModule(Collections.emptyMap(), - Collections.singletonMap(SizeFieldMapper.NAME, new SizeFieldMapper.TypeParser()) - ); - mapperService = new MapperService(indexService.getIndexSettings(), indexService.analysisService(), indexService.similarityService(), indices.getMapperRegistry(), indexService::newQueryShardContext); - parser = mapperService.documentMapperParser(); + @Override + protected Collection> getPlugins() { + return pluginList(MapperSizePlugin.class, InternalSettingsPlugin.class); } public void testSizeEnabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_size").field("enabled", true).endObject() - .endObject().endObject().string(); - DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping)); + IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=true"); + DocumentMapper docMapper = service.mapperService().documentMapper("type"); BytesReference source = XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .endObject() - .bytes(); + .startObject() + .field("field", "value") + .endObject() + .bytes(); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", 
"type", "1", source)); boolean stored = false; @@ -84,47 +74,82 @@ public class SizeMappingTests extends ESSingleNodeTestCase { } public void testSizeDisabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_size").field("enabled", false).endObject() - .endObject().endObject().string(); - DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping)); + IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=false"); + DocumentMapper docMapper = service.mapperService().documentMapper("type"); BytesReference source = XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .endObject() - .bytes(); + .startObject() + .field("field", "value") + .endObject() + .bytes(); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", source)); assertThat(doc.rootDoc().getField("_size"), nullValue()); } public void testSizeNotSet() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .endObject().endObject().string(); - DocumentMapper docMapper = parser.parse("type", new CompressedXContent(mapping)); + IndexService service = createIndex("test", Settings.EMPTY, "type"); + DocumentMapper docMapper = service.mapperService().documentMapper("type"); BytesReference source = XContentFactory.jsonBuilder() - .startObject() - .field("field", "value") - .endObject() - .bytes(); + .startObject() + .field("field", "value") + .endObject() + .bytes(); ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", source)); assertThat(doc.rootDoc().getField("_size"), nullValue()); } public void testThatDisablingWorksWhenMerging() throws Exception { - String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_size").field("enabled", true).endObject() - .endObject().endObject().string(); - DocumentMapper enabledMapper = 
mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=true"); + DocumentMapper docMapper = service.mapperService().documentMapper("type"); + assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(true)); String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_size").field("enabled", false).endObject() - .endObject().endObject().string(); - DocumentMapper disabledMapper = mapperService.merge("type", new CompressedXContent(disabledMapping), MapperService.MergeReason.MAPPING_UPDATE, false); + .startObject("_size").field("enabled", false).endObject() + .endObject().endObject().string(); + docMapper = service.mapperService().merge("type", new CompressedXContent(disabledMapping), + MapperService.MergeReason.MAPPING_UPDATE, false); - assertThat(disabledMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(false)); + assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(false)); } + + public void testBWCMapper() throws Exception { + { + // IntPoint && docvalues=true for V_5_0_0_alpha5 + IndexService service = createIndex("foo", Settings.EMPTY, "bar", "_size", "enabled=true"); + DocumentMapper docMapper = service.mapperService().documentMapper("bar"); + SizeFieldMapper mapper = docMapper.metadataMapper(SizeFieldMapper.class); + assertThat(mapper.enabled(), is(true)); + MappedFieldType ft = mapper.fieldType(); + assertThat(ft.hasDocValues(), is(true)); + assertThat(mapper.fieldType(), instanceOf(NumberFieldMapper.NumberFieldType.class)); + } + + { + // IntPoint with docvalues=false if version > V_5_0_0_alpha2 && version < V_5_0_0_beta1 + IndexService service = createIndex("foo2", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0_alpha4.id).build(), + "bar", "_size", "enabled=true"); + DocumentMapper 
docMapper = service.mapperService().documentMapper("bar"); + SizeFieldMapper mapper = docMapper.metadataMapper(SizeFieldMapper.class); + assertThat(mapper.enabled(), is(true)); + assertThat(mapper.fieldType().hasDocValues(), is(false)); + assertThat(mapper.fieldType(), instanceOf(NumberFieldMapper.NumberFieldType.class)); + } + + { + // LegacyIntField with docvalues=false if version < V_5_0_0_alpha2 + IndexService service = createIndex("foo3", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0_alpha1.id).build(), + "bar", "_size", "enabled=true"); + DocumentMapper docMapper = service.mapperService().documentMapper("bar"); + SizeFieldMapper mapper = docMapper.metadataMapper(SizeFieldMapper.class); + assertThat(mapper.enabled(), is(true)); + assertThat(mapper.fieldType().hasDocValues(), is(false)); + assertThat(mapper.fieldType(), instanceOf(LegacyNumberFieldMapper.NumberFieldType.class)); + } + } + } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java index 7dc6f3b6a83..ea71dc152f9 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobContainer.java @@ -46,9 +46,6 @@ import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.Map; -/** - * - */ public class S3BlobContainer extends AbstractBlobContainer { protected final S3BlobStore blobStore; @@ -74,7 +71,7 @@ public class S3BlobContainer extends AbstractBlobContainer { }); } catch (AmazonS3Exception e) { return false; - } catch (Throwable e) { + } catch (Exception e) { throw new BlobStoreException("failed to check if blob exists", e); } } diff --git 
a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index 93c19df9c04..d1c43f15adb 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -512,7 +512,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase multiObjectDeleteRequest.setKeys(keys); client.deleteObjects(multiObjectDeleteRequest); } - } catch (Throwable ex) { + } catch (Exception ex) { logger.warn("Failed to delete S3 repository [{}] in [{}]", ex, bucketName, region); } } diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle index fbce12f8126..657a6b7c078 100644 --- a/qa/backwards-5.0/build.gradle +++ b/qa/backwards-5.0/build.gradle @@ -18,6 +18,6 @@ integTest { cluster { numNodes = 2 numBwcNodes = 1 - bwcVersion = "5.0.0-alpha4-SNAPSHOT" // this is the same as the current version until we released the first RC + bwcVersion = "5.0.0-alpha5-SNAPSHOT" // this is the same as the current version until we released the first RC } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SeccompTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SeccompTests.java index a319aaabb70..d028dfd573a 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SeccompTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/SeccompTests.java @@ -40,7 +40,7 @@ public class SeccompTests extends ESTestCase { if (!JNANatives.LOCAL_SECCOMP_ALL) { try { Seccomp.init(createTempDir()); - } catch (Throwable e) { + } catch (Exception e) { throw new RuntimeException("unable to forcefully apply seccomp to test thread", e); } } diff --git 
a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index e5117fa0aa0..af1f311dd23 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -26,7 +26,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.cli.UserError; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -320,7 +320,7 @@ public class InstallPluginCommandTests extends ESTestCase { public void testUnknownPlugin() throws Exception { Tuple env = createEnv(fs, temp); - UserError e = expectThrows(UserError.class, () -> installPlugin("foo", env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin("foo", env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("Unknown plugin foo")); } @@ -350,7 +350,7 @@ public class InstallPluginCommandTests extends ESTestCase { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); String pluginZip = createPlugin("lang-groovy", pluginDir); - UserError e = expectThrows(UserError.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("is a system module")); assertInstallCleaned(env.v2()); } @@ -385,7 +385,7 @@ public class InstallPluginCommandTests extends ESTestCase { Path pluginDir = createPluginDir(temp); String pluginZip = createPlugin("fake", pluginDir); installPlugin(pluginZip, env.v1()); - UserError e = expectThrows(UserError.class, () -> 
installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("already exists")); assertInstallCleaned(env.v2()); } @@ -407,7 +407,7 @@ public class InstallPluginCommandTests extends ESTestCase { Path binDir = pluginDir.resolve("bin"); Files.createFile(binDir); String pluginZip = createPlugin("fake", pluginDir); - UserError e = expectThrows(UserError.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); assertInstallCleaned(env.v2()); } @@ -419,7 +419,7 @@ public class InstallPluginCommandTests extends ESTestCase { Files.createDirectories(dirInBinDir); Files.createFile(dirInBinDir.resolve("somescript")); String pluginZip = createPlugin("fake", pluginDir); - UserError e = expectThrows(UserError.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in bin dir for plugin")); assertInstallCleaned(env.v2()); } @@ -490,7 +490,7 @@ public class InstallPluginCommandTests extends ESTestCase { Path configDir = pluginDir.resolve("config"); Files.createFile(configDir); String pluginZip = createPlugin("fake", pluginDir); - UserError e = expectThrows(UserError.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); assertInstallCleaned(env.v2()); } @@ -502,7 +502,7 @@ public class InstallPluginCommandTests extends ESTestCase { Files.createDirectories(dirInConfigDir); Files.createFile(dirInConfigDir.resolve("myconfig.yml")); String pluginZip = createPlugin("fake", 
pluginDir); - UserError e = expectThrows(UserError.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in config dir for plugin")); assertInstallCleaned(env.v2()); } @@ -534,7 +534,7 @@ public class InstallPluginCommandTests extends ESTestCase { Path pluginDir = createPluginDir(temp); Files.createFile(pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES)); String pluginZip = writeZip(pluginDir, null); - UserError e = expectThrows(UserError.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("`elasticsearch` directory is missing in the plugin zip")); assertInstallCleaned(env.v2()); } @@ -580,16 +580,16 @@ public class InstallPluginCommandTests extends ESTestCase { public void testInstallMisspelledOfficialPlugins() throws Exception { Tuple env = createEnv(fs, temp); - UserError e = expectThrows(UserError.class, () -> installPlugin("xpack", env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin("xpack", env.v1())); assertThat(e.getMessage(), containsString("Unknown plugin xpack, did you mean [x-pack]?")); - e = expectThrows(UserError.class, () -> installPlugin("analysis-smartnc", env.v1())); + e = expectThrows(UserException.class, () -> installPlugin("analysis-smartnc", env.v1())); assertThat(e.getMessage(), containsString("Unknown plugin analysis-smartnc, did you mean [analysis-smartcn]?")); - e = expectThrows(UserError.class, () -> installPlugin("repository", env.v1())); + e = expectThrows(UserException.class, () -> installPlugin("repository", env.v1())); assertThat(e.getMessage(), containsString("Unknown plugin repository, did you mean any of [repository-s3, repository-gcs]?")); - e = expectThrows(UserError.class, () -> 
installPlugin("unknown_plugin", env.v1())); + e = expectThrows(UserException.class, () -> installPlugin("unknown_plugin", env.v1())); assertThat(e.getMessage(), containsString("Unknown plugin unknown_plugin")); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index 3a4639fa839..e2910be64f0 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -27,7 +27,7 @@ import java.util.HashMap; import java.util.Map; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.cli.UserError; +import org.elasticsearch.cli.UserException; import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -72,7 +72,7 @@ public class RemovePluginCommandTests extends ESTestCase { } public void testMissing() throws Exception { - UserError e = expectThrows(UserError.class, () -> removePlugin("dne", home)); + UserException e = expectThrows(UserException.class, () -> removePlugin("dne", home)); assertTrue(e.getMessage(), e.getMessage().contains("plugin dne not found")); assertRemoveCleaned(env); } @@ -102,7 +102,7 @@ public class RemovePluginCommandTests extends ESTestCase { public void testBinNotDir() throws Exception { Files.createDirectories(env.pluginsFile().resolve("elasticsearch")); - UserError e = expectThrows(UserError.class, () -> removePlugin("elasticsearch", home)); + UserException e = expectThrows(UserException.class, () -> removePlugin("elasticsearch", home)); assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); assertTrue(Files.exists(env.pluginsFile().resolve("elasticsearch"))); // did not remove assertTrue(Files.exists(env.binFile().resolve("elasticsearch"))); diff --git 
a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 4199a5d67cd..e7b5d1c4501 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -23,10 +23,10 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; @@ -65,14 +65,14 @@ public class TribeUnitTests extends ESTestCase { .put(baseSettings) .put("cluster.name", "tribe1") .put("node.name", "tribe1_node") - .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build()).start(); tribe2 = new TribeClientNode( Settings.builder() .put(baseSettings) .put("cluster.name", "tribe2") .put("node.name", "tribe2_node") - .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) + .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) .build()).start(); } diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml index d4fa8d8d130..19b2a7b5dd9 100644 --- a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml +++ b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml @@ -1,5 +1,5 @@ cluster.name: tribe_node_cluster tribe.t1.cluster.name: tribe1 
tribe.t2.cluster.name: tribe2 -tribe.t1.node_id.seed: 1 -tribe.t2.node_id.seed: 2 +tribe.t1.node.id.seed: 1 +tribe.t2.node.id.seed: 2 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/template.msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json similarity index 97% rename from rest-api-spec/src/main/resources/rest-api-spec/api/template.msearch.json rename to rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json index 379b7bdf362..39aa53b2572 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/template.msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json @@ -1,5 +1,5 @@ { - "template.msearch": { + "msearch_template": { "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html", "methods": ["GET", "POST"], "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index d2b9b8cf9b4..21fda8dc805 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -38,13 +38,17 @@ "type" : "boolean", "description" : "Specify whether to return detailed information about score computation as part of a hit" }, - "fields": { + "stored_fields": { "type" : "list", - "description" : "A comma-separated list of fields to return as part of a hit" + "description" : "A comma-separated list of stored fields to return as part of a hit" + }, + "docvalue_fields": { + "type" : "list", + "description" : "A comma-separated list of fields to return as the docvalue representation of a field for each hit" }, "fielddata_fields": { "type" : "list", - "description" : "A comma-separated list of fields to return as the field data representation of a field for each hit" + "description" : "A comma-separated list of fields to return as the docvalue representation of a 
field for each hit" }, "from": { "type" : "number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/template.search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/template.search.json deleted file mode 100644 index 6a2a8c1d7e2..00000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/template.search.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "template.search": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html", - "methods": ["GET", "POST"], - "url": { - "path": "/_search/template", - "paths": ["/_search/template", "/{index}/_search/template", "/{index}/{type}/_search/template"], - "parts": { - "index": { - "type" : "list", - "description" : "A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" - }, - "type": { - "type" : "list", - "description" : "A comma-separated list of document types to search; leave empty to perform the operation on all types" - } - }, - "params" : { - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
- }, - "preference": { - "type" : "string", - "description" : "Specify the node or shard the operation should be performed on (default: random)" - }, - "routing": { - "type" : "list", - "description" : "A comma-separated list of specific routing values" - }, - "scroll": { - "type" : "duration", - "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" - }, - "search_type": { - "type" : "enum", - "options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch"], - "description" : "Search operation type" - } - } - }, - "body": { - "description": "The search definition template and its params" - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml index 84bf44f7392..424153aa573 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml @@ -95,7 +95,7 @@ setup: - do: search: body: - fields: [ include.field2 ] + stored_fields: [ include.field2 ] query: { match_all: {} } - is_false: hits.hits.0._source @@ -104,7 +104,7 @@ setup: - do: search: body: - fields: [ include.field2, _source ] + stored_fields: [ include.field2, _source ] query: { match_all: {} } - match: { hits.hits.0._source.include.field2: v2 } - is_true: hits.hits.0._source @@ -113,5 +113,5 @@ setup: "fielddata_fields": - do: search: - fielddata_fields: [ "count" ] + docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yaml index df7322f12c8..993cbed2647 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yaml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/search/issue4895.yaml @@ -31,6 +31,6 @@ setup: term: data: some preference: _local - fields: [user,amount] + stored_fields: [user,amount] diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java index aa327ae2546..f08f15f236b 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java @@ -53,12 +53,12 @@ abstract class ESElasticsearchCliTestCase extends ESTestCase { assertThat(status, equalTo(expectedStatus)); assertThat(init.get(), equalTo(expectedInit)); outputConsumer.accept(terminal.getOutput()); - } catch (Throwable t) { + } catch (Exception e) { // if an unexpected exception is thrown, we log // terminal output to aid debugging logger.info(terminal.getOutput()); // rethrow so the test fails - throw t; + throw e; } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index 128b0d0e315..45edbd8bcb2 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -32,7 +32,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.plugins.Plugin; @@ -67,7 +67,7 
@@ public class MockInternalClusterInfoService extends InternalClusterInfoService { usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes()); paths[0] = path; FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), null, paths); - return new NodeStats(new DiscoveryNode(nodeName, DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT), + return new NodeStats(new DiscoveryNode(nodeName, LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT), System.currentTimeMillis(), null, null, null, null, null, fsInfo, diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index 933f26e6e81..4440fbe117d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -49,7 +49,7 @@ public class BackgroundIndexer implements AutoCloseable { final Thread[] writers; final CountDownLatch stopLatch; - final CopyOnWriteArrayList failures; + final CopyOnWriteArrayList failures; final AtomicBoolean stop = new AtomicBoolean(false); final AtomicLong idGenerator = new AtomicLong(); final AtomicLong indexCounter = new AtomicLong(); @@ -169,7 +169,7 @@ public class BackgroundIndexer implements AutoCloseable { } } logger.info("**** done indexing thread {} stop: {} numDocsIndexed: {}", indexerId, stop.get(), indexCounter.get()); - } catch (Throwable e) { + } catch (Exception e) { failures.add(e); logger.warn("**** failed indexing thread {} on doc id {}", e, indexerId, id); } finally { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index fe7ba74a327..a6d35930e6b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -21,7 +21,6 @@ package org.elasticsearch.test; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.NodeConnectionsService; @@ -30,7 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; @@ -46,7 +45,7 @@ public class ClusterServiceUtils { ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); - clusterService.setLocalNode(new DiscoveryNode("node", DummyTransportAddress.INSTANCE, Collections.emptyMap(), + clusterService.setLocalNode(new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())),Version.CURRENT)); clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { @Override @@ -98,8 +97,8 @@ public class ClusterServiceUtils { } @Override - public void onFailure(String source, Throwable t) { - fail("unexpected exception" + t); + public void onFailure(String source, Exception e) { + fail("unexpected exception" + e); } }); try { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java index 5704a178f48..1aa0428454e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java @@ -42,7 +42,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationD import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.gateway.AsyncShardFetch; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.ReplicaShardAllocator; @@ -133,19 +133,19 @@ public abstract class ESAllocationTestCase extends ESTestCase { } protected static DiscoveryNode newNode(String nodeName, String nodeId, Map attributes) { - return new DiscoveryNode(nodeName, nodeId, DummyTransportAddress.INSTANCE, attributes, MASTER_DATA_ROLES, Version.CURRENT); + return new DiscoveryNode(nodeName, nodeId, LocalTransportAddress.buildUnique(), attributes, MASTER_DATA_ROLES, Version.CURRENT); } protected static DiscoveryNode newNode(String nodeId, Map attributes) { - return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, attributes, MASTER_DATA_ROLES, Version.CURRENT); + return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), attributes, MASTER_DATA_ROLES, Version.CURRENT); } protected static DiscoveryNode newNode(String nodeId, Set roles) { - return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, emptyMap(), roles, Version.CURRENT); + return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), emptyMap(), roles, Version.CURRENT); } protected static DiscoveryNode newNode(String nodeId, Version version) { - return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, 
emptyMap(), MASTER_DATA_ROLES, version); + return new DiscoveryNode(nodeId, LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, version); } protected static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index d13f69a3765..6d07a85e50b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -955,7 +955,7 @@ public abstract class ESIntegTestCase extends ESTestCase { client().admin().indices().prepareRefresh().get(); } lastKnownCount.set(count); - } catch (Throwable e) { // count now acts like search and barfs if all shards failed... + } catch (Exception e) { // count now acts like search and barfs if all shards failed... logger.debug("failed to executed count", e); return false; } @@ -1333,7 +1333,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } final String[] indices = indicesSet.toArray(new String[indicesSet.size()]); Collections.shuffle(builders, random()); - final CopyOnWriteArrayList> errors = new CopyOnWriteArrayList<>(); + final CopyOnWriteArrayList> errors = new CopyOnWriteArrayList<>(); List inFlightAsyncOperations = new ArrayList<>(); // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk. if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? 
rarely() : false) { @@ -1366,8 +1366,8 @@ public abstract class ESIntegTestCase extends ESTestCase { for (CountDownLatch operation : inFlightAsyncOperations) { operation.await(); } - final List actualErrors = new ArrayList<>(); - for (Tuple tuple : errors) { + final List actualErrors = new ArrayList<>(); + for (Tuple tuple : errors) { if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) { tuple.v1().execute().actionGet(); // re-index if rejected } else { @@ -1525,7 +1525,7 @@ public abstract class ESIntegTestCase extends ESTestCase { } @Override - public final void onFailure(Throwable t) { + public final void onFailure(Exception t) { try { logger.info("Action Failed", t); addError(t); @@ -1534,24 +1534,24 @@ public abstract class ESIntegTestCase extends ESTestCase { } } - protected void addError(Throwable t) { + protected void addError(Exception e) { } } private class PayloadLatchedActionListener extends LatchedActionListener { - private final CopyOnWriteArrayList> errors; + private final CopyOnWriteArrayList> errors; private final T builder; - public PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList> errors) { + public PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList> errors) { super(latch); this.errors = errors; this.builder = builder; } @Override - protected void addError(Throwable t) { - errors.add(new Tuple<>(builder, t)); + protected void addError(Exception e) { + errors.add(new Tuple<>(builder, e)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 11bad415764..620727e255d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -40,7 +40,6 @@ import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.cluster.node.DiscoveryNodeService; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.ShardRouting; @@ -606,7 +605,7 @@ public final class InternalTestCluster extends TestCluster { .put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home .put(settings) .put("node.name", name) - .put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), seed) + .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed) .build(); MockNode node = new MockNode(finalSettings, plugins); return new NodeAndClient(name, node, nodeId); @@ -898,8 +897,8 @@ public final class InternalTestCluster extends TestCluster { } private void createNewNode(final Settings newSettings) { - final long newIdSeed = DiscoveryNodeService.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id - Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(DiscoveryNodeService.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); + final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id + Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); Collection> plugins = node.getPlugins(); node = new MockNode(finalSettings, plugins); markNodeDataDirsAsNotEligableForWipe(node); @@ -1393,7 +1392,6 @@ public final class InternalTestCluster extends TestCluster { // delete data folders now, before we start other nodes that may claim it nodeAndClient.clearDataIfNeeded(callback); - DiscoveryNode discoveryNode = getInstanceFromNode(ClusterService.class, 
nodeAndClient.node()).localNode(); nodesRoleOrder[nodeAndClient.nodeAndClientId()] = discoveryNode.getRoles(); nodesByRoles.computeIfAbsent(discoveryNode.getRoles(), k -> new ArrayList<>()).add(nodeAndClient); @@ -1481,7 +1479,7 @@ public final class InternalTestCluster extends TestCluster { Client client = viaNode != null ? client(viaNode) : client(); ClusterState state = client.admin().cluster().prepareState().execute().actionGet().getState(); return state.nodes().getMasterNode().getName(); - } catch (Throwable e) { + } catch (Exception e) { logger.warn("Can't fetch cluster state", e); throw new RuntimeException("Can't get master node " + e.getMessage(), e); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 5525baf4206..d09c763322c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -44,7 +44,7 @@ public class VersionUtils { try { Version object = (Version) field.get(null); ids.add(object.id); - } catch (Throwable e) { + } catch (IllegalAccessException e) { throw new RuntimeException(e); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java index 6ff45608700..1d91b0980e4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java @@ -50,8 +50,8 @@ public class NoOpClient extends AbstractClient { public void close() { try { ThreadPool.terminate(threadPool(), 10, TimeUnit.SECONDS); - } catch (Throwable t) { - throw new ElasticsearchException(t.getMessage(), t); + } catch (Exception e) { + throw new ElasticsearchException(e.getMessage(), e); } } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java index cbcb9766943..956088f0fd1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java @@ -76,8 +76,8 @@ public class BlockClusterStateProcessing extends SingleNodeDisruption { } @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected error during disruption", t); + public void onFailure(String source, Exception e) { + logger.error("unexpected error during disruption", e); } }); try { diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java index be0b69a8e8b..f69c0a3085d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java @@ -124,7 +124,7 @@ public class SlowClusterStateProcessing extends SingleNodeDisruption { } @Override - public void onFailure(String source, Throwable t) { + public void onFailure(String source, Exception e) { countDownLatch.countDown(); } }); diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java index 73281b3f6ea..37ed43b9450 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java @@ -62,18 +62,18 @@ public class ThrowingLeafReaderWrapper extends FilterLeafReader { * A callback interface 
that allows to throw certain exceptions for * methods called on the IndexReader that is wrapped by {@link ThrowingLeafReaderWrapper} */ - public static interface Thrower { + public interface Thrower { /** * Maybe throws an exception ;) */ - public void maybeThrow(Flags flag) throws IOException; + void maybeThrow(Flags flag) throws IOException; /** * If this method returns true the {@link Terms} instance for the given field * is wrapped with Thrower support otherwise no exception will be thrown for * the current {@link Terms} instance or any other instance obtained from it. */ - public boolean wrapTerms(String field); + boolean wrapTerms(String field); } public ThrowingLeafReaderWrapper(LeafReader in, Thrower thrower) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 82f2346c421..24af2b72bd1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -95,9 +95,6 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -/** - * - */ public class ElasticsearchAssertions { public static void assertAcked(AcknowledgedRequestBuilder builder) { @@ -555,7 +552,6 @@ public class ElasticsearchAssertions { extraInfo += " with status [" + status + "]"; } - try { future.actionGet(); fail = true; @@ -565,7 +561,7 @@ public class ElasticsearchAssertions { if (status != null) { assertThat(extraInfo, ExceptionsHelper.status(esException), equalTo(status)); } - } catch (Throwable e) { + } catch (Exception e) { assertThat(extraInfo, e, instanceOf(exceptionClass)); if (status != null) { assertThat(extraInfo, ExceptionsHelper.status(e), equalTo(status)); @@ -597,7 +593,7 @@ public class ElasticsearchAssertions { try { 
future.actionGet(); fail = true; - } catch (Throwable e) { + } catch (Exception e) { assertThat(extraInfo, ExceptionsHelper.status(e), equalTo(status)); } // has to be outside catch clause to get a proper message @@ -657,35 +653,38 @@ public class ElasticsearchAssertions { equalTo(0)); assertThat("Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable + "]", serialize(version, streamable), equalTo(orig)); - } catch (Throwable ex) { + } catch (Exception ex) { throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex); } } - public static void assertVersionSerializable(Version version, final Throwable t) { - ElasticsearchAssertions.assertVersionSerializable(version, new ThrowableWrapper(t)); + public static void assertVersionSerializable(Version version, final Exception e) { + ElasticsearchAssertions.assertVersionSerializable(version, new ExceptionWrapper(e)); } - public static final class ThrowableWrapper implements Streamable { - Throwable throwable; - public ThrowableWrapper(Throwable t) { - throwable = t; + public static final class ExceptionWrapper implements Streamable { + + private Exception exception; + + public ExceptionWrapper(Exception e) { + exception = e; } - public ThrowableWrapper() { - throwable = null; + public ExceptionWrapper() { + exception = null; } @Override public void readFrom(StreamInput in) throws IOException { - throwable = in.readThrowable(); + exception = in.readException(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeThrowable(throwable); + out.writeException(exception); } + } @@ -697,7 +696,7 @@ public class ElasticsearchAssertions { assertThat(constructor, Matchers.notNullValue()); Streamable newInstance = constructor.newInstance(); return newInstance; - } catch (Throwable e) { + } catch (Exception e) { return null; } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java index a4218f83340..f687f2b39bf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java @@ -71,7 +71,7 @@ public class Stash implements ToXContent { * as arguments for following requests (e.g. scroll_id) */ public boolean containsStashedValue(Object key) { - if (key == null) { + if (key == null || false == key instanceof CharSequence) { return false; } String stashKey = key.toString(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java index 106ff5176c7..c6ea48fd6ef 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java @@ -75,7 +75,7 @@ public class RestSpec { } restSpec.addApi(restApi); } - } catch (Throwable ex) { + } catch (Exception ex) { throw new IOException("Can't parse rest spec file: [" + jsonFile + "]", ex); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java index b0d16d10c49..ec695e8bd41 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java @@ -50,8 +50,8 @@ public class MockTaskManager extends TaskManager { for (MockTaskManagerListener listener : listeners) { try { listener.onTaskRegistered(task); - } catch (Throwable t) { - logger.warn("failed to notify task manager listener about unregistering the task with id {}", t, task.getId()); + } catch (Exception e) { + logger.warn("failed to notify task manager listener about unregistering the task with 
id {}", e, task.getId()); } } } @@ -65,8 +65,8 @@ public class MockTaskManager extends TaskManager { for (MockTaskManagerListener listener : listeners) { try { listener.onTaskUnregistered(task); - } catch (Throwable t) { - logger.warn("failed to notify task manager listener about unregistering the task with id {}", t, task.getId()); + } catch (Exception e) { + logger.warn("failed to notify task manager listener about unregistering the task with id {}", e, task.getId()); } } } else { @@ -80,8 +80,8 @@ public class MockTaskManager extends TaskManager { for (MockTaskManagerListener listener : listeners) { try { listener.waitForTaskCompletion(task); - } catch (Throwable t) { - logger.warn("failed to notify task manager listener about waitForTaskCompletion the task with id {}", t, task.getId()); + } catch (Exception e) { + logger.warn("failed to notify task manager listener about waitForTaskCompletion the task with id {}", e, task.getId()); } } super.waitForTaskCompletion(task, untilInNanos); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 41c27ecbf97..bc371ca02d1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,7 +20,6 @@ package org.elasticsearch.test.transport; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.transport.TransportService; @@ -34,7 +33,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; import 
org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -52,7 +50,6 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.local.LocalTransport; import org.elasticsearch.transport.netty.NettyTransport; @@ -387,7 +384,7 @@ public class MockTransportService extends TransportService { threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { @Override - public void onFailure(Throwable e) { + public void onFailure(Exception e) { logger.debug("failed to send delayed request", e); } @@ -639,10 +636,10 @@ public class MockTransportService extends TransportService { } @Override - protected void traceResponseSent(long requestId, String action, Throwable t) { - super.traceResponseSent(requestId, action, t); + protected void traceResponseSent(long requestId, String action, Exception e) { + super.traceResponseSent(requestId, action, e); for (Tracer tracer : activeTracers) { - tracer.responseSent(requestId, action, t); + tracer.responseSent(requestId, action, e); } } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/test/StashTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/StashTests.java new file mode 100644 index 00000000000..7d0c0598f09 --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/test/StashTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest.test; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.Stash; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.singletonMap; + +public class StashTests extends ESTestCase { + public void testReplaceStashedValuesEmbeddedStashKey() throws IOException { + Stash stash = new Stash(); + stash.stashValue("stashed", "bar"); + + Map expected = new HashMap<>(); + expected.put("key", singletonMap("a", "foobar")); + Map map = new HashMap<>(); + Map map2 = new HashMap<>(); + map2.put("a", "foo${stashed}"); + map.put("key", map2); + + Map actual = stash.replaceStashedValues(map); + assertEquals(expected, actual); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index 7253c4d26e7..5bf11e4dc98 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -178,7 +178,7 @@ public class InternalTestClusterTests extends ESTestCase { int maxNumDataNodes = 2; final String clusterName1 = "shared1"; NodeConfigurationSource nodeConfigurationSource = NodeConfigurationSource.EMPTY; - 
int numClientNodes = 0; + int numClientNodes = randomIntBetween(0, 2); boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "test"; Path baseDir = createTempDir(); @@ -218,8 +218,7 @@ public class InternalTestClusterTests extends ESTestCase { assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned assertFileExists(stableTestMarker); // but leaving the structure of existing, reused nodes for (String name: cluster.getNodeNames()) { - assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), - equalTo(shardNodePaths.get(name))); + assertThat("data paths for " + name + " changed", getNodePaths(cluster, name), equalTo(shardNodePaths.get(name))); } cluster.beforeTest(random(), 0.0);