From 482717b2c7f4fabd800b51bf4b19aeee8b307545 Mon Sep 17 00:00:00 2001 From: Mathias Fussenegger Date: Thu, 30 Apr 2015 10:20:55 +0200 Subject: [PATCH 01/19] prevent over allocation for multicast ping request very tiny optimization but the BytesStreamOutput ctor by default allocates 16KB which is way too much for a ping request. The actual size of the request depends on the clusterName so 150 isn't accurate either but should be enough. --- .../discovery/zen/ping/multicast/MulticastZenPing.java | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java index 0d5ca7260b4..85eb221a73e 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen.ping.multicast; import org.apache.lucene.util.Constants; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -29,7 +28,10 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.io.stream.*; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.MulticastChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -64,6 +66,8 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem private static final byte[] INTERNAL_HEADER = new byte[]{1, 9, 8, 4}; + private static final int PING_SIZE_ESTIMATE = 150; + private final String address; private final int port; private final String group; @@ -248,7 +252,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem private void sendPingRequest(int id) { try { - BytesStreamOutput out = new BytesStreamOutput(); + BytesStreamOutput out = new BytesStreamOutput(PING_SIZE_ESTIMATE); out.writeBytes(INTERNAL_HEADER); // TODO: change to min_required version! Version.writeVersion(version, out); From defe2c305cdc54b051d197e2204d9a160084d0ab Mon Sep 17 00:00:00 2001 From: aleph-zero Date: Thu, 30 Apr 2015 13:47:24 -0700 Subject: [PATCH 02/19] Read configuration file with .yaml suffix Fixes a bug whereby we failed to read an elasticsearch config file with the .yaml extension. This commit allows elasticsearch config files to be suffixed with: .yml, .yaml, .json, .properties. 
Closes #9706 --- .../internal/InternalSettingsPreparer.java | 26 +++++++------------ 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index fcb6f3919c6..f940c15c042 100644 --- a/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -19,6 +19,7 @@ package org.elasticsearch.node.internal; +import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Names; import org.elasticsearch.common.Strings; @@ -28,6 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.FailedToResolveConfigException; +import java.util.List; import java.util.Map; import static org.elasticsearch.common.Strings.cleanPath; @@ -38,6 +40,8 @@ import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilde */ public class InternalSettingsPreparer { + static final List ALLOWED_SUFFIXES = ImmutableList.of(".yml", ".yaml", ".json", ".properties"); + public static Tuple prepareSettings(Settings pSettings, boolean loadConfigSettings) { // ignore this prefixes when getting properties from es. and elasticsearch. String[] ignorePrefixes = new String[]{"es.default.", "elasticsearch.default."}; @@ -73,22 +77,12 @@ public class InternalSettingsPreparer { } } if (loadFromEnv) { - try { - settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.yml")); - } catch (FailedToResolveConfigException e) { - // ignore - } catch (NoClassDefFoundError e) { - // ignore, no yaml - } - try { - settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.json")); - } catch (FailedToResolveConfigException e) { - // ignore - } - try { - settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.properties")); - } catch (FailedToResolveConfigException e) { - // ignore + for (String allowedSuffix : ALLOWED_SUFFIXES) { + try { + settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch" + allowedSuffix)); + } catch (FailedToResolveConfigException e) { + // ignore + } } } } From 4a13a56058465e9b9cff0e9d435b24f1a37d5826 Mon Sep 17 00:00:00 2001 From: aleph-zero Date: Thu, 7 May 2015 13:26:42 -0700 Subject: [PATCH 03/19] Test for reading alternate suffixes --- .../node/internal/InternalSettingsPreparerTests.java | 12 ++++++++++++ src/test/resources/config/elasticsearch.json | 3 +++ src/test/resources/config/elasticsearch.properties | 2 ++ src/test/resources/config/elasticsearch.yaml | 3 +++ 4 files changed, 20 insertions(+) create mode 100644 src/test/resources/config/elasticsearch.json create mode 100644 src/test/resources/config/elasticsearch.properties create mode 100644 src/test/resources/config/elasticsearch.yaml diff --git a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java index 8db6fd4e5c0..d78644e5fb5 100644 --- a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java +++ b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java @@ -52,4 +52,16 @@ public class InternalSettingsPreparerTests extends ElasticsearchTestCase { // Should use setting from the system property assertThat(tuple.v1().get("node.zone"), equalTo("bar")); 
} + + @Test + public void testAlternateConfigFileSuffixes() { + // test that we can read config files with .yaml, .json, and .properties suffixes + Tuple tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder() + .put("config.ignore_system_properties", true) + .build(), true); + + assertThat(tuple.v1().get("yaml.config.exists"), equalTo("true")); + assertThat(tuple.v1().get("json.config.exists"), equalTo("true")); + assertThat(tuple.v1().get("properties.config.exists"), equalTo("true")); + } } diff --git a/src/test/resources/config/elasticsearch.json b/src/test/resources/config/elasticsearch.json new file mode 100644 index 00000000000..16433a2c88d --- /dev/null +++ b/src/test/resources/config/elasticsearch.json @@ -0,0 +1,3 @@ +{ + "json.config.exists" : "true" +} diff --git a/src/test/resources/config/elasticsearch.properties b/src/test/resources/config/elasticsearch.properties new file mode 100644 index 00000000000..d3f822cafb5 --- /dev/null +++ b/src/test/resources/config/elasticsearch.properties @@ -0,0 +1,2 @@ + +properties.config.exists: true diff --git a/src/test/resources/config/elasticsearch.yaml b/src/test/resources/config/elasticsearch.yaml new file mode 100644 index 00000000000..b6ebc6bd105 --- /dev/null +++ b/src/test/resources/config/elasticsearch.yaml @@ -0,0 +1,3 @@ + +yaml.config.exists: true + From aa98ae5240cf8c2d18abb50ab310ff414d3df9a2 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 22 May 2015 19:28:49 +0200 Subject: [PATCH 04/19] Search: Do not specialize TermQuery vs. TermsQuery. We had some code that created a TermQuery instead of a TermsQuery when there was a single term, which is not useful anymore now that TermsQuery rewrites to a disjunction when there are few terms: https://issues.apache.org/jira/browse/LUCENE-6360 --- .../mapper/core/AbstractFieldMapper.java | 20 ++++--------------- .../index/mapper/internal/IdFieldMapper.java | 6 +----- .../string/SimpleStringMappingTests.java | 14 ------------- 3 files changed, 5 insertions(+), 35 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index 5c26e0c14b4..b1d1d3395e4 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -479,23 +479,11 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Query termsQuery(List values, @Nullable QueryParseContext context) { - switch (values.size()) { - case 0: - return Queries.newMatchNoDocsQuery(); - case 1: - // When there is a single term, it's important to return a term filter so that - // it can return a DocIdSet that is directly backed by a postings list, instead - // of loading everything into a bit set and returning an iterator based on the - // bit set - return termQuery(values.get(0), context); - default: - BytesRef[] bytesRefs = new BytesRef[values.size()]; - for (int i = 0; i < bytesRefs.length; i++) { - bytesRefs[i] = indexedValueForSearch(values.get(i)); - } - return new TermsQuery(names.indexName(), bytesRefs); - + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + bytesRefs[i] = indexedValueForSearch(values.get(i)); } + return new TermsQuery(names.indexName(), bytesRefs); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java 
b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index cd59591b6bb..b7438540c40 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -188,11 +188,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements RootMapper { return super.termQuery(value, context); } final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value); - if (uids.length == 1) { - return new TermQuery(new Term(UidFieldMapper.NAME, uids[0])); - } else { - return new TermsQuery(UidFieldMapper.NAME, uids); - } + return new TermsQuery(UidFieldMapper.NAME, uids); } @Override diff --git a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 9baa2ca6afc..3363518b8e4 100644 --- a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -519,18 +519,4 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest { assertTrue(mergeResult.buildConflicts()[0].contains("cannot enable norms")); } - public void testTermsQuery() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").endObject().endObject() - .endObject().endObject().string(); - - DocumentMapper defaultMapper = parser.parse(mapping); - FieldMapper mapper = defaultMapper.mappers().getMapper("field"); - assertNotNull(mapper); - assertTrue(mapper instanceof StringFieldMapper); - assertEquals(Queries.newMatchNoDocsQuery(), mapper.termsQuery(Collections.emptyList(), null)); - assertEquals(new TermQuery(new Term("field", "value")), mapper.termsQuery(Collections.singletonList("value"), null)); - assertEquals(new TermsQuery(new Term("field", "value1"), new Term("field", "value2")), mapper.termsQuery(Arrays.asList("value1", "value2"), null)); - } - } From 098c01d86c5c2fbd28cdcc86db5646fcac9edd69 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 27 May 2015 18:25:38 +0200 Subject: [PATCH 05/19] Internal: remove unused code. 
--- .../allocator/BalancedShardsAllocator.java | 3 +- .../common/collect/BoundedTreeSet.java | 63 --- .../common/collect/IdentityHashSet.java | 201 ---------- .../common/collect/ImmutableOpenLongMap.java | 376 ------------------ .../common/collect/Iterators2.java | 65 --- .../common/lucene/HashedBytesRef.java | 85 ---- .../ThreadSafeInputStreamIndexInput.java | 39 -- .../elasticsearch/common/unit/Percent.java | 64 --- .../common/collect/Iterators2Tests.java | 50 --- 9 files changed, 1 insertion(+), 945 deletions(-) delete mode 100644 src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java delete mode 100644 src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java delete mode 100644 src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java delete mode 100644 src/main/java/org/elasticsearch/common/collect/Iterators2.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java delete mode 100644 src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java delete mode 100644 src/main/java/org/elasticsearch/common/unit/Percent.java delete mode 100644 src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 11f3e45653f..d4a268d522b 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.common.collect.IdentityHashSet; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; @@ -597,7 +596,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards int secondaryLength = 0; int primaryLength = primary.length; ArrayUtil.timSort(primary, comparator); - final Set throttledNodes = new IdentityHashSet<>(); + final Set throttledNodes = Collections.newSetFromMap(new IdentityHashMap()); do { for (int i = 0; i < primaryLength; i++) { MutableShardRouting shard = primary[i]; diff --git a/src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java b/src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java deleted file mode 100644 index 2adec0f283d..00000000000 --- a/src/main/java/org/elasticsearch/common/collect/BoundedTreeSet.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.collect; - -import java.util.Collection; -import java.util.Comparator; -import java.util.TreeSet; - -/** - * A {@link TreeSet} that is bounded by size. - * - * - */ -public class BoundedTreeSet extends TreeSet { - - private final int size; - - public BoundedTreeSet(int size) { - this.size = size; - } - - public BoundedTreeSet(Comparator comparator, int size) { - super(comparator); - this.size = size; - } - - @Override - public boolean add(E e) { - boolean result = super.add(e); - rebound(); - return result; - } - - @Override - public boolean addAll(Collection c) { - boolean result = super.addAll(c); - rebound(); - return result; - } - - private void rebound() { - while (size() > size) { - remove(last()); - } - } -} diff --git a/src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java b/src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java deleted file mode 100644 index e3e4834519b..00000000000 --- a/src/main/java/org/elasticsearch/common/collect/IdentityHashSet.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.collect; - -import java.util.*; - -/** - * - */ -public class IdentityHashSet - extends AbstractSet - implements Set, Cloneable, java.io.Serializable { - - static final long serialVersionUID = -5024744406713321677L; - - private transient IdentityHashMap map; - - // Dummy value to associate with an Object in the backing Map - private static final Object PRESENT = new Object(); - - public IdentityHashSet() { - map = new IdentityHashMap<>(); - } - - public IdentityHashSet(Collection c) { - map = new IdentityHashMap<>(Math.max((int) (c.size() / .75f) + 1, 16)); - addAll(c); - } - - public IdentityHashSet(int expectedSize) { - map = new IdentityHashMap<>(expectedSize); - } - - /** - * Returns an iterator over the elements in this set. The elements - * are returned in no particular order. - * - * @return an Iterator over the elements in this set - * @see ConcurrentModificationException - */ - @Override - public Iterator iterator() { - return map.keySet().iterator(); - } - - /** - * Returns the number of elements in this set (its cardinality). 
- * - * @return the number of elements in this set (its cardinality) - */ - @Override - public int size() { - return map.size(); - } - - /** - * Returns true if this set contains no elements. - * - * @return true if this set contains no elements - */ - @Override - public boolean isEmpty() { - return map.isEmpty(); - } - - /** - * Returns true if this set contains the specified element. - * More formally, returns true if and only if this set - * contains an element e such that - * (o==e). - * - * @param o element whose presence in this set is to be tested - * @return true if this set contains the specified element - */ - @Override - public boolean contains(Object o) { - return map.containsKey(o); - } - - /** - * Adds the specified element to this set if it is not already present. - * More formally, adds the specified element e to this set if - * this set contains no element e2 such that - * (e==e2). - * If this set already contains the element, the call leaves the set - * unchanged and returns false. - * - * @param e element to be added to this set - * @return true if this set did not already contain the specified - * element - */ - @Override - public boolean add(E e) { - return map.put(e, PRESENT) == null; - } - - /** - * Removes the specified element from this set if it is present. - * More formally, removes an element e such that - * (o==e), - * if this set contains such an element. Returns true if - * this set contained the element (or equivalently, if this set - * changed as a result of the call). (This set will not contain the - * element once the call returns.) - * - * @param o object to be removed from this set, if present - * @return true if the set contained the specified element - */ - @Override - public boolean remove(Object o) { - return map.remove(o) == PRESENT; - } - - /** - * Removes all of the elements from this set. - * The set will be empty after this call returns. - */ - @Override - public void clear() { - map.clear(); - } - - /** - * Returns a shallow copy of this HashSet instance: the elements - * themselves are not cloned. - * - * @return a shallow copy of this set - */ - @Override - public Object clone() { - try { - IdentityHashSet newSet = (IdentityHashSet) super.clone(); - newSet.map = (IdentityHashMap) map.clone(); - return newSet; - } catch (CloneNotSupportedException e) { - throw new InternalError(); - } - } - - /** - * Index the state of this HashSet instance to a stream (that is, - * serialize it). - * - * @serialData The capacity of the backing HashMap instance - * (int), and its load factor (float) are emitted, followed by - * the size of the set (the number of elements it contains) - * (int), followed by all of its elements (each an Object) in - * no particular order. - */ - private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - // Write out any hidden serialization magic - s.defaultWriteObject(); - - // Write out size - s.writeInt(map.size()); - - // Write out all elements in the proper order. - for (Iterator i = map.keySet().iterator(); i.hasNext(); ) - s.writeObject(i.next()); - } - - /** - * Reconstitute the HashSet instance from a stream (that is, - * deserialize it). - */ - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - // Read in any hidden serialization magic - s.defaultReadObject(); - - // Read in size - int size = s.readInt(); - - map = new IdentityHashMap<>(size); - - // Read in all elements in the proper order. 
- for (int i = 0; i < size; i++) { - E e = (E) s.readObject(); - map.put(e, PRESENT); - } - } -} - diff --git a/src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java b/src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java deleted file mode 100644 index 571b54ebeeb..00000000000 --- a/src/main/java/org/elasticsearch/common/collect/ImmutableOpenLongMap.java +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.collect; - -import com.carrotsearch.hppc.*; -import com.carrotsearch.hppc.cursors.LongCursor; -import com.carrotsearch.hppc.cursors.LongObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.predicates.IntObjectPredicate; -import com.carrotsearch.hppc.predicates.LongObjectPredicate; -import com.carrotsearch.hppc.predicates.LongPredicate; -import com.carrotsearch.hppc.procedures.LongObjectProcedure; -import com.google.common.collect.UnmodifiableIterator; - -import java.util.Iterator; -import java.util.Map; - -/** - * An immutable map implementation based on open hash map. - *
- * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenLongMap)} (which is an optimized - * option to copy over existing content and modify it). - */ -public final class ImmutableOpenLongMap implements Iterable> { - - private final LongObjectHashMap map; - - private ImmutableOpenLongMap(LongObjectHashMap map) { - this.map = map; - } - - /** - * @return Returns the value associated with the given key or the default value - * for the key type, if the key is not associated with any value. - *
- * Important note: For primitive type values, the value returned for a non-existing - * key may not be the default value of the primitive type (it may be any value previously - * assigned to that slot). - */ - public VType get(long key) { - return map.get(key); - } - - /** - * Returns true if this container has an association to a value for - * the given key. - */ - public boolean containsKey(long key) { - return map.containsKey(key); - } - - /** - * @return Returns the current size (number of assigned keys) in the container. - */ - public int size() { - return map.size(); - } - - /** - * @return Return true if this hash map contains no assigned keys. - */ - public boolean isEmpty() { - return map.isEmpty(); - } - - /** - * Returns a cursor over the entries (key-value pairs) in this map. The iterator is - * implemented as a cursor and it returns the same cursor instance on every - * call to {@link java.util.Iterator#next()}. To read the current key and value use the cursor's - * public fields. An example is shown below. - *
-     * <pre>
-     * for (IntShortCursor c : intShortMap)
-     * {
-     *     System.out.println("index=" + c.index
-     *       + " key=" + c.key
-     *       + " value=" + c.value);
-     * }
-     * </pre>
- *
- *
The index field inside the cursor gives the internal index inside - * the container's implementation. The interpretation of this index depends on - * to the container. - */ - @Override - public Iterator> iterator() { - return map.iterator(); - } - - /** - * Returns a specialized view of the keys of this associated container. - * The view additionally implements {@link com.carrotsearch.hppc.ObjectLookupContainer}. - */ - public LongLookupContainer keys() { - return map.keys(); - } - - /** - * Returns a direct iterator over the keys. - */ - public UnmodifiableIterator keysIt() { - final Iterator iterator = map.keys().iterator(); - return new UnmodifiableIterator() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Long next() { - return iterator.next().value; - } - }; - } - - /** - * @return Returns a container with all values stored in this map. - */ - public ObjectContainer values() { - return map.values(); - } - - /** - * Returns a direct iterator over the keys. - */ - public UnmodifiableIterator valuesIt() { - final Iterator> iterator = map.values().iterator(); - return new UnmodifiableIterator() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public VType next() { - return iterator.next().value; - } - }; - } - - @Override - public String toString() { - return map.toString(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - ImmutableOpenLongMap that = (ImmutableOpenLongMap) o; - - if (!map.equals(that.map)) return false; - - return true; - } - - @Override - public int hashCode() { - return map.hashCode(); - } - - @SuppressWarnings("unchecked") - private static final ImmutableOpenLongMap EMPTY = new ImmutableOpenLongMap(new LongObjectHashMap()); - - @SuppressWarnings("unchecked") - public static ImmutableOpenLongMap of() { - return EMPTY; - } - - public static Builder builder() { - return new Builder<>(); - } - - public static Builder builder(int size) { - return new Builder<>(size); - } - - public static Builder builder(ImmutableOpenLongMap map) { - return new Builder<>(map); - } - - public static class Builder implements LongObjectMap { - - private LongObjectHashMap map; - - public Builder() { - //noinspection unchecked - this(EMPTY); - } - - public Builder(int size) { - this.map = new LongObjectHashMap<>(size); - } - - public Builder(ImmutableOpenLongMap map) { - this.map = map.map.clone(); - } - - /** - * Builds a new instance of the - */ - public ImmutableOpenLongMap build() { - LongObjectHashMap map = this.map; - this.map = null; // nullify the map, so any operation post build will fail! (hackish, but safest) - return new ImmutableOpenLongMap<>(map); - } - - /** - * Puts all the entries in the map to the builder. - */ - public Builder putAll(Map map) { - for (Map.Entry entry : map.entrySet()) { - this.map.put(entry.getKey(), entry.getValue()); - } - return this; - } - - /** - * A put operation that can be used in the fluent pattern. - */ - public Builder fPut(long key, VType value) { - map.put(key, value); - return this; - } - - @Override - public VType put(long key, VType value) { - return map.put(key, value); - } - - @Override - public VType get(long key) { - return map.get(key); - } - - @Override - public VType getOrDefault(long kType, VType vType) { - return map.getOrDefault(kType, vType); - } - - /** - * Remove that can be used in the fluent pattern. 
- */ - public Builder fRemove(long key) { - map.remove(key); - return this; - } - - @Override - public VType remove(long key) { - return map.remove(key); - } - - @Override - public Iterator> iterator() { - return map.iterator(); - } - - @Override - public boolean containsKey(long key) { - return map.containsKey(key); - } - - @Override - public int size() { - return map.size(); - } - - @Override - public boolean isEmpty() { - return map.isEmpty(); - } - - @Override - public void clear() { - map.clear(); - } - - @Override - public int putAll(LongObjectAssociativeContainer container) { - return map.putAll(container); - } - - @Override - public int putAll(Iterable> iterable) { - return map.putAll(iterable); - } - - @Override - public int removeAll(LongContainer container) { - return map.removeAll(container); - } - - @Override - public int removeAll(LongPredicate predicate) { - return map.removeAll(predicate); - } - - @Override - public LongCollection keys() { - return map.keys(); - } - - @Override - public ObjectContainer values() { - return map.values(); - } - - @Override - public > T forEach(T procedure) { - return map.forEach(procedure); - } - - @Override - public int indexOf(long key) { - return map.indexOf(key); - } - - @Override - public boolean indexExists(int index) { - return map.indexExists(index); - } - - @Override - public VType indexGet(int index) { - return map.indexGet(index); - } - - @Override - public VType indexReplace(int index, VType newValue) { - return map.indexReplace(index, newValue); - } - - @Override - public void indexInsert(int index, long key, VType value) { - map.indexInsert(index, key, value); - } - - @Override - public void release() { - map.release(); - } - - @Override - public String visualizeKeyDistribution(int characters) { - return map.visualizeKeyDistribution(characters); - } - - @Override - public int removeAll(LongObjectPredicate predicate) { - return map.removeAll(predicate); - } - - @Override - public > T forEach(T predicate) { - return map.forEach(predicate); - } - } -} diff --git a/src/main/java/org/elasticsearch/common/collect/Iterators2.java b/src/main/java/org/elasticsearch/common/collect/Iterators2.java deleted file mode 100644 index 20c0bd46483..00000000000 --- a/src/main/java/org/elasticsearch/common/collect/Iterators2.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.collect; - -import com.google.common.collect.Iterators; -import com.google.common.collect.PeekingIterator; -import com.google.common.collect.UnmodifiableIterator; - -import java.util.Comparator; -import java.util.Iterator; - -public enum Iterators2 { - ; - - /** Remove duplicated elements from an iterator over sorted content. 
*/ - public static Iterator deduplicateSorted(Iterator iterator, final Comparator comparator) { - // TODO: infer type once JI-9019884 is fixed - final PeekingIterator it = Iterators.peekingIterator(iterator); - return new UnmodifiableIterator() { - - @Override - public boolean hasNext() { - return it.hasNext(); - } - - @Override - public T next() { - final T ret = it.next(); - while (it.hasNext() && comparator.compare(ret, it.peek()) == 0) { - it.next(); - } - assert !it.hasNext() || comparator.compare(ret, it.peek()) < 0 : "iterator is not sorted: " + ret + " > " + it.peek(); - return ret; - } - - }; - } - - /** Return a merged view over several iterators, optionally deduplicating equivalent entries. */ - public static Iterator mergeSorted(Iterable> iterators, Comparator comparator, boolean deduplicate) { - Iterator it = Iterators.mergeSorted(iterators, comparator); - if (deduplicate) { - it = deduplicateSorted(it, comparator); - } - return it; - } - -} diff --git a/src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java b/src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java deleted file mode 100644 index a85d786bd89..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/HashedBytesRef.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene; - -import org.apache.lucene.util.BytesRef; - -/** - * A wrapped to {@link BytesRef} that also caches the hashCode for it. 
- */ -public class HashedBytesRef { - - public BytesRef bytes; - public int hash; - - public HashedBytesRef() { - } - - public HashedBytesRef(String bytes) { - this(new BytesRef(bytes)); - } - - public HashedBytesRef(BytesRef bytes) { - this(bytes, bytes.hashCode()); - } - - public HashedBytesRef(BytesRef bytes, int hash) { - this.bytes = bytes; - this.hash = hash; - } - - public HashedBytesRef resetHashCode() { - this.hash = bytes.hashCode(); - return this; - } - - public HashedBytesRef reset(BytesRef bytes, int hash) { - this.bytes = bytes; - this.hash = hash; - return this; - } - - @Override - public int hashCode() { - return hash; - } - - @Override - public boolean equals(Object other) { - if (other instanceof HashedBytesRef) { - return bytes.equals(((HashedBytesRef) other).bytes); - } - return false; - } - - @Override - public String toString() { - return bytes.toString(); - } - - public HashedBytesRef deepCopy() { - return deepCopyOf(this); - } - - public static HashedBytesRef deepCopyOf(HashedBytesRef other) { - BytesRef copy = BytesRef.deepCopyOf(other.bytes); - return new HashedBytesRef(copy, other.hash); - } -} diff --git a/src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java b/src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java deleted file mode 100644 index 1d3084e0352..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/store/ThreadSafeInputStreamIndexInput.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.store; - -import org.apache.lucene.store.IndexInput; - -import java.io.IOException; - -/** - * - */ -public class ThreadSafeInputStreamIndexInput extends InputStreamIndexInput { - - public ThreadSafeInputStreamIndexInput(IndexInput indexInput, long limit) { - super(indexInput, limit); - } - - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - return super.read(b, off, len); - } -} diff --git a/src/main/java/org/elasticsearch/common/unit/Percent.java b/src/main/java/org/elasticsearch/common/unit/Percent.java deleted file mode 100644 index 8da3eff6ad4..00000000000 --- a/src/main/java/org/elasticsearch/common/unit/Percent.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.unit; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; - -import java.io.IOException; -import java.io.Serializable; - -/** - * - */ -public class Percent implements Streamable, Serializable { - - private double value; - - public Percent(double value) { - this.value = value; - } - - public double value() { - return value; - } - - @Override - public String toString() { - return format(value); - } - - public static String format(double value) { - String p = String.valueOf(value * 100.0); - int ix = p.indexOf(".") + 1; - return p.substring(0, ix) + p.substring(ix, ix + 1) + "%"; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - value = in.readDouble(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeDouble(value); - } -} diff --git a/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java b/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java deleted file mode 100644 index 65aa51c8ec0..00000000000 --- a/src/test/java/org/elasticsearch/common/collect/Iterators2Tests.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.collect; - -import com.google.common.collect.Lists; -import com.google.common.collect.Ordering; -import com.google.common.collect.Sets; -import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.test.ElasticsearchTestCase; - -import java.util.Iterator; -import java.util.List; - -public class Iterators2Tests extends ElasticsearchTestCase { - - public void testDeduplicateSorted() { - final List list = Lists.newArrayList(); - for (int i = randomInt(100); i >= 0; --i) { - final int frequency = randomIntBetween(1, 10); - final String s = randomAsciiOfLength(randomIntBetween(2, 20)); - for (int j = 0; j < frequency; ++j) { - list.add(s); - } - } - CollectionUtil.introSort(list); - final List deduplicated = Lists.newArrayList(); - for (Iterator it = Iterators2.deduplicateSorted(list.iterator(), Ordering.natural()); it.hasNext(); ) { - deduplicated.add(it.next()); - } - assertEquals(Lists.newArrayList(Sets.newTreeSet(list)), deduplicated); - } - -} From 91e9caabd7139e0fa48687d49fb5f35916a41d78 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Thu, 28 May 2015 11:54:56 +0200 Subject: [PATCH 06/19] [TEST] add path.home to settings --- .../node/internal/InternalSettingsPreparerTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java index 77cd1aebfcb..315a5f7f2bd 100644 --- a/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java +++ b/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java @@ -67,6 +67,7 @@ public class InternalSettingsPreparerTests extends ElasticsearchTestCase { // test that we can read config files with .yaml, .json, and .properties suffixes Tuple tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder() .put("config.ignore_system_properties", true) + .put("path.home", createTempDir().toString()) .build(), true); assertThat(tuple.v1().get("yaml.config.exists"), equalTo("true")); From 105f4dd512115d0a802319af02c5f68f789bcdfc Mon Sep 17 00:00:00 2001 From: jaymode Date: Thu, 28 May 2015 06:39:51 -0400 Subject: [PATCH 07/19] Test: filter out colons in test section names On Windows, colons ':' are illegal in file names and since we use a Path to check if the test is blacklisted, tests with a colon in the test section name will fail. This change simply removes the colon from the name when matching against the blacklist. 
--- .../org/elasticsearch/test/rest/ElasticsearchRestTestCase.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java index 5951a9a5815..ee217ae1e8f 100644 --- a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java +++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java @@ -312,7 +312,7 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration //skip test if it matches one of the blacklist globs for (PathMatcher blacklistedPathMatcher : blacklistPathMatchers) { //we need to replace a few characters otherwise the test section name can't be parsed as a path on windows - String testSection = testCandidate.getTestSection().getName().replace("*", "").replace("\\", "/").replaceAll("\\s+/", "/").trim(); + String testSection = testCandidate.getTestSection().getName().replace("*", "").replace("\\", "/").replaceAll("\\s+/", "/").replace(":", "").trim(); String testPath = testCandidate.getSuitePath() + "/" + testSection; assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher.matches(PathUtils.get(testPath))); } From 283b0931ff7de13b1e35ad8646bec1dba63b8112 Mon Sep 17 00:00:00 2001 From: markharwood Date: Tue, 26 May 2015 17:32:48 +0100 Subject: [PATCH 08/19] Aggregations fix: queries with size=0 broke aggregations that require scores. Aggregations like Sampler and TopHits that require access to scores did not work if the query has size param set to zero. The assumption was that the Lucene query scoring logic was not required in these cases. Added a Junit test to demonstrate the issue and a fix which relies on earlier creation of Collector wrappers so that Collector.needsScores() calls work for all search operations. Closes #11119 --- .../search/internal/ContextIndexSearcher.java | 12 ++- .../aggregations/bucket/TopHitsTests.java | 73 ++++++++++++++++++- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 388ed8e8ae9..e4c61209825 100644 --- a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -133,8 +133,11 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { } } + @Override - public void search(List leaves, Weight weight, Collector collector) throws IOException { + public void search(Query query, Collector collector) throws IOException { + // Wrap the caller's collector with various wrappers e.g. those used to siphon + // matches off for aggregation or to impose a time-limit on collection. final boolean timeoutSet = searchContext.timeoutInMillis() != -1; final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER; @@ -166,8 +169,13 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { collector = new MinimumScoreCollector(collector, searchContext.minimumScore()); } } + super.search(query, collector); + } - // we only compute the doc id set once since within a context, we execute the same query always... 
+ @Override + public void search(List leaves, Weight weight, Collector collector) throws IOException { + final boolean timeoutSet = searchContext.timeoutInMillis() != -1; + final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER; try { if (timeoutSet || terminateAfterSet) { try { diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java index ba927efd641..a592ec18b95 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java @@ -63,7 +63,15 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; /** * @@ -228,7 +236,9 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { @Test public void testBasics() throws Exception { - SearchResponse response = client().prepareSearch("idx").setTypes("type") + SearchResponse response = client() + .prepareSearch("idx") + .setTypes("type") .addAggregation(terms("terms") .executionHint(randomExecutionHint()) .field(TERMS_AGGS_FIELD) @@ -264,6 +274,65 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { } } + @Test + public void testIssue11119() throws Exception { + // Test that top_hits aggregation is fed scores if query results size=0 + SearchResponse response = client() + .prepareSearch("idx") + .setTypes("field-collapsing") + .setSize(0) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))) + .get(); + + assertSearchResponse(response); + + assertThat(response.getHits().getTotalHits(), equalTo(8l)); + assertThat(response.getHits().hits().length, equalTo(0)); + assertThat(response.getHits().maxScore(), equalTo(0f)); + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + + for (Terms.Bucket bucket : terms.getBuckets()) { + assertThat(bucket, notNullValue()); + TopHits topHits = bucket.getAggregations().get("hits"); + SearchHits hits = topHits.getHits(); + float bestScore = Float.MAX_VALUE; + for (int h = 0; h < hits.getHits().length; h++) { + float score=hits.getAt(h).getScore(); + assertThat(score, lessThanOrEqualTo(bestScore)); + assertThat(score, greaterThan(0f)); + bestScore = hits.getAt(h).getScore(); + } + } + + // Also check that min_score setting works when size=0 + // (technically not a test of top_hits but implementation details are + // tied up with the need to feed scores into the agg tree even when + // users don't want ranked set of 
query results.) + response = client() + .prepareSearch("idx") + .setTypes("field-collapsing") + .setSize(0) + .setMinScore(0.0001f) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group")) + .get(); + + assertSearchResponse(response); + + assertThat(response.getHits().getTotalHits(), equalTo(8l)); + assertThat(response.getHits().hits().length, equalTo(0)); + assertThat(response.getHits().maxScore(), equalTo(0f)); + terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getBuckets().size(), equalTo(3)); + } + + @Test public void testBreadthFirst() throws Exception { // breadth_first will be ignored since we need scores From a4c88b723304bb0d2be0f587a71d3dacf3b0289e Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 28 May 2015 12:42:18 +0200 Subject: [PATCH 09/19] Consolidate directory lock obtain code The Directory#makeLock API is trappy and can easily lead to unexpected lock release if native locks are used. see LUCENE-6507 for details. This commit consolidates the lock lock into one place and only returns the lock instance if we actually acquired it. --- .../elasticsearch/common/lucene/Lucene.java | 32 ++++++++++++++----- .../common/util/MultiDataPathUpgrader.java | 13 ++++---- .../elasticsearch/env/NodeEnvironment.java | 15 +++++---- .../org/elasticsearch/index/store/Store.java | 10 ++---- 4 files changed, 41 insertions(+), 29 deletions(-) diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 385607d89ba..e3d787779c7 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -173,6 +173,28 @@ public class Lucene { return SegmentInfos.readCommit(directory, segmentsFileName); } + /** + * Tries to acquire the {@link IndexWriter#WRITE_LOCK_NAME} on the given directory. The returned lock must be closed once + * the lock is released. If the lock can't be obtained a {@link LockObtainFailedException} is thrown. + * This method uses the {@link IndexWriterConfig#getDefaultWriteLockTimeout()} as the lock timeout. + */ + public static Lock acquireWriteLock(Directory directory) throws IOException { + return acquireLock(directory, IndexWriter.WRITE_LOCK_NAME, IndexWriterConfig.getDefaultWriteLockTimeout()); + } + + /** + * Tries to acquire a lock on the given directory. The returned lock must be closed once + * the lock is released. If the lock can't be obtained a {@link LockObtainFailedException} is thrown. + */ + @SuppressForbidden(reason = "this method uses trappy Directory#makeLock API") + public static Lock acquireLock(Directory directory, String lockName, long timeout) throws IOException { + final Lock writeLock = directory.makeLock(lockName); + if (writeLock.obtain(timeout) == false) { + throw new LockObtainFailedException("failed to obtain lock: " + writeLock); + } + return writeLock; + } + /** * This method removes all files from the given directory that are not referenced by the given segments file. * This method will open an IndexWriter and relies on index file deleter to remove all unreferenced files. 
Segment files @@ -184,10 +206,7 @@ public class Lucene { */ public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Directory directory) throws IOException { final SegmentInfos si = readSegmentInfos(segmentsFileName, directory); - try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) { - if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock - throw new LockObtainFailedException("Index locked for write: " + writeLock); - } + try (Lock writeLock = acquireWriteLock(directory)) { int foundSegmentFiles = 0; for (final String file : directory.listAll()) { /** @@ -226,10 +245,7 @@ public class Lucene { * this operation fails. */ public static void cleanLuceneIndex(Directory directory) throws IOException { - try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) { - if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock - throw new LockObtainFailedException("Index locked for write: " + writeLock); - } + try (Lock writeLock = acquireWriteLock(directory)) { for (final String file : directory.listAll()) { if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { directory.deleteFile(file); // remove all segment_N files diff --git a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java index 1cb700cff60..3425d151c34 100644 --- a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java +++ b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java @@ -25,6 +25,7 @@ import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -32,6 +33,7 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -84,13 +86,12 @@ public class MultiDataPathUpgrader { ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath()); Files.createDirectories(targetPath.resolveIndex()); try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) { - try (final Lock lock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) { - if (lock.obtain(5000)) { - upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths); - } else { - throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex()); - } + try (final Lock lock = Lucene.acquireWriteLock(directory)) { + upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths); + } catch (LockObtainFailedException ex) { + throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex); } + } diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 26a725f9072..75ef6914eae 100644 --- 
a/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; @@ -146,18 +147,17 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); - Lock tmpLock = luceneDir.makeLock(NODE_LOCK_FILENAME); - boolean obtained = tmpLock.obtain(); - if (obtained) { + try { + locks[dirIndex] = Lucene.acquireLock(luceneDir, NODE_LOCK_FILENAME, 0); nodePaths[dirIndex] = new NodePath(dir, environment); - locks[dirIndex] = tmpLock; localNodeId = possibleLockId; - } else { + } catch (LockObtainFailedException ex) { logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath()); // release all the ones that were obtained up until now releaseAndNullLocks(locks); break; } + } catch (IOException e) { logger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath()); lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e); @@ -314,8 +314,9 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { // open a directory (will be immediately closed) on the shard's location dirs[i] = new SimpleFSDirectory(p, FsDirectoryService.buildLockFactory(indexSettings)); // create a lock for the "write.lock" file - locks[i] = dirs[i].makeLock(IndexWriter.WRITE_LOCK_NAME); - if (locks[i].obtain() == false) { + try { + locks[i] = Lucene.acquireWriteLock(dirs[i]); + } catch (IOException ex) { throw new ElasticsearchException("unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p); } diff --git a/src/main/java/org/elasticsearch/index/store/Store.java b/src/main/java/org/elasticsearch/index/store/Store.java index c889dd16c20..d92128a319c 100644 --- a/src/main/java/org/elasticsearch/index/store/Store.java +++ b/src/main/java/org/elasticsearch/index/store/Store.java @@ -259,10 +259,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref metadataLock.writeLock().lock(); // we make sure that nobody fetches the metadata while we do this rename operation here to ensure we don't // get exceptions if files are still open. 
- try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) { - if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock - throw new LockObtainFailedException("Index locked for write: " + writeLock); - } + try (Lock writeLock = Lucene.acquireWriteLock(directory())) { for (Map.Entry entry : entries) { String tempFile = entry.getKey(); String origFile = entry.getValue(); @@ -586,10 +583,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref */ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException { metadataLock.writeLock().lock(); - try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) { - if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock - throw new LockObtainFailedException("Index locked for write: " + writeLock); - } + try (Lock writeLock = Lucene.acquireWriteLock(directory)) { final StoreDirectory dir = directory; for (String existingFile : dir.listAll()) { if (existingFile.equals(IndexWriter.WRITE_LOCK_NAME) || Store.isChecksum(existingFile) || sourceMetaData.contains(existingFile)) { From 38639074b4d8a7fe19076b31dd2207b6b31412e4 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 28 May 2015 16:24:41 +0200 Subject: [PATCH 10/19] Testing: Ensure cat API REST tests are unaffected by randomization The wildcard cat API REST tests relied on bulk.max and bulk.min in the thread_pool response. However due to the thread pool types being randomized in InternalTestCluster, the min/max values were not guaranteed to exist (the cached thread pool type is unbounded and thus does not have a max value). In order to prevent this, the test has been removed and now the cat nodes test is used for wildcard testing, which always returns stats about the heap. 
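Stepping back to the lock consolidation in the patch above: every call site now funnels through the new Lucene helpers, so acquiring a directory lock follows a single try-with-resources pattern. The sketch below is illustrative only and assumes just the acquireWriteLock helper added to Lucene.java in that patch; the LockSketch class, the withIndexWriteLock method and its Directory/Path parameters are made up for the example.

import java.io.IOException;
import java.nio.file.Path;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.elasticsearch.common.lucene.Lucene;

class LockSketch {
    // Illustrative caller: either the write lock is really held inside the try block, or we fail fast.
    static void withIndexWriteLock(Directory directory, Path indexPath) throws IOException {
        try (Lock writeLock = Lucene.acquireWriteLock(directory)) {
            // reaching this point means the lock was actually obtained; closing the block releases it
        } catch (LockObtainFailedException ex) {
            // the helper throws instead of handing back a lock that was never obtained
            throw new IllegalStateException("Can't obtain lock on " + indexPath, ex);
        }
    }
}

This mirrors the MultiDataPathUpgrader change above, which wraps the failure in an IllegalStateException rather than proceeding with a lock that was never acquired.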
--- rest-api-spec/test/cat.nodes/10_basic.yaml | 10 ++++++++++ rest-api-spec/test/cat.thread_pool/10_basic.yaml | 12 ------------ 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/rest-api-spec/test/cat.nodes/10_basic.yaml b/rest-api-spec/test/cat.nodes/10_basic.yaml index f7e9bedcd0b..5b2ddb7fc38 100755 --- a/rest-api-spec/test/cat.nodes/10_basic.yaml +++ b/rest-api-spec/test/cat.nodes/10_basic.yaml @@ -29,6 +29,16 @@ /^ heap\.current \s+ heap\.percent \s+ heap\.max \s+ \n (\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \s+ \n)+ $/ + - do: + cat.nodes: + h: heap.* + v: true + + - match: + $body: | + /^ heap\.current \s+ heap\.percent \s+ heap\.max \s+ \n + (\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \s+ \n)+ $/ + - do: cat.nodes: h: file_desc.current,file_desc.percent,file_desc.max diff --git a/rest-api-spec/test/cat.thread_pool/10_basic.yaml b/rest-api-spec/test/cat.thread_pool/10_basic.yaml index 283e353b7a3..edb87ce27b9 100755 --- a/rest-api-spec/test/cat.thread_pool/10_basic.yaml +++ b/rest-api-spec/test/cat.thread_pool/10_basic.yaml @@ -29,18 +29,6 @@ / #pid id host ip port ^ (\d+ \s+ \S{4} \s+ \S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ (\d+|-) \s+ \n)+ $/ - - - do: - cat.thread_pool: - h: bulk.m* - - - match: - $body: | - /^ bulk.min \s+ bulk.max \s+ \n - (\s+ \d+ \s+ \d+ \s+ \n)+ $/ - -#(\s+ \d+ \s+ \d+ \n)+ $/ - - do: cat.thread_pool: h: id,ba,fa,gea,ga,ia,maa,ma,oa,pa From 55fc3a727b0d0ba18ba2852e7e630b07beee29a8 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Sun, 24 May 2015 15:22:06 -0700 Subject: [PATCH 11/19] Core: refactor upgrade API to use transport and write minimum compatible version that the index was upgraded to In #11072 we are adding a check that will prevent opening of old indices. However, this check doesn't take into consideration the fact that indices can be made compatible with the current version through upgrade API. In order to make compatibility check aware of the upgrade, the upgrade API should write a new setting `index.version.minimum_compatible` that will indicate the minimum compatible version of lucene this index is compatible with and `index.version.upgraded` that will indicate the version of elasticsearch that performed the upgrade. 
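Once an index has been upgraded this way, the two settings can be inspected through the ordinary index settings API. For example (the index name is illustrative):

curl -XGET 'http://localhost:9200/twitter/_settings?pretty'

For an upgraded index the response would include values for `index.version.upgraded` and `index.version.minimum_compatible`.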
Closes #11095 --- docs/reference/indices/upgrade.asciidoc | 17 +- .../elasticsearch/action/ActionModule.java | 9 + .../indices/optimize/OptimizeRequest.java | 42 ---- .../upgrade/get/IndexShardUpgradeStatus.java | 78 +++++++ .../upgrade/get/IndexUpgradeStatus.java | 95 ++++++++ .../upgrade/get/ShardUpgradeStatus.java | 92 ++++++++ .../get/TransportUpgradeStatusAction.java | 152 +++++++++++++ .../upgrade/get/UpgradeStatusAction.java | 45 ++++ .../upgrade/get/UpgradeStatusRequest.java | 39 ++++ .../get/UpgradeStatusRequestBuilder.java | 33 +++ .../upgrade/get/UpgradeStatusResponse.java | 191 ++++++++++++++++ .../upgrade/post/ShardUpgradeRequest.java | 60 +++++ .../upgrade/post/ShardUpgradeResponse.java | 76 +++++++ .../upgrade/post/TransportUpgradeAction.java | 214 ++++++++++++++++++ .../post/TransportUpgradeSettingsAction.java | 86 +++++++ .../indices/upgrade/post/UpgradeAction.java | 46 ++++ .../indices/upgrade/post/UpgradeRequest.java | 91 ++++++++ .../upgrade/post/UpgradeRequestBuilder.java | 42 ++++ .../indices/upgrade/post/UpgradeResponse.java | 76 +++++++ .../upgrade/post/UpgradeSettingsAction.java | 45 ++++ ...radeSettingsClusterStateUpdateRequest.java | 51 +++++ .../upgrade/post/UpgradeSettingsRequest.java | 98 ++++++++ .../post/UpgradeSettingsRequestBuilder.java | 43 ++++ .../upgrade/post/UpgradeSettingsResponse.java | 51 +++++ .../client/IndicesAdminClient.java | 53 +++++ .../org/elasticsearch/client/Requests.java | 12 + .../client/support/AbstractClient.java | 38 ++++ .../cluster/metadata/IndexMetaData.java | 40 +++- .../metadata/MetaDataIndexUpgradeService.java | 18 +- .../MetaDataUpdateSettingsService.java | 37 +++ .../elasticsearch/index/shard/IndexShard.java | 35 ++- .../indices/upgrade/RestUpgradeAction.java | 90 +++----- .../snapshots/RestoreService.java | 2 + .../OldIndexBackwardsCompatibilityTests.java | 13 +- .../upgrade/UpgradeReallyOldIndexTest.java | 18 +- .../admin/indices/upgrade/UpgradeTest.java | 136 ++++------- 36 files changed, 2030 insertions(+), 234 deletions(-) create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResponse.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java create mode 100644 
src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java create mode 100644 src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsResponse.java diff --git a/docs/reference/indices/upgrade.asciidoc b/docs/reference/indices/upgrade.asciidoc index 295a407f979..046dde2fe8a 100644 --- a/docs/reference/indices/upgrade.asciidoc +++ b/docs/reference/indices/upgrade.asciidoc @@ -54,13 +54,26 @@ curl 'http://localhost:9200/twitter/_upgrade?pretty&human' [source,js] -------------------------------------------------- { - "twitter": { + "size": "21gb", + "size_in_bytes": "21000000000", + "size_to_upgrade": "10gb", + "size_to_upgrade_in_bytes": "10000000000" + "size_to_upgrade_ancient": "1gb", + "size_to_upgrade_ancient_in_bytes": "1000000000" + "indices": { + "twitter": { "size": "21gb", "size_in_bytes": "21000000000", "size_to_upgrade": "10gb", "size_to_upgrade_in_bytes": "10000000000" "size_to_upgrade_ancient": "1gb", "size_to_upgrade_ancient_in_bytes": "1000000000" - } + } + } } -------------------------------------------------- + +The level of details in the upgrade status command can be controlled by +setting `level` parameter to `cluster`, `index` (default) or `shard` levels. +For example, you can run the upgrade status command with `level=shard` to +get detailed upgrade information of each individual shard. 
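For example, a shard-level status request against the same index used above could be issued as:

curl 'http://localhost:9200/twitter/_upgrade?pretty&human&level=shard'

The response then nests per-shard entries, including their routing information, under each index.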
\ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java index 0decb393405..7bb66260a58 100644 --- a/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/src/main/java/org/elasticsearch/action/ActionModule.java @@ -111,6 +111,12 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesActi import org.elasticsearch.action.admin.indices.template.get.TransportGetIndexTemplatesAction; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction; +import org.elasticsearch.action.admin.indices.upgrade.get.TransportUpgradeStatusAction; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusAction; +import org.elasticsearch.action.admin.indices.upgrade.post.TransportUpgradeAction; +import org.elasticsearch.action.admin.indices.upgrade.post.TransportUpgradeSettingsAction; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction; import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction; @@ -256,6 +262,9 @@ public class ActionModule extends AbstractModule { registerAction(FlushAction.INSTANCE, TransportFlushAction.class); registerAction(SealIndicesAction.INSTANCE, TransportSealIndicesAction.class); registerAction(OptimizeAction.INSTANCE, TransportOptimizeAction.class); + registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class); + registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); + registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class); registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class); registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class); registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java index 3510a3b7f96..08f322a1154 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/OptimizeRequest.java @@ -42,15 +42,11 @@ public class OptimizeRequest extends BroadcastRequest { public static final int MAX_NUM_SEGMENTS = -1; public static final boolean ONLY_EXPUNGE_DELETES = false; public static final boolean FLUSH = true; - public static final boolean UPGRADE = false; - public static final boolean UPGRADE_ONLY_ANCIENT_SEGMENTS = false; } private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS; private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES; private boolean flush = Defaults.FLUSH; - private boolean upgrade = Defaults.UPGRADE; - private boolean upgradeOnlyAncientSegments = Defaults.UPGRADE_ONLY_ANCIENT_SEGMENTS; /** * Constructs an optimization request over one or more indices. @@ -114,30 +110,12 @@ public class OptimizeRequest extends BroadcastRequest { return this; } - /** - * Should the merge upgrade all old segments to the current index format. - * Defaults to false. 
- */ - public boolean upgrade() { - return upgrade; - } - - /** - * See {@link #upgrade()} - */ - public OptimizeRequest upgrade(boolean upgrade) { - this.upgrade = upgrade; - return this; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); maxNumSegments = in.readInt(); onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); - upgrade = in.readBoolean(); - upgradeOnlyAncientSegments = in.readBoolean(); } @Override @@ -146,24 +124,6 @@ public class OptimizeRequest extends BroadcastRequest { out.writeInt(maxNumSegments); out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); - out.writeBoolean(upgrade); - out.writeBoolean(upgradeOnlyAncientSegments); - } - - /** - * Should the merge upgrade only the ancient (older major version of Lucene) segments? - * Defaults to false. - */ - public boolean upgradeOnlyAncientSegments() { - return upgradeOnlyAncientSegments; - } - - /** - * See {@link #upgradeOnlyAncientSegments()} - */ - public OptimizeRequest upgradeOnlyAncientSegments(boolean upgradeOnlyAncientSegments) { - this.upgradeOnlyAncientSegments = upgradeOnlyAncientSegments; - return this; } @Override @@ -172,8 +132,6 @@ public class OptimizeRequest extends BroadcastRequest { "maxNumSegments=" + maxNumSegments + ", onlyExpungeDeletes=" + onlyExpungeDeletes + ", flush=" + flush + - ", upgrade=" + upgrade + - ", upgradeOnlyAncientSegments=" + upgradeOnlyAncientSegments + '}'; } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java new file mode 100644 index 00000000000..e1cd16370c3 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexShardUpgradeStatus.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import com.google.common.collect.Iterators; +import org.elasticsearch.index.shard.ShardId; + +import java.util.Iterator; + +public class IndexShardUpgradeStatus implements Iterable { + + private final ShardId shardId; + + private final ShardUpgradeStatus[] shards; + + IndexShardUpgradeStatus(ShardId shardId, ShardUpgradeStatus[] shards) { + this.shardId = shardId; + this.shards = shards; + } + + public ShardId getShardId() { + return this.shardId; + } + + public ShardUpgradeStatus getAt(int i) { + return shards[i]; + } + + public ShardUpgradeStatus[] getShards() { + return this.shards; + } + + @Override + public Iterator iterator() { + return Iterators.forArray(shards); + } + + public long getTotalBytes() { + long totalBytes = 0; + for (ShardUpgradeStatus indexShardUpgradeStatus : shards) { + totalBytes += indexShardUpgradeStatus.getTotalBytes(); + } + return totalBytes; + } + + public long getToUpgradeBytes() { + long upgradeBytes = 0; + for (ShardUpgradeStatus indexShardUpgradeStatus : shards) { + upgradeBytes += indexShardUpgradeStatus.getToUpgradeBytes(); + } + return upgradeBytes; + } + + public long getToUpgradeBytesAncient() { + long upgradeBytesAncient = 0; + for (ShardUpgradeStatus indexShardUpgradeStatus : shards) { + upgradeBytesAncient += indexShardUpgradeStatus.getToUpgradeBytesAncient(); + } + return upgradeBytesAncient; + } +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java new file mode 100644 index 00000000000..33a60328951 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +public class IndexUpgradeStatus implements Iterable { + + private final String index; + + private final Map indexShards; + + IndexUpgradeStatus(String index, ShardUpgradeStatus[] shards) { + this.index = index; + + Map> tmpIndexShards = Maps.newHashMap(); + for (ShardUpgradeStatus shard : shards) { + List lst = tmpIndexShards.get(shard.getShardRouting().id()); + if (lst == null) { + lst = Lists.newArrayList(); + tmpIndexShards.put(shard.getShardRouting().id(), lst); + } + lst.add(shard); + } + indexShards = Maps.newHashMap(); + for (Map.Entry> entry : tmpIndexShards.entrySet()) { + indexShards.put(entry.getKey(), new IndexShardUpgradeStatus(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardUpgradeStatus[entry.getValue().size()]))); + } + } + + public String getIndex() { + return this.index; + } + + /** + * A shard id to index shard upgrade status map (note, index shard upgrade status is the replication shard group that maps + * to the shard id). + */ + public Map getShards() { + return this.indexShards; + } + + @Override + public Iterator iterator() { + return indexShards.values().iterator(); + } + + public long getTotalBytes() { + long totalBytes = 0; + for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexShards.values()) { + totalBytes += indexShardUpgradeStatus.getTotalBytes(); + } + return totalBytes; + } + + public long getToUpgradeBytes() { + long upgradeBytes = 0; + for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexShards.values()) { + upgradeBytes += indexShardUpgradeStatus.getToUpgradeBytes(); + } + return upgradeBytes; + } + + public long getToUpgradeBytesAncient() { + long upgradeBytesAncient = 0; + for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexShards.values()) { + upgradeBytesAncient += indexShardUpgradeStatus.getToUpgradeBytesAncient(); + } + return upgradeBytesAncient; + } + + +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java new file mode 100644 index 00000000000..e5f0261932c --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/ShardUpgradeStatus.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry; + +public class ShardUpgradeStatus extends BroadcastShardResponse { + + private ShardRouting shardRouting; + + private long totalBytes; + + private long toUpgradeBytes; + + private long toUpgradeBytesAncient; + + ShardUpgradeStatus() { + } + + ShardUpgradeStatus(ShardRouting shardRouting, long totalBytes, long toUpgradeBytes, long upgradeBytesAncient) { + super(shardRouting.shardId()); + this.shardRouting = shardRouting; + this.totalBytes = totalBytes; + this.toUpgradeBytes = toUpgradeBytes; + this.toUpgradeBytesAncient = upgradeBytesAncient; + + } + + public ShardRouting getShardRouting() { + return this.shardRouting; + } + + public long getTotalBytes() { + return totalBytes; + } + + public long getToUpgradeBytes() { + return toUpgradeBytes; + } + + public long getToUpgradeBytesAncient() { + return toUpgradeBytesAncient; + } + + public static ShardUpgradeStatus readShardUpgradeStatus(StreamInput in) throws IOException { + ShardUpgradeStatus shard = new ShardUpgradeStatus(); + shard.readFrom(in); + return shard; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + shardRouting = readShardRoutingEntry(in); + totalBytes = in.readLong(); + toUpgradeBytes = in.readLong(); + toUpgradeBytesAncient = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardRouting.writeTo(out); + out.writeLong(totalBytes); + out.writeLong(toUpgradeBytes); + out.writeLong(toUpgradeBytesAncient); + } +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java new file mode 100644 index 00000000000..370dce6e41f --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -0,0 +1,152 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; +import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Segment; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.atomic.AtomicReferenceArray; + +import static com.google.common.collect.Lists.newArrayList; + +/** + * + */ +public class TransportUpgradeStatusAction extends TransportBroadcastAction { + + private final IndicesService indicesService; + + @Inject + public TransportUpgradeStatusAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, + IndicesService indicesService, ActionFilters actionFilters) { + super(settings, UpgradeStatusAction.NAME, threadPool, clusterService, transportService, actionFilters, + UpgradeStatusRequest.class, IndexShardUpgradeStatusRequest.class, ThreadPool.Names.MANAGEMENT); + this.indicesService = indicesService; + } + + /** + * Getting upgrade stats from *all* active shards. 
+ */ + @Override + protected GroupShardsIterator shards(ClusterState clusterState, UpgradeStatusRequest request, String[] concreteIndices) { + return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, UpgradeStatusRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, UpgradeStatusRequest countRequest, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); + } + + @Override + protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { + int successfulShards = 0; + int failedShards = 0; + List shardFailures = null; + final List shards = newArrayList(); + for (int i = 0; i < shardsResponses.length(); i++) { + Object shardResponse = shardsResponses.get(i); + if (shardResponse == null) { + // simply ignore non active shards + } else if (shardResponse instanceof BroadcastShardOperationFailedException) { + failedShards++; + if (shardFailures == null) { + shardFailures = newArrayList(); + } + shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); + } else { + shards.add((ShardUpgradeStatus) shardResponse); + successfulShards++; + } + } + return new UpgradeStatusResponse(shards.toArray(new ShardUpgradeStatus[shards.size()]), shardsResponses.length(), successfulShards, failedShards, shardFailures); + } + + @Override + protected IndexShardUpgradeStatusRequest newShardRequest(int numShards, ShardRouting shard, UpgradeStatusRequest request) { + return new IndexShardUpgradeStatusRequest(shard.shardId(), request); + } + + @Override + protected ShardUpgradeStatus newShardResponse() { + return new ShardUpgradeStatus(); + } + + @Override + protected ShardUpgradeStatus shardOperation(IndexShardUpgradeStatusRequest request) { + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.shardSafe(request.shardId().id()); + List segments = indexShard.engine().segments(false); + long total_bytes = 0; + long to_upgrade_bytes = 0; + long to_upgrade_bytes_ancient = 0; + for (Segment seg : segments) { + total_bytes += seg.sizeInBytes; + if (seg.version.major != Version.CURRENT.luceneVersion.major) { + to_upgrade_bytes_ancient += seg.sizeInBytes; + to_upgrade_bytes += seg.sizeInBytes; + } else if (seg.version.minor != Version.CURRENT.luceneVersion.minor) { + // TODO: this comparison is bogus! 
it would cause us to upgrade even with the same format + // instead, we should check if the codec has changed + to_upgrade_bytes += seg.sizeInBytes; + } + } + + return new ShardUpgradeStatus(indexShard.routingEntry(), total_bytes, to_upgrade_bytes, to_upgrade_bytes_ancient); + } + + static class IndexShardUpgradeStatusRequest extends BroadcastShardRequest { + + IndexShardUpgradeStatusRequest() { + + } + + IndexShardUpgradeStatusRequest(ShardId shardId, UpgradeStatusRequest request) { + super(shardId, request); + } + + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java new file mode 100644 index 00000000000..e0318b13b97 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusAction.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + */ +public class UpgradeStatusAction extends Action { + + public static final UpgradeStatusAction INSTANCE = new UpgradeStatusAction(); + public static final String NAME = "indices:monitor/upgrade"; + + private UpgradeStatusAction() { + super(NAME); + } + + @Override + public UpgradeStatusResponse newResponse() { + return new UpgradeStatusResponse(); + } + + @Override + public UpgradeStatusRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new UpgradeStatusRequestBuilder(client, this); + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java new file mode 100644 index 00000000000..a951924720d --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequest.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class UpgradeStatusRequest extends BroadcastRequest { + + public UpgradeStatusRequest() { + this(Strings.EMPTY_ARRAY); + } + + public UpgradeStatusRequest(String... indices) { + super(indices); + } + +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java new file mode 100644 index 00000000000..98dd1c1828d --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * + */ +public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder { + + public UpgradeStatusRequestBuilder(ElasticsearchClient client, UpgradeStatusAction action) { + super(client, action, new UpgradeStatusRequest()); + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java new file mode 100644 index 00000000000..89520704049 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -0,0 +1,191 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.get; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentBuilderString; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class UpgradeStatusResponse extends BroadcastResponse implements ToXContent { + + + private ShardUpgradeStatus[] shards; + + private Map indicesUpgradeStatus; + + UpgradeStatusResponse() { + + } + + UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List shardFailures) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.shards = shards; + } + + public Map getIndices() { + if (indicesUpgradeStatus != null) { + return indicesUpgradeStatus; + } + Map indicesUpgradeStats = Maps.newHashMap(); + + Set indices = Sets.newHashSet(); + for (ShardUpgradeStatus shard : shards) { + indices.add(shard.getIndex()); + } + + for (String index : indices) { + List shards = Lists.newArrayList(); + for (ShardUpgradeStatus shard : this.shards) { + if (shard.getShardRouting().index().equals(index)) { + shards.add(shard); + } + } + indicesUpgradeStats.put(index, new IndexUpgradeStatus(index, shards.toArray(new ShardUpgradeStatus[shards.size()]))); + } + this.indicesUpgradeStatus = indicesUpgradeStats; + return indicesUpgradeStats; + } + + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + shards = new ShardUpgradeStatus[in.readVInt()]; + for (int i = 0; i < shards.length; i++) { + shards[i] = ShardUpgradeStatus.readShardUpgradeStatus(in); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(shards.length); + for (ShardUpgradeStatus shard : shards) { + shard.writeTo(out); + } + } + + public long getTotalBytes() { + long totalBytes = 0; + for (IndexUpgradeStatus indexShardUpgradeStatus : getIndices().values()) { + totalBytes += indexShardUpgradeStatus.getTotalBytes(); + } + return totalBytes; + } + + public long getToUpgradeBytes() { + long upgradeBytes = 0; + for (IndexUpgradeStatus indexShardUpgradeStatus : getIndices().values()) { + upgradeBytes += indexShardUpgradeStatus.getToUpgradeBytes(); + } + return upgradeBytes; + } + + public long getToUpgradeBytesAncient() { + long upgradeBytesAncient = 0; + for (IndexUpgradeStatus indexShardUpgradeStatus : getIndices().values()) { + upgradeBytesAncient += indexShardUpgradeStatus.getToUpgradeBytesAncient(); + } + return upgradeBytesAncient; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + + + builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes()); + builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes()); + builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient()); + + String level = params.param("level", "indices"); + boolean outputShards = "shards".equals(level); + 
boolean outputIndices = "indices".equals(level) || outputShards; + if (outputIndices) { + builder.startObject(Fields.INDICES); + for (IndexUpgradeStatus indexUpgradeStatus : getIndices().values()) { + builder.startObject(indexUpgradeStatus.getIndex(), XContentBuilder.FieldCaseConversion.NONE); + + builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, indexUpgradeStatus.getTotalBytes()); + builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, indexUpgradeStatus.getToUpgradeBytes()); + builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, indexUpgradeStatus.getToUpgradeBytesAncient()); + if (outputShards) { + builder.startObject(Fields.SHARDS); + for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexUpgradeStatus) { + builder.startArray(Integer.toString(indexShardUpgradeStatus.getShardId().id())); + for (ShardUpgradeStatus shardUpgradeStatus : indexShardUpgradeStatus) { + builder.startObject(); + + builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes()); + builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes()); + builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient()); + + builder.startObject(Fields.ROUTING); + builder.field(Fields.STATE, shardUpgradeStatus.getShardRouting().state()); + builder.field(Fields.PRIMARY, shardUpgradeStatus.getShardRouting().primary()); + builder.field(Fields.NODE, shardUpgradeStatus.getShardRouting().currentNodeId()); + if (shardUpgradeStatus.getShardRouting().relocatingNodeId() != null) { + builder.field(Fields.RELOCATING_NODE, shardUpgradeStatus.getShardRouting().relocatingNodeId()); + } + builder.endObject(); + + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + } + + builder.endObject(); + } + + builder.endObject(); + } + return builder; + } + + static final class Fields { + static final XContentBuilderString INDICES = new XContentBuilderString("indices"); + static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); + static final XContentBuilderString ROUTING = new XContentBuilderString("routing"); + static final XContentBuilderString STATE = new XContentBuilderString("state"); + static final XContentBuilderString PRIMARY = new XContentBuilderString("primary"); + static final XContentBuilderString NODE = new XContentBuilderString("node"); + static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node"); + static final XContentBuilderString SIZE = new XContentBuilderString("size"); + static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes"); + static final XContentBuilderString SIZE_TO_UPGRADE = new XContentBuilderString("size_to_upgrade"); + static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT = new XContentBuilderString("size_to_upgrade_ancient"); + static final XContentBuilderString SIZE_TO_UPGRADE_IN_BYTES = new XContentBuilderString("size_to_upgrade_in_bytes"); + static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT_IN_BYTES = new XContentBuilderString("size_to_upgrade_ancient_in_bytes"); + + } +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java new file mode 100644 index 00000000000..9731a983c38 --- /dev/null +++ 
b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeRequest.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + + +import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; + +/** + * + */ +final class ShardUpgradeRequest extends BroadcastShardRequest { + + private UpgradeRequest request = new UpgradeRequest(); + + ShardUpgradeRequest() { + } + + ShardUpgradeRequest(ShardId shardId, UpgradeRequest request) { + super(shardId, request); + this.request = request; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + request.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + request.writeTo(out); + } + + public UpgradeRequest upgradeRequest() { + return this.request; + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResponse.java new file mode 100644 index 00000000000..efbb19142c3 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/ShardUpgradeResponse.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; +import java.text.ParseException; + +/** + * + */ +class ShardUpgradeResponse extends BroadcastShardResponse { + + private org.apache.lucene.util.Version version; + + private boolean primary; + + + ShardUpgradeResponse() { + } + + ShardUpgradeResponse(ShardId shardId, boolean primary, org.apache.lucene.util.Version version) { + super(shardId); + this.primary = primary; + this.version = version; + } + + public org.apache.lucene.util.Version version() { + return this.version; + } + + public boolean primary() { + return primary; + } + + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + primary = in.readBoolean(); + try { + version = org.apache.lucene.util.Version.parse(in.readString()); + } catch (ParseException ex) { + throw new IOException("failed to parse lucene version [" + version + "]", ex); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(primary); + out.writeString(version.toString()); + } + +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java new file mode 100644 index 00000000000..c5dc59ee634 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -0,0 +1,214 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.apache.lucene.util.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.PrimaryMissingActionException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReferenceArray; + +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Maps.newHashMap; +import static com.google.common.collect.Sets.newHashSet; + +/** + * Upgrade index/indices action. + */ +public class TransportUpgradeAction extends TransportBroadcastAction { + + private final IndicesService indicesService; + + private final TransportUpgradeSettingsAction upgradeSettingsAction; + + @Inject + public TransportUpgradeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, + TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, + TransportUpgradeSettingsAction upgradeSettingsAction) { + super(settings, UpgradeAction.NAME, threadPool, clusterService, transportService, actionFilters, + UpgradeRequest.class, ShardUpgradeRequest.class, ThreadPool.Names.OPTIMIZE); + this.indicesService = indicesService; + this.upgradeSettingsAction = upgradeSettingsAction; + } + + @Override + protected UpgradeResponse newResponse(UpgradeRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { + int successfulShards = 0; + int failedShards = 0; + List shardFailures = null; + Map successfulPrimaryShards = newHashMap(); + Map versions = newHashMap(); + for (int i = 0; i < shardsResponses.length(); i++) { + Object shardResponse = shardsResponses.get(i); + if (shardResponse == null) { + // a non active shard, ignore... + } else if (shardResponse instanceof BroadcastShardOperationFailedException) { + failedShards++; + if (shardFailures == null) { + shardFailures = newArrayList(); + } + shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); + } else { + successfulShards++; + ShardUpgradeResponse shardUpgradeResponse = (ShardUpgradeResponse) shardResponse; + String index = shardUpgradeResponse.getIndex(); + if (shardUpgradeResponse.primary()) { + Integer count = successfulPrimaryShards.get(index); + successfulPrimaryShards.put(index, count == null ? 
1 : count + 1); + } + Version version = versions.get(index); + if (version == null || shardUpgradeResponse.version().onOrAfter(version) == false) { + versions.put(index, shardUpgradeResponse.version()); + } + } + } + Map updatedVersions = newHashMap(); + MetaData metaData = clusterState.metaData(); + for (Map.Entry versionEntry : versions.entrySet()) { + String index = versionEntry.getKey(); + Integer primaryCount = successfulPrimaryShards.get(index); + int expectedPrimaryCount = metaData.index(index).getNumberOfShards(); + if (primaryCount == metaData.index(index).getNumberOfShards()) { + updatedVersions.put(index, versionEntry.getValue().toString()); + } else { + logger.warn("Not updating settings for the index [{}] because upgraded of some primary shards failed - expected[{}], received[{}]", index, + expectedPrimaryCount, primaryCount == null ? 0 : primaryCount); + } + } + + return new UpgradeResponse(updatedVersions, shardsResponses.length(), successfulShards, failedShards, shardFailures); + } + + @Override + protected ShardUpgradeRequest newShardRequest(int numShards, ShardRouting shard, UpgradeRequest request) { + return new ShardUpgradeRequest(shard.shardId(), request); + } + + @Override + protected ShardUpgradeResponse newShardResponse() { + return new ShardUpgradeResponse(); + } + + @Override + protected ShardUpgradeResponse shardOperation(ShardUpgradeRequest request) { + IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); + org.apache.lucene.util.Version version = indexShard.upgrade(request.upgradeRequest()); + return new ShardUpgradeResponse(request.shardId(), indexShard.routingEntry().primary(), version); + } + + /** + * The upgrade request works against *all* shards. + */ + @Override + protected GroupShardsIterator shards(ClusterState clusterState, UpgradeRequest request, String[] concreteIndices) { + GroupShardsIterator iterator = clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true); + Set indicesWithMissingPrimaries = indicesWithMissingPrimaries(clusterState, concreteIndices); + if (indicesWithMissingPrimaries.isEmpty()) { + return iterator; + } + // If some primary shards are not available the request should fail. 
+ throw new PrimaryMissingActionException("Cannot upgrade indices because the following indices are missing primary shards " + indicesWithMissingPrimaries); + } + + /** + * Finds all indices that have not all primaries available + */ + private Set indicesWithMissingPrimaries(ClusterState clusterState, String[] concreteIndices) { + Set indices = newHashSet(); + RoutingTable routingTable = clusterState.routingTable(); + for (String index : concreteIndices) { + IndexRoutingTable indexRoutingTable = routingTable.index(index); + if (indexRoutingTable.allPrimaryShardsActive() == false) { + indices.add(index); + } + } + return indices; + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, UpgradeRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, UpgradeRequest request, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); + } + + @Override + protected void doExecute(UpgradeRequest request, final ActionListener listener) { + ActionListener settingsUpdateListener = new ActionListener() { + @Override + public void onResponse(UpgradeResponse upgradeResponse) { + try { + if (upgradeResponse.versions().isEmpty()) { + listener.onResponse(upgradeResponse); + } else { + updateSettings(upgradeResponse, listener); + } + } catch (Throwable t) { + listener.onFailure(t); + } + } + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }; + super.doExecute(request, settingsUpdateListener); + } + + private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener listener) { + UpgradeSettingsRequest upgradeSettingsRequest = new UpgradeSettingsRequest(upgradeResponse.versions()); + upgradeSettingsAction.execute(upgradeSettingsRequest, new ActionListener() { + @Override + public void onResponse(UpgradeSettingsResponse updateSettingsResponse) { + listener.onResponse(upgradeResponse); + } + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }); + } + +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java new file mode 100644 index 00000000000..26c3731697d --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * + */ +public class TransportUpgradeSettingsAction extends TransportMasterNodeAction { + + private final MetaDataUpdateSettingsService updateSettingsService; + + @Inject + public TransportUpgradeSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, + MetaDataUpdateSettingsService updateSettingsService, ActionFilters actionFilters) { + super(settings, UpgradeSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, UpgradeSettingsRequest.class); + this.updateSettingsService = updateSettingsService; + } + + @Override + protected String executor() { + // we go async right away.... + return ThreadPool.Names.SAME; + } + + @Override + protected ClusterBlockException checkBlock(UpgradeSettingsRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected UpgradeSettingsResponse newResponse() { + return new UpgradeSettingsResponse(); + } + + @Override + protected void masterOperation(final UpgradeSettingsRequest request, final ClusterState state, final ActionListener listener) { + UpgradeSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpgradeSettingsClusterStateUpdateRequest() + .ackTimeout(request.timeout()) + .versions(request.versions()) + .masterNodeTimeout(request.masterNodeTimeout()); + + updateSettingsService.upgradeIndexSettings(clusterStateUpdateRequest, new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse response) { + listener.onResponse(new UpgradeSettingsResponse(response.isAcknowledged())); + } + + @Override + public void onFailure(Throwable t) { + logger.debug("failed to upgrade minimum compatibility version settings on indices [{}]", t, request.versions().keySet()); + listener.onFailure(t); + } + }); + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java new file mode 100644 index 00000000000..908a8a0d283 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeAction.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * Upgrade index/indices action. + */ +public class UpgradeAction extends Action { + + public static final UpgradeAction INSTANCE = new UpgradeAction(); + public static final String NAME = "indices:admin/upgrade"; + + private UpgradeAction() { + super(NAME); + } + + @Override + public UpgradeResponse newResponse() { + return new UpgradeResponse(); + } + + @Override + public UpgradeRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new UpgradeRequestBuilder(client, this); + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java new file mode 100644 index 00000000000..af328ce21ad --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequest.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or + * null for the indices. + *

+ * @see org.elasticsearch.client.Requests#upgradeRequest(String...) + * @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest) + * @see UpgradeResponse + */ +public class UpgradeRequest extends BroadcastRequest { + + public static final class Defaults { + public static final boolean UPGRADE_ONLY_ANCIENT_SEGMENTS = false; + } + + private boolean upgradeOnlyAncientSegments = Defaults.UPGRADE_ONLY_ANCIENT_SEGMENTS; + + /** + * Constructs an optimization request over one or more indices. + * + * @param indices The indices to optimize, no indices passed means all indices will be optimized. + */ + public UpgradeRequest(String... indices) { + super(indices); + } + + public UpgradeRequest() { + + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + upgradeOnlyAncientSegments = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(upgradeOnlyAncientSegments); + } + + /** + * Should the upgrade only the ancient (older major version of Lucene) segments? + * Defaults to false. + */ + public boolean upgradeOnlyAncientSegments() { + return upgradeOnlyAncientSegments; + } + + /** + * See {@link #upgradeOnlyAncientSegments()} + */ + public UpgradeRequest upgradeOnlyAncientSegments(boolean upgradeOnlyAncientSegments) { + this.upgradeOnlyAncientSegments = upgradeOnlyAncientSegments; + return this; + } + + @Override + public String toString() { + return "UpgradeRequest{" + + "upgradeOnlyAncientSegments=" + upgradeOnlyAncientSegments + + '}'; + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java new file mode 100644 index 00000000000..adc8ea5510a --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeRequestBuilder.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +/** + * A request to upgrade one or more indices. In order to optimize on all the indices, pass an empty array or + * null for the indices. + */ +public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder { + + public UpgradeRequestBuilder(ElasticsearchClient client, UpgradeAction action) { + super(client, action, new UpgradeRequest()); + } + + /** + * Should the upgrade only the ancient (older major version of Lucene) segments? 
+     */
+    public UpgradeRequestBuilder setUpgradeOnlyAncientSegments(boolean upgradeOnlyAncientSegments) {
+        request.upgradeOnlyAncientSegments(upgradeOnlyAncientSegments);
+        return this;
+    }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
new file mode 100644
index 00000000000..04e377dd75d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.upgrade.post;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+
+/**
+ * A response for the upgrade action.
+ *
+ *
+ */
+public class UpgradeResponse extends BroadcastResponse {
+
+    private Map<String, String> versions;
+
+    UpgradeResponse() {
+
+    }
+
+    UpgradeResponse(Map<String, String> versions, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+        super(totalShards, successfulShards, failedShards, shardFailures);
+        this.versions = versions;
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        int size = in.readVInt();
+        versions = newHashMap();
+        for (int i=0; i<size; i++) {
+            String index = in.readString();
+            String version = in.readString();
+            versions.put(index, version);
+        }
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeVInt(versions.size());
+        for (Map.Entry<String, String> entry : versions.entrySet()) {
+            out.writeString(entry.getKey());
+            out.writeString(entry.getValue());
+        }
+    }
+
+    public Map<String, String> versions() {
+        return versions;
+    }
+}
\ No newline at end of file
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java
new file mode 100644
index 00000000000..5257b50132d
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +/** + */ +public class UpgradeSettingsAction extends Action { + + public static final UpgradeSettingsAction INSTANCE = new UpgradeSettingsAction(); + public static final String NAME = "internal:indices/admin/upgrade"; + + private UpgradeSettingsAction() { + super(NAME); + } + + @Override + public UpgradeSettingsResponse newResponse() { + return new UpgradeSettingsResponse(); + } + + @Override + public UpgradeSettingsRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new UpgradeSettingsRequestBuilder(client, this); + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java new file mode 100644 index 00000000000..7067f2f61ec --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsClusterStateUpdateRequest.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; + +import java.util.Map; + +/** + * Cluster state update request that allows to change minimum compatibility settings for some indices + */ +public class UpgradeSettingsClusterStateUpdateRequest extends ClusterStateUpdateRequest { + + private Map versions; + + public UpgradeSettingsClusterStateUpdateRequest() { + + } + + /** + * Returns the index to version map for indices that should be updated + */ + public Map versions() { + return versions; + } + + /** + * Sets the index to version map for indices that should be updated + */ + public UpgradeSettingsClusterStateUpdateRequest versions(Map versions) { + this.versions = versions; + return this; + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java new file mode 100644 index 00000000000..b191fa53539 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.indices.upgrade.post;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.support.master.AcknowledgedRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * Request to update the minimum compatible version settings of one or more indices
+ */
+public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsRequest> {
+
+
+    private Map<String, String> versions;
+
+    UpgradeSettingsRequest() {
+    }
+
+    /**
+     * Constructs a new request to update minimum compatible version settings for one or more indices
+     */
+    public UpgradeSettingsRequest(Map<String, String> versions) {
+        this.versions = versions;
+    }
+
+
+    @Override
+    public ActionRequestValidationException validate() {
+        ActionRequestValidationException validationException = null;
+        if (versions.isEmpty()) {
+            validationException = addValidationError("no indices to update", validationException);
+        }
+        return validationException;
+    }
+
+
+    Map<String, String> versions() {
+        return versions;
+    }
+
+    /**
+     * Sets the index versions to be updated
+     */
+    public UpgradeSettingsRequest versions(Map<String, String> versions) {
+        this.versions = versions;
+        return this;
+    }
+
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        int size = in.readVInt();
+        versions = newHashMap();
+        for (int i=0; i<size; i++) {
+            String index = in.readString();
+            String version = in.readString();
+            versions.put(index, version);
+        }
+        readTimeout(in);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeVInt(versions.size());
+        for (Map.Entry<String, String> entry : versions.entrySet()) {
+            out.writeString(entry.getKey());
+            out.writeString(entry.getValue());
+        }
+        writeTimeout(out);
+    }
+}
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java
new file mode 100644
index 00000000000..74c42a5fe80
--- /dev/null
+++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +import java.util.Map; + +/** + * Builder for an update index settings request + */ +public class UpgradeSettingsRequestBuilder extends AcknowledgedRequestBuilder { + + public UpgradeSettingsRequestBuilder(ElasticsearchClient client, UpgradeSettingsAction action) { + super(client, action, new UpgradeSettingsRequest()); + } + + /** + * Sets the index versions to be updated + */ + public UpgradeSettingsRequestBuilder setVersions(Map versions) { + request.versions(versions); + return this; + } +} diff --git a/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsResponse.java new file mode 100644 index 00000000000..0918af6f418 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsResponse.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.upgrade.post; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * A response for an update index settings action + */ +public class UpgradeSettingsResponse extends AcknowledgedResponse { + + UpgradeSettingsResponse() { + } + + UpgradeSettingsResponse(boolean acknowledged) { + super(acknowledged); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + readAcknowledged(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + writeAcknowledged(out); + } +} diff --git a/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index c54aaece7f4..ae16d7b36d2 100644 --- a/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -96,6 +96,12 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResp import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequestBuilder; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequestBuilder; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; @@ -406,6 +412,53 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ OptimizeRequestBuilder prepareOptimize(String... indices); + + /** + * Explicitly upgrade one or more indices + * + * @param request The upgrade request + * @return A result future + * @see org.elasticsearch.client.Requests#upgradeRequest(String...) + */ + ActionFuture upgrade(UpgradeRequest request); + + /** + * Explicitly upgrade one or more indices + * + * @param request The upgrade request + * @param listener A listener to be notified with a result + * @see org.elasticsearch.client.Requests#upgradeRequest(String...) + */ + void upgrade(UpgradeRequest request, ActionListener listener); + + /** + * Explicitly upgrade one or more indices + */ + UpgradeStatusRequestBuilder prepareUpgradeStatus(String... indices); + + /** + * Check upgrade status of one or more indices + * + * @param request The upgrade request + * @return A result future + * @see org.elasticsearch.client.Requests#upgradeRequest(String...) + */ + ActionFuture upgradeStatus(UpgradeStatusRequest request); + + /** + * Check upgrade status of one or more indices + * + * @param request The upgrade request + * @param listener A listener to be notified with a result + * @see org.elasticsearch.client.Requests#upgradeRequest(String...) 
+ */ + void upgradeStatus(UpgradeStatusRequest request, ActionListener listener); + + /** + * Check upgrade status of one or more indices + */ + UpgradeRequestBuilder prepareUpgrade(String... indices); + /** * Get the complete mappings of one or more types */ diff --git a/src/main/java/org/elasticsearch/client/Requests.java b/src/main/java/org/elasticsearch/client/Requests.java index bc2a778f570..8a70c18b374 100644 --- a/src/main/java/org/elasticsearch/client/Requests.java +++ b/src/main/java/org/elasticsearch/client/Requests.java @@ -49,6 +49,7 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -291,6 +292,17 @@ public class Requests { return new OptimizeRequest(indices); } + /** + * Creates an upgrade request. + * + * @param indices The indices to upgrade. Use null or _all to execute against all indices + * @return The upgrade request + * @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest) + */ + public static UpgradeRequest upgradeRequest(String... indices) { + return new UpgradeRequest(indices); + } + /** * Creates a clean indices cache request. * diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 75e8ada560b..625a469470d 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -196,6 +196,14 @@ import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateActio import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusAction; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequestBuilder; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequestBuilder; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; @@ -1415,6 +1423,36 @@ public abstract class AbstractClient extends AbstractComponent implements Client return new OptimizeRequestBuilder(this, OptimizeAction.INSTANCE).setIndices(indices); } + @Override + public ActionFuture upgrade(final UpgradeRequest request) { + return execute(UpgradeAction.INSTANCE, request); + } + + @Override + public void upgrade(final UpgradeRequest 
request, final ActionListener listener) { + execute(UpgradeAction.INSTANCE, request, listener); + } + + @Override + public UpgradeRequestBuilder prepareUpgrade(String... indices) { + return new UpgradeRequestBuilder(this, UpgradeAction.INSTANCE).setIndices(indices); + } + + + @Override + public ActionFuture upgradeStatus(final UpgradeStatusRequest request) { + return execute(UpgradeStatusAction.INSTANCE, request); + } + + @Override + public void upgradeStatus(final UpgradeStatusRequest request, final ActionListener listener) { + execute(UpgradeStatusAction.INSTANCE, request, listener); + } + + @Override + public UpgradeStatusRequestBuilder prepareUpgradeStatus(String... indices) { + return new UpgradeStatusRequestBuilder(this, UpgradeStatusAction.INSTANCE).setIndices(indices); + } @Override public ActionFuture refresh(final RefreshRequest request) { return execute(RefreshAction.INSTANCE, request); diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index c9c7bbabb00..07703bca591 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -49,6 +49,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; +import java.text.ParseException; import java.util.EnumSet; import java.util.HashMap; import java.util.Locale; @@ -159,6 +160,7 @@ public class IndexMetaData implements Diffable { public static final String SETTING_BLOCKS_WRITE = "index.blocks.write"; public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata"; public static final String SETTING_VERSION_CREATED = "index.version.created"; + public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded"; public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible"; public static final String SETTING_CREATION_DATE = "index.creation_date"; public static final String SETTING_UUID = "index.uuid"; @@ -192,7 +194,8 @@ public class IndexMetaData implements Diffable { private final DiscoveryNodeFilters excludeFilters; private final Version indexCreatedVersion; - private final Version indexMinimumCompatibleVersion; + private final Version indexUpgradedVersion; + private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion; private final HashFunction routingHashFunction; private final boolean useTypeForRouting; @@ -227,7 +230,17 @@ public class IndexMetaData implements Diffable { excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); } indexCreatedVersion = Version.indexCreated(settings); - indexMinimumCompatibleVersion = settings.getAsVersion(SETTING_VERSION_MINIMUM_COMPATIBLE, indexCreatedVersion); + indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion); + String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE); + if (stringLuceneVersion != null) { + try { + this.minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion); + } catch (ParseException ex) { + throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex); + } + } else { + this.minimumCompatibleLuceneVersion = null; + } final Class hashFunctionClass = settings.getAsClass(SETTING_LEGACY_ROUTING_HASH_FUNCTION, null); if 
(hashFunctionClass == null) { routingHashFunction = MURMUR3_HASH_FUNCTION; @@ -280,8 +293,6 @@ public class IndexMetaData implements Diffable { /** * Return the {@link Version} on which this index has been created. This * information is typically useful for backward compatibility. - * - * Returns null if the index was created before 0.19.0.RC1. */ public Version creationVersion() { return indexCreatedVersion; @@ -292,17 +303,22 @@ public class IndexMetaData implements Diffable { } /** - * Return the {@link Version} of that created the oldest segment in the index. - * - * If the index was created before v1.6 and didn't go through upgrade API the creation verion is returned. - * Returns null if the index was created before 0.19.0.RC1. + * Return the {@link Version} on which this index has been upgraded. This + * information is typically useful for backward compatibility. */ - public Version minimumCompatibleVersion() { - return indexMinimumCompatibleVersion; + public Version upgradeVersion() { + return indexUpgradedVersion; } - public Version getMinimumCompatibleVersion() { - return minimumCompatibleVersion(); + public Version getUpgradeVersion() { + return upgradeVersion(); + } + + /** + * Return the {@link org.apache.lucene.util.Version} of the oldest lucene segment in the index + */ + public org.apache.lucene.util.Version getMinimumCompatibleVersion() { + return minimumCompatibleLuceneVersion; } /** diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 5265cfabd5d..ea9f7f7a611 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.settings.Settings; /** * This service is responsible for upgrading legacy index metadata to the current version - * + *

* Every time an existing index is introduced into cluster this service should be used * to upgrade the existing index metadata to the latest version of the cluster. It typically * occurs during cluster upgrade, when dangling indices are imported into the cluster or indices @@ -64,7 +64,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { pre20HashFunction = DjbHashFunction.class; } pre20UseType = settings.getAsBoolean(DEPRECATED_SETTING_ROUTING_USE_TYPE, null); - if (hasCustomPre20HashFunction|| pre20UseType != null) { + if (hasCustomPre20HashFunction || pre20UseType != null) { logger.warn("Settings [{}] and [{}] are deprecated. Index settings from your old indices have been updated to record the fact that they " + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE); } @@ -72,7 +72,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { /** * Checks that the index can be upgraded to the current version of the master node. - * + *

* If the index does need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index cannot be * updated the method throws an exception. */ @@ -101,8 +101,16 @@ public class MetaDataIndexUpgradeService extends AbstractComponent { * Returns true if this index can be supported by the current version of elasticsearch */ private static boolean isSupportedVersion(IndexMetaData indexMetaData) { - return indexMetaData.minimumCompatibleVersion() != null && - indexMetaData.minimumCompatibleVersion().luceneVersion.onOrAfter(Version.V_0_90_0_Beta1.luceneVersion); + if (indexMetaData.creationVersion().onOrAfter(Version.V_0_90_0_Beta1)) { + // The index was created with elasticsearch that was using Lucene 4.0 + return true; + } + if (indexMetaData.getMinimumCompatibleVersion() != null && + indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_4_0_0)) { + //The index was upgraded we can work with it + return true; + } + return false; } /** diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 4b5d793356d..2f40335116e 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -20,8 +20,10 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.Sets; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; @@ -40,6 +42,8 @@ import org.elasticsearch.index.settings.IndexDynamicSettings; import java.util.*; +import static org.elasticsearch.common.settings.Settings.settingsBuilder; + /** * Service responsible for submitting update index settings requests */ @@ -307,4 +311,37 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements } }); } + + public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener listener) { + + + clusterService.submitStateUpdateTask("update-index-compatibility-versions", Priority.URGENT, new AckedClusterStateUpdateTask(request, listener) { + + @Override + protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { + return new ClusterStateUpdateResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) { + MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData()); + for (Map.Entry entry : request.versions().entrySet()) { + String index = entry.getKey(); + IndexMetaData indexMetaData = metaDataBuilder.get(index); + if (indexMetaData != null) { + if (Version.CURRENT.equals(indexMetaData.creationVersion()) == false) { + // No reason to pollute the settings, we didn't really upgrade anything + metaDataBuilder.put(IndexMetaData.builder(indexMetaData) + .settings(settingsBuilder().put(indexMetaData.settings()) + .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue()) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT) + ) + ); + } + } + } + return 
ClusterState.builder(currentState).metaData(metaDataBuilder).build(); + } + }); + } } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index acac1c07794..e6222003651 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -33,6 +33,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -717,8 +718,38 @@ public class IndexShard extends AbstractIndexShardComponent { if (logger.isTraceEnabled()) { logger.trace("optimize with {}", optimize); } - engine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), - optimize.upgrade(), optimize.upgradeOnlyAncientSegments()); + engine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), false, false); + } + + /** + * Upgrades the shard to the current version of Lucene and returns the minimum segment version + */ + public org.apache.lucene.util.Version upgrade(UpgradeRequest upgrade) { + verifyStarted(); + if (logger.isTraceEnabled()) { + logger.trace("upgrade with {}", upgrade); + } + org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion(); + // we just want to upgrade the segments, not actually optimize to a single segment + engine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable + Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment + false, true, upgrade.upgradeOnlyAncientSegments()); + org.apache.lucene.util.Version version = minimumCompatibleVersion(); + if (logger.isTraceEnabled()) { + logger.trace("upgraded segment {} from version {} to version {}", previousVersion, version); + } + + return version; + } + + public org.apache.lucene.util.Version minimumCompatibleVersion() { + org.apache.lucene.util.Version luceneVersion = Version.LUCENE_3_EMULATION_VERSION; + for(Segment segment : engine().segments(false)) { + if (luceneVersion.onOrAfter(segment.getVersion())) { + luceneVersion = segment.getVersion(); + } + } + return luceneVersion; } public SnapshotIndexCommit snapshotIndex(boolean flushFirst) throws EngineException { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java index 6ea428bc31a..8c1b1c0458a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java @@ -19,17 +19,14 @@ package org.elasticsearch.rest.action.admin.indices.upgrade; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; -import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse; -import org.elasticsearch.action.admin.indices.segments.*; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; +import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; 
+import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.index.engine.Segment; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -37,7 +34,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.support.RestBuilderListener; -import java.io.IOException; + +import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -66,72 +64,36 @@ public class RestUpgradeAction extends BaseRestHandler { } } - void handleGet(RestRequest request, RestChannel channel, Client client) { - IndicesSegmentsRequest segsReq = new IndicesSegmentsRequest(Strings.splitStringByCommaToArray(request.param("index"))); - client.admin().indices().segments(segsReq, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(IndicesSegmentResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - - // TODO: getIndices().values() is what IndicesSegmentsResponse uses, but this will produce different orders with jdk8? - for (IndexSegments indexSegments : response.getIndices().values()) { - builder.startObject(indexSegments.getIndex()); - buildUpgradeStatus(indexSegments, builder); - builder.endObject(); - } - - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + void handleGet(final RestRequest request, RestChannel channel, Client client) { + client.admin().indices().prepareUpgradeStatus(Strings.splitStringByCommaToArray(request.param("index"))) + .execute(new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + response.toXContent(builder, request); + builder.endObject(); + return new BytesRestResponse(OK, builder); + } + }); } - + void handlePost(final RestRequest request, RestChannel channel, Client client) { - OptimizeRequest optimizeReq = new OptimizeRequest(Strings.splitStringByCommaToArray(request.param("index"))); - optimizeReq.flush(true); - optimizeReq.upgrade(true); - optimizeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); - optimizeReq.maxNumSegments(Integer.MAX_VALUE); // we just want to upgrade the segments, not actually optimize to a single segment - client.admin().indices().optimize(optimizeReq, new RestBuilderListener(channel) { + UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index"))); + upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); + client.admin().indices().upgrade(upgradeReq, new RestBuilderListener(channel) { @Override - public RestResponse buildResponse(OptimizeResponse response, XContentBuilder builder) throws Exception { + public RestResponse buildResponse(UpgradeResponse response, XContentBuilder builder) throws Exception { builder.startObject(); buildBroadcastShardsHeader(builder, request, response); + 
builder.startArray("upgraded_indices"); + for (Map.Entry entry : response.versions().entrySet()) { + builder.field(entry.getKey(), entry.getValue(), XContentBuilder.FieldCaseConversion.NONE); + } + builder.endObject(); builder.endObject(); return new BytesRestResponse(OK, builder); } }); } - - void buildUpgradeStatus(IndexSegments indexSegments, XContentBuilder builder) throws IOException { - long total_bytes = 0; - long to_upgrade_bytes = 0; - long to_upgrade_bytes_ancient = 0; - for (IndexShardSegments shard : indexSegments) { - for (ShardSegments segs : shard.getShards()) { - for (Segment seg : segs.getSegments()) { - total_bytes += seg.sizeInBytes; - if (seg.version.major != Version.CURRENT.luceneVersion.major) { - to_upgrade_bytes_ancient += seg.sizeInBytes; - to_upgrade_bytes += seg.sizeInBytes; - } else if (seg.version.minor != Version.CURRENT.luceneVersion.minor) { - // TODO: this comparison is bogus! it would cause us to upgrade even with the same format - // instead, we should check if the codec has changed - to_upgrade_bytes += seg.sizeInBytes; - } - } - } - } - builder.byteSizeField(SIZE_IN_BYTES, SIZE, total_bytes); - builder.byteSizeField(SIZE_TO_UPGRADE_IN_BYTES, SIZE_TO_UPGRADE, to_upgrade_bytes); - builder.byteSizeField(SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, SIZE_TO_UPGRADE_ANCIENT, to_upgrade_bytes_ancient); - } - - static final XContentBuilderString SIZE = new XContentBuilderString("size"); - static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes"); - static final XContentBuilderString SIZE_TO_UPGRADE = new XContentBuilderString("size_to_upgrade"); - static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT = new XContentBuilderString("size_to_upgrade_ancient"); - static final XContentBuilderString SIZE_TO_UPGRADE_IN_BYTES = new XContentBuilderString("size_to_upgrade_in_bytes"); - static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT_IN_BYTES = new XContentBuilderString("size_to_upgrade_ancient_in_bytes"); } diff --git a/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 9aabab8580b..5f10e5217a6 100644 --- a/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -106,6 +106,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis .addAll(UNMODIFIABLE_SETTINGS) .add(SETTING_NUMBER_OF_REPLICAS) .add(SETTING_AUTO_EXPAND_REPLICAS) + .add(SETTING_VERSION_UPGRADED) + .add(SETTING_VERSION_MINIMUM_COMPATIBLE) .build(); private final ClusterService clusterService; diff --git a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java index 32fef09363e..ac59615f902 100644 --- a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java @@ -54,7 +54,6 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.index.merge.NoMergePolicyProvider; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.Before; @@ -67,8 +66,9 @@ import java.nio.file.attribute.BasicFileAttributes; import java.util.*; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.junit.matchers.JUnitMatchers.containsString; // needs at least 2 nodes since it bumps replicas to 1 @ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0) @@ -110,7 +110,6 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio @Override public Settings nodeSettings(int ord) { return Settings.builder() - .put(Node.HTTP_ENABLED, true) // for _upgrade .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class) // disable merging so no segments will be upgraded .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 30) // increase recovery speed for small files .build(); @@ -438,13 +437,11 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio } void assertUpgradeWorks(String indexName, boolean alreadyLatest) throws Exception { - HttpRequestBuilder httpClient = httpClient(); - if (alreadyLatest == false) { - UpgradeTest.assertNotUpgraded(httpClient, indexName); + UpgradeTest.assertNotUpgraded(client(), indexName); } - UpgradeTest.runUpgrade(httpClient, indexName); - UpgradeTest.assertUpgraded(httpClient, indexName); + assertNoFailures(client().admin().indices().prepareUpgrade(indexName).get()); + UpgradeTest.assertUpgraded(client(), indexName); } } diff --git a/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java b/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java index 10416d44868..ce2d54bb30f 100644 --- a/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java +++ b/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java @@ -20,20 +20,22 @@ package org.elasticsearch.rest.action.admin.indices.upgrade; import org.elasticsearch.bwcompat.StaticIndexBackwardCompatibilityTest; -import org.elasticsearch.node.Node; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; public class UpgradeReallyOldIndexTest extends StaticIndexBackwardCompatibilityTest { public void testUpgrade_0_90_6() throws Exception { String indexName = "index-0.90.6"; - loadIndex(indexName, Node.HTTP_ENABLED, true); - - UpgradeTest.assertNotUpgraded(httpClient(), indexName); - assertTrue(UpgradeTest.hasAncientSegments(httpClient(), indexName)); - UpgradeTest.runUpgrade(httpClient(), indexName, "wait_for_completion", "true", "only_ancient_segments", "true"); - assertFalse(UpgradeTest.hasAncientSegments(httpClient(), "index-0.90.6")); + + loadIndex(indexName); + UpgradeTest.assertNotUpgraded(client(), indexName); + assertTrue(UpgradeTest.hasAncientSegments(client(), indexName)); + assertNoFailures(client().admin().indices().prepareUpgrade(indexName).setUpgradeOnlyAncientSegments(true).get()); + + assertFalse(UpgradeTest.hasAncientSegments(client(), "index-0.90.6")); // This index has only ancient segments, so it should now be fully upgraded: - UpgradeTest.assertUpgraded(httpClient(), indexName); + UpgradeTest.assertUpgraded(client(), indexName); } } diff --git a/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java 
b/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java index 3ea5747899c..ddf4bbe2057 100644 --- a/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java +++ b/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java @@ -26,26 +26,26 @@ import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.ShardSegments; +import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Segment; -import org.elasticsearch.node.Node; import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; -import org.elasticsearch.test.rest.client.http.HttpResponse; -import org.elasticsearch.test.rest.json.JsonPath; import org.junit.BeforeClass; import java.util.ArrayList; +import java.util.Collection; import java.util.List; -import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) // test scope since we set cluster wide settings public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { @@ -134,20 +134,20 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { logger.info("--> Nodes upgrade complete"); logSegmentsState(); - assertNotUpgraded(httpClient(), null); + assertNotUpgraded(client(), null); final String indexToUpgrade = "test" + randomInt(numIndexes - 1); // This test fires up another node running an older version of ES, but because wire protocol changes across major ES versions, it // means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not): - assertFalse(hasAncientSegments(httpClient(), indexToUpgrade)); + assertFalse(hasAncientSegments(client(), indexToUpgrade)); logger.info("--> Running upgrade on index " + indexToUpgrade); - runUpgrade(httpClient(), indexToUpgrade); + assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get()); awaitBusy(new Predicate() { @Override public boolean apply(Object o) { try { - return isUpgraded(httpClient(), indexToUpgrade); + return isUpgraded(client(), indexToUpgrade); } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); } @@ -156,48 +156,40 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { logger.info("--> Single index upgrade complete"); logger.info("--> Running upgrade on the rest of the indexes"); - runUpgrade(httpClient(), null); + 
assertNoFailures(client().admin().indices().prepareUpgrade().get()); logSegmentsState(); logger.info("--> Full upgrade complete"); - assertUpgraded(httpClient(), null); + assertUpgraded(client(), null); } - static String upgradePath(String index) { - String path = "/_upgrade"; - if (index != null) { - path = "/" + index + path; - } - return path; - } - - public static void assertNotUpgraded(HttpRequestBuilder httpClient, String index) throws Exception { - for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) { - assertTrue("index " + status.indexName + " should not be zero sized", status.totalBytes != 0); + public static void assertNotUpgraded(Client client, String index) throws Exception { + for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { + assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); // TODO: it would be better for this to be strictly greater, but sometimes an extra flush // mysteriously happens after the second round of docs are indexed - assertTrue("index " + status.indexName + " should have recovered some segments from transaction log", - status.totalBytes >= status.toUpgradeBytes); - assertTrue("index " + status.indexName + " should need upgrading", status.toUpgradeBytes != 0); + assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log", + status.getTotalBytes() >= status.getToUpgradeBytes()); + assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0); } } - public static void assertNoAncientSegments(HttpRequestBuilder httpClient, String index) throws Exception { - for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) { - assertTrue("index " + status.indexName + " should not be zero sized", status.totalBytes != 0); + public static void assertNoAncientSegments(Client client, String index) throws Exception { + for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { + assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); // TODO: it would be better for this to be strictly greater, but sometimes an extra flush // mysteriously happens after the second round of docs are indexed - assertTrue("index " + status.indexName + " should not have any ancient segments", - status.toUpgradeBytesAncient == 0); - assertTrue("index " + status.indexName + " should have recovered some segments from transaction log", - status.totalBytes >= status.toUpgradeBytes); - assertTrue("index " + status.indexName + " should need upgrading", status.toUpgradeBytes != 0); + assertTrue("index " + status.getIndex() + " should not have any ancient segments", + status.getToUpgradeBytesAncient() == 0); + assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log", + status.getTotalBytes() >= status.getToUpgradeBytes()); + assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0); } } /** Returns true if there are any ancient segments. 
*/ - public static boolean hasAncientSegments(HttpRequestBuilder httpClient, String index) throws Exception { - for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) { - if (status.toUpgradeBytesAncient != 0) { + public static boolean hasAncientSegments(Client client, String index) throws Exception { + for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { + if (status.getToUpgradeBytesAncient() != 0) { return true; } } @@ -205,20 +197,20 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { } /** Returns true if there are any old but not ancient segments. */ - public static boolean hasOldButNotAncientSegments(HttpRequestBuilder httpClient, String index) throws Exception { - for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) { - if (status.toUpgradeBytes > status.toUpgradeBytesAncient) { + public static boolean hasOldButNotAncientSegments(Client client, String index) throws Exception { + for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { + if (status.getToUpgradeBytes() > status.getToUpgradeBytesAncient()) { return true; } } return false; } - public static void assertUpgraded(HttpRequestBuilder httpClient, String index) throws Exception { - for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) { - assertTrue("index " + status.indexName + " should not be zero sized", status.totalBytes != 0); - assertEquals("index " + status.indexName + " should be upgraded", - 0, status.toUpgradeBytes); + public static void assertUpgraded(Client client, String index) throws Exception { + for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { + assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); + assertEquals("index " + status.getIndex() + " should be upgraded", + 0, status.getToUpgradeBytes()); } // double check using the segments api that all segments are actually upgraded @@ -242,12 +234,12 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { } } - static boolean isUpgraded(HttpRequestBuilder httpClient, String index) throws Exception { + static boolean isUpgraded(Client client, String index) throws Exception { ESLogger logger = Loggers.getLogger(UpgradeTest.class); int toUpgrade = 0; - for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) { - logger.info("Index: " + status.indexName + ", total: " + status.totalBytes + ", toUpgrade: " + status.toUpgradeBytes); - toUpgrade += status.toUpgradeBytes; + for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { + logger.info("Index: " + status.getIndex() + ", total: " + status.getTotalBytes() + ", toUpgrade: " + status.getToUpgradeBytes()); + toUpgrade += status.getToUpgradeBytes(); } return toUpgrade == 0; } @@ -257,7 +249,7 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { public final int totalBytes; public final int toUpgradeBytes; public final int toUpgradeBytesAncient; - + public UpgradeStatus(String indexName, int totalBytes, int toUpgradeBytes, int toUpgradeBytesAncient) { this.indexName = indexName; this.totalBytes = totalBytes; @@ -266,49 +258,11 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { assert toUpgradeBytesAncient <= toUpgradeBytes; } } - - public static void runUpgrade(HttpRequestBuilder httpClient, String index, String... 
params) throws Exception { - assert params.length % 2 == 0; - HttpRequestBuilder builder = httpClient.method("POST").path(upgradePath(index)); - for (int i = 0; i < params.length; i += 2) { - builder.addParam(params[i], params[i + 1]); - } - HttpResponse rsp = builder.execute(); - assertNotNull(rsp); - assertEquals(200, rsp.getStatusCode()); - } @SuppressWarnings("unchecked") - static List getUpgradeStatus(HttpRequestBuilder httpClient, String path) throws Exception { - HttpResponse rsp = httpClient.method("GET").path(path).execute(); - Map data = validateAndParse(rsp); - List ret = new ArrayList<>(); - for (String index : data.keySet()) { - Map status = (Map)data.get(index); - assertTrue("missing key size_in_bytes for index " + index, status.containsKey("size_in_bytes")); - Object totalBytes = status.get("size_in_bytes"); - assertTrue("size_in_bytes for index " + index + " is not an integer", totalBytes instanceof Integer); - assertTrue("missing key size_to_upgrade_in_bytes for index " + index, status.containsKey("size_to_upgrade_in_bytes")); - Object toUpgradeBytes = status.get("size_to_upgrade_in_bytes"); - assertTrue("size_to_upgrade_in_bytes for index " + index + " is not an integer", toUpgradeBytes instanceof Integer); - Object toUpgradeBytesAncient = status.get("size_to_upgrade_ancient_in_bytes"); - assertTrue("size_to_upgrade_ancient_in_bytes for index " + index + " is not an integer", toUpgradeBytesAncient instanceof Integer); - ret.add(new UpgradeStatus(index, (Integer) totalBytes, (Integer) toUpgradeBytes, (Integer) toUpgradeBytesAncient)); - } - return ret; - } - - @SuppressWarnings("unchecked") - static Map validateAndParse(HttpResponse rsp) throws Exception { - assertNotNull(rsp); - assertEquals(200, rsp.getStatusCode()); - assertTrue(rsp.hasBody()); - return (Map)new JsonPath(rsp.getBody()).evaluate(""); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(Node.HTTP_ENABLED, true).build(); + static Collection getUpgradeStatus(Client client, String... indices) throws Exception { + UpgradeStatusResponse upgradeStatusResponse = client.admin().indices().prepareUpgradeStatus(indices).get(); + assertNoFailures(upgradeStatusResponse); + return upgradeStatusResponse.getIndices().values(); } }
From f732900111ec310900b3f8d92ff76c77c19e2898 Mon Sep 17 00:00:00 2001 From: jaymode Date: Thu, 28 May 2015 11:56:09 -0400 Subject: [PATCH 12/19] Export hostname as environment variable for plugin manager In #9474, we exported the hostname in the bin/elasticsearch scripts so that it could be used as a variable in the elasticsearch.yml file but did not do the same for the plugin manager. When using the hostname variable in elasticsearch.yml and trying to use the plugin manager, initialization will fail because the property cannot be resolved. This change will allow the hostname to be resolved in the same manner as the service scripts.
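To illustrate the failure mode only (the actual fix below is in the bin/plugin shell and batch launchers, not in Java), here is a minimal, self-contained sketch of ${VAR}-style placeholder resolution against the process environment. The class, method, and error message (PlaceholderSketch, resolve, "could not resolve placeholder") are invented for this example and are not Elasticsearch's settings code; the sketch just shows why a config line such as node.name: ${HOSTNAME} aborts startup when the launcher never exported HOSTNAME.

    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    /**
     * Illustration only: resolve ${VAR} placeholders from the process environment,
     * roughly the step a config loader must complete before it can use a value.
     */
    public class PlaceholderSketch {
        private static final Pattern PLACEHOLDER = Pattern.compile("\\$\\{([^}]+)}");

        static String resolve(String value, Map<String, String> env) {
            Matcher m = PLACEHOLDER.matcher(value);
            StringBuffer sb = new StringBuffer();
            while (m.find()) {
                String replacement = env.get(m.group(1));
                if (replacement == null) {
                    // This is the situation the patch fixes: HOSTNAME was never exported
                    // by bin/plugin, so initialization fails at this point.
                    throw new IllegalStateException("could not resolve placeholder: " + m.group(1));
                }
                m.appendReplacement(sb, Matcher.quoteReplacement(replacement));
            }
            m.appendTail(sb);
            return sb.toString();
        }

        public static void main(String[] args) {
            // Succeeds only if the launcher exported HOSTNAME before starting the JVM.
            System.out.println(resolve("node.name: ${HOSTNAME}", System.getenv()));
        }
    }

Exporting the variable in the launcher, as the diff below does, is the simplest way to make the same configuration file work for both the node scripts and the plugin manager.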
Closes #10902 --- bin/plugin | 2 ++ bin/plugin.bat | 2 ++ 2 files changed, 4 insertions(+) diff --git a/bin/plugin b/bin/plugin index 80d3e7d2906..c1b5a777042 100755 --- a/bin/plugin +++ b/bin/plugin @@ -103,4 +103,6 @@ if [ -e "$CONF_FILE" ]; then esac fi +export HOSTNAME=`hostname -s` + exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home="$ES_HOME" $properties -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginManager $args diff --git a/bin/plugin.bat b/bin/plugin.bat index 462c2d78a07..1addc161323 100644 --- a/bin/plugin.bat +++ b/bin/plugin.bat @@ -9,6 +9,8 @@ for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI TITLE Elasticsearch Plugin Manager ${project.version} +SET HOSTNAME=%COMPUTERNAME% + "%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% -Xmx64m -Xms16m -Des.path.home="%ES_HOME%" -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginManager" %* goto finally From d955461f580f0874863b1a97ddcaddbd96cbd6c6 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 28 May 2015 07:13:32 -1000 Subject: [PATCH 13/19] Tests: fix NPE in UpgradeTest --- .../rest/action/admin/indices/upgrade/UpgradeTest.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java b/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java index ddf4bbe2057..3b9f8c19b5a 100644 --- a/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java +++ b/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java @@ -134,7 +134,7 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { logger.info("--> Nodes upgrade complete"); logSegmentsState(); - assertNotUpgraded(client(), null); + assertNotUpgraded(client()); final String indexToUpgrade = "test" + randomInt(numIndexes - 1); // This test fires up another node running an older version of ES, but because wire protocol changes across major ES versions, it @@ -159,10 +159,10 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { assertNoFailures(client().admin().indices().prepareUpgrade().get()); logSegmentsState(); logger.info("--> Full upgrade complete"); - assertUpgraded(client(), null); + assertUpgraded(client()); } - public static void assertNotUpgraded(Client client, String index) throws Exception { + public static void assertNotUpgraded(Client client, String... index) throws Exception { for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); // TODO: it would be better for this to be strictly greater, but sometimes an extra flush @@ -173,7 +173,7 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { } } - public static void assertNoAncientSegments(Client client, String index) throws Exception { + public static void assertNoAncientSegments(Client client, String... 
index) throws Exception { for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); // TODO: it would be better for this to be strictly greater, but sometimes an extra flush @@ -206,7 +206,7 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest { return false; } - public static void assertUpgraded(Client client, String index) throws Exception { + public static void assertUpgraded(Client client, String... index) throws Exception { for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) { assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0); assertEquals("index " + status.getIndex() + " should be upgraded", From 3db9caf7a1647d3179e0fa0665d1bc19f3b9a827 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 28 May 2015 08:54:38 -1000 Subject: [PATCH 14/19] Tests: Increase timeout waiting for snapshot to complete in batchingShardUpdateTaskTest When this test picks a large number of shards, the snapshot doesn't always manage to complete in 10 seconds. --- .../snapshots/SharedClusterSnapshotRestoreTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index d4651e87174..1e8d45d3699 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -1907,7 +1907,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { } assertThat(count, equalTo(expectedCount)); } - }); + }, 1, TimeUnit.MINUTES); } /** From 6980286ba46bb2f9e74494609c29c2ecbab62331 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 28 May 2015 09:19:13 -1000 Subject: [PATCH 15/19] Core: fix upgrade response serialization --- rest-api-spec/test/indices.upgrade/10_basic.yaml | 16 ++++++++++++++++ .../admin/indices/upgrade/RestUpgradeAction.java | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 rest-api-spec/test/indices.upgrade/10_basic.yaml diff --git a/rest-api-spec/test/indices.upgrade/10_basic.yaml b/rest-api-spec/test/indices.upgrade/10_basic.yaml new file mode 100644 index 00000000000..f4844839c41 --- /dev/null +++ b/rest-api-spec/test/indices.upgrade/10_basic.yaml @@ -0,0 +1,16 @@ +--- +"Basic test for upgrade indices": + + - do: + indices.create: + index: test_index + + - do: + cluster.health: + wait_for_status: yellow + + - do: + indices.upgrade: + index: test_index + + - match: {upgraded_indices.test_index: '/(\d\.)+\d/'} diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java index 8c1b1c0458a..a1c9c0b3ed6 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/upgrade/RestUpgradeAction.java @@ -85,7 +85,7 @@ public class RestUpgradeAction extends BaseRestHandler { public RestResponse buildResponse(UpgradeResponse response, XContentBuilder builder) throws Exception { builder.startObject(); buildBroadcastShardsHeader(builder, request, response); - builder.startArray("upgraded_indices"); + builder.startObject("upgraded_indices"); for 
(Map.Entry entry : response.versions().entrySet()) { builder.field(entry.getKey(), entry.getValue(), XContentBuilder.FieldCaseConversion.NONE); }
From 790baed7551ba005732d530b1a6ccaad253ebb4c Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Thu, 28 May 2015 10:00:36 -0600 Subject: [PATCH 16/19] Fall back to reading SegmentInfos from Store if reading from commit fails In the event that reading from the latest commit fails, we should fall back to reading from the `Store` using the traditional `Directory.listAll()`. Related to #11361 --- .../java/org/elasticsearch/index/engine/Engine.java | 10 +++++++++- .../org/elasticsearch/index/engine/InternalEngine.java | 2 +- .../org/elasticsearch/index/engine/ShadowEngine.java | 4 ++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java index e2811bb0698..41adf2b3b45 100644 --- a/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -319,11 +319,19 @@ public abstract class Engine implements Closeable { /** * Read the last segments info from the commit pointed to by the searcher manager */ - protected static SegmentInfos readLastCommittedSegmentInfos(SearcherManager sm) throws IOException { + protected static SegmentInfos readLastCommittedSegmentInfos(final SearcherManager sm, final Store store) throws IOException { IndexSearcher searcher = sm.acquire(); try { IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit(); return Lucene.readSegmentInfos(latestCommit); + } catch (IOException e) { + // Fall back to reading from the store if reading from the commit fails + try { + return store.readLastCommittedSegmentsInfo(); + } catch (IOException e2) { + e2.addSuppressed(e); + throw e2; + } } finally { sm.release(searcher); } diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 393dff33907..42bab8ca7fe 100644 --- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -275,7 +275,7 @@ public class InternalEngine extends Engine { try { final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId); searcherManager = new SearcherManager(directoryReader, searcherFactory); - lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager); + lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); success = true; return searcherManager; } catch (IOException e) { diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 301f6176a00..95b3810d330 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -79,7 +79,7 @@ public class ShadowEngine extends Engine { if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) { reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId); this.searcherManager = new SearcherManager(reader, searcherFactory); - this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager); + this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); success = true; } else { throw new
IndexShardException(shardId, "failed to open a shadow engine after" + @@ -148,7 +148,7 @@ public class ShadowEngine extends Engine { store.incRef(); try (ReleasableLock lock = readLock.acquire()) { // reread the last committed segment infos - lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager); + lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); } catch (Throwable e) { if (isClosed.get() == false) { logger.warn("failed to read latest segment infos on flush", e); From 503f844a05c02112c852cc7a6b4ee4b52c99497a Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 28 May 2015 10:51:18 -1000 Subject: [PATCH 17/19] Tests: make randomRepoPath work with bwc tests --- .../elasticsearch/test/ElasticsearchIntegrationTest.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 0655588e35d..0e146d37ae1 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -1847,7 +1847,12 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase * Returns path to a random directory that can be used to create a temporary file system repo */ public Path randomRepoPath() { - return randomRepoPath(internalCluster().getDefaultSettings()); + if (currentCluster instanceof InternalTestCluster) { + return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings()); + } else if (currentCluster instanceof CompositeTestCluster) { + return randomRepoPath(((CompositeTestCluster) currentCluster).internalCluster().getDefaultSettings()); + } + throw new UnsupportedOperationException("unsupported cluster type"); } /** From c695f35bcabc57223ef8480aa2d1b24f14310f33 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 28 May 2015 12:08:28 -1000 Subject: [PATCH 18/19] Tests: make REST upgrade test more reliable Make sure that all shards are started to avoid flush conflicts. --- rest-api-spec/test/indices.upgrade/10_basic.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/rest-api-spec/test/indices.upgrade/10_basic.yaml b/rest-api-spec/test/indices.upgrade/10_basic.yaml index f4844839c41..d6a38b4c168 100644 --- a/rest-api-spec/test/indices.upgrade/10_basic.yaml +++ b/rest-api-spec/test/indices.upgrade/10_basic.yaml @@ -4,10 +4,15 @@ - do: indices.create: index: test_index + body: + settings: + index: + number_of_replicas: 0 + - do: cluster.health: - wait_for_status: yellow + wait_for_status: green - do: indices.upgrade: From 5600757f3eb505bf22162c1190914d86a57dda53 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Thu, 28 May 2015 17:09:43 +0200 Subject: [PATCH 19/19] Serialization: Remove old version checks As the 2.x release does not need to be backwards compatible in terms of serialization, we can remove a fair share of the serialization checks. 
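To make the pattern these hunks delete concrete, here is a small, self-contained sketch of version-gated wire serialization before and after dropping a legacy check. It uses plain java.io streams rather than Elasticsearch's StreamInput/StreamOutput, and the names (VersionGateSketch, WIRE_VERSION, LEGACY_VERSION, writeToLegacyAware) are invented for the example; it illustrates the idea only, not the project's actual serialization code.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    /** Illustration only: a value that writes itself to a versioned wire format. */
    public class VersionGateSketch {
        static final int LEGACY_VERSION = 1; // stand-in for a pre-2.x peer
        static final int WIRE_VERSION = 2;   // stand-in for the current peer

        // Before: every writer had to know about old peers and pad the stream for them.
        static void writeToLegacyAware(DataOutputStream out, int peerVersion, double[] keys) throws IOException {
            if (peerVersion < WIRE_VERSION) {
                out.writeByte(0); // placeholder byte only old readers expected
            }
            out.writeInt(keys.length);
            for (double key : keys) {
                out.writeDouble(key);
            }
        }

        // After: once 1.x interoperability is dropped, the guard and its dead padding go away.
        static void writeTo(DataOutputStream out, double[] keys) throws IOException {
            out.writeInt(keys.length);
            for (double key : keys) {
                out.writeDouble(key);
            }
        }

        public static void main(String[] args) throws IOException {
            double[] keys = {1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0};
            ByteArrayOutputStream legacy = new ByteArrayOutputStream();
            ByteArrayOutputStream current = new ByteArrayOutputStream();
            writeToLegacyAware(new DataOutputStream(legacy), LEGACY_VERSION, keys);
            writeTo(new DataOutputStream(current), keys);
            // The legacy-aware form emits one extra padding byte for old peers.
            System.out.println("legacy bytes: " + legacy.size() + ", current bytes: " + current.size());
        }
    }

The win is symmetric on the read side: with the guard gone, readers no longer branch on the peer's version, which is exactly what the hunks below remove.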
--- .../cluster/health/ClusterHealthResponse.java | 9 ++---- .../hotthreads/NodesHotThreadsRequest.java | 12 ++------ .../restore/RestoreSnapshotRequest.java | 25 +++------------- .../get/GetIndexedScriptRequest.java | 30 ------------------- .../cluster/metadata/MappingMetaData.java | 14 ++------- .../org/elasticsearch/common/Priority.java | 4 --- .../zen/fd/MasterFaultDetection.java | 9 ------ .../percolator/stats/PercolateStats.java | 11 ------- .../AbstractInternalPercentiles.java | 10 ------- 9 files changed, 10 insertions(+), 114 deletions(-) diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index 1e1a363d906..fc4567a5b53 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.health; import com.google.common.collect.ImmutableList; import com.google.common.collect.Maps; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -229,9 +228,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable { private String snapshot; - private String repository; - private String[] indices = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); - private String renamePattern; - private String renameReplacement; - private boolean waitForCompletion; - private boolean includeGlobalState = true; - private boolean partial = false; - private boolean includeAliases = true; - private Settings settings = EMPTY_SETTINGS; - private Settings indexSettings = EMPTY_SETTINGS; - private String[] ignoreIndexSettings = Strings.EMPTY_ARRAY; RestoreSnapshotRequest() { @@ -638,10 +625,8 @@ public class RestoreSnapshotRequest extends MasterNodeRequest { out.writeOptionalString(timestamp().path()); out.writeString(timestamp().format()); out.writeOptionalString(timestamp().defaultTimestamp()); - // TODO Remove the test in elasticsearch 2.0.0 - if (out.getVersion().onOrAfter(Version.V_1_5_0)) { - out.writeOptionalBoolean(timestamp().ignoreMissing()); - } + out.writeOptionalBoolean(timestamp().ignoreMissing()); out.writeBoolean(hasParentField()); } @@ -619,10 +612,7 @@ public class MappingMetaData extends AbstractDiffable { String defaultTimestamp = in.readOptionalString(); Boolean ignoreMissing = null; - // TODO Remove the test in elasticsearch 2.0.0 - if (in.getVersion().onOrAfter(Version.V_1_5_0)) { - ignoreMissing = in.readOptionalBoolean(); - } + ignoreMissing = in.readOptionalBoolean(); final Timestamp timestamp = new Timestamp(enabled, path, format, defaultTimestamp, ignoreMissing); final boolean hasParentField = in.readBoolean(); diff --git a/src/main/java/org/elasticsearch/common/Priority.java b/src/main/java/org/elasticsearch/common/Priority.java index 658a7e5e9e2..19c2024555d 100644 --- a/src/main/java/org/elasticsearch/common/Priority.java +++ b/src/main/java/org/elasticsearch/common/Priority.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.common; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -35,9 +34,6 @@ public final class Priority implements Comparable { public static 
void writeTo(Priority priority, StreamOutput output) throws IOException { byte b = priority.value; - if (output.getVersion().before(Version.V_1_1_0)) { - b = (byte) Math.max(URGENT.value, b); - } output.writeByte(b); } diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index 1159f1c4e6a..9dfaef728b3 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen.fd; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -428,19 +427,11 @@ public class MasterFaultDetection extends FaultDetection { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().onOrBefore(Version.V_1_4_0_Beta1)) { - // old listedOnMaster - in.readBoolean(); - } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrBefore(Version.V_1_4_0_Beta1)) { - // old listedOnMaster - out.writeBoolean(true); - } } } } diff --git a/src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java b/src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java index e75813772de..4ae854a4c40 100644 --- a/src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java +++ b/src/main/java/org/elasticsearch/index/percolator/stats/PercolateStats.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.percolator.stats; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -152,11 +151,6 @@ public class PercolateStats implements Streamable, ToXContent { percolateCount = in.readVLong(); percolateTimeInMillis = in.readVLong(); current = in.readVLong(); - if (in.getVersion().before(Version.V_1_1_0)) { - in.readVLong(); - } else { - in.readLong(); - } numQueries = in.readVLong(); } @@ -165,11 +159,6 @@ public class PercolateStats implements Streamable, ToXContent { out.writeVLong(percolateCount); out.writeVLong(percolateTimeInMillis); out.writeVLong(current); - if (out.getVersion().before(Version.V_1_1_0)) { - out.writeVLong(0); - } else { - out.writeLong(-1); - } out.writeVLong(numQueries); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java index 05bc0f95683..6d6de6b4346 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles; -import org.elasticsearch.Version; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -79,12 +78,6 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega @Override protected void doReadFrom(StreamInput in) 
throws IOException { valueFormatter = ValueFormatterStreams.readOptional(in); - if (in.getVersion().before(Version.V_1_2_0)) { - final byte id = in.readByte(); - if (id != 0) { - throw new IllegalArgumentException("Unexpected percentiles aggregator id [" + id + "]"); - } - } keys = new double[in.readInt()]; for (int i = 0; i < keys.length; ++i) { keys[i] = in.readDouble(); @@ -96,9 +89,6 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega @Override protected void doWriteTo(StreamOutput out) throws IOException { ValueFormatterStreams.writeOptional(valueFormatter, out); - if (out.getVersion().before(Version.V_1_2_0)) { - out.writeByte((byte) 0); - } out.writeInt(keys.length); for (int i = 0 ; i < keys.length; ++i) { out.writeDouble(keys[i]);