Switch to murmurhash3 to route documents to shards.
We currently use the djb2 hash function to compute the shard a document should go to. Unfortunately this hash function is not very sophisticated and you can sometimes hit adversarial cases, such as numeric ids on 33 shards. Murmur3 generates hashes with a better distribution, which should avoid these adversarial cases.

Here are some examples of how 100000 incremental ids are distributed to shards using either djb2 or murmur3.

5 shards:
Murmur3: [19933, 19964, 19940, 20030, 20133]
DJB: [20000, 20000, 20000, 20000, 20000]

3 shards:
Murmur3: [33185, 33347, 33468]
DJB: [30100, 30000, 39900]

33 shards:
Murmur3: [2999, 3096, 2930, 2986, 3070, 3093, 3023, 3052, 3112, 2940, 3036, 2985, 3031, 3048, 3127, 2961, 2901, 3105, 3041, 3130, 3013, 3035, 3031, 3019, 3008, 3022, 3111, 3086, 3016, 2996, 3075, 2945, 2977]
DJB: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 900, 900, 900, 900, 1000, 1000, 10000, 10000, 10000, 10000, 9100, 9100, 9100, 9100, 9000, 9000, 0, 0, 0, 0, 0, 0]

Even though djb2 looks ideal in some cases (5 shards), the fact that its hash distribution follows patterns can raise issues with some shard counts (e.g. 3, or even worse 33).

Some tests have been modified because they relied on implementation details of the routing hash function.

Close #7954
This commit is contained in: parent 8ef6e7e7ec, commit 9ea25df649
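The distribution comparison above can be reproduced with a small standalone sketch. This is an illustration, not code from this commit: it assumes Lucene's StringHelper.murmurhash3_x86_32 is on the classpath (the same primitive the new Murmur3HashFunction uses), implements the classic djb2 loop (hash = hash * 33 + c, seeded with 5381) inline for comparison, and reduces the hash to a shard with a floor modulo for murmur3 and Math.abs for djb2, matching the routing code in this change; the exact per-shard counts should therefore be treated as illustrative.

import org.apache.lucene.util.StringHelper;

import java.util.Arrays;

public class ShardDistributionSketch {

    // Classic djb2 variant: hash = hash * 33 + c, seeded with 5381.
    static int djb2(String s) {
        long hash = 5381;
        for (int i = 0; i < s.length(); i++) {
            hash = ((hash << 5) + hash) + s.charAt(i);
        }
        return (int) hash;
    }

    // Murmur3 over the UTF-16 code units, using the same byte layout as Murmur3HashFunction.
    static int murmur3(String s) {
        byte[] bytes = new byte[s.length() * 2];
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            bytes[2 * i] = (byte) c;
            bytes[2 * i + 1] = (byte) (c >>> 8);
        }
        return StringHelper.murmurhash3_x86_32(bytes, 0, bytes.length, 0);
    }

    public static void main(String[] args) {
        int numShards = 33, numDocs = 100000;
        int[] murmurCounts = new int[numShards];
        int[] djbCounts = new int[numShards];
        for (int id = 0; id < numDocs; id++) {
            String routing = Integer.toString(id);
            murmurCounts[Math.floorMod(murmur3(routing), numShards)]++;
            djbCounts[Math.abs(djb2(routing) % numShards)]++;
        }
        System.out.println("Murmur3: " + Arrays.toString(murmurCounts));
        System.out.println("DJB:     " + Arrays.toString(djbCounts));
    }
}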
@ -18,4 +18,26 @@ Partial fields were deprecated since 1.0.0beta1 in favor of <<search-request-sou

=== More Like This Field

The More Like This Field query has been removed in favor of the <<query-dsl-mlt-query, More Like This Query>>
restrained set to a specific `field`.

=== Routing

The default hash function that is used for routing has been changed from djb2 to
murmur3. This change should be transparent unless you relied on very specific
properties of djb2. This will help ensure a better balance of the document counts
between shards.

In addition, the following node settings related to routing have been deprecated:

[horizontal]

`cluster.routing.operation.hash.type`::

This was an undocumented setting that allowed to configure which hash function
to use for routing. `murmur3` is now enforced on new indices.

`cluster.routing.operation.use_type`::

This was an undocumented setting that allowed to take the `_type` of the
document into account when computing its shard (default: `false`). `false` is
now enforced on new indices.
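For reference, the shard is still derived from the hash with a simple modulo; what changes between old and new indices is the hash function and how negative hash values are mapped. A minimal sketch, assuming MathUtils.mod behaves like Java's Math.floorMod (always non-negative); indices created before 2.0 keep the historical Math.abs form, as the routing code in this commit does:

// Illustration only: turning a routing hash into a shard id.
// Pre-2.0 indices keep the historical Math.abs reduction; 2.0+ indices use a
// floor modulo (approximated here with Math.floorMod), which maps negative
// hashes differently, e.g. hash = -1 with 5 shards gives 1 vs 4.
static int shardFor(int hash, int numShards, boolean createdOnOrAfter2_0) {
    if (createdOnOrAfter2_0) {
        return Math.floorMod(hash, numShards);
    } else {
        return Math.abs(hash % numShards);
    }
}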
@ -21,11 +21,14 @@
|
||||
body: { foo: bar }
|
||||
refresh: 1
|
||||
|
||||
# If you wonder why this document gets 3 as an id instead of 2, it is because the
|
||||
# current routing algorithm would route 1 and 2 to the same shard while we need
|
||||
# them to be different for this test to pass
|
||||
- do:
|
||||
index:
|
||||
index: test_1
|
||||
type: test
|
||||
id: 2
|
||||
id: 3
|
||||
body: { foo: bar }
|
||||
refresh: 1
|
||||
|
||||
@ -34,7 +37,7 @@
|
||||
index: test_1
|
||||
type: test
|
||||
body:
|
||||
query: { terms: { _id: [1,2] }}
|
||||
query: { terms: { _id: [1,3] }}
|
||||
|
||||
- match: { hits.total: 2 }
|
||||
|
||||
@ -49,7 +52,7 @@
|
||||
index: test_1
|
||||
type: test
|
||||
body:
|
||||
query: { terms: { _id: [1,2] }}
|
||||
query: { terms: { _id: [1,3] }}
|
||||
|
||||
- match: { hits.total: 2 }
|
||||
|
||||
@ -57,7 +60,7 @@
|
||||
delete:
|
||||
index: test_1
|
||||
type: test
|
||||
id: 2
|
||||
id: 3
|
||||
refresh: 1
|
||||
|
||||
# If a replica shard where doc 1 is located gets initialized at this point, doc 1
|
||||
@ -69,6 +72,6 @@
|
||||
index: test_1
|
||||
type: test
|
||||
body:
|
||||
query: { terms: { _id: [1,2] }}
|
||||
query: { terms: { _id: [1,3] }}
|
||||
|
||||
- match: { hits.total: 1 }
|
||||
|
@ -28,6 +28,8 @@ import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.murmur3.Murmur3HashFunction;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Preconditions;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
@ -163,8 +165,14 @@ public class IndexMetaData {
|
||||
public static final String SETTING_VERSION_CREATED = "index.version.created";
|
||||
public static final String SETTING_CREATION_DATE = "index.creation_date";
|
||||
public static final String SETTING_UUID = "index.uuid";
|
||||
public static final String SETTING_LEGACY_ROUTING_HASH_FUNCTION = "index.legacy.routing.hash.type";
|
||||
public static final String SETTING_LEGACY_ROUTING_USE_TYPE = "index.legacy.routing.use_type";
|
||||
public static final String INDEX_UUID_NA_VALUE = "_na_";
|
||||
|
||||
// hard-coded hash function as of 2.0
|
||||
// older indices will read which hash function to use in their index settings
|
||||
private static final HashFunction MURMUR3_HASH_FUNCTION = new Murmur3HashFunction();
|
||||
|
||||
private final String index;
|
||||
private final long version;
|
||||
|
||||
@ -184,6 +192,10 @@ public class IndexMetaData {
|
||||
private final DiscoveryNodeFilters includeFilters;
|
||||
private final DiscoveryNodeFilters excludeFilters;
|
||||
|
||||
private final Version indexCreatedVersion;
|
||||
private final HashFunction routingHashFunction;
|
||||
private final boolean useTypeForRouting;
|
||||
|
||||
private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
|
||||
Preconditions.checkArgument(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null) != null, "must specify numberOfShards for index [" + index + "]");
|
||||
Preconditions.checkArgument(settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null) != null, "must specify numberOfReplicas for index [" + index + "]");
|
||||
@ -214,10 +226,20 @@ public class IndexMetaData {
|
||||
} else {
|
||||
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
|
||||
}
|
||||
indexCreatedVersion = Version.indexCreated(settings);
|
||||
final Class<? extends HashFunction> hashFunctionClass = settings.getAsClass(SETTING_LEGACY_ROUTING_HASH_FUNCTION, null);
|
||||
if (hashFunctionClass == null) {
|
||||
routingHashFunction = MURMUR3_HASH_FUNCTION;
|
||||
} else {
|
||||
try {
|
||||
routingHashFunction = hashFunctionClass.newInstance();
|
||||
} catch (InstantiationException | IllegalAccessException e) {
|
||||
throw new ElasticsearchIllegalStateException("Cannot instantiate hash function", e);
|
||||
}
|
||||
}
|
||||
useTypeForRouting = settings.getAsBoolean(SETTING_LEGACY_ROUTING_USE_TYPE, false);
|
||||
}
|
||||
|
||||
|
||||
|
||||
public String index() {
|
||||
return index;
|
||||
}
|
||||
@ -254,6 +276,41 @@ public class IndexMetaData {
|
||||
return this.version;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the {@link Version} on which this index has been created. This
|
||||
* information is typically useful for backward compatibility.
|
||||
*/
|
||||
public Version creationVersion() {
|
||||
return indexCreatedVersion;
|
||||
}
|
||||
|
||||
public Version getCreationVersion() {
|
||||
return creationVersion();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the {@link HashFunction} that should be used for routing.
|
||||
*/
|
||||
public HashFunction routingHashFunction() {
|
||||
return routingHashFunction;
|
||||
}
|
||||
|
||||
public HashFunction getRoutingHashFunction() {
|
||||
return routingHashFunction();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return whether routing should use the _type in addition to the _id in
|
||||
* order to decide which shard a document should go to.
|
||||
*/
|
||||
public boolean routingUseType() {
|
||||
return useTypeForRouting;
|
||||
}
|
||||
|
||||
public boolean getRoutingUseType() {
|
||||
return routingUseType();
|
||||
}
|
||||
|
||||
public long creationDate() {
|
||||
return settings.getAsLong(SETTING_CREATION_DATE, -1l);
|
||||
}
|
||||
|
@ -20,8 +20,6 @@
|
||||
package org.elasticsearch.cluster.routing.operation;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
|
||||
import org.elasticsearch.cluster.routing.operation.plain.PlainOperationRoutingModule;
|
||||
import org.elasticsearch.common.inject.AbstractModule;
|
||||
import org.elasticsearch.common.inject.Module;
|
||||
@ -48,6 +46,5 @@ public class OperationRoutingModule extends AbstractModule implements SpawnModul
|
||||
|
||||
@Override
|
||||
protected void configure() {
|
||||
bind(HashFunction.class).to(settings.getAsClass("cluster.routing.operation.hash.type", DjbHashFunction.class, "org.elasticsearch.cluster.routing.operation.hash.", "HashFunction")).asEagerSingleton();
|
||||
}
|
||||
}
|
||||
|
@ -37,5 +37,6 @@ public interface HashFunction {
|
||||
* @param routing String to calculate the hash value from
|
||||
* @return hash value of the given type and routing string
|
||||
*/
|
||||
@Deprecated
|
||||
int hash(String type, String id);
|
||||
}
|
||||
|
@ -0,0 +1,48 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.operation.hash.murmur3;

import org.apache.lucene.util.StringHelper;
import org.elasticsearch.cluster.routing.operation.hash.HashFunction;

/**
 * Hash function based on the Murmur3 algorithm, which is the default as of Elasticsearch 2.0.
 */
public class Murmur3HashFunction implements HashFunction {

    @Override
    public int hash(String routing) {
        final byte[] bytesToHash = new byte[routing.length() * 2];
        for (int i = 0; i < routing.length(); ++i) {
            final char c = routing.charAt(i);
            final byte b1 = (byte) c, b2 = (byte) (c >>> 8);
            assert ((b1 & 0xFF) | ((b2 & 0xFF) << 8)) == c; // no information loss
            bytesToHash[i * 2] = b1;
            bytesToHash[i * 2 + 1] = b2;
        }
        return StringHelper.murmurhash3_x86_32(bytesToHash, 0, bytesToHash.length, 0);
    }

    @Override
    public int hash(String type, String id) {
        throw new UnsupportedOperationException();
    }

}
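A quick usage sketch (illustrative, not part of the commit): the function encodes each UTF-16 code unit of the routing value as two little-endian bytes, so no character information is lost before hashing, and the resulting int is then reduced to a shard id. Math.floorMod is used here as a stand-in for the MathUtils.mod reduction applied by the routing code.

import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
import org.elasticsearch.cluster.routing.operation.hash.murmur3.Murmur3HashFunction;

public class Murmur3RoutingExample {
    public static void main(String[] args) {
        HashFunction hashFunction = new Murmur3HashFunction();
        int hash = hashFunction.hash("my-routing-key"); // hash the routing value only
        int numShards = 5;
        // stand-in for MathUtils.mod(hash, numShards) on a 2.0+ index
        int shard = Math.floorMod(hash, numShards);
        System.out.println("document would be routed to shard " + shard);
    }
}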
@ -20,8 +20,8 @@
|
||||
package org.elasticsearch.cluster.routing.operation.plain;
|
||||
|
||||
import com.google.common.collect.Lists;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
@ -37,30 +37,30 @@ import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.math.MathUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexShardMissingException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndexMissingException;
|
||||
|
||||
import java.util.*;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class PlainOperationRouting extends AbstractComponent implements OperationRouting {
|
||||
|
||||
private final HashFunction hashFunction;
|
||||
|
||||
private final boolean useType;
|
||||
|
||||
private final AwarenessAllocationDecider awarenessAllocationDecider;
|
||||
|
||||
@Inject
|
||||
public PlainOperationRouting(Settings indexSettings, HashFunction hashFunction, AwarenessAllocationDecider awarenessAllocationDecider) {
|
||||
super(indexSettings);
|
||||
this.hashFunction = hashFunction;
|
||||
this.useType = indexSettings.getAsBoolean("cluster.routing.operation.use_type", false);
|
||||
public PlainOperationRouting(Settings settings, AwarenessAllocationDecider awarenessAllocationDecider) {
|
||||
super(settings);
|
||||
this.awarenessAllocationDecider = awarenessAllocationDecider;
|
||||
}
|
||||
|
||||
@ -262,22 +262,35 @@ public class PlainOperationRouting extends AbstractComponent implements Operatio
|
||||
return indexShard;
|
||||
}
|
||||
|
||||
private int shardId(ClusterState clusterState, String index, String type, @Nullable String id, @Nullable String routing) {
|
||||
private int shardId(ClusterState clusterState, String index, String type, String id, @Nullable String routing) {
|
||||
final IndexMetaData indexMetaData = indexMetaData(clusterState, index);
|
||||
final Version createdVersion = indexMetaData.getCreationVersion();
|
||||
final HashFunction hashFunction = indexMetaData.getRoutingHashFunction();
|
||||
final boolean useType = indexMetaData.getRoutingUseType();
|
||||
|
||||
final int hash;
|
||||
if (routing == null) {
|
||||
if (!useType) {
|
||||
return Math.abs(hash(id) % indexMetaData(clusterState, index).numberOfShards());
|
||||
hash = hash(hashFunction, id);
|
||||
} else {
|
||||
return Math.abs(hash(type, id) % indexMetaData(clusterState, index).numberOfShards());
|
||||
hash = hash(hashFunction, type, id);
|
||||
}
|
||||
} else {
|
||||
hash = hash(hashFunction, routing);
|
||||
}
|
||||
if (createdVersion.onOrAfter(Version.V_2_0_0)) {
|
||||
return MathUtils.mod(hash, indexMetaData.numberOfShards());
|
||||
} else {
|
||||
return Math.abs(hash % indexMetaData.numberOfShards());
|
||||
}
|
||||
return Math.abs(hash(routing) % indexMetaData(clusterState, index).numberOfShards());
|
||||
}
|
||||
|
||||
protected int hash(String routing) {
|
||||
protected int hash(HashFunction hashFunction, String routing) {
|
||||
return hashFunction.hash(routing);
|
||||
}
|
||||
|
||||
protected int hash(String type, String id) {
|
||||
@Deprecated
|
||||
protected int hash(HashFunction hashFunction, String type, String id) {
|
||||
if (type == null || "_all".equals(type)) {
|
||||
throw new ElasticsearchIllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)");
|
||||
}
|
||||
@ -289,4 +302,5 @@ public class PlainOperationRouting extends AbstractComponent implements Operatio
|
||||
throw new ElasticsearchIllegalArgumentException("No data node with id[" + nodeId + "] found");
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
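The important design change in PlainOperationRouting is that the hash function and the use_type flag are no longer node-level injected singletons: they are resolved per index from IndexMetaData, so pre-2.0 indices keep hashing with djb2 (and optionally the _type) while new indices use murmur3. The following is only a condensed restatement of the interleaved old/new lines in the hunk above, not new behavior:

// Condensed sketch of the per-index resolution performed in shardId() above.
private int shardId(ClusterState clusterState, String index, String type, String id, @Nullable String routing) {
    final IndexMetaData indexMetaData = indexMetaData(clusterState, index);
    // murmur3 unless a legacy index setting says otherwise
    final HashFunction hashFunction = indexMetaData.getRoutingHashFunction();
    final int hash;
    if (routing == null) {
        hash = indexMetaData.getRoutingUseType() ? hash(hashFunction, type, id) : hash(hashFunction, id);
    } else {
        hash = hash(hashFunction, routing);
    }
    return indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0)
            ? MathUtils.mod(hash, indexMetaData.numberOfShards())
            : Math.abs(hash % indexMetaData.numberOfShards());
}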
@ -22,6 +22,7 @@ package org.elasticsearch.gateway.local.state.meta;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.ElasticsearchIllegalStateException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
@ -29,6 +30,8 @@ import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
@ -43,7 +46,9 @@ import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.*;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
@ -60,6 +65,8 @@ public class LocalGatewayMetaState extends AbstractComponent implements ClusterS
|
||||
static final Pattern GLOBAL_STATE_FILE_PATTERN = Pattern.compile(GLOBAL_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?");
|
||||
static final Pattern INDEX_STATE_FILE_PATTERN = Pattern.compile(INDEX_STATE_FILE_PREFIX + "(\\d+)(" + MetaDataStateFormat.STATE_FILE_EXTENSION + ")?");
|
||||
private static final String GLOBAL_STATE_LOG_TYPE = "[_global]";
|
||||
private static final String DEPRECATED_SETTING_ROUTING_HASH_FUNCTION = "cluster.routing.operation.hash.type";
|
||||
private static final String DEPRECATED_SETTING_ROUTING_USE_TYPE = "cluster.routing.operation.use_type";
|
||||
|
||||
static enum AutoImportDangledState {
|
||||
NO() {
|
||||
@ -152,6 +159,7 @@ public class LocalGatewayMetaState extends AbstractComponent implements ClusterS
|
||||
if (DiscoveryNode.masterNode(settings)) {
|
||||
try {
|
||||
pre019Upgrade();
|
||||
pre20Upgrade();
|
||||
long start = System.currentTimeMillis();
|
||||
loadState();
|
||||
logger.debug("took {} to load state", TimeValue.timeValueMillis(System.currentTimeMillis() - start));
|
||||
@ -516,6 +524,41 @@ public class LocalGatewayMetaState extends AbstractComponent implements ClusterS
|
||||
logger.info("conversion to new metadata location and format done, backup create at [{}]", backupFile.getAbsolutePath());
|
||||
}
|
||||
|
||||
/**
|
||||
* Elasticsearch 2.0 deprecated custom routing hash functions. So what we do here is that for old indices, we
|
||||
* move this old & deprecated node setting to an index setting so that we can keep things backward compatible.
|
||||
*/
|
||||
private void pre20Upgrade() throws Exception {
|
||||
final Class<? extends HashFunction> pre20HashFunction = settings.getAsClass(DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, null, "org.elasticsearch.cluster.routing.operation.hash.", "HashFunction");
|
||||
final Boolean pre20UseType = settings.getAsBoolean(DEPRECATED_SETTING_ROUTING_USE_TYPE, null);
|
||||
MetaData metaData = loadMetaState();
|
||||
for (IndexMetaData indexMetaData : metaData) {
|
||||
if (indexMetaData.settings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) == null
|
||||
&& indexMetaData.getCreationVersion().before(Version.V_2_0_0)) {
|
||||
// these settings need an upgrade
|
||||
Settings indexSettings = ImmutableSettings.builder().put(indexMetaData.settings())
|
||||
.put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, pre20HashFunction == null ? DjbHashFunction.class : pre20HashFunction)
|
||||
.put(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE, pre20UseType == null ? false : pre20UseType)
|
||||
.build();
|
||||
IndexMetaData newMetaData = IndexMetaData.builder(indexMetaData)
|
||||
.version(indexMetaData.version())
|
||||
.settings(indexSettings)
|
||||
.build();
|
||||
writeIndex("upgrade", newMetaData, null);
|
||||
} else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0)) {
|
||||
if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null
|
||||
|| indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) {
|
||||
throw new ElasticsearchIllegalStateException("Indices created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION
|
||||
+ "] + or [" + IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE + "] in their index settings");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (pre20HashFunction != null || pre20UseType != null) {
|
||||
logger.warn("Settings [{}] and [{}] are deprecated. Index settings from your old indices have been updated to record the fact that they "
|
||||
+ "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE);
|
||||
}
|
||||
}
|
||||
|
||||
class RemoveDanglingIndex implements Runnable {
|
||||
|
||||
private final String index;
|
||||
|
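The upgrade path in LocalGatewayMetaState is what keeps old indices routable after the switch: at startup, any index created before 2.0 that does not yet carry the legacy routing settings gets them written into its index metadata, seeded from the now-deprecated node settings and falling back to djb2 with use_type=false. The sketch below condenses the pre20Upgrade() hunk above, using the same names and builder calls shown in the diff:

// Condensed sketch of the settings upgrade performed in pre20Upgrade() above.
// pre20HashFunction and pre20UseType are read from the deprecated node settings
// earlier in that method.
for (IndexMetaData indexMetaData : loadMetaState()) {
    boolean missingLegacySettings =
            indexMetaData.settings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) == null;
    if (missingLegacySettings && indexMetaData.getCreationVersion().before(Version.V_2_0_0)) {
        Settings upgraded = ImmutableSettings.builder()
                .put(indexMetaData.settings())
                // fall back to djb2 / no type routing when the deprecated node settings are absent
                .put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION,
                        pre20HashFunction == null ? DjbHashFunction.class : pre20HashFunction)
                .put(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE,
                        pre20UseType == null ? false : pre20UseType)
                .build();
        writeIndex("upgrade", IndexMetaData.builder(indexMetaData)
                .version(indexMetaData.version())
                .settings(upgraded)
                .build(), null);
    }
}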
@ -207,7 +207,7 @@ public abstract class AbstractTermVectorTests extends ElasticsearchIntegrationTe
|
||||
/**
|
||||
* Generate test documents. The returned documents are already indexed.
|
||||
*/
|
||||
protected TestDoc[] generateTestDocs(int numberOfDocs, TestFieldSetting[] fieldSettings) {
|
||||
protected TestDoc[] generateTestDocs(String index, TestFieldSetting[] fieldSettings) {
|
||||
String[] fieldContentOptions = new String[]{"Generating a random permutation of a sequence (such as when shuffling cards).",
|
||||
"Selecting a random sample of a population (important in statistical sampling).",
|
||||
"Allocating experimental units via random assignment to a treatment or control condition.",
|
||||
@ -216,16 +216,19 @@ public abstract class AbstractTermVectorTests extends ElasticsearchIntegrationTe
|
||||
|
||||
String[] contentArray = new String[fieldSettings.length];
|
||||
Map<String, Object> docSource = new HashMap<>();
|
||||
TestDoc[] testDocs = new TestDoc[numberOfDocs];
|
||||
for (int docId = 0; docId < numberOfDocs; docId++) {
|
||||
int totalShards = getNumShards(index).numPrimaries;
|
||||
TestDoc[] testDocs = new TestDoc[totalShards];
|
||||
// this methods wants to send one doc to each shard
|
||||
for (int i = 0; i < totalShards; i++) {
|
||||
docSource.clear();
|
||||
for (int i = 0; i < contentArray.length; i++) {
|
||||
contentArray[i] = fieldContentOptions[randomInt(fieldContentOptions.length - 1)];
|
||||
docSource.put(fieldSettings[i].name, contentArray[i]);
|
||||
for (int j = 0; j < contentArray.length; j++) {
|
||||
contentArray[j] = fieldContentOptions[randomInt(fieldContentOptions.length - 1)];
|
||||
docSource.put(fieldSettings[j].name, contentArray[j]);
|
||||
}
|
||||
TestDoc doc = new TestDoc(Integer.toString(docId), fieldSettings, contentArray.clone());
|
||||
final String id = routingKeyForShard(index, "type", i);
|
||||
TestDoc doc = new TestDoc(id, fieldSettings, contentArray.clone());
|
||||
index(doc.index, doc.type, doc.id, docSource);
|
||||
testDocs[docId] = doc;
|
||||
testDocs[i] = doc;
|
||||
}
|
||||
|
||||
refresh();
|
||||
|
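This is exactly the kind of test that relied on djb2's regular distribution: under murmur3, sequential ids no longer land one per shard, so the test now asks the test infrastructure for a routing key per target shard via routingKeyForShard. A hypothetical sketch of how such a helper can be built by trial, assuming access to the index's hash function and shard count and the same floor-modulo reduction used for routing (the real framework helper may work differently):

// Hypothetical illustration: find a routing value that lands on a given shard by
// trying candidate keys until one hashes to the target. The method name and the
// Math.floorMod reduction are assumptions for this sketch, not the framework code.
static String routingKeyForShardSketch(HashFunction hashFunction, int numShards, int targetShard) {
    for (int candidate = 0; ; candidate++) {
        String key = Integer.toString(candidate);
        if (Math.floorMod(hashFunction.hash(key), numShards) == targetShard) {
            return key;
        }
    }
}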
@ -381,7 +381,7 @@ public class GetTermVectorTests extends AbstractTermVectorTests {
|
||||
TestFieldSetting[] testFieldSettings = getFieldSettings();
|
||||
createIndexBasedOnFieldSettings("test", "alias", testFieldSettings);
|
||||
//we generate as many docs as many shards we have
|
||||
TestDoc[] testDocs = generateTestDocs(getNumShards("test").numPrimaries, testFieldSettings);
|
||||
TestDoc[] testDocs = generateTestDocs("test", testFieldSettings);
|
||||
|
||||
DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
|
||||
TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
|
||||
|
@ -32,7 +32,7 @@ public class MultiTermVectorsTests extends AbstractTermVectorTests {
|
||||
AbstractTermVectorTests.TestFieldSetting[] testFieldSettings = getFieldSettings();
|
||||
createIndexBasedOnFieldSettings("test", "alias", testFieldSettings);
|
||||
//we generate as many docs as many shards we have
|
||||
TestDoc[] testDocs = generateTestDocs(getNumShards("test").numPrimaries, testFieldSettings);
|
||||
TestDoc[] testDocs = generateTestDocs("test", testFieldSettings);
|
||||
|
||||
DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
|
||||
AbstractTermVectorTests.TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
|
||||
@ -156,7 +157,7 @@ public class ClusterHealthResponsesTests extends ElasticsearchTestCase {
|
||||
public void testClusterIndexHealth() {
|
||||
int numberOfShards = randomInt(3) + 1;
|
||||
int numberOfReplicas = randomInt(4);
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder("test1").numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
|
||||
ShardCounter counter = new ShardCounter();
|
||||
IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
|
||||
|
||||
@ -183,7 +184,7 @@ public class ClusterHealthResponsesTests extends ElasticsearchTestCase {
|
||||
for (int i = randomInt(4); i >= 0; i--) {
|
||||
int numberOfShards = randomInt(3) + 1;
|
||||
int numberOfReplicas = randomInt(4);
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder("test_" + Integer.toString(i)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder("test_" + Integer.toString(i)).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
|
||||
IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
|
||||
metaData.put(indexMetaData, true);
|
||||
routingTable.add(indexRoutingTable);
|
||||
@ -197,10 +198,10 @@ public class ClusterHealthResponsesTests extends ElasticsearchTestCase {
|
||||
|
||||
@Test
|
||||
public void testValidations() {
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(2).build();
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(2).build();
|
||||
ShardCounter counter = new ShardCounter();
|
||||
IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
|
||||
indexMetaData = IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(3).build();
|
||||
indexMetaData = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(3).build();
|
||||
|
||||
ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
|
||||
assertThat(indexHealth.getValidationFailures(), Matchers.hasSize(2));
|
||||
|
@ -21,10 +21,10 @@ package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData.State;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.indices.IndexClosedException;
|
||||
import org.elasticsearch.indices.IndexMissingException;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
@ -525,7 +525,7 @@ public class MetaDataTests extends ElasticsearchTestCase {
|
||||
}
|
||||
|
||||
private IndexMetaData.Builder indexBuilder(String index) {
|
||||
return IndexMetaData.builder(index).settings(ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
|
||||
return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
|
||||
}
|
||||
|
||||
@Test(expected = IndexMissingException.class)
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
@ -39,39 +40,43 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
public void testSimpleJsonFromAndTo() throws IOException {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1")
|
||||
.settings(settings(Version.CURRENT))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2))
|
||||
.put(IndexMetaData.builder("test2")
|
||||
.settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
|
||||
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
|
||||
.numberOfShards(2)
|
||||
.numberOfReplicas(3))
|
||||
.put(IndexMetaData.builder("test3")
|
||||
.settings(settings(Version.CURRENT))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
.putMapping("mapping1", MAPPING_SOURCE1))
|
||||
.put(IndexMetaData.builder("test4")
|
||||
.settings(settings(Version.CURRENT))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
.creationDate(2l))
|
||||
.put(IndexMetaData.builder("test5")
|
||||
.settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
|
||||
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
.putMapping("mapping1", MAPPING_SOURCE1)
|
||||
.putMapping("mapping2", MAPPING_SOURCE2))
|
||||
.put(IndexMetaData.builder("test6")
|
||||
.settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
|
||||
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
.creationDate(2l))
|
||||
.put(IndexMetaData.builder("test7")
|
||||
.settings(settings(Version.CURRENT))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
.creationDate(2l)
|
||||
.putMapping("mapping1", MAPPING_SOURCE1)
|
||||
.putMapping("mapping2", MAPPING_SOURCE2))
|
||||
.put(IndexMetaData.builder("test8")
|
||||
.settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
|
||||
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
.putMapping("mapping1", MAPPING_SOURCE1)
|
||||
@ -79,7 +84,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
.putAlias(newAliasMetaDataBuilder("alias1"))
|
||||
.putAlias(newAliasMetaDataBuilder("alias2")))
|
||||
.put(IndexMetaData.builder("test9")
|
||||
.settings(settingsBuilder().put("setting1", "value1").put("setting2", "value2"))
|
||||
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
|
||||
.creationDate(2l)
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
@ -88,7 +93,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
.putAlias(newAliasMetaDataBuilder("alias1"))
|
||||
.putAlias(newAliasMetaDataBuilder("alias2")))
|
||||
.put(IndexMetaData.builder("test10")
|
||||
.settings(settingsBuilder()
|
||||
.settings(settings(Version.CURRENT)
|
||||
.put("setting1", "value1")
|
||||
.put("setting2", "value2"))
|
||||
.numberOfShards(1)
|
||||
@ -98,7 +103,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
.putAlias(newAliasMetaDataBuilder("alias1"))
|
||||
.putAlias(newAliasMetaDataBuilder("alias2")))
|
||||
.put(IndexMetaData.builder("test11")
|
||||
.settings(settingsBuilder()
|
||||
.settings(settings(Version.CURRENT)
|
||||
.put("setting1", "value1")
|
||||
.put("setting2", "value2"))
|
||||
.numberOfShards(1)
|
||||
@ -118,10 +123,10 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
.putAlias(newAliasMetaDataBuilder("alias-bar2").filter("{\"term\":{\"user\":\"kimchy\"}}"))
|
||||
.putAlias(newAliasMetaDataBuilder("alias-bar3").routing("routing-bar")))
|
||||
.put(IndexMetaData.builder("test12")
|
||||
.settings(settingsBuilder()
|
||||
.settings(settings(Version.CURRENT)
|
||||
.put("setting1", "value1")
|
||||
.put("setting2", "value2"))
|
||||
.creationDate(2l)
|
||||
.creationDate(2l)
|
||||
.numberOfShards(1)
|
||||
.numberOfReplicas(2)
|
||||
.putMapping("mapping1", MAPPING_SOURCE1)
|
||||
@ -149,14 +154,14 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(2));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test2");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(2));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(3));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
@ -165,7 +170,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(2));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(1));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
|
||||
@ -173,14 +178,14 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test5");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
@ -191,7 +196,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
@ -200,7 +205,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
@ -209,7 +214,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
@ -223,7 +228,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
@ -237,7 +242,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
@ -251,7 +256,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
@ -269,7 +274,7 @@ public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
|
@ -19,16 +19,20 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing;
|
||||
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.operation.OperationRouting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.node.internal.InternalNode;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.InputStreamReader;
|
||||
import java.util.Arrays;
|
||||
|
||||
public class RoutingBackwardCompatibilityTests extends ElasticsearchTestCase {
|
||||
|
||||
@ -41,19 +45,29 @@ public class RoutingBackwardCompatibilityTests extends ElasticsearchTestCase {
|
||||
continue;
|
||||
}
|
||||
String[] parts = line.split("\t");
|
||||
assertEquals(6, parts.length);
|
||||
assertEquals(Arrays.toString(parts), 7, parts.length);
|
||||
final String index = parts[0];
|
||||
final int numberOfShards = Integer.parseInt(parts[1]);
|
||||
final String type = parts[2];
|
||||
final String id = parts[3];
|
||||
final String routing = "null".equals(parts[4]) ? null : parts[4];
|
||||
final int expectedShardId = Integer.parseInt(parts[5]);
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder(index).numberOfShards(numberOfShards).numberOfReplicas(randomInt(3)).build();
|
||||
MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false);
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build();
|
||||
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
|
||||
final int pre20ExpectedShardId = Integer.parseInt(parts[5]);
|
||||
final int currentExpectedShard = Integer.parseInt(parts[6]);
|
||||
|
||||
OperationRouting operationRouting = node.injector().getInstance(OperationRouting.class);
|
||||
assertEquals(expectedShardId, operationRouting.indexShards(clusterState, index, type, id, routing).shardId().getId());
|
||||
for (Version version : allVersions()) {
|
||||
final Settings settings = settings(version).build();
|
||||
IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(settings).numberOfShards(numberOfShards).numberOfReplicas(randomInt(3)).build();
|
||||
MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false);
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build();
|
||||
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
|
||||
final int shardId = operationRouting.indexShards(clusterState, index, type, id, routing).shardId().getId();
|
||||
if (version.before(Version.V_2_0_0)) {
|
||||
assertEquals(pre20ExpectedShardId, shardId);
|
||||
} else {
|
||||
assertEquals(currentExpectedShard, shardId);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
|
@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.routing;
|
||||
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.HashFunction;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.simple.SimpleHashFunction;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.node.internal.InternalNode;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||
|
||||
import java.io.File;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
|
||||
|
||||
@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0)
|
||||
public class RoutingBackwardCompatibilityUponUpgradeTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
public void testDefaultRouting() throws Exception {
|
||||
test("default_routing_1_x", DjbHashFunction.class, false);
|
||||
}
|
||||
|
||||
public void testCustomRouting() throws Exception {
|
||||
test("custom_routing_1_x", SimpleHashFunction.class, true);
|
||||
}
|
||||
|
||||
private void test(String name, Class<? extends HashFunction> expectedHashFunction, boolean expectedUseType) throws Exception {
|
||||
File zippedIndexDir = new File(getClass().getResource("/org/elasticsearch/cluster/routing/" + name + ".zip").toURI());
|
||||
Settings baseSettings = prepareBackwardsDataDir(zippedIndexDir);
|
||||
internalCluster().startNode(ImmutableSettings.builder()
|
||||
.put(baseSettings)
|
||||
.put(InternalNode.HTTP_ENABLED, true)
|
||||
.build());
|
||||
ensureYellow("test");
|
||||
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().get();
|
||||
assertArrayEquals(new String[] {"test"}, getIndexResponse.indices());
|
||||
GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
|
||||
assertEquals(expectedHashFunction.getName(), getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION));
|
||||
assertEquals(Boolean.valueOf(expectedUseType).toString(), getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE));
|
||||
SearchResponse allDocs = client().prepareSearch("test").get();
|
||||
assertSearchResponse(allDocs);
|
||||
assertHitCount(allDocs, 4);
|
||||
// Make sure routing works
|
||||
for (SearchHit hit : allDocs.getHits().hits()) {
|
||||
GetResponse get = client().prepareGet(hit.index(), hit.type(), hit.id()).get();
|
||||
assertTrue(get.isExists());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.google.common.collect.Lists;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -292,7 +293,7 @@ public class AddIncrementallyTests extends ElasticsearchAllocationTestCase {
|
||||
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
|
||||
|
||||
for (int i = 0; i < numberOfIndices; i++) {
|
||||
IndexMetaData.Builder index = IndexMetaData.builder("test" + i).numberOfShards(numberOfShards).numberOfReplicas(
|
||||
IndexMetaData.Builder index = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(
|
||||
numberOfReplicas);
|
||||
metaDataBuilder = metaDataBuilder.put(index);
|
||||
}
|
||||
@ -347,7 +348,7 @@ public class AddIncrementallyTests extends ElasticsearchAllocationTestCase {
|
||||
MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.getMetaData());
|
||||
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable());
|
||||
|
||||
IndexMetaData.Builder index = IndexMetaData.builder("test" + indexOrdinal).numberOfShards(numberOfShards).numberOfReplicas(
|
||||
IndexMetaData.Builder index = IndexMetaData.builder("test" + indexOrdinal).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(
|
||||
numberOfReplicas);
|
||||
IndexMetaData imd = index.build();
|
||||
metaDataBuilder = metaDataBuilder.put(imd, true);
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
@ -46,7 +47,7 @@ public class AllocatePostApiFlagTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("creating an index with 1 shard, no replica");
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
|
||||
.build();
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
.addAsNew(metaData.index("test"))
|
||||
|
@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -59,7 +60,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("creating an index with 1 shard, no replica");
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
|
||||
.build();
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
.addAsNew(metaData.index("test"))
|
||||
@ -106,7 +107,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("--> building initial routing table");
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
.addAsNew(metaData.index("test"))
|
||||
@ -195,7 +196,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("--> building initial routing table");
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
.addAsNew(metaData.index("test"))
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
@ -54,7 +55,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded1'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -123,7 +124,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded2'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -198,7 +199,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded3'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -296,8 +297,8 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded4'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -390,7 +391,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded5'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(2))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -469,7 +470,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded6'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(3))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(3))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -551,7 +552,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'fullAwareness1'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -619,7 +620,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'fullAwareness2'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -693,8 +694,8 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'fullAwareness3'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -775,7 +776,7 @@ public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
logger.info("Building initial routing table for 'testUnbalancedZones'");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()

@ -20,6 +20,7 @@
package org.elasticsearch.cluster.routing.allocation;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -141,7 +142,7 @@ public class BalanceConfigurationTests extends ElasticsearchAllocationTestCase {
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();

for (int i = 0; i < numberOfIndices; i++) {
IndexMetaData.Builder index = IndexMetaData.builder("test" + i).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas);
IndexMetaData.Builder index = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas);
metaDataBuilder = metaDataBuilder.put(index);
}

@ -446,7 +447,7 @@ public class BalanceConfigurationTests extends ElasticsearchAllocationTestCase {
}), ClusterInfoService.EMPTY);
MetaData.Builder metaDataBuilder = MetaData.builder();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1);
IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1);
metaDataBuilder = metaDataBuilder.put(indexMeta);
MetaData metaData = metaDataBuilder.build();
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {

@ -19,6 +19,7 @@

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@ -46,8 +47,8 @@ public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCas
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -133,8 +134,8 @@ public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCas
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -238,8 +239,8 @@ public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCas
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -323,8 +324,8 @@ public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCas
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -447,8 +448,8 @@ public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCas
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()
@ -532,8 +533,8 @@ public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCas
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()

@ -19,6 +19,7 @@

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@ -49,7 +50,7 @@ public class ConcurrentRebalanceRoutingTests extends ElasticsearchAllocationTest
logger.info("Building initial routing table");

MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
.build();

RoutingTable routingTable = RoutingTable.builder()

@ -19,6 +19,7 @@

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@ -51,7 +52,7 @@ public class DeadNodesAllocationTests extends ElasticsearchAllocationTestCase {

logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
@ -103,7 +104,7 @@ public class DeadNodesAllocationTests extends ElasticsearchAllocationTestCase {

logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
@ -178,7 +179,7 @@ public class DeadNodesAllocationTests extends ElasticsearchAllocationTestCase {

logger.info("--> building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))

@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -27,7 +28,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.test.ElasticsearchAllocationTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
@ -52,7 +52,7 @@ public class DisableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -81,7 +81,7 @@ public class DisableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -112,8 +112,8 @@ public class DisableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
.build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("disabled").settings(ImmutableSettings.builder().put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true).put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("enabled").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("disabled").settings(settings(Version.CURRENT).put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true).put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("enabled").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -50,7 +51,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ElasticsearchAll
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -47,8 +48,8 @@ public class FailedNodeRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -108,8 +109,8 @@ public class FailedNodeRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -55,7 +56,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("--> building initial routing table");
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
.addAsNew(metaData.index("test"))
|
||||
@ -145,7 +146,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -227,7 +228,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -284,7 +285,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
int numberOfReplicas = scaledRandomIntBetween(2, 10);
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(numberOfReplicas))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(numberOfReplicas))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -341,7 +342,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -399,7 +400,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -491,7 +492,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
.build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(2))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -538,7 +539,7 @@ public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
.build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(2))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -55,7 +56,7 @@ public class FilterRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -99,7 +100,7 @@ public class FilterRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").settings(settingsBuilder()
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
|
||||
.put("index.number_of_shards", 2)
|
||||
.put("index.number_of_replicas", 1)
|
||||
.put("index.routing.allocation.include.tag1", "value1,value2")
|
||||
@ -142,7 +143,7 @@ public class FilterRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("--> switch between value2 and value4, shards should be relocating");
|
||||
|
||||
metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").settings(settingsBuilder()
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
|
||||
.put("index.number_of_shards", 2)
|
||||
.put("index.number_of_replicas", 1)
|
||||
.put("index.routing.allocation.include.tag1", "value1,value4")
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -28,7 +29,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.test.ElasticsearchAllocationTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
@ -54,8 +54,8 @@ public class IndexBalanceTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
|
||||
|
||||
@ -185,8 +185,8 @@ public class IndexBalanceTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
|
||||
|
||||
@ -348,7 +348,7 @@ public class IndexBalanceTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1)).build();
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
|
||||
|
||||
@ -447,7 +447,7 @@ public class IndexBalanceTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
prevRoutingTable = routingTable;
|
||||
metaData = MetaData.builder(metaData)
|
||||
.put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
))
|
||||
|
@ -61,7 +61,7 @@ public class NodeVersionAllocationDeciderTests extends ElasticsearchAllocationTe
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -179,7 +179,7 @@ public class NodeVersionAllocationDeciderTests extends ElasticsearchAllocationTe
|
||||
RoutingTable.Builder rtBuilder = RoutingTable.builder();
|
||||
int numIndices = between(1, 20);
|
||||
for (int i = 0; i < numIndices; i++) {
|
||||
builder.put(IndexMetaData.builder("test_" + i).numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2)));
|
||||
builder.put(IndexMetaData.builder("test_" + i).settings(settings(Version.CURRENT)).numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2)));
|
||||
}
|
||||
MetaData metaData = builder.build();
|
||||
|
||||
@ -227,7 +227,7 @@ public class NodeVersionAllocationDeciderTests extends ElasticsearchAllocationTe
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(2))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -51,8 +52,8 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends Elasticsearc
|
||||
logger.info("create 2 indices with [{}] no replicas, and wait till all are allocated", numberOfShards);
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(numberOfShards).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(numberOfShards).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -78,12 +79,12 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends Elasticsearc
|
||||
logger.info("remove one of the nodes and apply filter to move everything from another node");
|
||||
|
||||
metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").settings(settingsBuilder()
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
|
||||
.put("index.number_of_shards", numberOfShards)
|
||||
.put("index.number_of_replicas", 0)
|
||||
.put("index.routing.allocation.exclude.tag1", "value2")
|
||||
.build()))
|
||||
.put(IndexMetaData.builder("test2").settings(settingsBuilder()
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)
|
||||
.put("index.number_of_shards", numberOfShards)
|
||||
.put("index.number_of_replicas", 0)
|
||||
.put("index.routing.allocation.exclude.tag1", "value2")
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -50,8 +51,8 @@ public class PreferPrimaryAllocationTests extends ElasticsearchAllocationTestCas
|
||||
logger.info("create several indices with no replicas, and wait till all are allocated");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(10).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(10).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -84,7 +85,7 @@ public class PreferPrimaryAllocationTests extends ElasticsearchAllocationTestCas
|
||||
|
||||
logger.info("create a new index");
|
||||
metaData = MetaData.builder(clusterState.metaData())
|
||||
.put(IndexMetaData.builder("new_index").numberOfShards(4).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("new_index").settings(settings(Version.CURRENT)).numberOfShards(4).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
routingTable = RoutingTable.builder(clusterState.routingTable())
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -50,7 +51,7 @@ public class PrimaryElectionRoutingTests extends ElasticsearchAllocationTestCase
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -101,7 +102,7 @@ public class PrimaryElectionRoutingTests extends ElasticsearchAllocationTestCase
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -53,7 +54,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ElasticsearchAl
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterInfoService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
@ -69,7 +70,7 @@ public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCas
|
||||
maxNumReplicas = Math.max(maxNumReplicas, replicas + 1);
|
||||
int numShards = scaledRandomIntBetween(1, 20);
|
||||
totalNumShards += numShards * (replicas + 1);
|
||||
metaBuilder.put(IndexMetaData.builder("INDEX_" + i).numberOfShards(numShards).numberOfReplicas(replicas));
|
||||
metaBuilder.put(IndexMetaData.builder("INDEX_" + i).settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(replicas));
|
||||
|
||||
}
|
||||
MetaData metaData = metaBuilder.build();
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -55,7 +56,7 @@ public class RebalanceAfterActiveTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -48,7 +49,7 @@ public class ReplicaAllocatedAfterPrimaryTests extends ElasticsearchAllocationTe
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -29,7 +30,6 @@ import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.test.ElasticsearchAllocationTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
@ -55,8 +55,8 @@ public class RoutingNodesIntegrityTests extends ElasticsearchAllocationTestCase
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
|
||||
|
||||
@ -128,8 +128,8 @@ public class RoutingNodesIntegrityTests extends ElasticsearchAllocationTestCase
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(3).numberOfReplicas(1)).build();
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
|
||||
|
||||
@ -221,7 +221,7 @@ public class RoutingNodesIntegrityTests extends ElasticsearchAllocationTestCase
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(3).numberOfReplicas(1)).build();
|
||||
MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
|
||||
|
||||
@ -302,7 +302,7 @@ public class RoutingNodesIntegrityTests extends ElasticsearchAllocationTestCase
|
||||
|
||||
prevRoutingTable = routingTable;
|
||||
metaData = MetaData.builder(metaData)
|
||||
.put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
))
|
||||
|
@ -54,7 +54,7 @@ public class SameShardRoutingTests extends ElasticsearchAllocationTestCase {
|
||||
AllocationService strategy = createAllocationService(settingsBuilder().put(SameShardAllocationDecider.SAME_HOST_SETTING, true).build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(2).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -45,8 +46,8 @@ public class ShardVersioningTests extends ElasticsearchAllocationTestCase {
|
||||
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.test.ElasticsearchAllocationTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
@ -52,7 +52,7 @@ public class ShardsLimitAllocationTests extends ElasticsearchAllocationTestCase
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 2)))
|
||||
@ -102,7 +102,7 @@ public class ShardsLimitAllocationTests extends ElasticsearchAllocationTestCase
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
))
|
||||
@ -127,7 +127,7 @@ public class ShardsLimitAllocationTests extends ElasticsearchAllocationTestCase
|
||||
|
||||
logger.info("add another index with 5 shards");
|
||||
metaData = MetaData.builder(metaData)
|
||||
.put(IndexMetaData.builder("test1").settings(ImmutableSettings.settingsBuilder()
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
))
|
||||
@ -158,7 +158,7 @@ public class ShardsLimitAllocationTests extends ElasticsearchAllocationTestCase
|
||||
|
||||
logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE + " for test, see that things move");
|
||||
metaData = MetaData.builder(metaData)
|
||||
.put(IndexMetaData.builder("test").settings(ImmutableSettings.settingsBuilder()
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 3)
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -58,7 +59,7 @@ public class SingleShardNoReplicasRoutingTests extends ElasticsearchAllocationTe
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -160,7 +161,7 @@ public class SingleShardNoReplicasRoutingTests extends ElasticsearchAllocationTe
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -216,7 +217,7 @@ public class SingleShardNoReplicasRoutingTests extends ElasticsearchAllocationTe
|
||||
|
||||
MetaData.Builder metaDataBuilder = MetaData.builder();
|
||||
for (int i = 0; i < numberOfIndices; i++) {
|
||||
metaDataBuilder.put(IndexMetaData.builder("test" + i).numberOfShards(1).numberOfReplicas(0));
|
||||
metaDataBuilder.put(IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0));
|
||||
}
|
||||
MetaData metaData = metaDataBuilder.build();
|
||||
|
||||
@ -329,7 +330,7 @@ public class SingleShardNoReplicasRoutingTests extends ElasticsearchAllocationTe
|
||||
|
||||
MetaData.Builder metaDataBuilder = MetaData.builder();
|
||||
for (int i = 0; i < numberOfIndices; i++) {
|
||||
metaDataBuilder.put(IndexMetaData.builder("test" + i).numberOfShards(1).numberOfReplicas(0));
|
||||
metaDataBuilder.put(IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0));
|
||||
}
|
||||
MetaData metaData = metaDataBuilder.build();
|
||||
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -49,7 +50,7 @@ public class SingleShardOneReplicaRoutingTests extends ElasticsearchAllocationTe
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -57,7 +58,7 @@ public class TenShardsOneReplicaRoutingTests extends ElasticsearchAllocationTest
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -50,7 +51,7 @@ public class ThrottlingAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -111,7 +112,7 @@ public class ThrottlingAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -48,7 +49,7 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchAllocationTestCase
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing.allocation.decider;
|
||||
import com.google.common.base.Predicate;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.elasticsearch.ElasticsearchIllegalArgumentException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterInfo;
|
||||
import org.elasticsearch.cluster.ClusterInfoService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
@ -96,7 +97,7 @@ public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
|
||||
.build(), deciders, new ShardsAllocators(), cis);
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -292,7 +293,7 @@ public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
|
||||
.build(), deciders, new ShardsAllocators(), cis);
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(2))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -553,7 +554,7 @@ public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
|
||||
.build(), deciders, new ShardsAllocators(), cis);
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -620,7 +621,7 @@ public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
|
||||
.build(), deciders, new ShardsAllocators(), cis);
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -724,8 +725,8 @@ public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
|
||||
.build(), deciders, new ShardsAllocators(), cis);
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.cluster.routing.allocation.decider;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
@ -28,6 +29,8 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.MutableShardRouting;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocationService;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
@ -39,8 +42,12 @@ import org.junit.Test;
|
||||
import java.util.EnumSet;
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
|
||||
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.*;
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
|
||||
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
|
||||
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE;
|
||||
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE;
|
||||
import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE;
|
||||
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
@ -59,7 +66,7 @@ public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -88,7 +95,7 @@ public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
logger.info("Building initial routing table");
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -119,10 +126,10 @@ public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
.build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("disabled").settings(ImmutableSettings.builder()
|
||||
.put(IndexMetaData.builder("disabled").settings(settings(Version.CURRENT)
|
||||
.put(INDEX_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()))
|
||||
.numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("enabled").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("enabled").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -169,9 +176,8 @@ public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").settings(indexSettings).numberOfShards(3).numberOfReplicas(1))
|
||||
|
||||
.put(IndexMetaData.builder("always_disabled").settings(settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(3).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("always_disabled").settings(settings(Version.CURRENT).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -273,7 +279,7 @@ public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
|
||||
|
||||
logger.info("Building initial routing table");
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").settings(indexSettings).numberOfShards(6).numberOfReplicas(0))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(6).numberOfReplicas(0))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -0,0 +1,43 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.routing.operation.hash.murmur3;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomInts;
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
|
||||
import com.google.common.hash.HashFunction;
|
||||
import com.google.common.hash.Hashing;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
|
||||
public class Murmur3HashFunctionTests extends ElasticsearchTestCase {
|
||||
|
||||
public void test() {
|
||||
// Make sure that we agree with guava
|
||||
Murmur3HashFunction murmur3 = new Murmur3HashFunction();
|
||||
HashFunction guavaMurmur3 = Hashing.murmur3_32();
|
||||
for (int i = 0; i < 100; ++i) {
|
||||
final String id = RandomStrings.randomRealisticUnicodeOfCodepointLength(getRandom(), RandomInts.randomIntBetween(getRandom(), 1, 20));
|
||||
final int hash1 = guavaMurmur3.newHasher().putUnencodedChars(id).hash().asInt();
|
||||
final int hash2 = murmur3.hash(id);
|
||||
assertEquals(hash1, hash2);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
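The test above pins the routing hash to Guava's 32-bit murmur3 over the raw UTF-16 chars of the routing value. As a minimal sketch (assuming plain delegation to Guava; the real Murmur3HashFunction may use its own murmur3 implementation, and the class name here is illustrative), a hash function that satisfies this test looks like:

import com.google.common.hash.Hashing;

public class DelegatingMurmur3HashFunction {
    // Hash the routing value exactly the way the test expects:
    // Hashing.murmur3_32().putUnencodedChars(value).asInt()
    public int hash(String routing) {
        return Hashing.murmur3_32().newHasher().putUnencodedChars(routing).hash().asInt();
    }
}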
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.serialization;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
@ -41,7 +42,7 @@ public class ClusterSerializationTests extends ElasticsearchAllocationTestCase {
|
||||
@Test
|
||||
public void testClusterStateSerialization() throws Exception {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -65,7 +66,7 @@ public class ClusterSerializationTests extends ElasticsearchAllocationTestCase {
|
||||
@Test
|
||||
public void testRoutingTableSerialization() throws Exception {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(10).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
|
@ -40,7 +40,7 @@ public class ClusterStateToStringTests extends ElasticsearchAllocationTestCase {
|
||||
@Test
|
||||
public void testClusterStateSerialization() throws Exception {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test_idx").numberOfShards(10).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test_idx").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
|
||||
.put(IndexTemplateMetaData.builder("test_template").build())
|
||||
.build();
|
||||
|
||||
|
@ -21,6 +21,7 @@ package org.elasticsearch.cluster.structure;
|
||||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.*;
|
||||
import org.elasticsearch.cluster.routing.allocation.AllocationService;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
|
||||
import org.elasticsearch.cluster.routing.operation.plain.PlainOperationRouting;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
@ -78,7 +78,7 @@ public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
|
||||
@Test
|
||||
public void testIterator1() {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(2))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
|
||||
.build();
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
.addAsNew(metaData.index("test1"))
|
||||
@ -106,8 +106,8 @@ public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
|
||||
@Test
|
||||
public void testIterator2() {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -187,8 +187,8 @@ public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
|
||||
@Test
|
||||
public void testRandomRouting() {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test1").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -221,7 +221,7 @@ public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
|
||||
.build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(1).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -270,7 +270,7 @@ public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
|
||||
.build());
|
||||
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder("test").numberOfShards(5).numberOfReplicas(1))
|
||||
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
@ -293,7 +293,7 @@ public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
|
||||
routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
|
||||
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
|
||||
|
||||
PlainOperationRouting operationRouting = new PlainOperationRouting(ImmutableSettings.Builder.EMPTY_SETTINGS, new DjbHashFunction(), new AwarenessAllocationDecider());
|
||||
PlainOperationRouting operationRouting = new PlainOperationRouting(ImmutableSettings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider());
|
||||
|
||||
GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0");
|
||||
assertThat(shardIterators.size(), equalTo(1));
|
||||
|
@ -23,11 +23,11 @@ import org.apache.lucene.codecs.CodecUtil;
|
||||
import org.apache.lucene.store.*;
|
||||
import org.apache.lucene.util.TestRuleMarkFailure;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.xcontent.*;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
import org.junit.Assert;
|
||||
@ -303,7 +303,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
|
||||
|
||||
private IndexMetaData.Builder indexBuilder(String index) throws IOException {
|
||||
return IndexMetaData.builder(index)
|
||||
.settings(ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)));
|
||||
.settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)));
|
||||
}
|
||||
|
||||
|
||||
|
@ -23,7 +23,6 @@ import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
@ -82,7 +81,7 @@ public class PreBuiltAnalyzerIntegrationTests extends ElasticsearchIntegrationTe
|
||||
.endObject()
|
||||
.endObject();
|
||||
|
||||
Settings versionSettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
|
||||
Settings versionSettings = settings(randomVersion).build();
|
||||
client().admin().indices().prepareCreate(indexName).addMapping("type", mapping).setSettings(versionSettings).get();
|
||||
}
|
||||
|
||||
@ -138,8 +137,7 @@ public class PreBuiltAnalyzerIntegrationTests extends ElasticsearchIntegrationTe
|
||||
.endObject()
|
||||
.endObject();
|
||||
|
||||
Settings versionSettings = ImmutableSettings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion())
|
||||
Settings versionSettings = settings(randomVersion())
|
||||
.put("index.analysis.analyzer.my_dummy.type", "custom")
|
||||
.put("index.analysis.analyzer.my_dummy.filter", "my_dummy_token_filter")
|
||||
.put("index.analysis.analyzer.my_dummy.char_filter", "my_dummy_char_filter")
|
||||
|
@ -21,10 +21,13 @@ package org.elasticsearch.indices.stats;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
|
||||
import org.elasticsearch.action.admin.indices.stats.*;
|
||||
import org.elasticsearch.action.admin.indices.stats.CommonStats;
|
||||
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
|
||||
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
||||
import org.elasticsearch.action.admin.indices.stats.ShardStats;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
@ -56,7 +59,12 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.lessThan;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
@ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, randomDynamicTemplates = false)
|
||||
public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
@ -515,7 +523,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
NumShards test1 = getNumShards("test1");
|
||||
|
||||
for (int i = 0; i < 20; i++) {
|
||||
for (int i = 0; i < 100; i++) {
|
||||
index("test1", "type1", Integer.toString(i), "field", "value");
|
||||
index("test1", "type2", Integer.toString(i), "field", "value");
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ public class IndicesStoreTests extends ElasticsearchTestCase {
|
||||
int numReplicas = randomInt(2);
|
||||
|
||||
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
|
||||
|
||||
assertFalse(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
|
||||
@ -81,7 +81,7 @@ public class IndicesStoreTests extends ElasticsearchTestCase {
|
||||
int numReplicas = randomInt(2);
|
||||
|
||||
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
|
||||
|
||||
for (int i = 0; i < numShards; i++) {
|
||||
@ -105,7 +105,7 @@ public class IndicesStoreTests extends ElasticsearchTestCase {
|
||||
int numReplicas = randomInt(2);
|
||||
|
||||
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), Version.CURRENT)));
|
||||
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
|
||||
int localShardId = randomInt(numShards - 1);
|
||||
@ -128,7 +128,7 @@ public class IndicesStoreTests extends ElasticsearchTestCase {
|
||||
int numReplicas = randomInt(2);
|
||||
|
||||
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode));
|
||||
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
|
||||
for (int i = 0; i < numShards; i++) {
|
||||
@ -151,7 +151,7 @@ public class IndicesStoreTests extends ElasticsearchTestCase {
|
||||
// Most of the times don't test bwc and use current version
|
||||
final Version nodeVersion = randomBoolean() ? Version.CURRENT : randomVersion();
|
||||
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), nodeVersion)));
|
||||
IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
|
||||
for (int i = 0; i < numShards; i++) {
|
||||
@ -178,7 +178,7 @@ public class IndicesStoreTests extends ElasticsearchTestCase {
|
||||
int numReplicas = randomInt(2);
|
||||
|
||||
ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
|
||||
final Version nodeVersion = randomBoolean() ? Version.CURRENT : randomVersion();
|
||||
|
||||
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id())
|
||||
|
@ -150,13 +150,16 @@ public class SimpleMgetTests extends ElasticsearchIntegrationTest {
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(2, DEFAULT_MAX_NUM_SHARDS))));
|
||||
ensureYellow();
|
||||
|
||||
client().prepareIndex("test", "test", "1").setRefresh(true).setRouting("2")
|
||||
final String id = routingKeyForShard("test", "test", 0);
|
||||
final String routingOtherShard = routingKeyForShard("test", "test", 1);
|
||||
|
||||
client().prepareIndex("test", "test", id).setRefresh(true).setRouting(routingOtherShard)
|
||||
.setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
|
||||
.execute().actionGet();
|
||||
|
||||
MultiGetResponse mgetResponse = client().prepareMultiGet()
|
||||
.add(new MultiGetRequest.Item(indexOrAlias(), "test", "1").routing("2"))
|
||||
.add(new MultiGetRequest.Item(indexOrAlias(), "test", "1"))
|
||||
.add(new MultiGetRequest.Item(indexOrAlias(), "test", id).routing(routingOtherShard))
|
||||
.add(new MultiGetRequest.Item(indexOrAlias(), "test", id))
|
||||
.execute().actionGet();
|
||||
|
||||
assertThat(mgetResponse.getResponses().length, is(2));
|
||||
|
@ -22,8 +22,6 @@ import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.percolate.PercolateResponse;
|
||||
import org.elasticsearch.action.percolate.PercolateSourceBuilder;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.index.percolator.PercolatorException;
|
||||
import org.elasticsearch.index.query.QueryParsingException;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||
@ -43,7 +41,7 @@ public class PercolatorBackwardsCompatibilityTests extends ElasticsearchIntegrat
|
||||
public void testPercolatorUpgrading() throws Exception {
|
||||
// Simulates an index created on an node before 1.4.0 where the field resolution isn't strict.
|
||||
assertAcked(prepareCreate("test")
|
||||
.setSettings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_3_0).put(indexSettings())));
|
||||
.setSettings(settings(Version.V_1_3_0).put(indexSettings())));
|
||||
ensureGreen();
|
||||
int numDocs = randomIntBetween(100, 150);
|
||||
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
|
||||
|
@ -22,22 +22,20 @@ package org.elasticsearch.rest.action.admin.indices.upgrade;
|
||||
import org.apache.http.impl.client.HttpClients;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.InetSocketTransportAddress;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.node.internal.InternalNode;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
|
||||
import org.junit.Ignore;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Arrays;
|
||||
|
||||
@ -47,31 +45,12 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0)
|
||||
public class UpgradeReallyOldIndexTest extends ElasticsearchIntegrationTest {
|
||||
|
||||
// this can maybe go into ElasticsearchIntegrationTest
|
||||
public File prepareBackwardsDataDir(File backwardsIndex) throws IOException {
|
||||
File dataDir = new File(newTempDir(), "data");
|
||||
TestUtil.unzip(backwardsIndex, dataDir.getParentFile());
|
||||
assertTrue(dataDir.exists());
|
||||
String[] list = dataDir.list();
|
||||
if (list == null || list.length > 1) {
|
||||
throw new IllegalStateException("Backwards index must contain exactly one cluster");
|
||||
}
|
||||
File src = new File(dataDir, list[0]);
|
||||
File dest = new File(dataDir, internalCluster().getClusterName());
|
||||
assertTrue(src.exists());
|
||||
src.renameTo(dest);
|
||||
assertFalse(src.exists());
|
||||
assertTrue(dest.exists());
|
||||
return dataDir;
|
||||
}
|
||||
|
||||
public void testUpgrade_0_20() throws Exception {
|
||||
// If this assert trips it means we are not suppressing enough codecs up above:
|
||||
assertFalse("test infra is broken!", LuceneTestCase.OLD_FORMAT_IMPERSONATION_IS_ACTIVE);
|
||||
File dataDir = prepareBackwardsDataDir(new File(getClass().getResource("index-0.20.zip").toURI()));
|
||||
Settings baseSettings = prepareBackwardsDataDir(new File(getClass().getResource("index-0.20.zip").toURI()));
|
||||
internalCluster().startNode(ImmutableSettings.builder()
|
||||
.put("path.data", dataDir.getPath())
|
||||
.put("gateway.type", "local") // this is important we need to recover from gateway
|
||||
.put(baseSettings)
|
||||
.put(InternalNode.HTTP_ENABLED, true)
|
||||
.build());
|
||||
ensureGreen("test");
|
||||
|
@ -116,7 +116,7 @@ public class ShardSizeTermsTests extends ShardSizeTests {
|
||||
|
||||
indexData();
|
||||
|
||||
SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
|
||||
SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1)
|
||||
.setQuery(matchAllQuery())
|
||||
.addAggregation(terms("keys").field("key").size(3)
|
||||
.collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
|
||||
@ -245,7 +245,7 @@ public class ShardSizeTermsTests extends ShardSizeTests {
|
||||
|
||||
indexData();
|
||||
|
||||
SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
|
||||
SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1)
|
||||
.setQuery(matchAllQuery())
|
||||
.addAggregation(terms("keys").field("key").size(3)
|
||||
.collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
|
||||
@ -375,7 +375,7 @@ public class ShardSizeTermsTests extends ShardSizeTests {
|
||||
|
||||
indexData();
|
||||
|
||||
SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting("1")
|
||||
SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1)
|
||||
.setQuery(matchAllQuery())
|
||||
.addAggregation(terms("keys").field("key").size(3)
|
||||
.collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
|
||||
|
@ -21,8 +21,6 @@ package org.elasticsearch.search.aggregations.bucket;
|
||||
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
|
||||
import org.junit.Ignore;
|
||||
@ -42,24 +40,10 @@ import static org.hamcrest.Matchers.is;
|
||||
@ClusterScope(scope = SUITE)
|
||||
public abstract class ShardSizeTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
/**
|
||||
* to properly test the effect/functionality of shard_size, we need to force having 2 shards and also
|
||||
* control the routing such that certain documents will end on each shard. Using "djb" routing hash + ignoring the
|
||||
* doc type when hashing will ensure that docs with routing value "1" will end up in a different shard than docs with
|
||||
* routing value "2".
|
||||
*/
|
||||
@Override
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return ImmutableSettings.builder()
|
||||
.put(super.nodeSettings(nodeOrdinal))
|
||||
.put("cluster.routing.operation.hash.type", "djb")
|
||||
.put("cluster.routing.operation.use_type", "false")
|
||||
.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int numberOfShards() {
|
||||
return 2;
|
||||
// we need at least 2
|
||||
return randomIntBetween(2, DEFAULT_MAX_NUM_SHARDS);
|
||||
}
|
||||
|
||||
protected void createIdx(String keyFieldMapping) {
|
||||
@ -67,6 +51,9 @@ public abstract class ShardSizeTests extends ElasticsearchIntegrationTest {
|
||||
.addMapping("type", "key", keyFieldMapping));
|
||||
}
|
||||
|
||||
protected static String routing1; // routing key to shard 1
|
||||
protected static String routing2; // routing key to shard 2
|
||||
|
||||
protected void indexData() throws Exception {
|
||||
|
||||
/*
|
||||
@ -87,29 +74,32 @@ public abstract class ShardSizeTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
List<IndexRequestBuilder> docs = new ArrayList<>();
|
||||
|
||||
docs.addAll(indexDoc("1", "1", 5));
|
||||
docs.addAll(indexDoc("1", "2", 4));
|
||||
docs.addAll(indexDoc("1", "3", 3));
|
||||
docs.addAll(indexDoc("1", "4", 2));
|
||||
docs.addAll(indexDoc("1", "5", 1));
|
||||
routing1 = routingKeyForShard("idx", "type", 0);
|
||||
routing2 = routingKeyForShard("idx", "type", 1);
|
||||
|
||||
docs.addAll(indexDoc(routing1, "1", 5));
|
||||
docs.addAll(indexDoc(routing1, "2", 4));
|
||||
docs.addAll(indexDoc(routing1, "3", 3));
|
||||
docs.addAll(indexDoc(routing1, "4", 2));
|
||||
docs.addAll(indexDoc(routing1, "5", 1));
|
||||
|
||||
// total docs in shard "1" = 15
|
||||
|
||||
docs.addAll(indexDoc("2", "1", 3));
|
||||
docs.addAll(indexDoc("2", "2", 1));
|
||||
docs.addAll(indexDoc("2", "3", 5));
|
||||
docs.addAll(indexDoc("2", "4", 2));
|
||||
docs.addAll(indexDoc("2", "5", 1));
|
||||
docs.addAll(indexDoc(routing2, "1", 3));
|
||||
docs.addAll(indexDoc(routing2, "2", 1));
|
||||
docs.addAll(indexDoc(routing2, "3", 5));
|
||||
docs.addAll(indexDoc(routing2, "4", 2));
|
||||
docs.addAll(indexDoc(routing2, "5", 1));
|
||||
|
||||
// total docs in shard "2" = 12
|
||||
|
||||
indexRandom(true, docs);
|
||||
|
||||
SearchResponse resp = client().prepareSearch("idx").setTypes("type").setRouting("1").setQuery(matchAllQuery()).execute().actionGet();
|
||||
SearchResponse resp = client().prepareSearch("idx").setTypes("type").setRouting(routing1).setQuery(matchAllQuery()).execute().actionGet();
|
||||
assertSearchResponse(resp);
|
||||
long totalOnOne = resp.getHits().getTotalHits();
|
||||
assertThat(totalOnOne, is(15l));
|
||||
resp = client().prepareSearch("idx").setTypes("type").setRouting("2").setQuery(matchAllQuery()).execute().actionGet();
|
||||
resp = client().prepareSearch("idx").setTypes("type").setRouting(routing2).setQuery(matchAllQuery()).execute().actionGet();
|
||||
assertSearchResponse(resp);
|
||||
long totalOnTwo = resp.getHits().getTotalHits();
|
||||
assertThat(totalOnTwo, is(12l));
|
||||
|
@ -21,6 +21,8 @@ package org.elasticsearch.search.basic;
|
||||
|
||||
|
||||
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
import com.google.common.collect.Sets;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
@ -234,29 +236,25 @@ public class TransportTwoNodesSearchTests extends ElasticsearchIntegrationTest {
|
||||
public void testQueryThenFetchWithSort() throws Exception {
|
||||
prepareData();
|
||||
|
||||
SearchSourceBuilder source = searchSource()
|
||||
.query(termQuery("multi", "test"))
|
||||
.from(0).size(60).explain(true).sort("age", SortOrder.ASC);
|
||||
|
||||
SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_THEN_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
|
||||
assertNoFailures(searchResponse);
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(60));
|
||||
for (int i = 0; i < 60; i++) {
|
||||
SearchHit hit = searchResponse.getHits().hits()[i];
|
||||
// System.out.println(hit.shard() + ": " + hit.explanation());
|
||||
assertThat(hit.explanation(), notNullValue());
|
||||
assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i)));
|
||||
}
|
||||
|
||||
searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
|
||||
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(40));
|
||||
for (int i = 0; i < 40; i++) {
|
||||
SearchHit hit = searchResponse.getHits().hits()[i];
|
||||
assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(i + 60)));
|
||||
int total = 0;
|
||||
SearchResponse searchResponse = client().prepareSearch("test").setQuery(termQuery("multi", "test")).setSize(60).setExplain(true).addSort("age", SortOrder.ASC).setScroll(TimeValue.timeValueSeconds(30)).get();
|
||||
while (true) {
|
||||
assertNoFailures(searchResponse);
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
SearchHit[] hits = searchResponse.getHits().hits();
|
||||
if (hits.length == 0) {
|
||||
break; // finished
|
||||
}
|
||||
for (int i = 0; i < hits.length; ++i) {
|
||||
SearchHit hit = hits[i];
|
||||
assertThat(hit.explanation(), notNullValue());
|
||||
assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(total + i)));
|
||||
}
|
||||
total += hits.length;
|
||||
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
|
||||
}
|
||||
clearScroll(searchResponse.getScrollId());
|
||||
assertEquals(100, total);
|
||||
}
|
||||
|
||||
@Test
|
||||
@ -285,16 +283,17 @@ public class TransportTwoNodesSearchTests extends ElasticsearchIntegrationTest {
|
||||
assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
|
||||
}
|
||||
|
||||
searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
|
||||
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(40));
|
||||
for (int i = 0; i < 40; i++) {
|
||||
SearchHit hit = searchResponse.getHits().hits()[i];
|
||||
// assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
|
||||
// we don't do perfect sorting when it comes to scroll with Query+Fetch
|
||||
assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
|
||||
}
|
||||
do {
|
||||
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll("10m").get();
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
assertThat(searchResponse.getHits().hits().length, lessThanOrEqualTo(40));
|
||||
for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
|
||||
SearchHit hit = searchResponse.getHits().hits()[i];
|
||||
// we don't do perfect sorting when it comes to scroll with Query+Fetch
|
||||
assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
|
||||
}
|
||||
} while (searchResponse.getHits().getHits().length > 0);
|
||||
clearScroll(searchResponse.getScrollId());
|
||||
assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
|
||||
}
|
||||
|
||||
@ -312,7 +311,8 @@ public class TransportTwoNodesSearchTests extends ElasticsearchIntegrationTest {
|
||||
}
|
||||
|
||||
|
||||
SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
|
||||
//SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(DFS_QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
|
||||
SearchResponse searchResponse = client().prepareSearch("test").setSearchType(DFS_QUERY_AND_FETCH).setScroll("10m").setSource(source.buildAsBytes()).get();
|
||||
assertNoFailures(searchResponse);
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(60)); // 20 per shard
|
||||
@ -324,17 +324,18 @@ public class TransportTwoNodesSearchTests extends ElasticsearchIntegrationTest {
|
||||
assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
|
||||
}
|
||||
|
||||
searchResponse = client().searchScroll(searchScrollRequest(searchResponse.getScrollId())).actionGet();
|
||||
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(40));
|
||||
for (int i = 0; i < 40; i++) {
|
||||
SearchHit hit = searchResponse.getHits().hits()[i];
|
||||
// System.out.println(hit.shard() + ": " + hit.explanation());
|
||||
// assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - 60 - 1 - i)));
|
||||
// we don't do perfect sorting when it comes to scroll with Query+Fetch
|
||||
assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
|
||||
}
|
||||
do {
|
||||
searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll("10m").get();
|
||||
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
|
||||
assertThat(searchResponse.getHits().hits().length, lessThanOrEqualTo(40));
|
||||
for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
|
||||
SearchHit hit = searchResponse.getHits().hits()[i];
|
||||
// we don't do perfect sorting when it comes to scroll with Query+Fetch
|
||||
assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), notNullValue());
|
||||
}
|
||||
} while (searchResponse.getHits().hits().length > 0);
|
||||
clearScroll(searchResponse.getScrollId());
|
||||
assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
|
||||
}
|
||||
|
||||
|
@ -459,7 +459,7 @@ public class SimpleQueryTests extends ElasticsearchIntegrationTest {
|
||||
// backwards compat test!
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.addMapping("type1", "field1", "type=string,omit_term_freq_and_positions=true")
|
||||
.setSettings(SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_VERSION_CREATED, version.id));
|
||||
.setSettings(settings(version).put(SETTING_NUMBER_OF_SHARDS, 1)));
|
||||
assertThat(version.onOrAfter(Version.V_1_0_0_RC2), equalTo(false));
|
||||
indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"),
|
||||
client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
|
||||
|
@ -73,16 +73,17 @@ public class QueryRescorerTests extends ElasticsearchIntegrationTest {
|
||||
QueryBuilders.functionScoreQuery(QueryBuilders.matchAllQuery())
|
||||
.boostMode("replace").add(ScoreFunctionBuilders.factorFunction(100))).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f))
|
||||
.setRescoreWindow(1).setSize(randomIntBetween(2,10)).execute().actionGet();
|
||||
assertSearchResponse(searchResponse);
|
||||
assertFirstHit(searchResponse, hasScore(100.f));
|
||||
int numPending100 = numShards;
|
||||
int numDocsWith100AsAScore = 0;
|
||||
for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
|
||||
float score = searchResponse.getHits().hits()[i].getScore();
|
||||
if (score == 100f) {
|
||||
assertThat(numPending100--, greaterThanOrEqualTo(0));
|
||||
} else {
|
||||
assertThat(numPending100, equalTo(0));
|
||||
numDocsWith100AsAScore += 1;
|
||||
}
|
||||
}
|
||||
// we cannot assert that they are equal since some shards might not have docs at all
|
||||
assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards));
|
||||
}
|
||||
}
|
||||
|
||||
@ -93,18 +94,16 @@ public class QueryRescorerTests extends ElasticsearchIntegrationTest {
|
||||
"type1",
|
||||
jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
|
||||
.field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put(indexSettings()).put("index.number_of_shards", 2)));
|
||||
.setSettings(ImmutableSettings.settingsBuilder().put(indexSettings()).put("index.number_of_shards", 1)));
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").execute().actionGet();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").execute()
|
||||
.actionGet();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree ").get();
|
||||
client().prepareIndex("test", "type1", "3")
|
||||
.setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").execute()
|
||||
.actionGet();
|
||||
.setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").get();
|
||||
refresh();
|
||||
SearchResponse searchResponse = client().prepareSearch()
|
||||
.setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
|
||||
.setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)))
|
||||
.setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2))
|
||||
.setRescoreWindow(5).execute().actionGet();
|
||||
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(3l));
|
||||
|
@ -29,6 +29,7 @@ import com.google.common.collect.Lists;
|
||||
import org.apache.lucene.store.StoreRateLimiting;
|
||||
import org.apache.lucene.util.AbstractRandomizedTest;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.Version;
|
||||
@ -115,6 +116,7 @@ import org.hamcrest.Matchers;
|
||||
import org.joda.time.DateTimeZone;
|
||||
import org.junit.*;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.lang.annotation.ElementType;
|
||||
import java.lang.annotation.Retention;
|
||||
@ -1841,6 +1843,43 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute a routing key that will route documents to the <code>shard</code>-th shard
|
||||
* of the provided index.
|
||||
*/
|
||||
protected String routingKeyForShard(String index, String type, int shard) {
|
||||
return internalCluster().routingKeyForShard(index, type, shard, getRandom());
|
||||
}
|
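Tests that previously hard-coded ids or routing values like "1" and "2" (whose shard placement depended on djb2) can derive routing keys from this helper instead, as SimpleMgetTests and ShardSizeTests do above. A short usage sketch, with illustrative index, type and id names:

// Pick routing values that are guaranteed to land on shard 0 and shard 1 of "test".
String routingToShard0 = routingKeyForShard("test", "type1", 0);
String routingToShard1 = routingKeyForShard("test", "type1", 1);
client().prepareIndex("test", "type1", "doc-a").setRouting(routingToShard0)
        .setSource("field", "value").get();
client().prepareIndex("test", "type1", "doc-b").setRouting(routingToShard1)
        .setSource("field", "value").get();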
||||
|
||||
/**
|
||||
* Return settings that could be used to start a node that has the given zipped home directory.
|
||||
*/
|
||||
protected Settings prepareBackwardsDataDir(File backwardsIndex) throws IOException {
|
||||
File indexDir = newTempDir();
|
||||
File dataDir = new File(indexDir, "data");
|
||||
TestUtil.unzip(backwardsIndex, indexDir);
|
||||
assertTrue(dataDir.exists());
|
||||
String[] list = dataDir.list();
|
||||
if (list == null || list.length > 1) {
|
||||
throw new IllegalStateException("Backwards index must contain exactly one cluster");
|
||||
}
|
||||
File src = new File(dataDir, list[0]);
|
||||
File dest = new File(dataDir, internalCluster().getClusterName());
|
||||
assertTrue(src.exists());
|
||||
src.renameTo(dest);
|
||||
assertFalse(src.exists());
|
||||
assertTrue(dest.exists());
|
||||
ImmutableSettings.Builder builder = ImmutableSettings.builder()
|
||||
.put("gateway.type", "local") // this is important we need to recover from gateway
|
||||
.put("path.data", dataDir.getPath());
|
||||
|
||||
File configDir = new File(indexDir, "config");
|
||||
if (configDir.exists()) {
|
||||
builder.put("path.conf", configDir.getPath());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* This method is executed iff the test is annotated with {@link SuiteScopeTest}
|
||||
* before the first test of this class is executed.
|
||||
|
@ -30,9 +30,12 @@ import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TimeUnits;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.client.Requests;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.operation.hash.djb.DjbHashFunction;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
@ -379,6 +382,17 @@ public abstract class ElasticsearchTestCase extends AbstractRandomizedTest {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return consistent index settings for the provided index version.
|
||||
*/
|
||||
public static ImmutableSettings.Builder settings(Version version) {
|
||||
ImmutableSettings.Builder builder = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version);
|
||||
if (version.before(Version.V_2_0_0)) {
|
||||
builder.put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, DjbHashFunction.class);
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
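Index settings in these tests are now built through this helper so that indices simulated as pre-2.0 keep the legacy djb hash while everything else gets murmur3. Typical usage, mirroring the tests above (index names are illustrative):

// Current-version index: routed with murmur3, no legacy hash setting added.
MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT))
                .numberOfShards(2).numberOfReplicas(1))
        .build();

// Index simulated as created on 1.3.0: settings(...) also pins the djb hash function.
assertAcked(prepareCreate("old_index").setSettings(settings(Version.V_1_3_0).put(indexSettings())));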
|
||||
static final class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
|
||||
|
||||
private final Thread.UncaughtExceptionHandler parent;
|
||||
|
@ -22,6 +22,7 @@ import com.carrotsearch.randomizedtesting.RandomizedTest;
|
||||
import com.carrotsearch.randomizedtesting.SeedUtils;
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomInts;
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
|
||||
import com.google.common.base.Predicate;
|
||||
import com.google.common.base.Predicates;
|
||||
import com.google.common.collect.*;
|
||||
@ -43,10 +44,12 @@ import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
|
||||
import org.elasticsearch.cluster.routing.operation.OperationRouting;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.breaker.CircuitBreaker;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
@ -69,6 +72,8 @@ import org.elasticsearch.index.cache.filter.FilterCacheModule;
|
||||
import org.elasticsearch.index.cache.filter.none.NoneFilterCache;
|
||||
import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
|
||||
import org.elasticsearch.index.engine.IndexEngineModule;
|
||||
import org.elasticsearch.index.service.IndexService;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
|
||||
import org.elasticsearch.node.Node;
|
||||
@ -107,7 +112,7 @@ import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilde
|
||||
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
|
||||
import static org.elasticsearch.test.ElasticsearchTestCase.assertBusy;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertThat;
|
||||
|
||||
@ -1533,6 +1538,30 @@ public final class InternalTestCluster extends TestCluster {
|
||||
}
|
||||
}
|
||||
|
||||
synchronized String routingKeyForShard(String index, String type, int shard, Random random) {
|
||||
assertThat(shard, greaterThanOrEqualTo(0));
|
||||
for (NodeAndClient n : nodes.values()) {
|
||||
InternalNode node = (InternalNode) n.node;
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class, node);
|
||||
ClusterService clusterService = getInstanceFromNode(ClusterService.class, node);
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService != null) {
|
||||
assertThat(indexService.settingsService().getSettings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), greaterThan(shard));
|
||||
OperationRouting operationRouting = indexService.injector().getInstance(OperationRouting.class);
|
||||
while (true) {
|
||||
String routing = RandomStrings.randomAsciiOfLength(random, 10);
|
||||
final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId();
|
||||
if (shard == targetShard) {
|
||||
return routing;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
fail("Could not find a node that holds " + index);
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized Iterator<Client> iterator() {
|
||||
ensureOpen();
|
||||
|
Binary file not shown.
Binary file not shown.
@ -1,217 +1,217 @@
|
||||
# Index num_shards _type _id _routing shard_id
|
||||
index 2 type1 foo null 1
|
||||
index 2 type1 foo 42 1
|
||||
index 2 type1 foo my_routing_key 0
|
||||
index 2 type1 bar null 0
|
||||
index 2 type1 bar 42 1
|
||||
index 2 type1 bar my_routing_key 0
|
||||
index 2 type1 foobar null 0
|
||||
index 2 type1 foobar 42 1
|
||||
index 2 type1 foobar my_routing_key 0
|
||||
index 2 type1 elasticsearch null 0
|
||||
index 2 type1 elasticsearch 42 1
|
||||
index 2 type1 elasticsearch my_routing_key 0
|
||||
index 2 type1 0956317778 null 0
|
||||
index 2 type1 0956317778 42 1
|
||||
index 2 type1 0956317778 my_routing_key 0
|
||||
index 2 type1 0 null 1
|
||||
index 2 type1 0 42 1
|
||||
index 2 type1 0 my_routing_key 0
|
||||
index 2 type2 foo null 1
|
||||
index 2 type2 foo 42 1
|
||||
index 2 type2 foo my_routing_key 0
|
||||
index 2 type2 bar null 0
|
||||
index 2 type2 bar 42 1
|
||||
index 2 type2 bar my_routing_key 0
|
||||
index 2 type2 foobar null 0
|
||||
index 2 type2 foobar 42 1
|
||||
index 2 type2 foobar my_routing_key 0
|
||||
index 2 type2 elasticsearch null 0
|
||||
index 2 type2 elasticsearch 42 1
|
||||
index 2 type2 elasticsearch my_routing_key 0
|
||||
index 2 type2 0956317778 null 0
|
||||
index 2 type2 0956317778 42 1
|
||||
index 2 type2 0956317778 my_routing_key 0
|
||||
index 2 type2 0 null 1
|
||||
index 2 type2 0 42 1
|
||||
index 2 type2 0 my_routing_key 0
|
||||
index 5 type1 foo null 4
|
||||
index 5 type1 foo 42 0
|
||||
index 5 type1 foo my_routing_key 3
|
||||
index 5 type1 bar null 4
|
||||
index 5 type1 bar 42 0
|
||||
index 5 type1 bar my_routing_key 3
|
||||
index 5 type1 foobar null 4
|
||||
index 5 type1 foobar 42 0
|
||||
index 5 type1 foobar my_routing_key 3
|
||||
index 5 type1 elasticsearch null 0
|
||||
index 5 type1 elasticsearch 42 0
|
||||
index 5 type1 elasticsearch my_routing_key 3
|
||||
index 5 type1 0956317778 null 3
|
||||
index 5 type1 0956317778 42 0
|
||||
index 5 type1 0956317778 my_routing_key 3
|
||||
index 5 type1 0 null 1
|
||||
index 5 type1 0 42 0
|
||||
index 5 type1 0 my_routing_key 3
|
||||
index 5 type2 foo null 4
|
||||
index 5 type2 foo 42 0
index 5 type2 foo my_routing_key 3
index 5 type2 bar null 4
index 5 type2 bar 42 0
index 5 type2 bar my_routing_key 3
index 5 type2 foobar null 4
index 5 type2 foobar 42 0
index 5 type2 foobar my_routing_key 3
index 5 type2 elasticsearch null 0
index 5 type2 elasticsearch 42 0
index 5 type2 elasticsearch my_routing_key 3
index 5 type2 0956317778 null 3
index 5 type2 0956317778 42 0
index 5 type2 0956317778 my_routing_key 3
index 5 type2 0 null 1
index 5 type2 0 42 0
index 5 type2 0 my_routing_key 3
index 100 type1 foo null 49
index 100 type1 foo 42 75
index 100 type1 foo my_routing_key 38
index 100 type1 bar null 34
index 100 type1 bar 42 75
index 100 type1 bar my_routing_key 38
index 100 type1 foobar null 74
index 100 type1 foobar 42 75
index 100 type1 foobar my_routing_key 38
index 100 type1 elasticsearch null 20
index 100 type1 elasticsearch 42 75
index 100 type1 elasticsearch my_routing_key 38
index 100 type1 0956317778 null 18
index 100 type1 0956317778 42 75
index 100 type1 0956317778 my_routing_key 38
index 100 type1 0 null 21
index 100 type1 0 42 75
index 100 type1 0 my_routing_key 38
index 100 type2 foo null 49
index 100 type2 foo 42 75
index 100 type2 foo my_routing_key 38
index 100 type2 bar null 34
index 100 type2 bar 42 75
index 100 type2 bar my_routing_key 38
index 100 type2 foobar null 74
index 100 type2 foobar 42 75
index 100 type2 foobar my_routing_key 38
index 100 type2 elasticsearch null 20
index 100 type2 elasticsearch 42 75
index 100 type2 elasticsearch my_routing_key 38
index 100 type2 0956317778 null 18
index 100 type2 0956317778 42 75
index 100 type2 0956317778 my_routing_key 38
index 100 type2 0 null 21
index 100 type2 0 42 75
index 100 type2 0 my_routing_key 38
index2 2 type1 foo null 1
index2 2 type1 foo 42 1
index2 2 type1 foo my_routing_key 0
index2 2 type1 bar null 0
index2 2 type1 bar 42 1
index2 2 type1 bar my_routing_key 0
index2 2 type1 foobar null 0
index2 2 type1 foobar 42 1
index2 2 type1 foobar my_routing_key 0
index2 2 type1 elasticsearch null 0
index2 2 type1 elasticsearch 42 1
index2 2 type1 elasticsearch my_routing_key 0
index2 2 type1 0956317778 null 0
index2 2 type1 0956317778 42 1
index2 2 type1 0956317778 my_routing_key 0
index2 2 type1 0 null 1
index2 2 type1 0 42 1
index2 2 type1 0 my_routing_key 0
index2 2 type2 foo null 1
index2 2 type2 foo 42 1
index2 2 type2 foo my_routing_key 0
index2 2 type2 bar null 0
index2 2 type2 bar 42 1
index2 2 type2 bar my_routing_key 0
index2 2 type2 foobar null 0
index2 2 type2 foobar 42 1
index2 2 type2 foobar my_routing_key 0
index2 2 type2 elasticsearch null 0
index2 2 type2 elasticsearch 42 1
index2 2 type2 elasticsearch my_routing_key 0
index2 2 type2 0956317778 null 0
index2 2 type2 0956317778 42 1
index2 2 type2 0956317778 my_routing_key 0
index2 2 type2 0 null 1
index2 2 type2 0 42 1
index2 2 type2 0 my_routing_key 0
index2 5 type1 foo null 4
index2 5 type1 foo 42 0
index2 5 type1 foo my_routing_key 3
index2 5 type1 bar null 4
index2 5 type1 bar 42 0
index2 5 type1 bar my_routing_key 3
index2 5 type1 foobar null 4
index2 5 type1 foobar 42 0
index2 5 type1 foobar my_routing_key 3
index2 5 type1 elasticsearch null 0
index2 5 type1 elasticsearch 42 0
index2 5 type1 elasticsearch my_routing_key 3
index2 5 type1 0956317778 null 3
index2 5 type1 0956317778 42 0
index2 5 type1 0956317778 my_routing_key 3
index2 5 type1 0 null 1
index2 5 type1 0 42 0
index2 5 type1 0 my_routing_key 3
index2 5 type2 foo null 4
index2 5 type2 foo 42 0
index2 5 type2 foo my_routing_key 3
index2 5 type2 bar null 4
index2 5 type2 bar 42 0
index2 5 type2 bar my_routing_key 3
index2 5 type2 foobar null 4
index2 5 type2 foobar 42 0
index2 5 type2 foobar my_routing_key 3
index2 5 type2 elasticsearch null 0
index2 5 type2 elasticsearch 42 0
index2 5 type2 elasticsearch my_routing_key 3
index2 5 type2 0956317778 null 3
index2 5 type2 0956317778 42 0
index2 5 type2 0956317778 my_routing_key 3
index2 5 type2 0 null 1
index2 5 type2 0 42 0
index2 5 type2 0 my_routing_key 3
index2 100 type1 foo null 49
index2 100 type1 foo 42 75
index2 100 type1 foo my_routing_key 38
index2 100 type1 bar null 34
index2 100 type1 bar 42 75
index2 100 type1 bar my_routing_key 38
index2 100 type1 foobar null 74
index2 100 type1 foobar 42 75
index2 100 type1 foobar my_routing_key 38
index2 100 type1 elasticsearch null 20
index2 100 type1 elasticsearch 42 75
index2 100 type1 elasticsearch my_routing_key 38
index2 100 type1 0956317778 null 18
index2 100 type1 0956317778 42 75
index2 100 type1 0956317778 my_routing_key 38
index2 100 type1 0 null 21
index2 100 type1 0 42 75
index2 100 type1 0 my_routing_key 38
index2 100 type2 foo null 49
index2 100 type2 foo 42 75
index2 100 type2 foo my_routing_key 38
index2 100 type2 bar null 34
index2 100 type2 bar 42 75
index2 100 type2 bar my_routing_key 38
index2 100 type2 foobar null 74
index2 100 type2 foobar 42 75
index2 100 type2 foobar my_routing_key 38
index2 100 type2 elasticsearch null 20
index2 100 type2 elasticsearch 42 75
index2 100 type2 elasticsearch my_routing_key 38
index2 100 type2 0956317778 null 18
index2 100 type2 0956317778 42 75
index2 100 type2 0956317778 my_routing_key 38
index2 100 type2 0 null 21
index2 100 type2 0 42 75
index2 100 type2 0 my_routing_key 38

# Index num_shards _type _id _routing pre_2.0_shard_id current_shard_id
index 2 type1 foo null 1 1
index 2 type1 foo 42 1 0
index 2 type1 foo my_routing_key 0 1
index 2 type1 bar null 0 1
index 2 type1 bar 42 1 0
index 2 type1 bar my_routing_key 0 1
index 2 type1 foobar null 0 1
index 2 type1 foobar 42 1 0
index 2 type1 foobar my_routing_key 0 1
index 2 type1 elasticsearch null 0 0
index 2 type1 elasticsearch 42 1 0
index 2 type1 elasticsearch my_routing_key 0 1
index 2 type1 0956317778 null 0 1
index 2 type1 0956317778 42 1 0
index 2 type1 0956317778 my_routing_key 0 1
index 2 type1 0 null 1 0
index 2 type1 0 42 1 0
index 2 type1 0 my_routing_key 0 1
index 2 type2 foo null 1 1
index 2 type2 foo 42 1 0
index 2 type2 foo my_routing_key 0 1
index 2 type2 bar null 0 1
index 2 type2 bar 42 1 0
index 2 type2 bar my_routing_key 0 1
index 2 type2 foobar null 0 1
index 2 type2 foobar 42 1 0
index 2 type2 foobar my_routing_key 0 1
index 2 type2 elasticsearch null 0 0
index 2 type2 elasticsearch 42 1 0
index 2 type2 elasticsearch my_routing_key 0 1
index 2 type2 0956317778 null 0 1
index 2 type2 0956317778 42 1 0
index 2 type2 0956317778 my_routing_key 0 1
index 2 type2 0 null 1 0
index 2 type2 0 42 1 0
index 2 type2 0 my_routing_key 0 1
index 5 type1 foo null 4 1
index 5 type1 foo 42 0 1
index 5 type1 foo my_routing_key 3 1
index 5 type1 bar null 4 3
index 5 type1 bar 42 0 1
index 5 type1 bar my_routing_key 3 1
index 5 type1 foobar null 4 1
index 5 type1 foobar 42 0 1
index 5 type1 foobar my_routing_key 3 1
index 5 type1 elasticsearch null 0 0
index 5 type1 elasticsearch 42 0 1
index 5 type1 elasticsearch my_routing_key 3 1
index 5 type1 0956317778 null 3 4
index 5 type1 0956317778 42 0 1
index 5 type1 0956317778 my_routing_key 3 1
index 5 type1 0 null 1 0
index 5 type1 0 42 0 1
index 5 type1 0 my_routing_key 3 1
index 5 type2 foo null 4 1
index 5 type2 foo 42 0 1
index 5 type2 foo my_routing_key 3 1
index 5 type2 bar null 4 3
index 5 type2 bar 42 0 1
index 5 type2 bar my_routing_key 3 1
index 5 type2 foobar null 4 1
index 5 type2 foobar 42 0 1
index 5 type2 foobar my_routing_key 3 1
index 5 type2 elasticsearch null 0 0
index 5 type2 elasticsearch 42 0 1
index 5 type2 elasticsearch my_routing_key 3 1
index 5 type2 0956317778 null 3 4
index 5 type2 0956317778 42 0 1
index 5 type2 0956317778 my_routing_key 3 1
index 5 type2 0 null 1 0
index 5 type2 0 42 0 1
index 5 type2 0 my_routing_key 3 1
index 100 type1 foo null 49 81
index 100 type1 foo 42 75 6
index 100 type1 foo my_routing_key 38 1
index 100 type1 bar null 34 53
index 100 type1 bar 42 75 6
index 100 type1 bar my_routing_key 38 1
index 100 type1 foobar null 74 41
index 100 type1 foobar 42 75 6
index 100 type1 foobar my_routing_key 38 1
index 100 type1 elasticsearch null 20 90
index 100 type1 elasticsearch 42 75 6
index 100 type1 elasticsearch my_routing_key 38 1
index 100 type1 0956317778 null 18 39
index 100 type1 0956317778 42 75 6
index 100 type1 0956317778 my_routing_key 38 1
index 100 type1 0 null 21 40
index 100 type1 0 42 75 6
index 100 type1 0 my_routing_key 38 1
index 100 type2 foo null 49 81
index 100 type2 foo 42 75 6
index 100 type2 foo my_routing_key 38 1
index 100 type2 bar null 34 53
index 100 type2 bar 42 75 6
index 100 type2 bar my_routing_key 38 1
index 100 type2 foobar null 74 41
index 100 type2 foobar 42 75 6
index 100 type2 foobar my_routing_key 38 1
index 100 type2 elasticsearch null 20 90
index 100 type2 elasticsearch 42 75 6
index 100 type2 elasticsearch my_routing_key 38 1
index 100 type2 0956317778 null 18 39
index 100 type2 0956317778 42 75 6
index 100 type2 0956317778 my_routing_key 38 1
index 100 type2 0 null 21 40
index 100 type2 0 42 75 6
index 100 type2 0 my_routing_key 38 1
index2 2 type1 foo null 1 1
index2 2 type1 foo 42 1 0
index2 2 type1 foo my_routing_key 0 1
index2 2 type1 bar null 0 1
index2 2 type1 bar 42 1 0
index2 2 type1 bar my_routing_key 0 1
index2 2 type1 foobar null 0 1
index2 2 type1 foobar 42 1 0
index2 2 type1 foobar my_routing_key 0 1
index2 2 type1 elasticsearch null 0 0
index2 2 type1 elasticsearch 42 1 0
index2 2 type1 elasticsearch my_routing_key 0 1
index2 2 type1 0956317778 null 0 1
index2 2 type1 0956317778 42 1 0
index2 2 type1 0956317778 my_routing_key 0 1
index2 2 type1 0 null 1 0
index2 2 type1 0 42 1 0
index2 2 type1 0 my_routing_key 0 1
index2 2 type2 foo null 1 1
index2 2 type2 foo 42 1 0
index2 2 type2 foo my_routing_key 0 1
index2 2 type2 bar null 0 1
index2 2 type2 bar 42 1 0
index2 2 type2 bar my_routing_key 0 1
index2 2 type2 foobar null 0 1
index2 2 type2 foobar 42 1 0
index2 2 type2 foobar my_routing_key 0 1
index2 2 type2 elasticsearch null 0 0
index2 2 type2 elasticsearch 42 1 0
index2 2 type2 elasticsearch my_routing_key 0 1
index2 2 type2 0956317778 null 0 1
index2 2 type2 0956317778 42 1 0
index2 2 type2 0956317778 my_routing_key 0 1
index2 2 type2 0 null 1 0
index2 2 type2 0 42 1 0
index2 2 type2 0 my_routing_key 0 1
index2 5 type1 foo null 4 1
index2 5 type1 foo 42 0 1
index2 5 type1 foo my_routing_key 3 1
index2 5 type1 bar null 4 3
index2 5 type1 bar 42 0 1
index2 5 type1 bar my_routing_key 3 1
index2 5 type1 foobar null 4 1
index2 5 type1 foobar 42 0 1
index2 5 type1 foobar my_routing_key 3 1
index2 5 type1 elasticsearch null 0 0
index2 5 type1 elasticsearch 42 0 1
index2 5 type1 elasticsearch my_routing_key 3 1
index2 5 type1 0956317778 null 3 4
index2 5 type1 0956317778 42 0 1
index2 5 type1 0956317778 my_routing_key 3 1
index2 5 type1 0 null 1 0
index2 5 type1 0 42 0 1
index2 5 type1 0 my_routing_key 3 1
index2 5 type2 foo null 4 1
index2 5 type2 foo 42 0 1
index2 5 type2 foo my_routing_key 3 1
index2 5 type2 bar null 4 3
index2 5 type2 bar 42 0 1
index2 5 type2 bar my_routing_key 3 1
index2 5 type2 foobar null 4 1
index2 5 type2 foobar 42 0 1
index2 5 type2 foobar my_routing_key 3 1
index2 5 type2 elasticsearch null 0 0
index2 5 type2 elasticsearch 42 0 1
index2 5 type2 elasticsearch my_routing_key 3 1
index2 5 type2 0956317778 null 3 4
index2 5 type2 0956317778 42 0 1
index2 5 type2 0956317778 my_routing_key 3 1
index2 5 type2 0 null 1 0
index2 5 type2 0 42 0 1
index2 5 type2 0 my_routing_key 3 1
index2 100 type1 foo null 49 81
index2 100 type1 foo 42 75 6
index2 100 type1 foo my_routing_key 38 1
index2 100 type1 bar null 34 53
index2 100 type1 bar 42 75 6
index2 100 type1 bar my_routing_key 38 1
index2 100 type1 foobar null 74 41
index2 100 type1 foobar 42 75 6
index2 100 type1 foobar my_routing_key 38 1
index2 100 type1 elasticsearch null 20 90
index2 100 type1 elasticsearch 42 75 6
index2 100 type1 elasticsearch my_routing_key 38 1
index2 100 type1 0956317778 null 18 39
index2 100 type1 0956317778 42 75 6
index2 100 type1 0956317778 my_routing_key 38 1
index2 100 type1 0 null 21 40
index2 100 type1 0 42 75 6
index2 100 type1 0 my_routing_key 38 1
index2 100 type2 foo null 49 81
index2 100 type2 foo 42 75 6
index2 100 type2 foo my_routing_key 38 1
index2 100 type2 bar null 34 53
index2 100 type2 bar 42 75 6
index2 100 type2 bar my_routing_key 38 1
index2 100 type2 foobar null 74 41
index2 100 type2 foobar 42 75 6
index2 100 type2 foobar my_routing_key 38 1
index2 100 type2 elasticsearch null 20 90
index2 100 type2 elasticsearch 42 75 6
index2 100 type2 elasticsearch my_routing_key 38 1
index2 100 type2 0956317778 null 18 39
index2 100 type2 0956317778 42 75 6
index2 100 type2 0956317778 my_routing_key 38 1
index2 100 type2 0 null 21 40
index2 100 type2 0 42 75 6
index2 100 type2 0 my_routing_key 38 1
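
The expected values above can be sanity-checked by hand. The pre_2.0_shard_id column appears to follow the classic djb2 hash of the routing key (or of the _id when routing is null; the _type is ignored), taken as an absolute value modulo the number of shards, while current_shard_id reflects the murmur3-based routing introduced by this commit. The Java snippet below is a minimal sketch of the pre-2.0 computation only; the names RoutingTableSketch, djb2Hash, and shardFor are made up for illustration, and the murmur3 side is not reproduced here because its exact byte encoding and seed are details of the routing implementation.

// Minimal sketch: recompute the pre_2.0_shard_id column of the table above.
// All names here are hypothetical; this is not the actual routing code.
public final class RoutingTableSketch {

    // djb2: start at 5381, then hash = hash * 33 + current character.
    static int djb2Hash(String value) {
        int hash = 5381;
        for (int i = 0; i < value.length(); i++) {
            hash = 33 * hash + value.charAt(i);
        }
        return hash;
    }

    // Shard = |hash(routing, falling back to _id)| modulo the shard count.
    // The Integer.MIN_VALUE corner case of Math.abs is ignored in this sketch.
    static int shardFor(String id, String routing, int numShards) {
        String effectiveRouting = routing != null ? routing : id;
        return Math.abs(djb2Hash(effectiveRouting)) % numShards;
    }

    public static void main(String[] args) {
        System.out.println(shardFor("foo", null, 2));             // 1, matches "index 2 type1 foo null 1 1"
        System.out.println(shardFor("foo", "42", 5));             // 0, matches "index 5 type1 foo 42 0 1"
        System.out.println(shardFor("foo", "my_routing_key", 5)); // 3, matches "index 5 type1 foo my_routing_key 3 1"
    }
}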