Merge branch 'master' into ccr
* master:
  Tests: Fix convert error tests to use fixed value (#32415)
  IndicesClusterStateService should replace an init. replica with an init. primary with the same aId (#32374)
  REST high-level client: parse back _ignored meta field (#32362)
  [CI] Mute DocumentSubsetReaderTests testSearch
commit 2245812ef7
@@ -67,10 +67,9 @@ public class ConvertProcessorTests extends ESTestCase {
         assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(10));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32370")
     public void testConvertIntHexError() {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
-        String value = "0x" + randomAlphaOfLengthBetween(1, 10);
+        String value = "0xnotanumber";
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value);
         Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.INTEGER, false);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument));

@@ -138,10 +137,9 @@ public class ConvertProcessorTests extends ESTestCase {
         assertThat(ingestDocument.getFieldValue(fieldName, Long.class), equalTo(10L));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32370")
     public void testConvertLongHexError() {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
-        String value = "0x" + randomAlphaOfLengthBetween(1, 10);
+        String value = "0xnotanumber";
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value);
         Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.LONG, false);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument));

@@ -20,6 +20,7 @@
 package org.elasticsearch.index.get;
 
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.document.DocumentField;

@@ -30,6 +31,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.IgnoredFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.search.lookup.SourceLookup;
 

@@ -225,10 +227,13 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
                 }
             }
         }
 
         for (DocumentField field : metaFields) {
-            Object value = field.getValue();
-            builder.field(field.getName(), value);
+            // TODO: can we avoid having an exception here?
+            if (field.getName().equals(IgnoredFieldMapper.NAME)) {
+                builder.field(field.getName(), field.getValues());
+            } else {
+                builder.field(field.getName(), field.<Object>getValue());
+            }
         }
 
         builder.field(FOUND, exists);

@@ -316,7 +321,11 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
                     parser.skipChildren(); // skip potential inner objects for forward compatibility
                 }
             } else if (token == XContentParser.Token.START_ARRAY) {
-                parser.skipChildren(); // skip potential inner arrays for forward compatibility
+                if (IgnoredFieldMapper.NAME.equals(currentFieldName)) {
+                    fields.put(currentFieldName, new DocumentField(currentFieldName, parser.list()));
+                } else {
+                    parser.skipChildren(); // skip potential inner arrays for forward compatibility
+                }
             }
         }
         return new GetResult(index, type, id, version, found, source, fields);

@@ -400,7 +409,12 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
 
     @Override
     public int hashCode() {
-        return Objects.hash(index, type, id, version, exists, fields, sourceAsMap());
+        return Objects.hash(version, exists, index, type, id, fields, sourceAsMap());
     }
 
+    @Override
+    public String toString() {
+        return Strings.toString(this, true, true);
+    }
 }

@@ -420,6 +420,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
                 // state may result in a new shard being initialized while having the same allocation id as the currently started shard.
                 logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
                 indexService.removeShard(shardId.id(), "removing shard (stale copy)");
+            } else if (newShardRouting.primary() && currentRoutingEntry.primary() == false && newShardRouting.initializing()) {
+                assert currentRoutingEntry.initializing() : currentRoutingEntry; // see above if clause
+                // this can happen when cluster state batching batches activation of the shard, closing an index, reopening it
+                // and assigning an initializing primary to this node
+                logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
+                indexService.removeShard(shardId.id(), "removing shard (stale copy)");
             }
         }
     }

@@ -602,16 +602,24 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
         for (String metadatafield : MapperService.getAllMetaFields()) {
             if (metadatafield.equals(Fields._ID) == false && metadatafield.equals(Fields._INDEX) == false
                     && metadatafield.equals(Fields._TYPE) == false) {
-                parser.declareField((map, field) -> {
-                    @SuppressWarnings("unchecked")
-                    Map<String, DocumentField> fieldMap = (Map<String, DocumentField>) map.computeIfAbsent(Fields.FIELDS,
-                        v -> new HashMap<String, DocumentField>());
-                    fieldMap.put(field.getName(), field);
-                }, (p, c) -> {
-                    List<Object> values = new ArrayList<>();
-                    values.add(parseFieldsValue(p));
-                    return new DocumentField(metadatafield, values);
-                }, new ParseField(metadatafield), ValueType.VALUE);
+                if (metadatafield.equals(IgnoredFieldMapper.NAME)) {
+                    parser.declareObjectArray((map, list) -> {
+                        @SuppressWarnings("unchecked")
+                        Map<String, DocumentField> fieldMap = (Map<String, DocumentField>) map.computeIfAbsent(Fields.FIELDS,
+                            v -> new HashMap<String, DocumentField>());
+                        DocumentField field = new DocumentField(metadatafield, list);
+                        fieldMap.put(field.getName(), field);
+                    }, (p, c) -> parseFieldsValue(p),
+                        new ParseField(metadatafield));
+                } else {
+                    parser.declareField((map, field) -> {
+                        @SuppressWarnings("unchecked")
+                        Map<String, DocumentField> fieldMap = (Map<String, DocumentField>) map.computeIfAbsent(Fields.FIELDS,
+                            v -> new HashMap<String, DocumentField>());
+                        fieldMap.put(field.getName(), field);
+                    }, (p, c) -> new DocumentField(metadatafield, Collections.singletonList(parseFieldsValue(p))),
+                        new ParseField(metadatafield), ValueType.VALUE);
+                }
             }
         }
     }

@@ -958,4 +966,9 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
             return Objects.hash(field, offset, child);
         }
     }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this, true, true);
+    }
 }

@@ -27,10 +27,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.AllocationId;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable.Builder;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.TestShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;

@@ -44,6 +46,7 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;

@@ -93,7 +96,8 @@ public class ClusterStateCreationUtils {
         IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
                 .put(SETTING_VERSION_CREATED, Version.CURRENT)
                 .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
-                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm).build();
+                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm)
+                .build();
 
         RoutingTable.Builder routing = new RoutingTable.Builder();
         routing.addAsNew(indexMetaData);

@@ -138,12 +142,19 @@ public class ClusterStateCreationUtils {
                     TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState,
                             unassignedInfo));
         }
+        final IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build();
+
+        IndexMetaData.Builder indexMetaDataBuilder = new IndexMetaData.Builder(indexMetaData);
+        indexMetaDataBuilder.putInSyncAllocationIds(0,
+            indexShardRoutingTable.activeShards().stream().map(ShardRouting::allocationId).map(AllocationId::getId)
+                .collect(Collectors.toSet())
+        );
 
         ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
         state.nodes(discoBuilder);
-        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
+        state.metaData(MetaData.builder().put(indexMetaDataBuilder.build(), false).generateClusterUuidIfNeeded());
         state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(indexMetaData.getIndex())
-                .addIndexShard(indexShardRoutingBuilder.build())).build());
+                .addIndexShard(indexShardRoutingTable)).build());
         return state.build();
     }

@@ -272,21 +283,21 @@ public class ClusterStateCreationUtils {
         state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
         return state.build();
     }
 
     /**
      * Creates cluster state with several indexes, shards and replicas and all shards STARTED.
      */
     public static ClusterState stateWithAssignedPrimariesAndReplicas(String[] indices, int numberOfShards, int numberOfReplicas) {
-
         int numberOfDataNodes = numberOfReplicas + 1;
         DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
         for (int i = 0; i < numberOfDataNodes + 1; i++) {
             final DiscoveryNode node = newNode(i);
             discoBuilder = discoBuilder.add(node);
         }
         discoBuilder.localNodeId(newNode(0).getId());
         discoBuilder.masterNodeId(newNode(numberOfDataNodes + 1).getId());
         ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
         state.nodes(discoBuilder);
         Builder routingTableBuilder = RoutingTable.builder();

@@ -316,7 +327,7 @@ public class ClusterStateCreationUtils {
         state.metaData(metadataBuilder);
         state.routingTable(routingTableBuilder.build());
         return state.build();
     }
 
     /**
      * Creates cluster state with and index that has one shard and as many replicas as numberOfReplicas.

@@ -26,7 +26,11 @@ import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.mapper.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.IgnoredFieldMapper;
+import org.elasticsearch.index.mapper.IndexFieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.RandomObjects;
 

@@ -98,14 +102,28 @@ public class DocumentFieldTests extends ESTestCase {
 
     public static Tuple<DocumentField, DocumentField> randomDocumentField(XContentType xContentType) {
         if (randomBoolean()) {
-            String fieldName = randomFrom(RoutingFieldMapper.NAME);
-            DocumentField documentField = new DocumentField(fieldName, Collections.singletonList(randomAlphaOfLengthBetween(3, 10)));
+            String metaField = randomValueOtherThanMany(field -> field.equals(TypeFieldMapper.NAME)
+                    || field.equals(IndexFieldMapper.NAME) || field.equals(IdFieldMapper.NAME),
+                () -> randomFrom(MapperService.getAllMetaFields()));
+            DocumentField documentField;
+            if (metaField.equals(IgnoredFieldMapper.NAME)) {
+                int numValues = randomIntBetween(1, 3);
+                List<Object> ignoredFields = new ArrayList<>(numValues);
+                for (int i = 0; i < numValues; i++) {
+                    ignoredFields.add(randomAlphaOfLengthBetween(3, 10));
+                }
+                documentField = new DocumentField(metaField, ignoredFields);
+            } else {
+                //meta fields are single value only, besides _ignored
+                documentField = new DocumentField(metaField, Collections.singletonList(randomAlphaOfLengthBetween(3, 10)));
+            }
             return Tuple.tuple(documentField, documentField);
-        } else {
-            String fieldName = randomAlphaOfLengthBetween(3, 10);
-            Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType);
-            DocumentField input = new DocumentField(fieldName, tuple.v1());
-            DocumentField expected = new DocumentField(fieldName, tuple.v2());
-            return Tuple.tuple(input, expected);
         }
+        String fieldName = randomAlphaOfLengthBetween(3, 10);
+        Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType);
+        DocumentField input = new DocumentField(fieldName, tuple.v1());
+        DocumentField expected = new DocumentField(fieldName, tuple.v2());
+        return Tuple.tuple(input, expected);
     }
 }

@@ -90,7 +90,6 @@ public class GetResultTests extends ESTestCase {
         XContentType xContentType = randomFrom(XContentType.values());
         Tuple<GetResult, GetResult> tuple = randomGetResult(xContentType);
         GetResult getResult = tuple.v1();
-
         // We don't expect to retrieve the index/type/id of the GetResult because they are not rendered
         // by the toXContentEmbedded method.
         GetResult expectedGetResult = new GetResult(null, null, null, -1,

@@ -106,7 +105,6 @@ public class GetResultTests extends ESTestCase {
             parsedEmbeddedGetResult = GetResult.fromXContentEmbedded(parser);
             assertNull(parser.nextToken());
         }
-
         assertEquals(expectedGetResult, parsedEmbeddedGetResult);
         //print the parsed object out and test that the output is the same as the original output
         BytesReference finalBytes = toXContentEmbedded(parsedEmbeddedGetResult, xContentType, humanReadable);

@@ -203,16 +201,17 @@ public class GetResultTests extends ESTestCase {
         return Tuple.tuple(getResult, expectedGetResult);
     }
 
-    private static Tuple<Map<String, DocumentField>,Map<String, DocumentField>> randomDocumentFields(XContentType xContentType) {
+    public static Tuple<Map<String, DocumentField>,Map<String, DocumentField>> randomDocumentFields(XContentType xContentType) {
         int numFields = randomIntBetween(2, 10);
         Map<String, DocumentField> fields = new HashMap<>(numFields);
         Map<String, DocumentField> expectedFields = new HashMap<>(numFields);
-        for (int i = 0; i < numFields; i++) {
+        while (fields.size() < numFields) {
             Tuple<DocumentField, DocumentField> tuple = randomDocumentField(xContentType);
             DocumentField getField = tuple.v1();
             DocumentField expectedGetField = tuple.v2();
-            fields.put(getField.getName(), getField);
-            expectedFields.put(expectedGetField.getName(), expectedGetField);
+            if (fields.putIfAbsent(getField.getName(), getField) == null) {
+                assertNull(expectedFields.putIfAbsent(expectedGetField.getName(), expectedGetField));
+            }
         }
         return Tuple.tuple(fields, expectedFields);
     }

@@ -74,6 +74,10 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC
         enableRandomFailures = randomBoolean();
     }
 
+    protected void disableRandomFailures() {
+        enableRandomFailures = false;
+    }
+
     protected void failRandomly() {
         if (enableRandomFailures && rarely()) {
             throw new RuntimeException("dummy test failure");

@@ -49,6 +49,7 @@ import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.threadpool.TestThreadPool;

@@ -75,6 +76,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPA
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -97,7 +99,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
         terminate(threadPool);
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32308")
     public void testRandomClusterStateUpdates() {
         // we have an IndicesClusterStateService per node in the cluster
         final Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap = new HashMap<>();

@@ -199,6 +200,59 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
         }
     }
 
+    /**
+     * In rare cases it is possible that a nodes gets an instruction to replace a replica
+     * shard that's in POST_RECOVERY with a new initializing primary with the same allocation id.
+     * This can happen by batching cluster states that include the starting of the replica, with
+     * closing of the indices, opening it up again and allocating the primary shard to the node in
+     * question. The node should then clean it's initializing replica and replace it with a new
+     * initializing primary.
+     */
+    public void testInitializingPrimaryRemovesInitializingReplicaWithSameAID() {
+        disableRandomFailures();
+        String index = "index_" + randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
+        ClusterState state = ClusterStateCreationUtils.state(index, randomBoolean(),
+            ShardRoutingState.STARTED, ShardRoutingState.INITIALIZING);
+
+        // the initial state which is derived from the newly created cluster state but doesn't contain the index
+        ClusterState previousState = ClusterState.builder(state)
+            .metaData(MetaData.builder(state.metaData()).remove(index))
+            .routingTable(RoutingTable.builder().build())
+            .build();
+
+        // pick a data node to simulate the adding an index cluster state change event on, that has shards assigned to it
+        final ShardRouting shardRouting = state.routingTable().index(index).shard(0).replicaShards().get(0);
+        final ShardId shardId = shardRouting.shardId();
+        DiscoveryNode node = state.nodes().get(shardRouting.currentNodeId());
+
+        // simulate the cluster state change on the node
+        ClusterState localState = adaptClusterStateToLocalNode(state, node);
+        ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node);
+        IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(node, RecordingIndicesService::new);
+        indicesCSSvc.start();
+        indicesCSSvc.applyClusterState(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState));
+        previousState = state;
+
+        // start the replica
+        state = cluster.applyStartedShards(state, state.routingTable().index(index).shard(0).replicaShards());
+
+        // close the index and open it up again (this will sometimes swap roles between primary and replica)
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName());
+        state = cluster.closeIndices(state, closeIndexRequest);
+        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName());
+        state = cluster.openIndices(state, openIndexRequest);
+
+        localState = adaptClusterStateToLocalNode(state, node);
+        previousLocalState = adaptClusterStateToLocalNode(previousState, node);
+
+        indicesCSSvc.applyClusterState(new ClusterChangedEvent("new cluster state", localState, previousLocalState));
+
+        final MockIndexShard shardOrNull = ((RecordingIndicesService) indicesCSSvc.indicesService).getShardOrNull(shardId);
+        assertThat(shardOrNull == null ? null : shardOrNull.routingEntry(),
+            equalTo(state.getRoutingNodes().node(node.getId()).getByShardId(shardId)));
+
+    }
+
     public ClusterState randomInitialClusterState(Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
                                                   Supplier<MockIndicesService> indicesServiceSupplier) {
         List<DiscoveryNode> allNodes = new ArrayList<>();

@@ -19,30 +19,6 @@
 
 package org.elasticsearch.search;
 
-import org.apache.lucene.search.Explanation;
-import org.elasticsearch.action.OriginalIndices;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.document.DocumentField;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.InputStreamStreamInput;
-import org.elasticsearch.common.text.Text;
-import org.elasticsearch.common.util.set.Sets;
-import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.search.SearchHit.NestedIdentity;
-import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
-import org.elasticsearch.search.fetch.subphase.highlight.HighlightFieldTests;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.RandomObjects;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -50,9 +26,31 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.function.Predicate;
 
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.OriginalIndices;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.document.DocumentField;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.get.GetResultTests;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.SearchHit.NestedIdentity;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightFieldTests;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.RandomObjects;
+
 import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
 import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
@@ -63,8 +61,6 @@ import static org.hamcrest.Matchers.nullValue;
 
 public class SearchHitTests extends ESTestCase {
 
-    private static Set<String> META_FIELDS = Sets.newHashSet("_uid", "_parent", "_routing", "_size", "_timestamp", "_ttl");
-
     public static SearchHit createTestItem(boolean withOptionalInnerHits) {
         int internalId = randomInt();
         String uid = randomAlphaOfLength(10);

@@ -75,18 +71,7 @@ public class SearchHitTests extends ESTestCase {
         }
         Map<String, DocumentField> fields = new HashMap<>();
         if (randomBoolean()) {
-            int size = randomIntBetween(0, 10);
-            for (int i = 0; i < size; i++) {
-                Tuple<List<Object>, List<Object>> values = RandomObjects.randomStoredFieldValues(random(),
-                    XContentType.JSON);
-                if (randomBoolean()) {
-                    String metaField = randomFrom(META_FIELDS);
-                    fields.put(metaField, new DocumentField(metaField, values.v1()));
-                } else {
-                    String fieldName = randomAlphaOfLengthBetween(5, 10);
-                    fields.put(fieldName, new DocumentField(fieldName, values.v1()));
-                }
-            }
+            fields = GetResultTests.randomDocumentFields(XContentType.JSON).v1();
         }
         SearchHit hit = new SearchHit(internalId, uid, type, nestedIdentity, fields);
         if (frequently()) {

@@ -109,7 +94,8 @@ public class SearchHitTests extends ESTestCase {
             int size = randomIntBetween(0, 5);
             Map<String, HighlightField> highlightFields = new HashMap<>(size);
             for (int i = 0; i < size; i++) {
-                highlightFields.put(randomAlphaOfLength(5), HighlightFieldTests.createTestItem());
+                HighlightField testItem = HighlightFieldTests.createTestItem();
+                highlightFields.put(testItem.getName(), testItem);
             }
             hit.highlightFields(highlightFields);
         }

@@ -46,13 +46,13 @@ public class SearchSortValuesTests extends ESTestCase {
         List<Supplier<Object>> valueSuppliers = new ArrayList<>();
         // this should reflect all values that are allowed to go through the transport layer
         valueSuppliers.add(() -> null);
-        valueSuppliers.add(() -> randomInt());
-        valueSuppliers.add(() -> randomLong());
-        valueSuppliers.add(() -> randomDouble());
-        valueSuppliers.add(() -> randomFloat());
-        valueSuppliers.add(() -> randomByte());
-        valueSuppliers.add(() -> randomShort());
-        valueSuppliers.add(() -> randomBoolean());
+        valueSuppliers.add(ESTestCase::randomInt);
+        valueSuppliers.add(ESTestCase::randomLong);
+        valueSuppliers.add(ESTestCase::randomDouble);
+        valueSuppliers.add(ESTestCase::randomFloat);
+        valueSuppliers.add(ESTestCase::randomByte);
+        valueSuppliers.add(ESTestCase::randomShort);
+        valueSuppliers.add(ESTestCase::randomBoolean);
         valueSuppliers.add(() -> frequently() ? randomAlphaOfLengthBetween(1, 30) : randomRealisticUnicodeOfCodepointLength(30));
 
         int size = randomIntBetween(1, 20);

@@ -48,7 +48,7 @@ import java.util.List;
 import java.util.Random;
 
 import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween;
-import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiOfLength;
+import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiLettersOfLength;
 import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomUnicodeOfLengthBetween;
 import static java.util.Collections.singleton;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VALUE;

@@ -122,7 +122,7 @@ public final class RandomObjects {
                 expectedParsedValues.add(randomBoolean);
                 break;
             case 7:
-                String randomString = random.nextBoolean() ? RandomStrings.randomAsciiOfLengthBetween(random, 3, 10 ) :
+                String randomString = random.nextBoolean() ? RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10) :
                         randomUnicodeOfLengthBetween(random, 3, 10);
                 originalValues.add(randomString);
                 expectedParsedValues.add(randomString);

@@ -191,11 +191,11 @@ public final class RandomObjects {
         for (int i = 0; i < numFields; i++) {
             if (currentDepth < 5 && random.nextInt(100) >= 70) {
                 if (random.nextBoolean()) {
-                    builder.startObject(RandomStrings.randomAsciiOfLengthBetween(random, 6, 10));
+                    builder.startObject(RandomStrings.randomAsciiLettersOfLengthBetween(random, 6, 10));
                     addFields(random, builder, minNumFields, currentDepth + 1);
                     builder.endObject();
                 } else {
-                    builder.startArray(RandomStrings.randomAsciiOfLengthBetween(random, 6, 10));
+                    builder.startArray(RandomStrings.randomAsciiLettersOfLengthBetween(random, 6, 10));
                     int numElements = randomIntBetween(random, 1, 5);
                     boolean object = random.nextBoolean();
                     int dataType = -1;

@@ -214,7 +214,7 @@ public final class RandomObjects {
                     builder.endArray();
                 }
             } else {
-                builder.field(RandomStrings.randomAsciiOfLengthBetween(random, 6, 10),
+                builder.field(RandomStrings.randomAsciiLettersOfLengthBetween(random, 6, 10),
                         randomFieldValue(random, randomDataType(random)));
             }
         }

@@ -227,9 +227,9 @@ public final class RandomObjects {
     private static Object randomFieldValue(Random random, int dataType) {
         switch(dataType) {
             case 0:
-                return RandomStrings.randomAsciiOfLengthBetween(random, 3, 10);
+                return RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10);
             case 1:
-                return RandomStrings.randomAsciiOfLengthBetween(random, 3, 10);
+                return RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10);
             case 2:
                 return random.nextLong();
             case 3:

@@ -287,10 +287,10 @@ public final class RandomObjects {
      * @param random Random generator
      */
     private static Tuple<Failure, Failure> randomShardInfoFailure(Random random) {
-        String index = randomAsciiOfLength(random, 5);
-        String indexUuid = randomAsciiOfLength(random, 5);
+        String index = randomAsciiLettersOfLength(random, 5);
+        String indexUuid = randomAsciiLettersOfLength(random, 5);
         int shardId = randomIntBetween(random, 1, 10);
-        String nodeId = randomAsciiOfLength(random, 5);
+        String nodeId = randomAsciiLettersOfLength(random, 5);
         RestStatus status = randomFrom(random, RestStatus.INTERNAL_SERVER_ERROR, RestStatus.FORBIDDEN, RestStatus.NOT_FOUND);
         boolean primary = random.nextBoolean();
         ShardId shard = new ShardId(index, indexUuid, shardId);

@@ -80,6 +80,7 @@ public class DocumentSubsetReaderTests extends ESTestCase {
         bitsetFilterCache.close();
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32457")
    public void testSearch() throws Exception {
         IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig());
 