Make typeless APIs usable with indices whose type name is different from `_doc` (#35790)
This commit makes the `document`, `update`, `explain`, `termvectors` and `mapping` typeless APIs work on indices that have a type whose name is not `_doc`. Unfortunately, this needs to be a bit of a hack, since I didn't want calls with random type names to see documents with the type name that the user had chosen upon type creation. The `explain` and `termvectors` APIs do not yet support being called without a type, so their tests use `_doc` as the type for now; we will need to fix those tests later, but this shouldn't require further server-side changes, since passing `_doc` as the type name is what typeless APIs do internally anyway.

Relates #35190
parent 8ccb466072
commit d24b40f688
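As a rough sketch of the behavior the tests below exercise (reusing the index, type and field names from those tests), an index created with a custom type name now accepts typeless calls, and typeless responses keep reporting `_doc` rather than the real type name:

  - do:
      indices.create: # index created with a custom type name instead of _doc
        index: index
        body:
          mappings:
            not_doc:
              properties:
                foo:
                  type: "keyword"

  - do:
      index:
        index: index
        id: 1                  # typeless call: no type given, resolved internally
        body: { foo: bar }

  - match: { _type: "_doc" }   # typeless responses report _doc, not not_doc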
@@ -0,0 +1,34 @@
+---
+"bulk without types on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: index
+              _id: 0
+          - foo: bar
+          - index:
+              _index: index
+              _id: 1
+          - foo: bar
+
+  - do:
+      count:
+        index: index
+
+  - match: {count: 2}

@@ -0,0 +1,42 @@
+---
+"DELETE with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      index:
+        index: index
+        type: not_doc
+        id: 1
+        body: { foo: bar }
+
+  - do:
+      catch: bad_request
+      delete:
+        index: index
+        type: some_random_type
+        id: 1
+
+  - match: { error.root_cause.0.reason: "/Rejecting.mapping.update.to.\\[index\\].as.the.final.mapping.would.have.more.than.1.type.*/" }
+
+  - do:
+      delete:
+        index: index
+        id: 1
+
+  - match: { _index: "index" }
+  - match: { _type: "_doc" }
+  - match: { _id: "1"}
+  - match: { _version: 2}

@@ -0,0 +1,56 @@
+---
+"Explain with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      index:
+        index: index
+        type: not_doc
+        id: 1
+        body: { foo: bar }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      catch: missing
+      explain:
+        index: index
+        type: some_random_type
+        id: 1
+        body:
+          query:
+            match_all: {}
+
+  - match: { _index: "index" }
+  - match: { _type: "some_random_type" }
+  - match: { _id: "1"}
+  - match: { matched: false}
+
+  - do:
+      explain:
+        index: index
+        type: _doc #todo: make _explain typeless and remove this
+        id: 1
+        body:
+          query:
+            match_all: {}
+
+  - match: { _index: "index" }
+  - match: { _type: "_doc" }
+  - match: { _id: "1"}
+  - is_true: matched
+  - match: { explanation.value: 1 }

@@ -0,0 +1,46 @@
+---
+"GET with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      index:
+        index: index
+        type: not_doc
+        id: 1
+        body: { foo: bar }
+
+  - do:
+      catch: missing
+      get:
+        index: index
+        type: some_random_type
+        id: 1
+
+  - match: { _index: "index" }
+  - match: { _type: "some_random_type" }
+  - match: { _id: "1"}
+  - match: { found: false}
+
+  - do:
+      get:
+        index: index
+        id: 1
+
+  - match: { _index: "index" }
+  - match: { _type: "_doc" }
+  - match: { _id: "1"}
+  - match: { _version: 1}
+  - match: { _source: { foo: bar }}

@@ -0,0 +1,62 @@
+---
+"Index with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      index:
+        index: index
+        id: 1
+        body: { foo: bar }
+
+  - match: { _index: "index" }
+  - match: { _type: "_doc" }
+  - match: { _id: "1"}
+  - match: { _version: 1}
+
+  - do:
+      get: # not using typeless API on purpose
+        index: index
+        type: not_doc
+        id: 1
+
+  - match: { _index: "index" }
+  - match: { _type: "not_doc" } # the important bit to check
+  - match: { _id: "1"}
+  - match: { _version: 1}
+  - match: { _source: { foo: bar }}
+
+
+  - do:
+      index:
+        index: index
+        body: { foo: bar }
+
+  - match: { _index: "index" }
+  - match: { _type: "_doc" }
+  - match: { _version: 1}
+  - set: { _id: id }
+
+  - do:
+      get: # using typeful API on purpose
+        index: index
+        type: not_doc
+        id: '$id'
+
+  - match: { _index: "index" }
+  - match: { _type: "not_doc" } # the important bit to check
+  - match: { _id: $id}
+  - match: { _version: 1}
+  - match: { _source: { foo: bar }}

@@ -0,0 +1,23 @@
+---
+"GET mapping with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: include_type_name was introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      indices.get_mapping:
+        include_type_name: false
+        index: index
+
+  - match: { index.mappings.properties.foo.type: "keyword" }

@@ -0,0 +1,52 @@
+---
+"PUT mapping with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: include_type_name was introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      indices.put_mapping:
+        include_type_name: false
+        index: index
+        body:
+          properties:
+            bar:
+              type: "long"
+
+  - do:
+      indices.get_mapping:
+        include_type_name: false
+        index: index
+
+  - match: { index.mappings.properties.foo.type: "keyword" }
+  - match: { index.mappings.properties.bar.type: "long" }
+
+  - do:
+      indices.put_mapping:
+        include_type_name: false
+        index: index
+        body:
+          properties:
+            foo:
+              type: "keyword" # also test no-op updates that trigger special logic wrt the mapping version
+
+  - do:
+      catch: bad_request
+      indices.put_mapping:
+        index: index
+        body:
+          some_other_type:
+            properties:
+              bar:
+                type: "long"

@@ -0,0 +1,32 @@
+---
+"mtermvectors without types on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "text"
+                  term_vector: "with_positions_offsets"
+
+  - do:
+      index:
+        index: index
+        id: 1
+        body: { foo: bar }
+
+  - do:
+      mtermvectors:
+        body:
+          docs:
+            - _index: index
+              _id: 1
+
+  - match: {docs.0.term_vectors.foo.terms.bar.term_freq: 1}

@@ -0,0 +1,45 @@
+---
+"Term vectors with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "text"
+                  term_vector: "with_positions"
+
+  - do:
+      index:
+        index: index
+        type: not_doc
+        id: 1
+        body: { foo: bar }
+
+  - do:
+      indices.refresh: {}
+
+  - do:
+      termvectors:
+        index: index
+        type: _doc # todo: remove when termvectors support typeless API
+        id: 1
+
+  - is_true: found
+  - match: {_type: _doc}
+  - match: {term_vectors.foo.terms.bar.term_freq: 1}
+
+  - do:
+      termvectors:
+        index: index
+        type: some_random_type
+        id: 1
+
+  - is_false: found

@@ -0,0 +1,39 @@
+---
+"Update with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      index:
+        index: index
+        type: not_doc
+        id: 1
+        body: { foo: bar }
+
+  - do:
+      update:
+        index: index
+        id: 1
+        body:
+          doc:
+            foo: baz
+
+  - do:
+      get:
+        index: index
+        type: not_doc
+        id: 1
+
+  - match: { _source.foo: baz }

@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -77,14 +76,14 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction<Ty
             return;
         }

-        ImmutableOpenMap<String, MappingMetaData> mappings = state.metaData().getIndices().get(concreteIndex).getMappings();
-        if (mappings.isEmpty()) {
+        MappingMetaData mapping = state.metaData().getIndices().get(concreteIndex).mapping();
+        if (mapping == null) {
            listener.onResponse(new TypesExistsResponse(false));
            return;
        }

        for (String type : request.types()) {
-            if (!mappings.containsKey(type)) {
+            if (mapping.type().equals(type) == false) {
                listener.onResponse(new TypesExistsResponse(false));
                return;
            }

@@ -334,8 +334,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
                case DELETE:
                    docWriteRequest.routing(metaData.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index()));
                    // check if routing is required, if so, throw error if routing wasn't specified
-                    if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(),
-                            docWriteRequest.type())) {
+                    if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName())) {
                        throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id());
                    }
                    break;

@@ -30,11 +30,15 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.get.GetResult;
+import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchService;

@@ -83,7 +87,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
            request.request().index());
        request.request().filteringAlias(aliasFilter);
        // Fail fast on the node that received the request.
-        if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) {
+        if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex())) {
            throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
        }
    }

@@ -104,15 +108,19 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe

    @Override
    protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
+        String[] types;
+        if (MapperService.SINGLE_MAPPING_NAME.equals(request.type())) { // typeless explain call
+            types = Strings.EMPTY_ARRAY;
+        } else {
+            types = new String[] { request.type() };
+        }
        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
-            new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
+            types, request.nowInMillis, request.filteringAlias());
        SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT);
        Engine.GetResult result = null;
        try {
-            Term uidTerm = context.mapperService().createUidTerm(request.type(), request.id());
-            if (uidTerm == null) {
-                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
-            }
+            // No need to check the type, IndexShard#get does it for us
+            Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(request.id()));
            result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm));
            if (!result.exists()) {
                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);

@@ -71,7 +71,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
        // update the routing (request#index here is possibly an alias)
        request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index()));
        // Fail fast on the node that received the request.
-        if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) {
+        if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex())) {
            throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
        }
    }

@@ -69,7 +69,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
            concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();

            item.routing(clusterState.metaData().resolveIndexRouting(item.routing(), item.index()));
-            if ((item.routing() == null) && (clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type()))) {
+            if ((item.routing() == null) && (clusterState.getMetaData().routingRequired(concreteSingleIndex))) {
                responses.set(i, newItemFailure(concreteSingleIndex, item.type(), item.id(),
                    new RoutingMissingException(concreteSingleIndex, item.type(), item.id())));
                continue;

@@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;

 import java.io.IOException;
 import java.util.ArrayList;

@@ -101,6 +102,7 @@ public class MultiTermVectorsRequest extends ActionRequest
                    throw new IllegalArgumentException("docs array element should include an object");
                }
                TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template);
+                termVectorsRequest.type(MapperService.SINGLE_MAPPING_NAME);
                TermVectorsRequest.parseRequest(termVectorsRequest, parser);
                add(termVectorsRequest);
            }

@@ -75,7 +75,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
            }
            String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
            if (termVectorsRequest.routing() == null &&
-                clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
+                clusterState.getMetaData().routingRequired(concreteSingleIndex)) {
                responses.set(i, new MultiTermVectorsItemResponse(null,
                    new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
                        new RoutingMissingException(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id()))));

@@ -79,7 +79,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
        // update the routing (request#index here is possibly an alias or a parent)
        request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index()));
        // Fail fast on the node that received the request.
-        if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) {
+        if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex())) {
            throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
        }
    }

@@ -106,7 +106,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
    public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) {
        request.routing((metaData.resolveWriteIndexRouting(request.routing(), request.index())));
        // Fail fast on the node that received the request, rather than failing when translating on the index or delete request.
-        if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) {
+        if (request.routing() == null && metaData.routingRequired(concreteIndex)) {
            throw new RoutingMissingException(concreteIndex, request.type(), request.id());
        }
    }

@@ -449,13 +449,37 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
        return this.aliases;
    }

+    /**
+     * Return an object that maps each type to the associated mappings.
+     * The return value is never {@code null} but may be empty if the index
+     * has no mappings.
+     * @deprecated Use {@link #mapping()} instead now that indices have a single type
+     */
+    @Deprecated
    public ImmutableOpenMap<String, MappingMetaData> getMappings() {
        return mappings;
    }

    /**
     * Return the concrete mapping for this index or {@code null} if this index has no mappings at all.
     */
    @Nullable
-    public MappingMetaData mapping(String mappingType) {
-        return mappings.get(mappingType);
+    public MappingMetaData mapping() {
+        for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) {
+            if (cursor.key.equals(MapperService.DEFAULT_MAPPING) == false) {
+                return cursor.value;
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Get the default mapping.
+     * NOTE: this is always {@code null} for 7.x indices which are disallowed to have a default mapping.
+     */
+    @Nullable
+    public MappingMetaData defaultMapping() {
+        return mappings.get(MapperService.DEFAULT_MAPPING);
    }

    public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid";

@@ -732,13 +732,12 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To

    /**
     * @param concreteIndex The concrete index to check if routing is required
-     * @param type The type to check if routing is required
     * @return Whether routing is required according to the mapping for the specified index and type
     */
-    public boolean routingRequired(String concreteIndex, String type) {
+    public boolean routingRequired(String concreteIndex) {
        IndexMetaData indexMetaData = indices.get(concreteIndex);
        if (indexMetaData != null) {
-            MappingMetaData mappingMetaData = indexMetaData.getMappings().get(type);
+            MappingMetaData mappingMetaData = indexMetaData.mapping();
            if (mappingMetaData != null) {
                return mappingMetaData.routing().required();
            }

@@ -263,7 +263,7 @@ public class MetaDataMappingService {
                updateList.add(indexMetaData);
                // try and parse it (no need to add it here) so we can bail early in case of parsing exception
                DocumentMapper newMapper;
-                DocumentMapper existingMapper = mapperService.documentMapper(request.type());
+                DocumentMapper existingMapper = mapperService.documentMapper();
                if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
                    // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
                    newMapper = mapperService.parse(request.type(), mappingUpdateSource, false);

@@ -295,12 +295,22 @@ public class MetaDataMappingService {
                // we use the exact same indexService and metadata we used to validate above here to actually apply the update
                final Index index = indexMetaData.getIndex();
                final MapperService mapperService = indexMapperServices.get(index);
+                String typeForUpdate = mappingType; // the type to use to apply the mapping update
+                if (MapperService.SINGLE_MAPPING_NAME.equals(typeForUpdate)) {
+                    // If the user gave _doc as a special type value or if (s)he is using the new typeless APIs,
+                    // then we apply the mapping update to the existing type. This allows to move to typeless
+                    // APIs with indices whose type name is different from `_doc`.
+                    DocumentMapper mapper = mapperService.documentMapper();
+                    if (mapper != null) {
+                        typeForUpdate = mapper.type();
+                    }
+                }
                CompressedXContent existingSource = null;
-                DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
+                DocumentMapper existingMapper = mapperService.documentMapper(typeForUpdate);
                if (existingMapper != null) {
                    existingSource = existingMapper.mappingSource();
                }
-                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE);
+                DocumentMapper mergedMapper = mapperService.merge(typeForUpdate, mappingUpdateSource, MergeReason.MAPPING_UPDATE);
                CompressedXContent updatedSource = mergedMapper.mappingSource();

                if (existingSource != null) {

@@ -39,10 +39,12 @@ import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
 import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
 import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
+import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

@@ -157,13 +159,11 @@ public final class ShardGetService extends AbstractIndexShardComponent {

        Engine.GetResult get = null;
-        if (type != null) {
-            Term uidTerm = mapperService.createUidTerm(type, id);
-            if (uidTerm != null) {
-                get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm)
-                        .version(version).versionType(versionType));
-                if (get.exists() == false) {
-                    get.close();
-                }
-            }
+        Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(id));
+        get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm)
+                .version(version).versionType(versionType));
+        if (get.exists() == false) {
+            get.close();
        }

@@ -202,7 +202,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
            }
        }

-        DocumentMapper docMapper = mapperService.documentMapper(type);
+        DocumentMapper docMapper = mapperService.documentMapper();

        if (gFields != null && gFields.length > 0) {
            for (String field : gFields) {

@@ -106,7 +106,8 @@ final class DocumentParser {
            throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]");
        }

-        if (Objects.equals(source.type(), docMapper.type()) == false) {
+        if (Objects.equals(source.type(), docMapper.type()) == false &&
+                MapperService.SINGLE_MAPPING_NAME.equals(source.type()) == false) { // used by typeless APIs
            throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type ["
                + docMapper.type() + "]");
        }

@@ -25,7 +25,6 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
-import org.apache.lucene.index.Term;
 import org.elasticsearch.Assertions;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -218,7 +217,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {

        for (DocumentMapper documentMapper : updatedEntries.values()) {
            String mappingType = documentMapper.type();
-            CompressedXContent incomingMappingSource = newIndexMetaData.mapping(mappingType).source();
+            MappingMetaData mappingMetaData;
+            if (mappingType.equals(MapperService.DEFAULT_MAPPING)) {
+                mappingMetaData = newIndexMetaData.defaultMapping();
+            } else {
+                mappingMetaData = newIndexMetaData.mapping();
+                assert mappingType.equals(mappingMetaData.type());
+            }
+            CompressedXContent incomingMappingSource = mappingMetaData.source();

            String op = existingMappers.contains(mappingType) ? "updated" : "added";
            if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) {

@@ -254,13 +260,25 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
        if (currentIndexMetaData.getMappingVersion() == newIndexMetaData.getMappingVersion()) {
            // if the mapping version is unchanged, then there should not be any updates and all mappings should be the same
            assert updatedEntries.isEmpty() : updatedEntries;
-            for (final ObjectCursor<MappingMetaData> mapping : newIndexMetaData.getMappings().values()) {
-                final CompressedXContent currentSource = currentIndexMetaData.mapping(mapping.value.type()).source();
-                final CompressedXContent newSource = mapping.value.source();
+
+            MappingMetaData defaultMapping = newIndexMetaData.defaultMapping();
+            if (defaultMapping != null) {
+                final CompressedXContent currentSource = currentIndexMetaData.defaultMapping().source();
+                final CompressedXContent newSource = defaultMapping.source();
                assert currentSource.equals(newSource) :
-                        "expected current mapping [" + currentSource + "] for type [" + mapping.value.type() + "] "
+                        "expected current mapping [" + currentSource + "] for type [" + defaultMapping.type() + "] "
                                + "to be the same as new mapping [" + newSource + "]";
            }
+
+            MappingMetaData mapping = newIndexMetaData.mapping();
+            if (mapping != null) {
+                final CompressedXContent currentSource = currentIndexMetaData.mapping().source();
+                final CompressedXContent newSource = mapping.source();
+                assert currentSource.equals(newSource) :
+                        "expected current mapping [" + currentSource + "] for type [" + mapping.type() + "] "
+                                + "to be the same as new mapping [" + newSource + "]";
+            }
+
        } else {
            // if the mapping version is changed, it should increase, there should be updates, and the mapping should be different
            final long currentMappingVersion = currentIndexMetaData.getMappingVersion();

@@ -270,7 +288,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
                    + "to be less than new mapping version [" + newMappingVersion + "]";
            assert updatedEntries.isEmpty() == false;
            for (final DocumentMapper documentMapper : updatedEntries.values()) {
-                final MappingMetaData currentMapping = currentIndexMetaData.mapping(documentMapper.type());
+                final MappingMetaData currentMapping;
+                if (documentMapper.type().equals(MapperService.DEFAULT_MAPPING)) {
+                    currentMapping = currentIndexMetaData.defaultMapping();
+                } else {
+                    currentMapping = currentIndexMetaData.mapping();
+                    assert currentMapping == null || documentMapper.type().equals(currentMapping.type());
+                }
                if (currentMapping != null) {
                    final CompressedXContent currentSource = currentMapping.source();
                    final CompressedXContent newSource = documentMapper.mappingSource();

@@ -766,11 +790,4 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
        }
    }

-    /** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. */
-    public Term createUidTerm(String type, String id) {
-        if (mapper == null || mapper.type().equals(type) == false) {
-            return null;
-        }
-        return new Term(IdFieldMapper.NAME, Uid.encodeId(id));
-    }
 }

@@ -31,7 +31,6 @@ import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
 import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.ThreadInterruptedException;
 import org.elasticsearch.Assertions;
 import org.elasticsearch.ElasticsearchException;

@@ -78,6 +77,7 @@ import org.elasticsearch.index.cache.request.ShardRequestCache;
 import org.elasticsearch.index.codec.CodecService;
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.Engine.GetResult;
 import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.engine.EngineFactory;

@@ -816,23 +816,23 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
        } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) {
            return new Engine.DeleteResult(e, version, operationPrimaryTerm, seqNo, false);
        }
-        final Term uid = extractUidForDelete(type, id);
+        if (resolveType(type).equals(mapperService.documentMapper().type()) == false) {
+            // We should never get there due to the fact that we generate mapping updates on deletes,
+            // but we still prefer to have a hard exception here as we would otherwise delete a
+            // document in the wrong type.
+            throw new IllegalStateException("Deleting document from type [" + resolveType(type) + "] while current type is [" +
+                    mapperService.documentMapper().type() + "]");
+        }
+        final Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(id));
        final Engine.Delete delete = prepareDelete(type, id, uid, seqNo, opPrimaryTerm, version,
            versionType, origin);
        return delete(getEngine(), delete);
    }

-    private static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version,
+    private Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version,
                                               VersionType versionType, Engine.Operation.Origin origin) {
        long startTime = System.nanoTime();
-        return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime);
-    }
-
-    private Term extractUidForDelete(String type, String id) {
-        // This is only correct because we create types dynamically on delete operations
-        // otherwise this could match the same _id from a different type
-        BytesRef idBytes = Uid.encodeId(id);
-        return new Term(IdFieldMapper.NAME, idBytes);
+        return new Engine.Delete(resolveType(type), id, uid, seqNo, primaryTerm, version, versionType, origin, startTime);
    }

    private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException {

@@ -854,6 +854,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

    public Engine.GetResult get(Engine.Get get) {
        readAllowed();
+        DocumentMapper mapper = mapperService.documentMapper();
+        if (mapper == null || mapper.type().equals(resolveType(get.type())) == false) {
+            return GetResult.NOT_EXISTS;
+        }
        return getEngine().get(get, this::acquireSearcher);
    }

@@ -2274,8 +2278,23 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
        }
    }

+    /**
+     * If an index/update/get/delete operation is using the special `_doc` type, then we replace
+     * it with the actual type that is being used in the mappings so that users may use typeless
+     * APIs with indices that have types.
+     */
+    private String resolveType(String type) {
+        if (MapperService.SINGLE_MAPPING_NAME.equals(type)) {
+            DocumentMapper docMapper = mapperService.documentMapper();
+            if (docMapper != null) {
+                return docMapper.type();
+            }
+        }
+        return type;
+    }
+
    private DocumentMapperForType docMapper(String type) {
-        return mapperService.documentMapperWithAutoCreate(type);
+        return mapperService.documentMapperWithAutoCreate(resolveType(type));
    }

    private EngineConfig newEngineConfig() {

@@ -43,6 +43,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.mapper.DocumentMapperForType;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;

@@ -50,6 +51,7 @@ import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
 import org.elasticsearch.index.mapper.StringFieldType;
+import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.search.dfs.AggregatedDfs;

@@ -82,11 +84,7 @@ public class TermVectorsService {
        final long startTime = nanoTimeSupplier.getAsLong();
        final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(),
            request.type(), request.id());
-        final Term uidTerm = indexShard.mapperService().createUidTerm(request.type(), request.id());
-        if (uidTerm == null) {
-            termVectorsResponse.setExists(false);
-            return termVectorsResponse;
-        }
+        final Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(request.id()));

        Fields termVectorsByField = null;
        AggregatedDfs dfs = null;

@@ -91,6 +91,7 @@ public class TransportMultiGetActionTests extends ESTestCase {
        };

        final Index index1 = new Index("index1", randomBase64UUID());
+        final Index index2 = new Index("index2", randomBase64UUID());
        final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName()))
            .metaData(new MetaData.Builder()
                .put(new IndexMetaData.Builder(index1.getName())

@@ -98,33 +99,45 @@ public class TransportMultiGetActionTests extends ESTestCase {
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 1)
                    .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
-                .putMapping("type1",
+                .putMapping("_doc",
                    XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
                        .startObject()
-                            .startObject("type1")
+                            .startObject("_doc")
                                .startObject("_routing")
                                    .field("required", false)
                                .endObject()
                            .endObject()
-                        .endObject()), true, XContentType.JSON))
-                .putMapping("type2",
+                        .endObject()), true, XContentType.JSON)))
+            .put(new IndexMetaData.Builder(index2.getName())
+                .settings(Settings.builder().put("index.version.created", Version.CURRENT)
+                    .put("index.number_of_shards", 1)
+                    .put("index.number_of_replicas", 1)
+                    .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
+                .putMapping("_doc",
                    XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
                        .startObject()
-                            .startObject("type2")
+                            .startObject("_doc")
                                .startObject("_routing")
                                    .field("required", true)
                                .endObject()
                            .endObject()
                        .endObject()), true, XContentType.JSON)))).build();

-        final ShardIterator shardIterator = mock(ShardIterator.class);
-        when(shardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));
+        final ShardIterator index1ShardIterator = mock(ShardIterator.class);
+        when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));
+
+        final ShardIterator index2ShardIterator = mock(ShardIterator.class);
+        when(index2ShardIterator.shardId()).thenReturn(new ShardId(index2, randomInt()));

        final OperationRouting operationRouting = mock(OperationRouting.class);
        when(operationRouting.getShards(eq(clusterState), eq(index1.getName()), anyString(), anyString(), anyString()))
-            .thenReturn(shardIterator);
+            .thenReturn(index1ShardIterator);
+        when(operationRouting.shardId(eq(clusterState), eq(index1.getName()), anyString(), anyString()))
+            .thenReturn(new ShardId(index1, randomInt()));
+        when(operationRouting.getShards(eq(clusterState), eq(index2.getName()), anyString(), anyString(), anyString()))
+            .thenReturn(index2ShardIterator);
+        when(operationRouting.shardId(eq(clusterState), eq(index2.getName()), anyString(), anyString()))
+            .thenReturn(new ShardId(index2, randomInt()));

        clusterService = mock(ClusterService.class);
        when(clusterService.localNode()).thenReturn(transportService.getLocalNode());

@@ -153,8 +166,8 @@ public class TransportMultiGetActionTests extends ESTestCase {
        final Task task = createTask();
        final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
        final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE);
-        request.add(new MultiGetRequest.Item("index1", "type1", "1"));
-        request.add(new MultiGetRequest.Item("index1", "type1", "2"));
+        request.add(new MultiGetRequest.Item("index1", "_doc", "1"));
+        request.add(new MultiGetRequest.Item("index1", "_doc", "2"));

        final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
        transportAction = new TransportMultiGetAction(transportService, clusterService, shardAction,

@@ -178,8 +191,8 @@ public class TransportMultiGetActionTests extends ESTestCase {
        final Task task = createTask();
        final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
        final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE);
-        request.add(new MultiGetRequest.Item("index1", "type2", "1").routing("1"));
-        request.add(new MultiGetRequest.Item("index1", "type2", "2"));
+        request.add(new MultiGetRequest.Item("index2", "_doc", "1").routing("1"));
+        request.add(new MultiGetRequest.Item("index2", "_doc", "2"));

        final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
        transportAction = new TransportMultiGetAction(transportService, clusterService, shardAction,

@@ -193,7 +206,7 @@ public class TransportMultiGetActionTests extends ESTestCase {
                assertNull(responses.get(0));
                assertThat(responses.get(1).getFailure().getFailure(), instanceOf(RoutingMissingException.class));
                assertThat(responses.get(1).getFailure().getFailure().getMessage(),
-                    equalTo("routing is required for [index1]/[type2]/[2]"));
+                    equalTo("routing is required for [index2]/[_doc]/[2]"));
            }
        };

@@ -92,40 +92,53 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase {
        };

        final Index index1 = new Index("index1", randomBase64UUID());
+        final Index index2 = new Index("index2", randomBase64UUID());
        final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName()))
            .metaData(new MetaData.Builder()
                .put(new IndexMetaData.Builder(index1.getName())
-                        .settings(Settings.builder().put("index.version.created", Version.CURRENT)
-                            .put("index.number_of_shards", 1)
-                            .put("index.number_of_replicas", 1)
-                            .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
-                        .putMapping("type1",
-                            XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
-                                .startObject()
-                                    .startObject("type1")
-                                        .startObject("_routing")
-                                            .field("required", false)
+                .settings(Settings.builder().put("index.version.created", Version.CURRENT)
+                    .put("index.number_of_shards", 1)
+                    .put("index.number_of_replicas", 1)
+                    .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
+                .putMapping("_doc",
+                    XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
+                        .startObject()
+                            .startObject("_doc")
+                                .startObject("_routing")
+                                    .field("required", false)
                                .endObject()
                            .endObject()
-                            .endObject()
-                        .endObject()), true, XContentType.JSON))
-                    .putMapping("type2",
-                        XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
-                            .startObject()
-                                .startObject("type2")
-                                    .startObject("_routing")
-                                        .field("required", true)
+                        .endObject()), true, XContentType.JSON)))
+            .put(new IndexMetaData.Builder(index2.getName())
+                .settings(Settings.builder().put("index.version.created", Version.CURRENT)
+                    .put("index.number_of_shards", 1)
+                    .put("index.number_of_replicas", 1)
+                    .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
+                .putMapping("_doc",
+                    XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
+                        .startObject()
+                            .startObject("_doc")
+                                .startObject("_routing")
+                                    .field("required", true)
                                .endObject()
                            .endObject()
-                            .endObject()
-                        .endObject()), true, XContentType.JSON)))).build();
+                        .endObject()), true, XContentType.JSON)))).build();

-        final ShardIterator shardIterator = mock(ShardIterator.class);
-        when(shardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));
+        final ShardIterator index1ShardIterator = mock(ShardIterator.class);
+        when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));
+
+        final ShardIterator index2ShardIterator = mock(ShardIterator.class);
+        when(index2ShardIterator.shardId()).thenReturn(new ShardId(index2, randomInt()));

        final OperationRouting operationRouting = mock(OperationRouting.class);
        when(operationRouting.getShards(eq(clusterState), eq(index1.getName()), anyString(), anyString(), anyString()))
-            .thenReturn(shardIterator);
+            .thenReturn(index1ShardIterator);
+        when(operationRouting.shardId(eq(clusterState), eq(index1.getName()), anyString(), anyString()))
+            .thenReturn(new ShardId(index1, randomInt()));
+        when(operationRouting.getShards(eq(clusterState), eq(index2.getName()), anyString(), anyString(), anyString()))
+            .thenReturn(index2ShardIterator);
+        when(operationRouting.shardId(eq(clusterState), eq(index2.getName()), anyString(), anyString()))
+            .thenReturn(new ShardId(index2, randomInt()));

        clusterService = mock(ClusterService.class);
        when(clusterService.localNode()).thenReturn(transportService.getLocalNode());

@@ -155,8 +168,8 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase {
        final Task task = createTask();
        final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
        final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE);
-        request.add(new TermVectorsRequest("index1", "type1", "1"));
-        request.add(new TermVectorsRequest("index1", "type1", "2"));
+        request.add(new TermVectorsRequest("index1", "_doc", "1"));
+        request.add(new TermVectorsRequest("index2", "_doc", "2"));

        final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
        transportAction = new TransportMultiTermVectorsAction(transportService, clusterService, shardAction,

@@ -180,8 +193,8 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase {
        final Task task = createTask();
        final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
        final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE);
-        request.add(new TermVectorsRequest("index1", "type2", "1").routing("1"));
-        request.add(new TermVectorsRequest("index1", "type2", "2"));
+        request.add(new TermVectorsRequest("index2", "_doc", "1").routing("1"));
+        request.add(new TermVectorsRequest("index2", "_doc", "2"));

        final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
        transportAction = new TransportMultiTermVectorsAction(transportService, clusterService, shardAction,

@@ -278,7 +278,7 @@ public class AckIT extends ESIntegTestCase {
        assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword"));

        for (Client client : clients()) {
-            assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue());
+            assertThat(getLocalClusterState(client).metaData().indices().get("test").getMappings().get("test"), notNullValue());
        }
    }

@@ -61,7 +61,7 @@ public class MetaDataMappingServiceTests extends ESSingleNodeTestCase {
        // the task really was a mapping update
        assertThat(
                indexService.mapperService().documentMapper("type").mappingSource(),
-                not(equalTo(result.resultingState.metaData().index("test").mapping("type").source())));
+                not(equalTo(result.resultingState.metaData().index("test").getMappings().get("type").source())));
        // since we never committed the cluster state update, the in-memory state is unchanged
        assertThat(indexService.mapperService().documentMapper("type").mappingSource(), equalTo(currentMapping));
    }

@@ -77,7 +77,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {

        logger.info("--> verify meta _routing required exists");
        MappingMetaData mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData()
-            .index("test").mapping("type1");
+            .index("test").getMappings().get("type1");
        assertThat(mappingMd.routing().required(), equalTo(true));

        logger.info("--> restarting nodes...");

|
@ -87,7 +87,8 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
ensureYellow();
|
||||
|
||||
logger.info("--> verify meta _routing required exists");
|
||||
mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
|
||||
mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").getMappings()
|
||||
.get("type1");
|
||||
assertThat(mappingMd.routing().required(), equalTo(true));
|
||||
}
|
||||
|
||||
|
|
|
@@ -1550,4 +1550,21 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
        assertEquals("Could not dynamically add mapping for field [alias-field.dynamic-field]. "
            + "Existing mapping for [alias-field] must be of type object but found [alias].", exception.getMessage());
    }
+
+    public void testTypeless() throws IOException {
+        DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
+        String mapping = Strings.toString(XContentFactory.jsonBuilder()
+                .startObject().startObject("type").startObject("properties")
+                .startObject("foo").field("type", "keyword").endObject()
+                .endObject().endObject().endObject());
+        DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));
+
+        BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder()
+                .startObject()
+                .field("foo", "1234")
+                .endObject());
+
+        ParsedDocument doc = mapper.parse(SourceToParse.source("test", "_doc", "1", bytes, XContentType.JSON));
+        assertNull(doc.dynamicMappingsUpdate()); // no update since we reused the existing type
+    }
 }

@@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;

@@ -80,8 +81,10 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.Engine.DeleteResult;
 import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.engine.InternalEngine;

@@ -1430,7 +1433,7 @@ public class IndexShardTests extends IndexShardTestCase {
        }
        long refreshCount = shard.refreshStats().getTotal();
        indexDoc(shard, "_doc", "test");
-        try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test",
+        try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "_doc", "test",
            new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) {
            assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1));
        }

@@ -2130,7 +2133,7 @@ public class IndexShardTests extends IndexShardTestCase {
        shard.refresh("test");

        try (Engine.GetResult getResult = shard
-                .get(new Engine.Get(false, false, "test", "1",
+                .get(new Engine.Get(false, false, "_doc", "1",
                    new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
            assertTrue(getResult.exists());
            assertNotNull(getResult.searcher());

@@ -2172,7 +2175,7 @@ public class IndexShardTests extends IndexShardTestCase {
            assertEquals(search.totalHits.value, 1);
        }
        try (Engine.GetResult getResult = newShard
-                .get(new Engine.Get(false, false, "test", "1",
+                .get(new Engine.Get(false, false, "_doc", "1",
                    new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
            assertTrue(getResult.exists());
            assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader

@@ -3663,6 +3666,59 @@ public class IndexShardTests extends IndexShardTestCase {
        return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.estimated_time_interval", "5ms").build();
    }

+    public void testTypelessDelete() throws IOException {
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .build();
+        IndexMetaData metaData = IndexMetaData.builder("index")
+            .putMapping("some_type", "{ \"properties\": {}}")
+            .settings(settings)
+            .build();
+        IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
+        recoverShardFromStore(shard);
+        Engine.IndexResult indexResult = indexDoc(shard, "some_type", "id", "{}");
+        assertTrue(indexResult.isCreated());
+
+        DeleteResult deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "some_other_type", "id", VersionType.INTERNAL);
+        assertFalse(deleteResult.isFound());
+
+        deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "_doc", "id", VersionType.INTERNAL);
+        assertTrue(deleteResult.isFound());
+
+        closeShards(shard);
+    }
+
+    public void testTypelessGet() throws IOException {
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .build();
+        IndexMetaData metaData = IndexMetaData.builder("index")
+            .putMapping("some_type", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
+            .settings(settings)
+            .primaryTerm(0, 1).build();
+        IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
+        recoverShardFromStore(shard);
+        Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}");
+        assertTrue(indexResult.isCreated());
+
+        org.elasticsearch.index.engine.Engine.GetResult getResult = shard.get(
+            new Engine.Get(true, true, "some_type", "0", new Term("_id", Uid.encodeId("0"))));
+        assertTrue(getResult.exists());
+        getResult.close();
+
+        getResult = shard.get(new Engine.Get(true, true, "some_other_type", "0", new Term("_id", Uid.encodeId("0"))));
+        assertFalse(getResult.exists());
+        getResult.close();
+
+        getResult = shard.get(new Engine.Get(true, true, "_doc", "0", new Term("_id", Uid.encodeId("0"))));
+        assertTrue(getResult.exists());
+        getResult.close();
+
+        closeShards(shard);
+    }
+
    /**
     * Randomizes the usage of {@link IndexShard#acquireReplicaOperationPermit(long, long, long, ActionListener, String, Object)} and
     * {@link IndexShard#acquireAllReplicaOperationsPermits(long, long, long, ActionListener, TimeValue)} in order to acquire a permit.

@@ -20,6 +20,7 @@ package org.elasticsearch.index.shard;

 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;

@@ -77,4 +78,30 @@ public class ShardGetServiceTests extends IndexShardTestCase {

        closeShards(primary);
    }
+
+    public void testTypelessGetForUpdate() throws IOException {
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .build();
+        IndexMetaData metaData = IndexMetaData.builder("index")
+            .putMapping("some_type", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
+            .settings(settings)
+            .primaryTerm(0, 1).build();
+        IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
+        recoverShardFromStore(shard);
+        Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}");
+        assertTrue(indexResult.isCreated());
+
+        GetResult getResult = shard.getService().getForUpdate("some_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL);
+        assertTrue(getResult.isExists());
+
+        getResult = shard.getService().getForUpdate("some_other_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL);
+        assertFalse(getResult.isExists());
+
+        getResult = shard.getService().getForUpdate("_doc", "0", Versions.MATCH_ANY, VersionType.INTERNAL);
+        assertTrue(getResult.isExists());
+
+        closeShards(shard);
+    }
 }

@@ -388,7 +388,7 @@ public class IndexAuditTrail implements AuditTrail, ClusterStateListener {
                    indices.stream().map(imd -> imd.getIndex().getName()).collect(Collectors.toList()));
        }
        IndexMetaData indexMetaData = indices.get(0);
-        MappingMetaData docMapping = indexMetaData.mapping("doc");
+        MappingMetaData docMapping = indexMetaData.getMappings().get("doc");
        if (docMapping == null) {
            if (indexToRemoteCluster || state.nodes().isLocalNodeElectedMaster() || hasStaleMessage()) {
                putAuditIndexMappingsAndStart(index);