Merge branch 'master' into ccr

* master:
  Trim down usages of `ShardOperationFailedException` interface (#28312)
  Do not return all indices if a specific alias is requested via get aliases api.
  [Test] Lower bwc version for rank-eval rest tests
  CountedBitSet doesn't need to extend BitSet. (#28239)
  Calculate sum in Kahan summation algorithm in aggregations (#27807) (#27848)
  Remove the `update_all_types` option. (#28288)
  Add information when master node left to DiscoveryNodes' shortSummary() (#28197)
  Provide explanation of dangling indices, fixes #26008 (#26999)
Jason Tedor 2018-01-22 11:50:16 -05:00
commit 437459d2f9
179 changed files with 1097 additions and 881 deletions
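Note: one item above, "Calculate sum in Kahan summation algorithm in aggregations (#27807)", refers to compensated summation. A minimal sketch of the technique for reference — not the aggregator code from this commit:

class KahanSum {
    // Compensated (Kahan) summation: track the low-order bits that plain
    // floating-point addition discards, and feed them back in on the next step.
    static double sum(double[] values) {
        double sum = 0.0;
        double compensation = 0.0;
        for (double value : values) {
            double corrected = value - compensation;
            double newSum = sum + corrected;            // low-order bits of corrected are lost here
            compensation = (newSum - sum) - corrected;  // recover them for the next iteration
            sum = newSum;
        }
        return sum;
    }
}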

View File

@ -173,7 +173,6 @@ public final class Request {
parameters.withTimeout(createIndexRequest.timeout());
parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
-parameters.withUpdateAllTypes(createIndexRequest.updateAllTypes());
HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE);
return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
@ -585,13 +584,6 @@ public final class Request {
return putParam("timeout", timeout);
}
-Params withUpdateAllTypes(boolean updateAllTypes) {
-if (updateAllTypes) {
-return putParam("update_all_types", Boolean.TRUE.toString());
-}
-return this;
-}
Params withVersion(long version) {
if (version != Versions.MATCH_ANY) {
return putParam("version", Long.toString(version));

View File

@ -310,14 +310,6 @@ public class RequestTests extends ESTestCase {
setRandomMasterTimeout(createIndexRequest, expectedParams);
setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);
-if (randomBoolean()) {
-boolean updateAllTypes = randomBoolean();
-createIndexRequest.updateAllTypes(updateAllTypes);
-if (updateAllTypes) {
-expectedParams.put("update_all_types", Boolean.TRUE.toString());
-}
-}
Request request = Request.createIndex(createIndexRequest);
assertEquals("/" + indexName, request.getEndpoint());
assertEquals(expectedParams, request.getParameters());

View File

@ -13,4 +13,8 @@ The `index_options` field for numeric fields has been deprecated in 6 and has n
To safeguard against out of memory errors, the number of nested json objects within a single
document across all fields has been limited to 10000. This default limit can be changed with
-the index setting `index.mapping.nested_objects.limit`.
+the index setting `index.mapping.nested_objects.limit`.
+==== The `update_all_types` option has been removed
+This option is useless now that all indices have at most one type.
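For callers the removal is mechanical: the flag simply disappears from the request builders (see the deleted setUpdateAllTypes methods later in this diff). A hypothetical before/after caller, assuming a transport Client; illustration only, not code from this commit:

import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentType;

class PutMappingMigration {
    static void putMapping(Client client, String mappingSource) {
        client.admin().indices().preparePutMapping("my-index")
                .setType("my-type")
                .setSource(mappingSource, XContentType.JSON)
                // before this commit a caller could also chain
                // .setUpdateAllTypes(true); that method is now gone
                .get();
    }
}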

View File

@ -48,3 +48,12 @@ as long as the following conditions are met:
Recover as long as this many data nodes have joined the cluster.
NOTE: These settings only take effect on a full cluster restart.
+=== Dangling indices
+When a node joins the cluster, any shards stored in its local data
+directory which do not already exist in the cluster will be imported into the
+cluster. This functionality is intended as a best effort to help users who
+lose all master nodes. If a new master node is started which is unaware of
+the other indices in the cluster, adding the old nodes will cause the old
+indices to be imported, instead of being deleted.

View File

@ -207,8 +207,8 @@ public class ScaledFloatFieldMapper extends FieldMapper {
}
@Override
-public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
-super.checkCompatibility(other, conflicts, strict);
+public void checkCompatibility(MappedFieldType other, List<String> conflicts) {
+super.checkCompatibility(other, conflicts);
if (scalingFactor != ((ScaledFloatFieldType) other).getScalingFactor()) {
conflicts.add("mapper [" + name() + "] has different [scaling_factor] values");
}
@ -424,8 +424,8 @@ public class ScaledFloatFieldMapper extends FieldMapper {
}
@Override
-protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-super.doMerge(mergeWith, updateAllTypes);
+protected void doMerge(Mapper mergeWith) {
+super.doMerge(mergeWith);
ScaledFloatFieldMapper other = (ScaledFloatFieldMapper) mergeWith;
if (other.ignoreMalformed.explicit()) {
this.ignoreMalformed = other.ignoreMalformed;

View File

@ -202,8 +202,8 @@ public class TokenCountFieldMapper extends FieldMapper {
}
@Override
-protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-super.doMerge(mergeWith, updateAllTypes);
+protected void doMerge(Mapper mergeWith) {
+super.doMerge(mergeWith);
this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
this.enablePositionIncrements = ((TokenCountFieldMapper) mergeWith).enablePositionIncrements;
}

View File

@ -63,7 +63,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().string();
MapperService mapperService = createIndex("test").mapperService();
DocumentMapper stage1 = mapperService.merge("person",
-new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE);
String stage2Mapping = XContentFactory.jsonBuilder().startObject()
.startObject("person")
@ -75,7 +75,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject().endObject().string();
DocumentMapper stage2 = mapperService.merge("person",
-new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE);
// previous mapper has not been modified
assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword"));

View File

@ -194,8 +194,8 @@ public final class ParentIdFieldMapper extends FieldMapper {
@Override
-protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-super.doMerge(mergeWith, updateAllTypes);
+protected void doMerge(Mapper mergeWith) {
+super.doMerge(mergeWith);
ParentIdFieldMapper parentMergeWith = (ParentIdFieldMapper) mergeWith;
this.children = parentMergeWith.children;
}

View File

@ -316,8 +316,8 @@ public final class ParentJoinFieldMapper extends FieldMapper {
}
@Override
-protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-super.doMerge(mergeWith, updateAllTypes);
+protected void doMerge(Mapper mergeWith) {
+super.doMerge(mergeWith);
ParentJoinFieldMapper joinMergeWith = (ParentJoinFieldMapper) mergeWith;
List<String> conflicts = new ArrayList<>();
for (ParentIdFieldMapper mapper : parentIdFields) {
@ -347,7 +347,7 @@ public final class ParentJoinFieldMapper extends FieldMapper {
conflicts.add("cannot remove child [" + child + "] in join field [" + name() + "]");
}
}
-ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper, updateAllTypes);
+ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper);
newParentIdFields.add(merged);
}
}
@ -356,7 +356,7 @@ public final class ParentJoinFieldMapper extends FieldMapper {
}
this.eagerGlobalOrdinals = joinMergeWith.eagerGlobalOrdinals;
this.parentIdFields = Collections.unmodifiableList(newParentIdFields);
-this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper, updateAllTypes);
+this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper);
uniqueFieldMapper.setFieldMapper(this);
}

View File

@ -57,7 +57,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().string();
IndexService service = createIndex("test");
DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false);
+MapperService.MergeReason.MAPPING_UPDATE);
assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService()));
// Doc without join
@ -106,7 +106,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().string();
IndexService service = createIndex("test");
DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false);
+MapperService.MergeReason.MAPPING_UPDATE);
ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "2",
XContentFactory.jsonBuilder().startObject()
.startObject("join_field")
@ -141,7 +141,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().string();
IndexService service = createIndex("test");
DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false);
+MapperService.MergeReason.MAPPING_UPDATE);
assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService()));
// Doc without join
@ -221,7 +221,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().string();
IndexService indexService = createIndex("test");
DocumentMapper docMapper = indexService.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false);
+MapperService.MergeReason.MAPPING_UPDATE);
assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService()));
{
@ -235,7 +235,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().string();
IllegalStateException exc = expectThrows(IllegalStateException.class,
() -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-MapperService.MergeReason.MAPPING_UPDATE, false));
+MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getMessage(), containsString("cannot remove parent [parent] in join field [join_field]"));
}
@ -251,7 +251,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().string();
IllegalStateException exc = expectThrows(IllegalStateException.class,
() -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-MapperService.MergeReason.MAPPING_UPDATE, false));
+MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getMessage(), containsString("cannot remove child [grand_child2] in join field [join_field]"));
}
@ -268,7 +268,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().string();
IllegalStateException exc = expectThrows(IllegalStateException.class,
() -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-MapperService.MergeReason.MAPPING_UPDATE, false));
+MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getMessage(), containsString("cannot create child [parent] from an existing parent"));
}
@ -285,7 +285,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().string();
IllegalStateException exc = expectThrows(IllegalStateException.class,
() -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-MapperService.MergeReason.MAPPING_UPDATE, false));
+MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getMessage(), containsString("cannot create parent [grand_child2] from an existing child]"));
}
@ -300,7 +300,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject().endObject().string();
docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-MapperService.MergeReason.MAPPING_UPDATE, true);
+MapperService.MergeReason.MAPPING_UPDATE);
assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService()));
ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService());
assertTrue(mapper.hasChild("child2"));
@ -321,7 +321,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject().endObject().string();
docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-MapperService.MergeReason.MAPPING_UPDATE, true);
+MapperService.MergeReason.MAPPING_UPDATE);
assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService()));
ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService());
assertTrue(mapper.hasParent("other"));
@ -349,7 +349,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
IndexService indexService = createIndex("test");
MapperParsingException exc = expectThrows(MapperParsingException.class,
() -> indexService.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false));
+MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getRootCause().getMessage(),
containsString("join field [object.join_field] cannot be added inside an object or in a multi-field"));
}
@ -371,7 +371,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
IndexService indexService = createIndex("test");
MapperParsingException exc = expectThrows(MapperParsingException.class,
() -> indexService.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false));
+MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getRootCause().getMessage(),
containsString("join field [number.join_field] cannot be added inside an object or in a multi-field"));
}
@ -397,7 +397,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject().string();
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type",
-new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false));
+new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]"));
}
@ -414,7 +414,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject().string();
indexService.mapperService().merge("type",
-new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
String updateMapping = XContentFactory.jsonBuilder().startObject()
.startObject("properties")
.startObject("another_join_field")
@ -423,7 +423,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject().string();
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type",
-new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE, false));
+new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]"));
}
}
@ -442,7 +442,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject().string();
IndexService service = createIndex("test");
DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false);
+MapperService.MergeReason.MAPPING_UPDATE);
assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService()));
assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals());
assertNotNull(service.mapperService().fullName("join_field#parent"));
@ -463,7 +463,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject().string();
service.mapperService().merge("type", new CompressedXContent(mapping),
-MapperService.MergeReason.MAPPING_UPDATE, false);
+MapperService.MergeReason.MAPPING_UPDATE);
assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals());
assertNotNull(service.mapperService().fullName("join_field#parent"));
assertFalse(service.mapperService().fullName("join_field#parent").eagerGlobalOrdinals());

View File

@ -132,7 +132,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
.endObject().endObject().endObject();
mapperService.merge(TYPE,
-new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
}
/**

View File

@ -112,7 +112,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
.endObject().endObject().endObject();
mapperService.merge(TYPE,
-new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
}
/**

View File

@ -97,7 +97,7 @@ public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase<HasCh
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE,
"_parent", "type=" + PARENT_TYPE,
STRING_FIELD_NAME, "type=text",
@ -107,7 +107,7 @@ public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase<HasCh
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
}
@Override

View File

@ -88,7 +88,7 @@ public class LegacyHasParentQueryBuilderTests extends AbstractQueryTestCase<HasP
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE,
"_parent", "type=" + PARENT_TYPE,
STRING_FIELD_NAME, "type=text",
@ -98,9 +98,9 @@ public class LegacyHasParentQueryBuilderTests extends AbstractQueryTestCase<HasP
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
mapperService.merge("just_a_type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("just_a_type"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
}
/**

View File

@ -72,7 +72,7 @@ public class LegacyParentIdQueryBuilderTests extends AbstractQueryTestCase<Paren
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE,
"_parent", "type=" + PARENT_TYPE,
STRING_FIELD_NAME, "type=text",
@ -81,7 +81,7 @@ public class LegacyParentIdQueryBuilderTests extends AbstractQueryTestCase<Paren
BOOLEAN_FIELD_NAME, "type=boolean",
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
}
@Override

View File

@ -104,7 +104,7 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase<ParentIdQue
.endObject().endObject().endObject();
mapperService.merge(TYPE,
-new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
}
@Override

View File

@ -136,13 +136,13 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
.startObject("ip_field").field("type", "ip").endObject()
.startObject("field").field("type", "keyword").endObject()
.endObject().endObject().endObject().string();
-documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE);
String queryField = "query_field";
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject(queryField).field("type", "percolator").endObject().endObject()
.endObject().endObject().string();
mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
fieldMapper = (PercolatorFieldMapper) mapperService.documentMapper("type").mappers().getMapper(queryField);
fieldType = (PercolatorFieldMapper.FieldType) fieldMapper.fieldType();

View File

@ -98,10 +98,10 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
String docType = "_doc";
mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
queryField, "type=percolator"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
STRING_FIELD_NAME, "type=text"
-).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+).string()), MapperService.MergeReason.MAPPING_UPDATE);
if (mapperService.getIndexSettings().isSingleType() == false) {
PercolateQueryBuilderTests.docType = docType;
}

View File

@ -156,7 +156,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
.startObject("number_field7").field("type", "ip").endObject()
.startObject("date_field").field("type", "date").endObject()
.endObject().endObject().endObject().string();
mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, false);
mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE);
}
private void addQueryFieldMappings() throws Exception {
@ -164,7 +164,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("doc")
.startObject("properties").startObject(fieldName).field("type", "percolator").endObject().endObject()
.endObject().endObject().string();
mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, false);
mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
fieldType = (PercolatorFieldMapper.FieldType) mapperService.fullName(fieldName);
}
@ -578,7 +578,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
.startObject("properties").startObject(fieldName).field("type", "percolator").field("index", "no").endObject().endObject()
.endObject().endObject().string();
MapperParsingException e = expectThrows(MapperParsingException.class, () ->
mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true));
mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(), containsString("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
}
@ -592,7 +592,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
.startObject("query_field2").field("type", "percolator").endObject()
.endObject()
.endObject().endObject().string();
-mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
QueryBuilder queryBuilder = matchQuery("field", "value");
ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1",
@ -623,7 +623,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
.endObject()
.endObject()
.endObject().endObject().string();
-mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
QueryBuilder queryBuilder = matchQuery("field", "value");
ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1",

View File

@ -2,8 +2,8 @@
"Response format":
- skip:
version: " - 6.99.99"
reason: the ranking evaluation feature is only available on 7.0
version: " - 6.1.99"
reason: the ranking evaluation feature is available since 6.2
- do:
indices.create:

View File

@ -2,8 +2,8 @@
"Response format":
- skip:
version: " - 6.99.99"
reason: the ranking evaluation feature is only available on 7.0
version: " - 6.1.99"
reason: the ranking evaluation feature is available since 6.2
- do:
index:

View File

@ -2,8 +2,8 @@
"Response format":
- skip:
version: " - 6.99.99"
reason: the ranking evaluation feature is only available on 7.0
version: " - 6.1.99"
reason: the ranking evaluation feature is available since 6.2
- do:
index:

View File

@ -99,8 +99,8 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
}
@Override
-public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts, boolean strict) {
-super.checkCompatibility(otherFT, conflicts, strict);
+public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts) {
+super.checkCompatibility(otherFT, conflicts);
CollationFieldType other = (CollationFieldType) otherFT;
if (!Objects.equals(collator, other.collator)) {
conflicts.add("mapper [" + name() + "] has different [collator]");
@ -619,8 +619,8 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
}
@Override
-protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-super.doMerge(mergeWith, updateAllTypes);
+protected void doMerge(Mapper mergeWith) {
+super.doMerge(mergeWith);
List<String> conflicts = new ArrayList<>();
ICUCollationKeywordFieldMapper icuMergeWith = (ICUCollationKeywordFieldMapper) mergeWith;

View File

@ -434,7 +434,7 @@ public class ICUCollationKeywordFieldMapperTests extends ESSingleNodeTestCase {
.field("language", "tr")
.field("strength", "primary")
.endObject().endObject().endObject().endObject().string();
indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean());
indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field")
@ -443,7 +443,7 @@ public class ICUCollationKeywordFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject().endObject().endObject().string();
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> indexService.mapperService().merge("type",
-new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean()));
+new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE));
assertEquals("Can't merge because of conflicts: [Cannot update language setting for [" + FIELD_TYPE
+ "], Cannot update strength setting for [" + FIELD_TYPE + "]]", e.getMessage());
}

View File

@ -183,7 +183,7 @@ public class SizeFieldMapper extends MetadataFieldMapper {
}
@Override
-protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+protected void doMerge(Mapper mergeWith) {
SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith;
if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {
this.enabledState = sizeFieldMapperMergeWith.enabledState;

View File

@ -110,7 +110,7 @@ public class SizeMappingTests extends ESSingleNodeTestCase {
.startObject("_size").field("enabled", false).endObject()
.endObject().endObject().string();
docMapper = service.mapperService().merge("type", new CompressedXContent(disabledMapping),
-MapperService.MergeReason.MAPPING_UPDATE, false);
+MapperService.MergeReason.MAPPING_UPDATE);
assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(false));
}

View File

@ -2,8 +2,8 @@
"Template request":
- skip:
version: " - 6.99.99"
reason: the ranking evaluation feature is only available on 7.0
version: " - 6.1.99"
reason: the ranking evaluation feature is available since 6.2
- do:
indices.create:

View File

@ -24,10 +24,6 @@
"master_timeout": {
"type" : "time",
"description" : "Specify timeout for connection to master"
-},
-"update_all_types": {
-"type": "boolean",
-"description": "Whether to update the mapping for all fields with the same name across all types or not"
}
}
},

View File

@ -38,10 +38,6 @@
"options" : ["open","closed","none","all"],
"default" : "open",
"description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
-},
-"update_all_types": {
-"type": "boolean",
-"description": "Whether to update the mapping for all fields with the same name across all types or not"
}
}
},

View File

@ -62,8 +62,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
-@SuppressWarnings("unchecked")
-ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);
+ImmutableOpenMap<String, List<AliasMetaData>> result = state.metaData().findAliases(request.aliases(), concreteIndices);
listener.onResponse(new GetAliasesResponse(result));
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.indices.cache.clear;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -38,7 +38,8 @@ public class ClearIndicesCacheResponse extends BroadcastResponse {
}
-ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards,
+List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.action.admin.indices.cache.clear;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -65,7 +65,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
@Override
protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards,
int failedShards, List<EmptyResult> responses,
-List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures);
}

View File

@ -43,7 +43,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final String cause;
private final String index;
private final String providedName;
-private final boolean updateAllTypes;
private Index recoverFrom;
private ResizeType resizeType;
@ -61,12 +60,10 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
-public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName,
-boolean updateAllTypes) {
+public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName) {
this.originalMessage = originalMessage;
this.cause = cause;
this.index = index;
-this.updateAllTypes = updateAllTypes;
this.providedName = providedName;
}
@ -155,11 +152,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return recoverFrom;
}
-/** True if all fields that span multiple types should be updated, false otherwise */
-public boolean updateAllTypes() {
-return updateAllTypes;
-}
/**
* The name that was provided by the user. This might contain a date math expression.
* @see IndexMetaData#SETTING_INDEX_PROVIDED_NAME

View File

@ -85,8 +85,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
private final Map<String, IndexMetaData.Custom> customs = new HashMap<>();
-private boolean updateAllTypes = false;
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
public CreateIndexRequest() {
@ -429,17 +427,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this.customs;
}
-/** True if all fields that span multiple types should be updated, false otherwise */
-public boolean updateAllTypes() {
-return updateAllTypes;
-}
-/** See {@link #updateAllTypes()} */
-public CreateIndexRequest updateAllTypes(boolean updateAllTypes) {
-this.updateAllTypes = updateAllTypes;
-return this;
-}
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
@ -499,7 +486,9 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
for (int i = 0; i < aliasesSize; i++) {
aliases.add(Alias.read(in));
}
-updateAllTypes = in.readBoolean();
+if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
+in.readBoolean(); // updateAllTypes
+}
waitForActiveShards = ActiveShardCount.readFrom(in);
}
@ -523,7 +512,9 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
for (Alias alias : aliases) {
alias.writeTo(out);
}
-out.writeBoolean(updateAllTypes);
+if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
+out.writeBoolean(true); // updateAllTypes
+}
waitForActiveShards.writeTo(out);
}

View File

@ -239,12 +239,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
return this;
}
-/** True if all fields that span multiple types should be updated, false otherwise */
-public CreateIndexRequestBuilder setUpdateAllTypes(boolean updateAllTypes) {
-request.updateAllTypes(updateAllTypes);
-return this;
-}
/**
* Sets the number of shard copies that should be active for index creation to return.
* Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy

View File

@ -72,7 +72,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
}
final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index());
-final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index(), request.updateAllTypes())
+final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.aliases(request.aliases()).customs(request.customs())

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.indices.flush;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import java.util.List;
@ -35,7 +35,7 @@ public class FlushResponse extends BroadcastResponse {
}
-FlushResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+FlushResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.action.admin.indices.flush;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -57,7 +57,8 @@ public class TransportFlushAction extends TransportBroadcastReplicationAction<Fl
}
@Override
-protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) {
+protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List
+<DefaultShardOperationFailedException> shardFailures) {
return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
}
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.indices.forcemerge;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import java.util.List;
@ -32,7 +32,7 @@ public class ForceMergeResponse extends BroadcastResponse {
ForceMergeResponse() {
}
-ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
}
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.action.admin.indices.forcemerge;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -62,7 +62,7 @@ public class TransportForceMergeAction extends TransportBroadcastByNodeAction<Fo
}
@Override
-protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures);
}

View File

@ -30,8 +30,6 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
private String source;
-private boolean updateAllTypes = false;
public PutMappingClusterStateUpdateRequest() {
}
@ -53,13 +51,4 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
this.source = source;
return this;
}
-public boolean updateAllTypes() {
-return updateAllTypes;
-}
-public PutMappingClusterStateUpdateRequest updateAllTypes(boolean updateAllTypes) {
-this.updateAllTypes = updateAllTypes;
-return this;
-}
}

View File

@ -72,7 +72,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
private String source;
-private boolean updateAllTypes = false;
private Index concreteIndex;
public PutMappingRequest() {
@ -290,17 +289,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
}
}
-/** True if all fields that span multiple types should be updated, false otherwise */
-public boolean updateAllTypes() {
-return updateAllTypes;
-}
-/** See {@link #updateAllTypes()} */
-public PutMappingRequest updateAllTypes(boolean updateAllTypes) {
-this.updateAllTypes = updateAllTypes;
-return this;
-}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -312,7 +300,9 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
// we do not know the format from earlier versions so convert if necessary
source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source));
}
-updateAllTypes = in.readBoolean();
+if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
+in.readBoolean(); // updateAllTypes
+}
concreteIndex = in.readOptionalWriteable(Index::new);
}
@ -323,7 +313,9 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
indicesOptions.writeIndicesOptions(out);
out.writeOptionalString(type);
out.writeString(source);
-out.writeBoolean(updateAllTypes);
+if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
+out.writeBoolean(true); // updateAllTypes
+}
out.writeOptionalWriteable(concreteIndex);
}
}

View File

@ -98,10 +98,4 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
return this;
}
-/** True if all fields that span multiple types should be updated, false otherwise */
-public PutMappingRequestBuilder setUpdateAllTypes(boolean updateAllTypes) {
-request.updateAllTypes(updateAllTypes);
-return this;
-}
}

View File

@ -82,7 +82,6 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices).type(request.type())
-.updateAllTypes(request.updateAllTypes())
.source(request.source());
metaDataMappingService.putMapping(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.indices.recovery;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@ -56,7 +56,8 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContentFra
* @param shardFailures List of failures processing shards
*/
public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed,
-Map<String, List<RecoveryState>> shardRecoveryStates, List<ShardOperationFailedException> shardFailures) {
+Map<String, List<RecoveryState>> shardRecoveryStates,
+List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.shardRecoveryStates = shardRecoveryStates;
this.detailed = detailed;

View File

@ -19,8 +19,8 @@
package org.elasticsearch.action.admin.indices.recovery;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -69,7 +69,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
@Override
-protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
Map<String, List<RecoveryState>> shardResponses = new HashMap<>();
for (RecoveryState recoveryState : responses) {
if (recoveryState == null) {

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.indices.refresh;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import java.util.List;
@ -32,7 +32,7 @@ public class RefreshResponse extends BroadcastResponse {
RefreshResponse() {
}
-RefreshResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+RefreshResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
}
}

View File

@ -19,9 +19,9 @@
package org.elasticsearch.action.admin.indices.refresh;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.replication.BasicReplicationRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
@ -61,7 +61,8 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
}
@Override
-protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) {
+protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies,
+List<DefaultShardOperationFailedException> shardFailures) {
return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
}
}

View File

@ -232,7 +232,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
createIndexRequest.cause("rollover_index");
createIndexRequest.index(targetIndexName);
return new CreateIndexClusterStateUpdateRequest(createIndexRequest,
"rollover_index", targetIndexName, providedIndexName, true)
"rollover_index", targetIndexName, providedIndexName)
.ackTimeout(createIndexRequest.timeout())
.masterNodeTimeout(createIndexRequest.masterNodeTimeout())
.settings(createIndexRequest.settings())

View File

@ -24,7 +24,7 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.util.Accountable;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -53,7 +53,8 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont
}
-IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards,
+List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.shards = shards;
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.action.admin.indices.segments;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -77,7 +77,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
}
@Override
-protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardSegments> results, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardSegments> results, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures);
}

View File

@ -25,7 +25,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
@ -348,7 +347,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
}
}
out.writeVInt(failures.size());
-for (ShardOperationFailedException failure : failures) {
+for (Failure failure : failures) {
failure.writeTo(out);
}
}
@ -357,7 +356,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (failures.size() > 0) {
builder.startArray(Fields.FAILURES);
-for (ShardOperationFailedException failure : failures) {
+for (Failure failure : failures) {
builder.startObject();
failure.toXContent(builder, params);
builder.endObject();

View File

@ -179,7 +179,7 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
targetIndex.settings(settingsBuilder);
return new CreateIndexClusterStateUpdateRequest(targetIndex,
-cause, targetIndex.index(), targetIndexName, true)
+cause, targetIndex.index(), targetIndexName)
// mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be
// applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we miss
// the mappings for everything is corrupted and hard to debug

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.indices.stats;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
@ -48,7 +48,8 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten
}
-IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards,
+List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.shards = shards;
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.action.admin.indices.stats;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -79,7 +79,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
}
@Override
-protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
}

View File

@ -20,8 +20,8 @@
package org.elasticsearch.action.admin.indices.upgrade.get;
import org.elasticsearch.Version;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -79,7 +79,7 @@ public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction
}
@Override
-protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
}

View File

@ -19,11 +19,10 @@
package org.elasticsearch.action.admin.indices.upgrade.get;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -43,7 +42,8 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
UpgradeStatusResponse() {
}
-UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards,
+List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.shards = shards;
}

View File

@ -22,8 +22,8 @@ package org.elasticsearch.action.admin.indices.upgrade.post;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.PrimaryMissingActionException;
-import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -71,7 +71,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
}
@Override
-protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeResult> shardUpgradeResults, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeResult> shardUpgradeResults, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
Map<String, Integer> successfulPrimaryShards = new HashMap<>();
Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
for (ShardUpgradeResult result : shardUpgradeResults) {

View File

@ -20,7 +20,7 @@
package org.elasticsearch.action.admin.indices.upgrade.post;
import org.elasticsearch.Version;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
@ -44,7 +44,8 @@ public class UpgradeResponse extends BroadcastResponse {
}
UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards,
List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.versions = versions;
}

View File

@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.validate.query;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@ -115,7 +114,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
int successfulShards = 0;
int failedShards = 0;
boolean valid = true;
List<ShardOperationFailedException> shardFailures = null;
List<DefaultShardOperationFailedException> shardFailures = null;
List<QueryExplanation> queryExplanations = null;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.admin.indices.validate.query;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -46,7 +46,8 @@ public class ValidateQueryResponse extends BroadcastResponse {
}
ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards,
List<DefaultShardOperationFailedException> shardFailures) {
super(totalShards, successfulShards, failedShards, shardFailures);
this.valid = valid;
this.queryExplanations = queryExplanations;

View File

@ -20,11 +20,10 @@
package org.elasticsearch.action.support.broadcast;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.index.shard.ShardNotFoundException;
import java.io.IOException;
import java.util.List;
@ -35,30 +34,24 @@ import static org.elasticsearch.action.support.DefaultShardOperationFailedExcept
* Base class for all broadcast operation based responses.
*/
public class BroadcastResponse extends ActionResponse {
private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0];
private static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0];
private int totalShards;
private int successfulShards;
private int failedShards;
private ShardOperationFailedException[] shardFailures = EMPTY;
private DefaultShardOperationFailedException[] shardFailures = EMPTY;
public BroadcastResponse() {
}
public BroadcastResponse(int totalShards, int successfulShards, int failedShards,
List<? extends ShardOperationFailedException> shardFailures) {
assertNoShardNotAvailableFailures(shardFailures);
List<DefaultShardOperationFailedException> shardFailures) {
this.totalShards = totalShards;
this.successfulShards = successfulShards;
this.failedShards = failedShards;
this.shardFailures = shardFailures == null ? EMPTY :
shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]);
}
private void assertNoShardNotAvailableFailures(List<? extends ShardOperationFailedException> shardFailures) {
if (shardFailures != null) {
for (Object e : shardFailures) {
assert (e instanceof ShardNotFoundException) == false : "expected no ShardNotFoundException failures, but got " + e;
}
if (shardFailures == null) {
this.shardFailures = EMPTY;
} else {
this.shardFailures = shardFailures.toArray(new DefaultShardOperationFailedException[shardFailures.size()]);
}
}
@ -97,7 +90,7 @@ public class BroadcastResponse extends ActionResponse {
/**
* The list of shard failures exception.
*/
public ShardOperationFailedException[] getShardFailures() {
public DefaultShardOperationFailedException[] getShardFailures() {
return shardFailures;
}
@ -109,7 +102,7 @@ public class BroadcastResponse extends ActionResponse {
failedShards = in.readVInt();
int size = in.readVInt();
if (size > 0) {
shardFailures = new ShardOperationFailedException[size];
shardFailures = new DefaultShardOperationFailedException[size];
for (int i = 0; i < size; i++) {
shardFailures[i] = readShardOperationFailed(in);
}
@ -123,7 +116,7 @@ public class BroadcastResponse extends ActionResponse {
out.writeVInt(successfulShards);
out.writeVInt(failedShards);
out.writeVInt(shardFailures.length);
for (ShardOperationFailedException exp : shardFailures) {
for (DefaultShardOperationFailedException exp : shardFailures) {
exp.writeTo(out);
}
}
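
The serialization change above keeps the usual length-prefixed wire pattern: write a count, then each failure in order, and mirror that on read. Below is a minimal standalone sketch of the same idea, using plain java.io streams and a hypothetical ShardFailure record in place of Elasticsearch's StreamInput/StreamOutput and DefaultShardOperationFailedException (Java 16+ for records):

import java.io.*;
import java.util.*;

// Hypothetical stand-in for DefaultShardOperationFailedException.
record ShardFailure(String index, int shardId, String reason) {
    void writeTo(DataOutputStream out) throws IOException {
        out.writeUTF(index);
        out.writeInt(shardId);
        out.writeUTF(reason);
    }
    static ShardFailure readFrom(DataInputStream in) throws IOException {
        return new ShardFailure(in.readUTF(), in.readInt(), in.readUTF());
    }
}

public class LengthPrefixedDemo {
    // Write the count first, then each element, mirroring BroadcastResponse#writeTo.
    static void write(DataOutputStream out, List<ShardFailure> failures) throws IOException {
        out.writeInt(failures.size());
        for (ShardFailure failure : failures) {
            failure.writeTo(out);
        }
    }

    // Read the count, then exactly that many elements, mirroring the readFrom side.
    static ShardFailure[] read(DataInputStream in) throws IOException {
        int size = in.readInt();
        ShardFailure[] failures = new ShardFailure[size];
        for (int i = 0; i < size; i++) {
            failures[i] = ShardFailure.readFrom(in);
        }
        return failures;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), List.of(new ShardFailure("idx", 0, "boom")));
        ShardFailure[] back = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(Arrays.toString(back));
    }
}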

View File

@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.HandledTransportAction;
@ -131,7 +130,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
int totalShards = 0;
int successfulShards = 0;
List<ShardOperationResult> broadcastByNodeResponses = new ArrayList<>();
List<ShardOperationFailedException> exceptions = new ArrayList<>();
List<DefaultShardOperationFailedException> exceptions = new ArrayList<>();
for (int i = 0; i < responses.length(); i++) {
if (responses.get(i) instanceof FailedNodeException) {
FailedNodeException exception = (FailedNodeException) responses.get(i);
@ -176,7 +175,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
* @param clusterState the cluster state
* @return the response
*/
protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<ShardOperationResult> results, List<ShardOperationFailedException> shardFailures, ClusterState clusterState);
protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<ShardOperationResult> results, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState);
/**
* Deserialize a request from an input stream

View File

@ -22,7 +22,6 @@ package org.elasticsearch.action.support.replication;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.HandledTransportAction;
@ -76,7 +75,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
final ClusterState clusterState = clusterService.state();
List<ShardId> shards = shards(request, clusterState);
final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList();
final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList<>();
if (shards.size() == 0) {
finishAndNotifyListener(listener, shardsResponses);
}
@ -148,7 +147,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
int successfulShards = 0;
int failedShards = 0;
int totalNumCopies = 0;
List<ShardOperationFailedException> shardFailures = null;
List<DefaultShardOperationFailedException> shardFailures = null;
for (int i = 0; i < shardsResponses.size(); i++) {
ReplicationResponse shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
@ -168,5 +167,6 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures));
}
protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures);
protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies,
List<DefaultShardOperationFailedException> shardFailures);
}
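
Both broadcast actions accumulate failures into a list that stays null until the first failure arrives, so the common all-successful path allocates nothing. A small sketch of that lazy-accumulation idiom, with a made-up ShardResult type standing in for the real shard responses:

import java.util.ArrayList;
import java.util.List;

public class LazyFailures {
    // Hypothetical per-shard outcome: a null reason means the shard succeeded.
    record ShardResult(int shardId, String failureReason) {}

    static List<String> collectFailures(List<ShardResult> results) {
        List<String> failures = null; // allocated only once a failure shows up
        for (ShardResult result : results) {
            if (result.failureReason() != null) {
                if (failures == null) {
                    failures = new ArrayList<>();
                }
                failures.add("shard " + result.shardId() + ": " + result.failureReason());
            }
        }
        return failures == null ? List.of() : failures;
    }

    public static void main(String[] args) {
        System.out.println(collectFailures(List.of(
            new ShardResult(0, null), new ShardResult(1, "disk full"))));
    }
}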

View File

@ -275,14 +275,12 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
if (!filteredValues.isEmpty()) {
// Make the list order deterministic
CollectionUtil.timSort(filteredValues, new Comparator<AliasMetaData>() {
@Override
public int compare(AliasMetaData o1, AliasMetaData o2) {
return o1.alias().compareTo(o2.alias());
}
});
CollectionUtil.timSort(filteredValues, Comparator.comparing(AliasMetaData::alias));
mapBuilder.put(index, Collections.unmodifiableList(filteredValues));
} else if (matchAllAliases) {
// if all aliases are requested, return the concrete index with no aliases (#25114):
mapBuilder.put(index, Collections.emptyList());
}
mapBuilder.put(index, Collections.unmodifiableList(filteredValues));
}
return mapBuilder.build();
}
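
The anonymous Comparator collapses into a single Comparator.comparing call; both forms sort deterministically by the extracted key. A tiny self-contained demonstration, assuming a made-up Alias record in place of AliasMetaData:

import java.util.*;

public class ComparingDemo {
    record Alias(String alias) {}

    public static void main(String[] args) {
        List<Alias> aliases = new ArrayList<>(List.of(new Alias("b"), new Alias("a")));

        // Before: an anonymous Comparator spelling out the key extraction.
        aliases.sort(new Comparator<Alias>() {
            @Override
            public int compare(Alias o1, Alias o2) {
                return o1.alias().compareTo(o2.alias());
            }
        });

        // After: the equivalent method-reference form used in the diff.
        aliases.sort(Comparator.comparing(Alias::alias));

        System.out.println(aliases); // [Alias[alias=a], Alias[alias=b]]
    }
}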

View File

@ -444,7 +444,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// now add the mappings
MapperService mapperService = indexService.mapperService();
try {
mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
mapperService.merge(mappings, MergeReason.MAPPING_UPDATE);
} catch (Exception e) {
removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
throw e;

View File

@ -144,7 +144,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
} catch (IOException e) {
throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e);
}
indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY, false);
indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY);
}
indices.put(action.getIndex(), indexService);
}

View File

@ -250,7 +250,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
}
dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE, false);
dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE);
} finally {
if (createdIndex != null) {

View File

@ -187,7 +187,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) {
MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService,
mapperRegistry, () -> null);
mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY);
}
} catch (Exception ex) {
// Wrap the inner exception so we have the index name in the exception message

View File

@ -147,7 +147,7 @@ public class MetaDataMappingService extends AbstractComponent {
// we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(indexMetaData, Collections.emptyList());
removeIndex = true;
indexService.mapperService().merge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
indexService.mapperService().merge(indexMetaData, MergeReason.MAPPING_RECOVERY);
}
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
@ -224,7 +224,7 @@ public class MetaDataMappingService extends AbstractComponent {
MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
indexMapperServices.put(index, mapperService);
// add mappings for all types, we need them for cross-type validation
mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY);
}
}
currentState = applyRequest(currentState, request, indexMapperServices);
@ -264,7 +264,7 @@ public class MetaDataMappingService extends AbstractComponent {
newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null);
if (existingMapper != null) {
// first, simulate: just call merge and ignore the result
existingMapper.merge(newMapper.mapping(), request.updateAllTypes());
existingMapper.merge(newMapper.mapping());
} else {
// TODO: can we find a better place for this validation?
// The reason this validation is here is that the mapper service doesn't learn about
@ -310,7 +310,7 @@ public class MetaDataMappingService extends AbstractComponent {
if (existingMapper != null) {
existingSource = existingMapper.mappingSource();
}
DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE);
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {

View File

@ -39,6 +39,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to
@ -205,12 +206,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}
/**
* Get the master node
*
* @return master node
* Returns the master node, or {@code null} if there is no master node
*/
@Nullable
public DiscoveryNode getMasterNode() {
return nodes.get(masterNodeId);
if (masterNodeId != null) {
return nodes.get(masterNodeId);
}
return null;
}
/**
@ -385,27 +388,20 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* Returns the changes comparing this nodes to the provided nodes.
*/
public Delta delta(DiscoveryNodes other) {
List<DiscoveryNode> removed = new ArrayList<>();
List<DiscoveryNode> added = new ArrayList<>();
final List<DiscoveryNode> removed = new ArrayList<>();
final List<DiscoveryNode> added = new ArrayList<>();
for (DiscoveryNode node : other) {
if (!this.nodeExists(node)) {
if (this.nodeExists(node) == false) {
removed.add(node);
}
}
for (DiscoveryNode node : this) {
if (!other.nodeExists(node)) {
if (other.nodeExists(node) == false) {
added.add(node);
}
}
DiscoveryNode previousMasterNode = null;
DiscoveryNode newMasterNode = null;
if (masterNodeId != null) {
if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
previousMasterNode = other.getMasterNode();
newMasterNode = getMasterNode();
}
}
return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed),
return new Delta(other.getMasterNode(), getMasterNode(), localNodeId, Collections.unmodifiableList(removed),
Collections.unmodifiableList(added));
}
@ -429,8 +425,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
public static class Delta {
private final String localNodeId;
private final DiscoveryNode previousMasterNode;
private final DiscoveryNode newMasterNode;
@Nullable private final DiscoveryNode previousMasterNode;
@Nullable private final DiscoveryNode newMasterNode;
private final List<DiscoveryNode> removed;
private final List<DiscoveryNode> added;
@ -448,13 +444,15 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}
public boolean masterNodeChanged() {
return newMasterNode != null;
return Objects.equals(newMasterNode, previousMasterNode) == false;
}
@Nullable
public DiscoveryNode previousMasterNode() {
return previousMasterNode;
}
@Nullable
public DiscoveryNode newMasterNode() {
return newMasterNode;
}
@ -476,51 +474,45 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
}
public String shortSummary() {
StringBuilder sb = new StringBuilder();
if (!removed() && masterNodeChanged()) {
if (newMasterNode.getId().equals(localNodeId)) {
// we are the master, no nodes were removed, so we are actually the first master
sb.append("new_master ").append(newMasterNode());
} else {
// we are not the master, so we just got this event. No nodes were removed, so it's not a *new* master
sb.append("detected_master ").append(newMasterNode());
final StringBuilder summary = new StringBuilder();
if (masterNodeChanged()) {
summary.append("master node changed {previous [");
if (previousMasterNode() != null) {
summary.append(previousMasterNode());
}
} else {
if (masterNodeChanged()) {
sb.append("master {new ").append(newMasterNode());
if (previousMasterNode() != null) {
sb.append(", previous ").append(previousMasterNode());
}
sb.append("}");
summary.append("], current [");
if (newMasterNode() != null) {
summary.append(newMasterNode());
}
if (removed()) {
if (masterNodeChanged()) {
sb.append(", ");
}
sb.append("removed {");
for (DiscoveryNode node : removedNodes()) {
sb.append(node).append(',');
}
sb.append("}");
summary.append("]}");
}
if (removed()) {
if (summary.length() > 0) {
summary.append(", ");
}
summary.append("removed {");
for (DiscoveryNode node : removedNodes()) {
summary.append(node).append(',');
}
summary.append("}");
}
if (added()) {
// don't print if there is one added, and it is us
if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) {
if (removed() || masterNodeChanged()) {
sb.append(", ");
if (summary.length() > 0) {
summary.append(", ");
}
sb.append("added {");
summary.append("added {");
for (DiscoveryNode node : addedNodes()) {
if (!node.getId().equals(localNodeId)) {
// don't print ourself
sb.append(node).append(',');
summary.append(node).append(',');
}
}
sb.append("}");
summary.append("}");
}
}
return sb.toString();
return summary.toString();
}
}
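
masterNodeChanged() now compares the two nullable nodes directly with Objects.equals, which treats two nulls as equal and never throws on a one-sided null; the old version instead encoded "no change" as a null newMasterNode. A minimal sketch of that null-safe change check, with a hypothetical Node record:

import java.util.Objects;

public class MasterChangeDemo {
    record Node(String id) {}

    // True when the master differs, including null -> node and node -> null transitions.
    static boolean masterNodeChanged(Node previous, Node current) {
        return Objects.equals(previous, current) == false;
    }

    public static void main(String[] args) {
        System.out.println(masterNodeChanged(null, new Node("n1"))); // true: a master was elected
        System.out.println(masterNodeChanged(new Node("n1"), null)); // true: the master left
        System.out.println(masterNodeChanged(null, null));           // false: still no master
        System.out.println(masterNodeChanged(new Node("n1"), new Node("n1"))); // false: unchanged
    }
}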

View File

@ -324,8 +324,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts) {
super.checkCompatibility(fieldType, conflicts);
CompletionFieldType other = (CompletionFieldType)fieldType;
if (preservePositionIncrements != other.preservePositionIncrements) {
@ -607,8 +607,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
this.maxInputLength = fieldMergeWith.maxInputLength;
}

View File

@ -219,8 +219,8 @@ public class DateFieldMapper extends FieldMapper {
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts) {
super.checkCompatibility(fieldType, conflicts);
DateFieldType other = (DateFieldType) fieldType;
if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) {
conflicts.add("mapper [" + name() + "] has different [format] values");
@ -472,8 +472,8 @@ public class DateFieldMapper extends FieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
final DateFieldMapper other = (DateFieldMapper) mergeWith;
if (other.ignoreMalformed.explicit()) {
this.ignoreMalformed = other.ignoreMalformed;
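
These doMerge overrides all follow the same convention once the updateAllTypes flag is gone: delegate to super.doMerge, then copy over only the settings the incoming mapper configured explicitly. A compact sketch of that "explicit wins" merge, with invented Setting and Mapper stand-ins for Elasticsearch's Explicit<Boolean> and FieldMapper:

public class ExplicitMergeDemo {
    // Hypothetical stand-in for Explicit<Boolean>: a value plus whether the user set it.
    record Setting(boolean value, boolean explicit) {}

    static class Mapper {
        Setting ignoreMalformed = new Setting(false, false);

        // Mirrors the doMerge convention: only explicitly configured values override.
        void doMerge(Mapper mergeWith) {
            if (mergeWith.ignoreMalformed.explicit()) {
                this.ignoreMalformed = mergeWith.ignoreMalformed;
            }
        }
    }

    public static void main(String[] args) {
        Mapper existing = new Mapper();
        Mapper update = new Mapper();
        update.ignoreMalformed = new Setting(true, true); // explicitly set by the user
        existing.doMerge(update);
        System.out.println(existing.ignoreMalformed); // Setting[value=true, explicit=true]
    }
}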

View File

@ -296,8 +296,8 @@ public class DocumentMapper implements ToXContentFragment {
return mapperService.getParentTypes().contains(type);
}
public DocumentMapper merge(Mapping mapping, boolean updateAllTypes) {
Mapping merged = this.mapping.merge(mapping, updateAllTypes);
public DocumentMapper merge(Mapping mapping) {
Mapping merged = this.mapping.merge(mapping);
return new DocumentMapper(mapperService, merged);
}

View File

@ -218,7 +218,7 @@ final class DocumentParser {
// We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where
// foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical.
// Here we just skip over the duplicates, but we merge them to ensure there are no conflicts.
newMapper.merge(previousMapper, false);
newMapper.merge(previousMapper);
continue;
}
previousMapper = newMapper;
@ -275,7 +275,7 @@ final class DocumentParser {
int lastIndex = parentMappers.size() - 1;
ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper);
if (merge) {
withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper, false);
withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper);
}
parentMappers.set(lastIndex, withNewMapper);
}

View File

@ -312,17 +312,16 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
}
@Override
public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
public FieldMapper merge(Mapper mergeWith) {
FieldMapper merged = clone();
merged.doMerge(mergeWith, updateAllTypes);
merged.doMerge(mergeWith);
return merged;
}
/**
* Merge changes coming from {@code mergeWith} in place.
* @param updateAllTypes TODO
*/
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
if (!this.getClass().equals(mergeWith.getClass())) {
String mergedType = mergeWith.getClass().getSimpleName();
if (mergeWith instanceof FieldMapper) {
@ -553,7 +552,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
if (mergeIntoMapper == null) {
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
} else {
FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false);
FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper);
newMappersBuilder.put(merged.simpleName(), merged); // override previous definition
}
}

View File

@ -165,17 +165,6 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
return CONTENT_TYPE;
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
if (strict) {
FieldNamesFieldType other = (FieldNamesFieldType)fieldType;
if (isEnabled() != other.isEnabled()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [enabled] across all types.");
}
}
}
public void setEnabled(boolean enabled) {
checkIfFrozen();
this.enabled = enabled;

View File

@ -24,7 +24,6 @@ import org.elasticsearch.common.regex.Regex;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
@ -39,37 +38,13 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
/** Full field name to field type */
final CopyOnWriteHashMap<String, MappedFieldType> fullNameToFieldType;
/** Full field name to types containing a mapping for this full name. */
final CopyOnWriteHashMap<String, Set<String>> fullNameToTypes;
/** Create a new empty instance. */
FieldTypeLookup() {
fullNameToFieldType = new CopyOnWriteHashMap<>();
fullNameToTypes = new CopyOnWriteHashMap<>();
}
private FieldTypeLookup(
CopyOnWriteHashMap<String, MappedFieldType> fullName,
CopyOnWriteHashMap<String, Set<String>> fullNameToTypes) {
private FieldTypeLookup(CopyOnWriteHashMap<String, MappedFieldType> fullName) {
this.fullNameToFieldType = fullName;
this.fullNameToTypes = fullNameToTypes;
}
private static CopyOnWriteHashMap<String, Set<String>> addType(CopyOnWriteHashMap<String, Set<String>> map, String key, String type) {
Set<String> types = map.get(key);
if (types == null) {
return map.copyAndPut(key, Collections.singleton(type));
} else if (types.contains(type)) {
// nothing to do
return map;
} else {
Set<String> newTypes = new HashSet<>(types.size() + 1);
newTypes.addAll(types);
newTypes.add(type);
assert newTypes.size() == types.size() + 1;
newTypes = Collections.unmodifiableSet(newTypes);
return map.copyAndPut(key, newTypes);
}
}
/**
@ -77,58 +52,41 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
* from the provided fields. If a field already exists, the field type will be updated
* to use the new mappers field type.
*/
public FieldTypeLookup copyAndAddAll(String type, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
public FieldTypeLookup copyAndAddAll(String type, Collection<FieldMapper> fieldMappers) {
Objects.requireNonNull(type, "type must not be null");
if (MapperService.DEFAULT_MAPPING.equals(type)) {
throw new IllegalArgumentException("Default mappings should not be added to the lookup");
}
CopyOnWriteHashMap<String, MappedFieldType> fullName = this.fullNameToFieldType;
CopyOnWriteHashMap<String, Set<String>> fullNameToTypes = this.fullNameToTypes;
for (FieldMapper fieldMapper : fieldMappers) {
MappedFieldType fieldType = fieldMapper.fieldType();
MappedFieldType fullNameFieldType = fullName.get(fieldType.name());
// is the update even legal?
checkCompatibility(type, fieldMapper, updateAllTypes);
if (fieldType.equals(fullNameFieldType) == false) {
if (fullNameFieldType == null) {
// introduction of a new field
fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType());
} else {
// modification of an existing field
checkCompatibility(fullNameFieldType, fieldType);
if (fieldType.equals(fullNameFieldType) == false) {
fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType());
}
}
fullNameToTypes = addType(fullNameToTypes, fieldType.name(), type);
}
return new FieldTypeLookup(fullName, fullNameToTypes);
}
private static boolean beStrict(String type, Set<String> types, boolean updateAllTypes) {
assert types.size() >= 1;
if (updateAllTypes) {
return false;
} else if (types.size() == 1 && types.contains(type)) {
// we are implicitly updating all types
return false;
} else {
return true;
}
return new FieldTypeLookup(fullName);
}
/**
* Checks if the given field type is compatible with an existing field type.
* An IllegalArgumentException is thrown in case of incompatibility.
* If updateAllTypes is true, only basic compatibility is checked.
*/
private void checkCompatibility(String type, FieldMapper fieldMapper, boolean updateAllTypes) {
MappedFieldType fieldType = fullNameToFieldType.get(fieldMapper.fieldType().name());
if (fieldType != null) {
List<String> conflicts = new ArrayList<>();
final Set<String> types = fullNameToTypes.get(fieldMapper.fieldType().name());
boolean strict = beStrict(type, types, updateAllTypes);
fieldType.checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().name() + "] conflicts with existing mapping in other types:\n" + conflicts.toString());
}
private void checkCompatibility(MappedFieldType existingFieldType, MappedFieldType newFieldType) {
List<String> conflicts = new ArrayList<>();
existingFieldType.checkCompatibility(newFieldType, conflicts);
if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Mapper for [" + newFieldType.name() + "] conflicts with existing mapping:\n" + conflicts.toString());
}
}
@ -137,15 +95,6 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
return fullNameToFieldType.get(field);
}
/** Get the set of types that have a mapping for the given field. */
public Set<String> getTypes(String field) {
Set<String> types = fullNameToTypes.get(field);
if (types == null) {
types = Collections.emptySet();
}
return types;
}
/**
* Returns a list of the full names matching a simple regex-like pattern against full name and index name.
*/
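
With the per-type bookkeeping removed, FieldTypeLookup reduces to a persistent map updated by copy-and-put: adding fields returns a new lookup and leaves the old one untouched for concurrent readers. A standalone sketch of that copy-on-write update, substituting an immutable java.util.Map for Elasticsearch's CopyOnWriteHashMap:

import java.util.HashMap;
import java.util.Map;

public class CopyOnWriteLookup {
    private final Map<String, String> fullNameToFieldType;

    CopyOnWriteLookup() {
        this.fullNameToFieldType = Map.of();
    }

    private CopyOnWriteLookup(Map<String, String> fullNameToFieldType) {
        this.fullNameToFieldType = fullNameToFieldType;
    }

    // Returns a new lookup containing the mapping; the receiver is never mutated,
    // so in-flight readers keep a consistent snapshot.
    CopyOnWriteLookup copyAndPut(String fullName, String fieldType) {
        Map<String, String> copy = new HashMap<>(fullNameToFieldType);
        copy.put(fullName, fieldType);
        return new CopyOnWriteLookup(Map.copyOf(copy));
    }

    String get(String fullName) {
        return fullNameToFieldType.get(fullName);
    }

    public static void main(String[] args) {
        CopyOnWriteLookup empty = new CopyOnWriteLookup();
        CopyOnWriteLookup withField = empty.copyAndPut("title", "text");
        System.out.println(empty.get("title"));     // null: the old snapshot is unchanged
        System.out.println(withField.get("title")); // text
    }
}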

View File

@ -142,8 +142,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
GeoPointFieldMapper gpfmMergeWith = (GeoPointFieldMapper) mergeWith;
if (gpfmMergeWith.ignoreMalformed.explicit()) {
this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;

View File

@ -309,8 +309,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts) {
super.checkCompatibility(fieldType, conflicts);
GeoShapeFieldType other = (GeoShapeFieldType)fieldType;
// prevent user from changing strategies
if (strategyName().equals(other.strategyName()) == false) {
@ -334,15 +334,6 @@ public class GeoShapeFieldMapper extends FieldMapper {
if (precisionInMeters() != other.precisionInMeters()) {
conflicts.add("mapper [" + name() + "] has different [precision]");
}
if (strict) {
if (orientation() != other.orientation()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [orientation] across all types.");
}
if (distanceErrorPct() != other.distanceErrorPct()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [distance_error_pct] across all types.");
}
}
}
private static int getLevels(int treeLevels, double precisionInMeters, int defaultLevels, boolean geoHash) {
@ -511,8 +502,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith;
if (gsfm.coerce.explicit()) {

View File

@ -314,7 +314,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
// do nothing here, no merging, but also no exception
}
}

View File

@ -189,7 +189,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
// nothing to do
}

View File

@ -390,8 +390,8 @@ public class IpFieldMapper extends FieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
IpFieldMapper other = (IpFieldMapper) mergeWith;
if (other.ignoreMalformed.explicit()) {
this.ignoreMalformed = other.ignoreMalformed;

View File

@ -187,8 +187,8 @@ public final class KeywordFieldMapper extends FieldMapper {
}
@Override
public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts, boolean strict) {
super.checkCompatibility(otherFT, conflicts, strict);
public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts) {
super.checkCompatibility(otherFT, conflicts);
KeywordFieldType other = (KeywordFieldType) otherFT;
if (Objects.equals(normalizer, other.normalizer) == false) {
conflicts.add("mapper [" + name() + "] has different [normalizer]");
@ -352,8 +352,8 @@ public final class KeywordFieldMapper extends FieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
this.ignoreAbove = ((KeywordFieldMapper) mergeWith).ignoreAbove;
}

View File

@ -157,7 +157,7 @@ public abstract class MappedFieldType extends FieldType {
* Only properties which must never change in an index are checked.
*/
public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
public void checkCompatibility(MappedFieldType other, List<String> conflicts) {
checkTypeName(other);
boolean indexed = indexOptions() != IndexOptions.NONE;
@ -202,27 +202,6 @@ public abstract class MappedFieldType extends FieldType {
if (Objects.equals(similarity(), other.similarity()) == false) {
conflicts.add("mapper [" + name() + "] has different [similarity]");
}
if (strict) {
if (omitNorms() != other.omitNorms()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [omit_norms] across all types.");
}
if (boost() != other.boost()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types.");
}
if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types.");
}
if (Objects.equals(searchQuoteAnalyzer(), other.searchQuoteAnalyzer()) == false) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_quote_analyzer] across all types.");
}
if (Objects.equals(nullValue(), other.nullValue()) == false) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [null_value] across all types.");
}
if (eagerGlobalOrdinals() != other.eagerGlobalOrdinals()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [eager_global_ordinals] across all types.");
}
}
}
public String name() {
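
checkCompatibility collects every conflict into a list instead of failing fast, so a caller can report all incompatibilities in a single exception, as FieldTypeLookup does above. A small sketch of that collect-then-report validation style, with invented field attributes:

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

public class ConflictCollectingDemo {
    record FieldType(String name, boolean indexed, String analyzer) {
        // Append a message per mismatch rather than throwing on the first one.
        void checkCompatibility(FieldType other, List<String> conflicts) {
            if (indexed != other.indexed) {
                conflicts.add("mapper [" + name + "] has different [index] values");
            }
            if (Objects.equals(analyzer, other.analyzer) == false) {
                conflicts.add("mapper [" + name + "] has different [analyzer]");
            }
        }
    }

    public static void main(String[] args) {
        FieldType existing = new FieldType("title", true, "standard");
        FieldType update = new FieldType("title", false, "english");
        List<String> conflicts = new ArrayList<>();
        existing.checkCompatibility(update, conflicts);
        if (conflicts.isEmpty() == false) {
            // Every problem surfaces at once, not just the first.
            System.out.println("Mapper for [title] conflicts with existing mapping:\n" + conflicts);
        }
    }
}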

View File

@ -175,7 +175,7 @@ public abstract class Mapper implements ToXContentFragment, Iterable<Mapper> {
/** Return the merge of {@code mergeWith} into this.
* Both {@code this} and {@code mergeWith} will be left unmodified. */
public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes);
public abstract Mapper merge(Mapper mergeWith);
/**
* Update the field type of this mapper. This is necessary because some mapping updates

View File

@ -215,7 +215,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
final Map<String, DocumentMapper> updatedEntries;
try {
// only update entries if needed
updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true, true);
updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
} catch (Exception e) {
logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
throw e;
@ -250,7 +250,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
return requireRefresh;
}
public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason, boolean updateAllTypes) {
public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason) {
Map<String, CompressedXContent> mappingSourcesCompressed = new LinkedHashMap<>(mappings.size());
for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
try {
@ -260,19 +260,18 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
}
internalMerge(mappingSourcesCompressed, reason, updateAllTypes);
internalMerge(mappingSourcesCompressed, reason);
}
public void merge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes) {
internalMerge(indexMetaData, reason, updateAllTypes, false);
public void merge(IndexMetaData indexMetaData, MergeReason reason) {
internalMerge(indexMetaData, reason, false);
}
public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
return internalMerge(Collections.singletonMap(type, mappingSource), reason, updateAllTypes).get(type);
public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) {
return internalMerge(Collections.singletonMap(type, mappingSource), reason).get(type);
}
private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes,
boolean onlyUpdateIfNeeded) {
private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean onlyUpdateIfNeeded) {
Map<String, CompressedXContent> map = new LinkedHashMap<>();
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
@ -285,10 +284,10 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
map.put(mappingMetaData.type(), mappingMetaData.source());
}
}
return internalMerge(map, reason, updateAllTypes);
return internalMerge(map, reason);
}
private synchronized Map<String, DocumentMapper> internalMerge(Map<String, CompressedXContent> mappings, MergeReason reason, boolean updateAllTypes) {
private synchronized Map<String, DocumentMapper> internalMerge(Map<String, CompressedXContent> mappings, MergeReason reason) {
DocumentMapper defaultMapper = null;
String defaultMappingSource = null;
@ -336,7 +335,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
}
return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason, updateAllTypes);
return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason);
}
static void validateTypeName(String type) {
@ -361,7 +360,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
List<DocumentMapper> documentMappers, MergeReason reason, boolean updateAllTypes) {
List<DocumentMapper> documentMappers, MergeReason reason) {
boolean hasNested = this.hasNested;
Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
FieldTypeLookup fieldTypes = this.fieldTypes;
@ -392,7 +391,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
DocumentMapper oldMapper = mappers.get(mapper.type());
DocumentMapper newMapper;
if (oldMapper != null) {
newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes);
newMapper = oldMapper.merge(mapper.mapping());
} else {
newMapper = mapper;
}
@ -403,12 +402,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers, fullPathObjectMappers, fieldTypes);
checkObjectsCompatibility(objectMappers, updateAllTypes, fullPathObjectMappers);
checkObjectsCompatibility(objectMappers, fullPathObjectMappers);
checkPartitionedIndexConstraints(newMapper);
// update lookup data-structures
// this will in particular make sure that the merged fields are compatible with other types
fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);
fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers);
for (ObjectMapper objectMapper : objectMappers) {
if (fullPathObjectMappers == this.fullPathObjectMappers) {
@ -575,14 +574,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
}
private static void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers, boolean updateAllTypes,
private static void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers,
Map<String, ObjectMapper> fullPathObjectMappers) {
for (ObjectMapper newObjectMapper : objectMappers) {
ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
if (existingObjectMapper != null) {
// simulate a merge and ignore the result, we are just interested
// in exceptions here
existingObjectMapper.merge(newObjectMapper, updateAllTypes);
existingObjectMapper.merge(newObjectMapper);
}
}
}
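
Dropping updateAllTypes removes a boolean that was threaded through every merge overload; the public entry points now all funnel into one private, synchronized internalMerge. A skeletal sketch of that funnel structure, with Elasticsearch's mapping types replaced by plain strings:

import java.util.LinkedHashMap;
import java.util.Map;

public class MergeFunnelDemo {
    enum MergeReason { MAPPING_UPDATE, MAPPING_RECOVERY }

    // Public entry points stay thin...
    public String merge(String type, String mappingSource, MergeReason reason) {
        Map<String, String> single = new LinkedHashMap<>();
        single.put(type, mappingSource);
        return internalMerge(single, reason).get(type);
    }

    public Map<String, String> merge(Map<String, String> mappings, MergeReason reason) {
        return internalMerge(mappings, reason);
    }

    // ...and all shared logic (and synchronization) lives in one place.
    private synchronized Map<String, String> internalMerge(Map<String, String> mappings, MergeReason reason) {
        Map<String, String> merged = new LinkedHashMap<>();
        for (Map.Entry<String, String> entry : mappings.entrySet()) {
            merged.put(entry.getKey(), entry.getValue() + " (merged via " + reason + ")");
        }
        return merged;
    }

    public static void main(String[] args) {
        MergeFunnelDemo service = new MergeFunnelDemo();
        System.out.println(service.merge("doc", "{...}", MergeReason.MAPPING_UPDATE));
    }
}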

View File

@ -84,9 +84,9 @@ public final class Mapping implements ToXContentFragment {
return (T) metadataMappersMap.get(clazz);
}
/** @see DocumentMapper#merge(Mapping, boolean) */
public Mapping merge(Mapping mergeWith, boolean updateAllTypes) {
RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes);
/** @see DocumentMapper#merge(Mapping) */
public Mapping merge(Mapping mergeWith) {
RootObjectMapper mergedRoot = root.merge(mergeWith.root);
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap);
for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) {
MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass());
@ -94,7 +94,7 @@ public final class Mapping implements ToXContentFragment {
if (mergeInto == null) {
merged = metaMergeWith;
} else {
merged = mergeInto.merge(metaMergeWith, updateAllTypes);
merged = mergeInto.merge(metaMergeWith);
}
mergedMetaDataMappers.put(merged.getClass(), merged);
}
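
Mapping#merge pairs metadata mappers by their concrete class: a class already present merges with the incoming instance, an unknown class is taken as-is. A reduced sketch of that merge-by-class-key loop, with a toy Meta interface standing in for MetadataFieldMapper:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MergeByClassDemo {
    interface Meta {
        Meta merge(Meta mergeWith);
    }

    record RoutingMeta(boolean required) implements Meta {
        public Meta merge(Meta mergeWith) { return mergeWith; } // the incoming instance wins
    }

    record SourceMeta(boolean enabled) implements Meta {
        public Meta merge(Meta mergeWith) { return mergeWith; }
    }

    static Map<Class<? extends Meta>, Meta> merge(Map<Class<? extends Meta>, Meta> existing, List<Meta> incoming) {
        Map<Class<? extends Meta>, Meta> merged = new HashMap<>(existing);
        for (Meta metaMergeWith : incoming) {
            Meta mergeInto = merged.get(metaMergeWith.getClass());
            // Unknown class: take it as-is; known class: merge into the existing entry.
            Meta result = (mergeInto == null) ? metaMergeWith : mergeInto.merge(metaMergeWith);
            merged.put(result.getClass(), result);
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<Class<? extends Meta>, Meta> existing = new HashMap<>();
        existing.put(RoutingMeta.class, new RoutingMeta(false));
        System.out.println(merge(existing, List.of(new RoutingMeta(true), new SourceMeta(true))));
    }
}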

View File

@ -67,7 +67,7 @@ public abstract class MetadataFieldMapper extends FieldMapper {
public abstract void postParse(ParseContext context) throws IOException;
@Override
public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes);
public MetadataFieldMapper merge(Mapper mergeWith) {
return (MetadataFieldMapper) super.merge(mergeWith);
}
}

View File

@ -1019,8 +1019,8 @@ public class NumberFieldMapper extends FieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
NumberFieldMapper other = (NumberFieldMapper) mergeWith;
if (other.ignoreMalformed.explicit()) {
this.ignoreMalformed = other.ignoreMalformed;

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import java.io.IOException;
import java.util.ArrayList;
@ -139,7 +138,7 @@ public class ObjectMapper extends Mapper implements Cloneable {
Mapper mapper = builder.build(context);
Mapper existing = mappers.get(mapper.simpleName());
if (existing != null) {
mapper = existing.merge(mapper, false);
mapper = existing.merge(mapper);
}
mappers.put(mapper.simpleName(), mapper);
}
@ -426,17 +425,17 @@ public class ObjectMapper extends Mapper implements Cloneable {
}
@Override
public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
public ObjectMapper merge(Mapper mergeWith) {
if (!(mergeWith instanceof ObjectMapper)) {
throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
}
ObjectMapper mergeWithObject = (ObjectMapper) mergeWith;
ObjectMapper merged = clone();
merged.doMerge(mergeWithObject, updateAllTypes);
merged.doMerge(mergeWithObject);
return merged;
}
protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) {
protected void doMerge(final ObjectMapper mergeWith) {
if (nested().isNested()) {
if (!mergeWith.nested().isNested()) {
throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested");
@ -459,7 +458,7 @@ public class ObjectMapper extends Mapper implements Cloneable {
merged = mergeWithMapper;
} else {
// root mappers can only exist here for backcompat, and are merged in Mapping
merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes);
merged = mergeIntoMapper.merge(mergeWithMapper);
}
putMapper(merged);
}
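
merge() never mutates the receiver: it clones, applies doMerge to the clone, and returns it, which is what lets callers simulate a merge purely to surface conflicts (as checkObjectsCompatibility does in MapperService). A stripped-down sketch of the clone-then-merge pattern:

public class CloneMergeDemo {
    static class ObjectMapper implements Cloneable {
        String name;
        boolean enabled;

        ObjectMapper(String name, boolean enabled) {
            this.name = name;
            this.enabled = enabled;
        }

        // Returns a merged copy; neither this nor mergeWith is modified.
        ObjectMapper merge(ObjectMapper mergeWith) {
            ObjectMapper merged = clone();
            merged.doMerge(mergeWith);
            return merged;
        }

        protected void doMerge(ObjectMapper mergeWith) {
            if (!name.equals(mergeWith.name)) {
                throw new IllegalArgumentException("can't merge [" + mergeWith.name + "] into [" + name + "]");
            }
            this.enabled = mergeWith.enabled;
        }

        @Override
        protected ObjectMapper clone() {
            try {
                return (ObjectMapper) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e);
            }
        }
    }

    public static void main(String[] args) {
        ObjectMapper original = new ObjectMapper("user", false);
        ObjectMapper merged = original.merge(new ObjectMapper("user", true));
        System.out.println(original.enabled + " -> " + merged.enabled); // false -> true
    }
}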

View File

@ -301,7 +301,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) {
throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
@ -310,7 +310,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
// update that does not explicitly configure the _parent field, so we
// ignore it.
if (fieldMergeWith.active()) {
super.doMerge(mergeWith, updateAllTypes);
super.doMerge(mergeWith);
}
}

View File

@ -131,7 +131,7 @@ public class ParsedDocument {
if (dynamicMappingsUpdate == null) {
dynamicMappingsUpdate = update;
} else {
dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false);
dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update);
}
}

View File

@ -256,29 +256,6 @@ public class RangeFieldMapper extends FieldMapper {
return rangeType.name;
}
@Override
public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
super.checkCompatibility(fieldType, conflicts, strict);
if (strict) {
RangeFieldType other = (RangeFieldType)fieldType;
if (this.rangeType != other.rangeType) {
conflicts.add("mapper [" + name()
+ "] is attempting to update from type [" + rangeType.name
+ "] to incompatible type [" + other.rangeType.name + "].");
}
if (this.rangeType == RangeType.DATE) {
if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) {
conflicts.add("mapper [" + name()
+ "] is used by multiple types. Set update_all_types to true to update [format] across all types.");
}
if (Objects.equals(dateTimeFormatter().locale(), other.dateTimeFormatter().locale()) == false) {
conflicts.add("mapper [" + name()
+ "] is used by multiple types. Set update_all_types to true to update [locale] across all types.");
}
}
}
}
public FormatDateTimeFormatter dateTimeFormatter() {
return dateTimeFormatter;
}
@ -416,8 +393,8 @@ public class RangeFieldMapper extends FieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
RangeFieldMapper other = (RangeFieldMapper) mergeWith;
if (other.coerce.explicit()) {
this.coerce = other.coerce;

View File

@ -268,13 +268,13 @@ public class RootObjectMapper extends ObjectMapper {
}
@Override
public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
return (RootObjectMapper) super.merge(mergeWith, updateAllTypes);
public RootObjectMapper merge(Mapper mergeWith) {
return (RootObjectMapper) super.merge(mergeWith);
}
@Override
protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(ObjectMapper mergeWith) {
super.doMerge(mergeWith);
RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
if (mergeWithObject.numericDetection.explicit()) {
this.numericDetection = mergeWithObject.numericDetection;

View File

@ -201,7 +201,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
// do nothing here, no merging, but also no exception
}
}

View File

@ -278,7 +278,7 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
// nothing to do
}

View File

@ -291,7 +291,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
List<String> conflicts = new ArrayList<>();
if (this.enabled != sourceMergeWith.enabled) {

View File

@ -212,31 +212,6 @@ public class TextFieldMapper extends FieldMapper {
fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize);
}
@Override
public void checkCompatibility(MappedFieldType other,
List<String> conflicts, boolean strict) {
super.checkCompatibility(other, conflicts, strict);
TextFieldType otherType = (TextFieldType) other;
if (strict) {
if (fielddata() != otherType.fielddata()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] "
+ "across all types.");
}
if (fielddataMinFrequency() != otherType.fielddataMinFrequency()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update "
+ "[fielddata_frequency_filter.min] across all types.");
}
if (fielddataMaxFrequency() != otherType.fielddataMaxFrequency()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update "
+ "[fielddata_frequency_filter.max] across all types.");
}
if (fielddataMinSegmentSize() != otherType.fielddataMinSegmentSize()) {
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update "
+ "[fielddata_frequency_filter.min_segment_size] across all types.");
}
}
}
public boolean fielddata() {
return fielddata;
}
@ -357,8 +332,8 @@ public class TextFieldMapper extends FieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
protected void doMerge(Mapper mergeWith) {
super.doMerge(mergeWith);
}
@Override

View File

@ -316,7 +316,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
}
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
protected void doMerge(Mapper mergeWith) {
// do nothing here, no merging, but also no exception
}
}

Some files were not shown because too many files have changed in this diff.