Merge remote-tracking branch 'origin/master' into feature/synced_flush
commit e6f5fb82f0
@@ -224,18 +224,19 @@ mvn test -Dtests.jvm.argline="-XX:HeapDumpPath=/path/to/heapdumps"
 
 Running backwards compatibility tests is disabled by default since it
 requires a release version of elasticsearch to be present on the test system.
-To run backwards compatibiilty tests untar or unzip a release and run the tests
+To run backwards compatibilty tests untar or unzip a release and run the tests
 with the following command:
 
 ---------------------------------------------------------------------------
-mvn test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch
+mvn test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch -Dtests.security.manager=false
 ---------------------------------------------------------------------------
 
+Note that backwards tests must be run with security manager disabled.
 If the elasticsearch release is placed under `./backwards/elasticsearch-x.y.z` the path
 can be omitted:
 
 ---------------------------------------------------------------------------
-mvn test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z
+mvn test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.security.manager=false
 ---------------------------------------------------------------------------
 
 To setup the bwc test environment execute the following steps (provided you are
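The two invocations above differ only in whether `-Dtests.bwc.path` is supplied. A minimal standalone sketch of the convention they describe (property names are taken from the commands above; the default-path logic is an assumption for illustration):

[source,java]
--------------------------------------------------
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class BwcPathSketch {
    public static void main(String[] args) {
        String version = System.getProperty("tests.bwc.version", "x.y.z");
        // when -Dtests.bwc.path is omitted, ./backwards/elasticsearch-<version> is assumed
        String path = System.getProperty("tests.bwc.path", "backwards/elasticsearch-" + version);
        Path release = Paths.get(path);
        System.out.println("expecting release at " + release.toAbsolutePath()
                + (Files.isDirectory(release) ? " (found)" : " (missing)"));
    }
}
--------------------------------------------------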
@@ -68,7 +68,7 @@ curl -XDELETE localhost:9200/_template/template_1
 
 [float]
 [[getting]]
-=== GETting templates
+=== Getting templates
 
 Index templates are identified by a name (in the above case
 `template_1`) and can be retrieved using the following:
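For readers on the Java API of the same vintage, the retrieval the renamed section documents looks roughly like this (a hedged sketch; the builder and accessor names are assumed from the 1.x client, and a connected cluster is required):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;

public class GetTemplateSketch {
    // assumes a connected 1.x Client; not runnable without a cluster
    static void printTemplates(Client client) {
        GetIndexTemplatesResponse response =
                client.admin().indices().prepareGetTemplates("template_1").get();
        for (IndexTemplateMetaData template : response.getIndexTemplates()) {
            System.out.println(template.name());
        }
    }
}
--------------------------------------------------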
@@ -157,39 +157,3 @@ for indices of that start with `te*`, source will still be enabled.
 Note, for mappings, the merging is "deep", meaning that specific
 object/property based mappings can easily be added/overridden on higher
 order templates, with lower order templates providing the basis.
-
-[float]
-[[config]]
-=== Config
-
-Index templates can also be placed within the config location
-(`path.conf`) under the `templates` directory (note, make sure to place
-them on all master eligible nodes). For example, a file called
-`template_1.json` can be placed under `config/templates` and it will be
-added if it matches an index. Here is a sample of the mentioned file:
-
-[source,js]
---------------------------------------------------
-{
-    "template_1" : {
-        "template" : "*",
-        "settings" : {
-            "index.number_of_shards" : 2
-        },
-        "mappings" : {
-            "_default_" : {
-                "_source" : {
-                    "enabled" : false
-                }
-            },
-            "type1" : {
-                "_all" : {
-                    "enabled" : false
-                }
-            }
-        }
-    }
-}
---------------------------------------------------
-
-*Please note that templates added this way will not appear in the `/_template/*` API request.*
@@ -520,3 +520,6 @@ Log messages are now truncated at 10,000 characters. This can be changed in the
 The `top_children` query has been removed in favour of the `has_child` query. The `top_children` query wasn't always faster
 than the `has_child` query and the `top_children` query was often inaccurate. The total hits and any aggregations in the
 same search request will likely be off if `top_children` was used.
+
+=== Removed file based index templates
+Index templates can no longer be configured on disk. Use the `_template` API instead.
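Since templates can no longer be dropped into `config/templates`, the file shown in the docs hunk above would instead be registered once through the API. A hedged Java sketch (builder names assumed from the 1.x client; a curl `PUT /_template/template_1` with the same JSON body is the equivalent over HTTP):

[source,java]
--------------------------------------------------
import org.elasticsearch.client.Client;

public class TemplateMigrationSketch {
    // assumes a connected Client; mirrors the old template_1.json shown above
    static void registerTemplate(Client client) {
        client.admin().indices().preparePutTemplate("template_1")
                .setTemplate("*") // the pattern the file-based template used
                .setSettings("{\"index.number_of_shards\": 2}")
                .addMapping("_default_", "{\"_default_\": {\"_source\": {\"enabled\": false}}}")
                .get();
    }
}
--------------------------------------------------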
@@ -642,19 +642,11 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
      * @param indicesOptions how the aliases or indices need to be resolved to concrete indices
      * @param aliasesOrIndices the aliases or indices to be resolved to concrete indices
      * @return the obtained concrete indices
-<<<<<<< HEAD
      * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options
      * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options
      * don't allow such a case.
      * @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided
      * indices options don't allow such a case.
-=======
-     * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options
-     * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options
-     * don't allow such a case.
-     * @throws ElasticsearchIllegalArgumentException if one of the aliases resolve to multiple indices and the provided
-     * indices options don't allow such a case.
->>>>>>> Add support for cluster state diffs
      */
     public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, IllegalArgumentException {
        if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) {
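The conflict resolution above keeps `IllegalArgumentException` (HEAD) over the incoming branch's `ElasticsearchIllegalArgumentException`. A hedged caller sketch of the contract the merged javadoc settles on (types from the surrounding code; the helper itself is hypothetical and needs a populated MetaData to run):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.indices.IndexMissingException;

public class ConcreteIndicesSketch {
    // hypothetical helper showing which exceptions the javadoc promises
    static String[] resolveOrEmpty(MetaData metaData, String... aliasesOrIndices) {
        try {
            return metaData.concreteIndices(IndicesOptions.strictExpandOpen(), aliasesOrIndices);
        } catch (IndexMissingException e) {
            return new String[0]; // strict options reject missing indices / empty resolution
        } catch (IllegalArgumentException e) {
            return new String[0]; // an alias fanned out to multiple indices
        }
    }
}
--------------------------------------------------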
@@ -300,22 +300,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 }
             }
 
-            // now add config level mappings
-            Path mappingsDir = environment.configFile().resolve("mappings");
-            if (Files.isDirectory(mappingsDir)) {
-                // first index level
-                Path indexMappingsDir = mappingsDir.resolve(request.index());
-                if (Files.isDirectory(indexMappingsDir)) {
-                    addMappings(mappings, indexMappingsDir);
-                }
-
-                // second is the _default mapping
-                Path defaultMappingsDir = mappingsDir.resolve("_default");
-                if (Files.isDirectory(defaultMappingsDir)) {
-                    addMappings(mappings, defaultMappingsDir);
-                }
-            }
-
             ImmutableSettings.Builder indexSettingsBuilder = settingsBuilder();
             // apply templates, here, in reverse order, since first ones are better matching
             for (int i = templates.size() - 1; i >= 0; i--) {
@@ -517,30 +501,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                 }
             }
 
-            // see if we have templates defined under config
-            final Path templatesDir = environment.configFile().resolve("templates");
-            if (Files.isDirectory(templatesDir)) {
-                try (DirectoryStream<Path> stream = Files.newDirectoryStream(templatesDir)) {
-                    for (Path templatesFile : stream) {
-                        if (Files.isRegularFile(templatesFile)) {
-                            XContentParser parser = null;
-                            try {
-                                final byte[] templatesData = Files.readAllBytes(templatesFile);
-                                parser = XContentHelper.createParser(templatesData, 0, templatesData.length);
-                                IndexTemplateMetaData template = IndexTemplateMetaData.Builder.fromXContent(parser, templatesFile.getFileName().toString());
-                                if (indexTemplateFilter.apply(request, template)) {
-                                    templates.add(template);
-                                }
-                            } catch (Exception e) {
-                                logger.warn("[{}] failed to read template [{}] from config", e, request.index(), templatesFile.toAbsolutePath());
-                            } finally {
-                                Releasables.closeWhileHandlingException(parser);
-                            }
-                        }
-                    }
-                }
-            }
-
             CollectionUtil.timSort(templates, new Comparator<IndexTemplateMetaData>() {
                 @Override
                 public int compare(IndexTemplateMetaData o1, IndexTemplateMetaData o2) {
@@ -99,6 +99,10 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
             if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) {
                 continue;
             }
+            if (bucketDocCount < bucketCountThresholds.getShardMinDocCount()) {
+                continue;
+            }
+
             if (spare == null) {
                 spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null);
             }
@@ -113,9 +117,7 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
             // Back at the central reducer these properties will be updated with
             // global stats
             spare.updateScore(termsAggFactory.getSignificanceHeuristic());
-            if (spare.subsetDf >= bucketCountThresholds.getShardMinDocCount()) {
-                spare = (SignificantStringTerms.Bucket) ordered.insertWithOverflow(spare);
-            }
+            spare = (SignificantStringTerms.Bucket) ordered.insertWithOverflow(spare);
         }
 
         final InternalSignificantTerms.Bucket[] list = new InternalSignificantTerms.Bucket[ordered.size()];
@@ -24,8 +24,8 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.terms.LongTermsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
 import org.elasticsearch.search.aggregations.reducers.Reducer;
@@ -82,11 +82,15 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator {
         BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue(size);
         SignificantLongTerms.Bucket spare = null;
         for (long i = 0; i < bucketOrds.size(); i++) {
+            final int docCount = bucketDocCount(i);
+            if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+                continue;
+            }
             if (spare == null) {
                 spare = new SignificantLongTerms.Bucket(0, 0, 0, 0, 0, null, formatter);
             }
             spare.term = bucketOrds.get(i);
-            spare.subsetDf = bucketDocCount(i);
+            spare.subsetDf = docCount;
             spare.subsetSize = subsetSize;
             spare.supersetDf = termsAggFactory.getBackgroundFrequency(spare.term);
             spare.supersetSize = supersetSize;
@@ -95,9 +99,7 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator {
             spare.updateScore(termsAggFactory.getSignificanceHeuristic());
 
             spare.bucketOrd = i;
-            if (spare.subsetDf >= bucketCountThresholds.getShardMinDocCount()) {
-                spare = (SignificantLongTerms.Bucket) ordered.insertWithOverflow(spare);
-            }
+            spare = (SignificantLongTerms.Bucket) ordered.insertWithOverflow(spare);
         }
 
         final InternalSignificantTerms.Bucket[] list = new InternalSignificantTerms.Bucket[ordered.size()];
@@ -24,8 +24,8 @@ import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.terms.StringTermsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
 import org.elasticsearch.search.aggregations.reducers.Reducer;
@@ -81,12 +81,17 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator {
         BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue(size);
         SignificantStringTerms.Bucket spare = null;
         for (int i = 0; i < bucketOrds.size(); i++) {
+            final int docCount = bucketDocCount(i);
+            if (docCount < bucketCountThresholds.getShardMinDocCount()) {
+                continue;
+            }
+
             if (spare == null) {
                 spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null);
             }
 
             bucketOrds.get(i, spare.termBytes);
-            spare.subsetDf = bucketDocCount(i);
+            spare.subsetDf = docCount;
             spare.subsetSize = subsetSize;
             spare.supersetDf = termsAggFactory.getBackgroundFrequency(spare.termBytes);
             spare.supersetSize = supersetSize;
@@ -97,9 +102,7 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator {
             spare.updateScore(termsAggFactory.getSignificanceHeuristic());
 
             spare.bucketOrd = i;
-            if (spare.subsetDf >= bucketCountThresholds.getShardMinDocCount()) {
-                spare = (SignificantStringTerms.Bucket) ordered.insertWithOverflow(spare);
-            }
+            spare = (SignificantStringTerms.Bucket) ordered.insertWithOverflow(spare);
         }
 
         final InternalSignificantTerms.Bucket[] list = new InternalSignificantTerms.Bucket[ordered.size()];
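All three aggregators above move the `shardMinDocCount` check from insert time to the top of the collection loop, so sub-threshold buckets never allocate a candidate or touch the priority queue. A standalone sketch of that pruning pattern (plain Java, not ES code; the data is made up):

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.List;

public class ShardMinDocCountSketch {
    public static void main(String[] args) {
        int[] bucketDocCounts = {1, 7, 0, 42, 3}; // per-ordinal doc counts (made up)
        long shardMinDocCount = 3;
        List<Integer> candidates = new ArrayList<>();
        for (int ord = 0; ord < bucketDocCounts.length; ord++) {
            final int docCount = bucketDocCounts[ord];
            if (docCount < shardMinDocCount) {
                continue; // pruned before any candidate is built, as in the change above
            }
            candidates.add(ord); // only qualifying ordinals reach the priority queue
        }
        System.out.println(candidates); // prints [1, 3, 4]
    }
}
--------------------------------------------------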
@@ -32,18 +32,6 @@ grant codeBase "file:${{java.ext.dirs}}/*" {
 
 grant {
 
-  // system jar resources
-  permission java.io.FilePermission "${java.home}${/}-", "read";
-
-  // paths used for running tests
-  // compiled classes
-  permission java.io.FilePermission "${project.basedir}${/}target${/}classes${/}-", "read";
-  permission java.io.FilePermission "${project.basedir}${/}target${/}test-classes${/}-", "read";
-  // read permission for lib sigar
-  permission java.io.FilePermission "${project.basedir}${/}lib${/}sigar${/}-", "read";
-  // mvn custom ./m2/repository for dependency jars
-  permission java.io.FilePermission "${m2.repository}${/}-", "read";
-
   permission java.nio.file.LinkPermission "symbolic";
   permission groovy.security.GroovyCodeSourcePermission "/groovy/script";
 
@@ -32,10 +32,6 @@ public class NativesTests extends ElasticsearchTestCase {
         if (Constants.MAC_OS_X) {
             assertFalse("Memory locking is not available on OS X platforms", Natives.LOCAL_MLOCKALL);
         }
-
-        if (Constants.WINDOWS) {
-            assertTrue(Natives.LOCAL_MLOCKALL);
-        }
     }
 
     @Test
@@ -1,111 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.mapper;
-
-import com.google.common.collect.ImmutableSet;
-
-import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.common.settings.ImmutableSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.json.JsonXContent;
-import org.elasticsearch.node.Node;
-import org.elasticsearch.node.NodeBuilder;
-import org.elasticsearch.test.ElasticsearchTestCase;
-
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Map;
-
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-
-public class FileBasedMappingsTests extends ElasticsearchTestCase {
-
-    private static final String NAME = FileBasedMappingsTests.class.getSimpleName();
-
-    public void testFileBasedMappings() throws Exception {
-        Path configDir = createTempDir();
-        Path mappingsDir = configDir.resolve("mappings");
-        Path indexMappings = mappingsDir.resolve("index").resolve("type.json");
-        Path defaultMappings = mappingsDir.resolve("_default").resolve("type.json");
-        try {
-            Files.createDirectories(indexMappings.getParent());
-            Files.createDirectories(defaultMappings.getParent());
-
-            try (OutputStream stream = Files.newOutputStream(indexMappings);
-                    XContentBuilder builder = new XContentBuilder(JsonXContent.jsonXContent, stream)) {
-                builder.startObject()
-                        .startObject("type")
-                            .startObject("properties")
-                                .startObject("f")
-                                    .field("type", "string")
-                                .endObject()
-                            .endObject()
-                        .endObject()
-                    .endObject();
-            }
-
-            try (OutputStream stream = Files.newOutputStream(defaultMappings);
-                    XContentBuilder builder = new XContentBuilder(JsonXContent.jsonXContent, stream)) {
-                builder.startObject()
-                        .startObject("type")
-                            .startObject("properties")
-                                .startObject("g")
-                                    .field("type", "string")
-                                .endObject()
-                            .endObject()
-                        .endObject()
-                    .endObject();
-            }
-
-            Settings settings = ImmutableSettings.builder()
-                    .put(ClusterName.SETTING, NAME)
-                    .put("node.name", NAME)
-                    .put("path.home", createTempDir())
-                    .put("path.conf", configDir.toAbsolutePath())
-                    .put("http.enabled", false)
-                    .build();
-
-            try (Node node = NodeBuilder.nodeBuilder().local(true).data(true).settings(settings).node()) {
-
-                assertAcked(node.client().admin().indices().prepareCreate("index").addMapping("type", "h", "type=string").get());
-                try {
-                    final GetMappingsResponse response = node.client().admin().indices().prepareGetMappings("index").get();
-                    assertTrue(response.mappings().toString(), response.mappings().containsKey("index"));
-                    MappingMetaData mappings = response.mappings().get("index").get("type");
-                    assertNotNull(mappings);
-                    Map<?, ?> properties = (Map<?, ?>) (mappings.getSourceAsMap().get("properties"));
-                    assertNotNull(properties);
-                    assertEquals(ImmutableSet.of("f", "g", "h"), properties.keySet());
-                } finally {
-                    // remove the index...
-                    assertAcked(node.client().admin().indices().prepareDelete("index"));
-                }
-            }
-        } finally {
-            IOUtils.rm(configDir);
-        }
-    }
-
-}
@@ -1,104 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.indices.template;
-
-import com.carrotsearch.randomizedtesting.LifecycleScope;
-import com.google.common.base.Charsets;
-
-import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
-import org.elasticsearch.common.io.Streams;
-import org.elasticsearch.common.settings.ImmutableSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.test.ElasticsearchIntegrationTest;
-import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
-import org.junit.Test;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.HashSet;
-import java.util.Locale;
-import java.util.Set;
-
-import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
-
-@ClusterScope(scope= Scope.TEST, numDataNodes =1)
-public class IndexTemplateFileLoadingTests extends ElasticsearchIntegrationTest {
-
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        ImmutableSettings.Builder settingsBuilder = ImmutableSettings.settingsBuilder();
-        settingsBuilder.put(super.nodeSettings(nodeOrdinal));
-
-        try {
-            Path directory = createTempDir();
-            settingsBuilder.put("path.conf", directory.toAbsolutePath());
-
-            Path templatesDir = directory.resolve("templates");
-            Files.createDirectory(templatesDir);
-
-            Path dst = templatesDir.resolve("template.json");
-            String templatePath = "/org/elasticsearch/indices/template/template" + randomInt(5) + ".json";
-            logger.info("Picking template path [{}]", templatePath);
-            // random template, one uses the 'setting.index.number_of_shards', the other 'settings.number_of_shards'
-            String template = Streams.copyToStringFromClasspath(templatePath);
-            Files.write(dst, template.getBytes(StandardCharsets.UTF_8));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-
-        return settingsBuilder.build();
-    }
-
-    @Override
-    protected int numberOfShards() {
-        //number of shards won't be set through index settings, the one from the index templates needs to be used
-        return -1;
-    }
-
-    @Override
-    protected int numberOfReplicas() {
-        //number of replicas won't be set through index settings, the one from the index templates needs to be used
-        return -1;
-    }
-
-    @Test
-    public void testThatLoadingTemplateFromFileWorks() throws Exception {
-        final int iters = scaledRandomIntBetween(1, 5);
-        Set<String> indices = new HashSet<>();
-        for (int i = 0; i < iters; i++) {
-            String indexName = "foo" + randomAsciiOfLengthBetween(0, 5).toLowerCase(Locale.ROOT);
-            if (indices.contains(indexName)) {
-                continue;
-            }
-            indices.add(indexName);
-            createIndex(indexName);
-            ensureYellow(); // ensuring yellow so the test fails faster if the template cannot be loaded
-
-            ClusterStateResponse stateResponse = client().admin().cluster().prepareState().setIndices(indexName).get();
-            assertThat(stateResponse.getState().getMetaData().indices().get(indexName).getNumberOfShards(), is(10));
-            assertThat(stateResponse.getState().getMetaData().indices().get(indexName).getNumberOfReplicas(), is(0));
-            assertThat(stateResponse.getState().getMetaData().indices().get(indexName).aliases().size(), equalTo(1));
-            String aliasName = indexName + "-alias";
-            assertThat(stateResponse.getState().getMetaData().indices().get(indexName).aliases().get(aliasName).alias(), equalTo(aliasName));
-        }
-    }
-}
@@ -441,7 +441,7 @@ public final class InternalTestCluster extends TestCluster {
 
         if (random.nextBoolean()) {
             builder.put(Translog.INDEX_TRANSLOG_FS_TYPE, RandomPicks.randomFrom(random, TranslogFile.Type.values()));
-            if (random.nextBoolean()) {
+            if (rarely(random)) {
                 builder.put(Translog.INDEX_TRANSLOG_SYNC_INTERVAL, 0); // 0 has special meaning to sync each op
             } else {
                 builder.put(Translog.INDEX_TRANSLOG_SYNC_INTERVAL, RandomInts.randomIntBetween(random, 100, 5000));
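`rarely` biases the choice so the special sync-every-op interval of 0 is exercised occasionally instead of half the time. A standalone sketch of the effect (the ~10% probability is an assumption about the test framework's default, not taken from this diff):

[source,java]
--------------------------------------------------
import java.util.Random;

public class TranslogIntervalSketch {
    static boolean rarely(Random random) {
        return random.nextInt(100) < 10; // assumed ~10% chance
    }

    public static void main(String[] args) {
        Random random = new Random();
        // 0 means "sync on every operation"; otherwise pick 100..5000 ms
        int interval = rarely(random) ? 0 : 100 + random.nextInt(4901);
        System.out.println("index.translog.sync_interval = " + interval);
    }
}
--------------------------------------------------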
@@ -25,8 +25,10 @@ import org.elasticsearch.bootstrap.ESPolicy;
 import org.elasticsearch.bootstrap.Security;
 import org.elasticsearch.common.io.PathUtils;
 
+import java.nio.file.Path;
 import java.security.Permissions;
 import java.security.Policy;
+import java.util.Objects;
 
 import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
 
@@ -48,9 +50,23 @@ class SecurityBootstrap {
         // install security manager if requested
         if (systemPropertyAsBoolean("tests.security.manager", false)) {
             try {
-                // initialize tmpdir the same exact way as bootstrap.
+                // initialize paths the same exact way as bootstrap.
                 Permissions perms = new Permissions();
-                Security.addPath(perms, PathUtils.get(System.getProperty("java.io.tmpdir")), "read,readlink,write,delete");
+                Path basedir = PathUtils.get(Objects.requireNonNull(System.getProperty("project.basedir"),
+                        "please set ${project.basedir} in pom.xml"));
+                // target/classes, target/test-classes
+                Security.addPath(perms, basedir.resolve("target").resolve("classes"), "read,readlink");
+                Security.addPath(perms, basedir.resolve("target").resolve("test-classes"), "read,readlink");
+                // lib/sigar
+                Security.addPath(perms, basedir.resolve("lib").resolve("sigar"), "read,readlink");
+                // .m2/repository
+                Path m2repoDir = PathUtils.get(Objects.requireNonNull(System.getProperty("m2.repository"),
+                        "please set ${m2.repository} in pom.xml"));
+                Security.addPath(perms, m2repoDir, "read,readlink");
+                // java.io.tmpdir
+                Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"),
+                        "please set ${java.io.tmpdir} in pom.xml"));
+                Security.addPath(perms, javaTmpDir, "read,readlink,write,delete");
                 Policy.setPolicy(new ESPolicy(perms));
                 System.setSecurityManager(new TestSecurityManager());
                 Security.selfTest();
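The new bootstrap fails fast with a clear message when the build forgot to define a path property, instead of silently granting nothing. A standalone sketch of that fail-fast lookup (class and method names are hypothetical):

[source,java]
--------------------------------------------------
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Objects;

public class RequiredPathSketch {
    static Path requiredPath(String property) {
        // mirrors the Objects.requireNonNull pattern in the hunk above
        return Paths.get(Objects.requireNonNull(System.getProperty(property),
                "please set ${" + property + "} in pom.xml"));
    }

    public static void main(String[] args) {
        System.setProperty("m2.repository", "/tmp/m2"); // stand-in value for the demo
        System.out.println(requiredPath("m2.repository"));
    }
}
--------------------------------------------------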