Merge remote-tracking branch 'es/master' into ccr
* es/master:
  Fix snapshot getting stuck in INIT state (#27214)
  Add an example of dynamic field names (#27255)
  #26260 Allow ip_range to accept CIDR notation (#27192)
  #27189 Fixed rounding of bounds in scaled float comparison (#27207)
  Add support for Gradle 4.3 (#27249)
  Fixes QueryStringQueryBuilderTests
  build: Fix setting the incorrect bwc version in mixed cluster qa module
  [Test] Fix QueryStringQueryBuilderTests.testExistsFieldQuery BWC
  Adjust assertions for sequence numbers BWC tests
  Do not create directories if repository is readonly (#26909)
  [Test] Fix InternalStatsTests
  [Test] Fix QueryStringQueryBuilderTests.testExistsFieldQuery
  Uses norms for exists query if enabled (#27237)
  Reinstate recommendation for ≥ 3 master-eligible nodes. (#27204)
commit e61e4b8571
@@ -131,9 +131,10 @@ class BuildPlugin implements Plugin<Project> {
                 throw new GradleException("${minGradle} or above is required to build elasticsearch")
             }
 
-            final GradleVersion maxGradle = GradleVersion.version('4.2')
-            if (currentGradleVersion >= maxGradle) {
-                throw new GradleException("${maxGradle} or above is not compatible with the elasticsearch build")
+            final GradleVersion gradle42 = GradleVersion.version('4.2')
+            final GradleVersion gradle43 = GradleVersion.version('4.3')
+            if (currentGradleVersion >= gradle42 && currentGradleVersion < gradle43) {
+                throw new GradleException("${currentGradleVersion} is not compatible with the elasticsearch build")
             }
 
             // enforce Java version
@@ -140,7 +140,9 @@ public class FsBlobContainer extends AbstractBlobContainer {
         Path targetPath = path.resolve(target);
         // If the target file exists then Files.move() behaviour is implementation specific
         // the existing file might be replaced or this method fails by throwing an IOException.
-        assert !Files.exists(targetPath);
+        if (Files.exists(targetPath)) {
+            throw new FileAlreadyExistsException("blob [" + targetPath + "] already exists, cannot overwrite");
+        }
         Files.move(sourcePath, targetPath, StandardCopyOption.ATOMIC_MOVE);
         IOUtils.fsync(path, true);
     }
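The change above replaces a bare assertion with an explicit existence check before the atomic rename, because `Files.move()` behaviour is implementation specific when the target already exists. A minimal standalone sketch of the same pattern using only the JDK (not Elasticsearch code; the directory `force()` call stands in for Lucene's `IOUtils.fsync` and only works on POSIX filesystems):

```java
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public class AtomicMoveSketch {
    // Refuse to overwrite, rename atomically, then fsync the containing directory
    // so the rename itself is durable after a crash.
    static void moveBlob(Path source, Path target) throws IOException {
        if (Files.exists(target)) {
            throw new FileAlreadyExistsException("blob [" + target + "] already exists, cannot overwrite");
        }
        Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
        try (FileChannel dir = FileChannel.open(target.getParent(), StandardOpenOption.READ)) {
            dir.force(true); // POSIX only: flushes the directory entry created by the rename
        }
    }
}
```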
@@ -39,10 +39,15 @@ public class FsBlobStore extends AbstractComponent implements BlobStore {
 
     private final int bufferSizeInBytes;
 
+    private final boolean readOnly;
+
     public FsBlobStore(Settings settings, Path path) throws IOException {
         super(settings);
         this.path = path;
-        Files.createDirectories(path);
+        this.readOnly = settings.getAsBoolean("readonly", false);
+        if (!this.readOnly) {
+            Files.createDirectories(path);
+        }
         this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes();
     }
 
@@ -80,7 +85,9 @@ public class FsBlobStore extends AbstractComponent implements BlobStore {
 
     private synchronized Path buildAndCreate(BlobPath path) throws IOException {
         Path f = buildPath(path);
-        Files.createDirectories(f);
+        if (!readOnly) {
+            Files.createDirectories(f);
+        }
         return f;
     }
 
@@ -23,7 +23,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.DocValuesFieldExistsQuery;
+import org.apache.lucene.search.NormsFieldExistsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.elasticsearch.common.settings.Settings;
@@ -280,10 +280,10 @@ public class TextFieldMapper extends FieldMapper {
 
         @Override
         public Query existsQuery(QueryShardContext context) {
-            if (hasDocValues()) {
-                return new DocValuesFieldExistsQuery(name());
-            } else {
+            if (omitNorms()) {
                 return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name()));
+            } else {
+                return new NormsFieldExistsQuery(name());
             }
         }
 
@@ -345,7 +345,9 @@ public class TextFieldMapper extends FieldMapper {
         if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
             Field field = new Field(fieldType().name(), value, fieldType());
             fields.add(field);
-            createFieldNamesField(context, fields);
+            if (fieldType().omitNorms()) {
+                createFieldNamesField(context, fields);
+            }
         }
     }
 
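With this change a `text` field answers `exists` queries from norms whenever norms are indexed, and only falls back to the `_field_names` term query when norms are omitted. A small self-contained sketch against Lucene 7.x (plain Lucene, not the Elasticsearch mapper; the class and field names below are made up for illustration) showing that `NormsFieldExistsQuery` matches exactly the documents that contain the field:

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NormsFieldExistsQuery;
import org.apache.lucene.store.RAMDirectory;

public class NormsExistsSketch {
    public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document withField = new Document();
            withField.add(new TextField("body", "hello world", Field.Store.NO)); // TextField keeps norms
            writer.addDocument(withField);
            writer.addDocument(new Document()); // a document without the "body" field
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            // Counts documents that have norms for "body", i.e. documents where the field exists.
            System.out.println(searcher.count(new NormsFieldExistsQuery("body"))); // prints 1
        }
    }
}
```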
@@ -109,7 +109,7 @@ public class ParsedStats extends ParsedAggregation implements Stats {
             builder.nullField(Fields.MIN);
             builder.nullField(Fields.MAX);
             builder.nullField(Fields.AVG);
-            builder.nullField(Fields.SUM);
+            builder.field(Fields.SUM, 0.0d);
         }
         otherStatsToXContent(builder, params);
         return builder;
@@ -425,6 +425,15 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                     removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
                 }
 
+                @Override
+                public void onNoLongerMaster(String source) {
+                    // We are no longer the master - we shouldn't try to do any cleanup
+                    // The new master will take care of it
+                    logger.warn("[{}] failed to create snapshot - no longer a master", snapshot.snapshot().getSnapshotId());
+                    userCreateSnapshotListener.onFailure(
+                        new SnapshotException(snapshot.snapshot(), "master changed during snapshot initialization"));
+                }
+
                 @Override
                 public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                     // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted
@@ -473,6 +482,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
             cleanupAfterError(e);
         }
 
+        public void onNoLongerMaster(String source) {
+            userCreateSnapshotListener.onFailure(e);
+        }
+
         private void cleanupAfterError(Exception exception) {
             if(snapshotCreated) {
                 try {
@@ -628,7 +641,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
     public void applyClusterState(ClusterChangedEvent event) {
         try {
             if (event.localNodeMaster()) {
-                if (event.nodesRemoved()) {
+                // We don't remove old master when master flips anymore. So, we need to check for change in master
+                if (event.nodesRemoved() || event.previousState().nodes().isLocalNodeElectedMaster() == false) {
                     processSnapshotsOnRemovedNodes(event);
                 }
                 if (event.routingTableChanged()) {
@@ -981,7 +995,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
      * @param listener listener to notify when snapshot information is removed from the cluster state
      */
     private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure,
-                                                @Nullable ActionListener<SnapshotInfo> listener) {
+                                                @Nullable CleanupAfterErrorListener listener) {
         clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() {
 
             @Override
@@ -1013,6 +1027,13 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 }
             }
 
+            @Override
+            public void onNoLongerMaster(String source) {
+                if (listener != null) {
+                    listener.onNoLongerMaster(source);
+                }
+            }
+
             @Override
             public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                 for (SnapshotCompletionListener listener : snapshotCompletionListeners) {
@@ -1183,9 +1204,16 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
                 if (completedSnapshot.equals(snapshot)) {
                     logger.debug("deleted snapshot completed - deleting files");
                     removeListener(this);
-                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() ->
-                        deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(),
-                            listener, true)
+                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
+                        try {
+                            deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(),
+                                listener, true);
+
+                        } catch (Exception ex) {
+                            logger.warn((Supplier<?>) () ->
+                                new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
+                        }
+                    }
                     );
                 }
             }
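Architecturally, these hunks thread a new `onNoLongerMaster` callback through the snapshot cluster-state update tasks, so that a node that loses mastership mid-operation fails the caller's listener instead of attempting cleanup that now belongs to the newly elected master. A hypothetical, heavily simplified sketch of that callback shape (the interface below is invented for illustration and is not the actual Elasticsearch API; only the method names mirror the diff):

```java
// Invented illustration of the callback pattern - not the real Elasticsearch listener interface.
interface SnapshotTaskListener {
    void clusterStateProcessed(String source);

    void onFailure(String source, Exception e);

    // New hook: invoked when the local node stopped being master while the task was in flight.
    // The old master must not clean up; the newly elected master owns that work.
    default void onNoLongerMaster(String source) {
        onFailure(source, new IllegalStateException("master changed during snapshot initialization"));
    }
}
```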
@@ -20,12 +20,14 @@ package org.elasticsearch.common.blobstore;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.repositories.ESBlobStoreTestCase;
 
 import java.io.IOException;
+import java.nio.file.Files;
 import java.nio.file.Path;
 
 @LuceneTestCase.SuppressFileSystems("ExtrasFS")
@@ -35,4 +37,39 @@ public class FsBlobStoreTests extends ESBlobStoreTestCase {
         Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
         return new FsBlobStore(settings, tempDir);
     }
+
+    public void testReadOnly() throws Exception {
+        Settings settings = Settings.builder().put("readonly", true).build();
+        Path tempDir = createTempDir();
+        Path path = tempDir.resolve("bar");
+
+        try (FsBlobStore store = new FsBlobStore(settings, path)) {
+            assertFalse(Files.exists(path));
+            BlobPath blobPath = BlobPath.cleanPath().add("foo");
+            store.blobContainer(blobPath);
+            Path storePath = store.path();
+            for (String d : blobPath) {
+                storePath = storePath.resolve(d);
+            }
+            assertFalse(Files.exists(storePath));
+        }
+
+        settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("readonly", false).build();
+        try (FsBlobStore store = new FsBlobStore(settings, path)) {
+            assertTrue(Files.exists(path));
+            BlobPath blobPath = BlobPath.cleanPath().add("foo");
+            BlobContainer container = store.blobContainer(blobPath);
+            Path storePath = store.path();
+            for (String d : blobPath) {
+                storePath = storePath.resolve(d);
+            }
+            assertTrue(Files.exists(storePath));
+            assertTrue(Files.isDirectory(storePath));
+
+            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+            writeBlob(container, "test", new BytesArray(data));
+            assertArrayEquals(readBlobFully(container, "test", data.length), data);
+            assertTrue(container.blobExists("test"));
+        }
+    }
 }
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.SnapshotsInProgress;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotMissingException;
+import org.elasticsearch.snapshots.SnapshotState;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.disruption.NetworkDisruption;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ * Tests snapshot operations during disruptions.
+ */
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false)
+@TestLogging("org.elasticsearch.snapshot:TRACE")
+public class SnapshotDisruptionIT extends AbstractDisruptionTestCase {
+
+    public void testDisruptionOnSnapshotInitialization() throws Exception {
+        final Settings settings = Settings.builder()
+            .put(DEFAULT_SETTINGS)
+            .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed
+            .build();
+        final String idxName = "test";
+        configureCluster(settings, 4, null, 2);
+        final List<String> allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(3);
+        final String dataNode = internalCluster().startDataOnlyNode();
+        ensureStableCluster(4);
+
+        createRandomIndex(idxName);
+
+        logger.info("--> creating repository");
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
+            .setType("fs").setSettings(Settings.builder()
+                .put("location", randomRepoPath())
+                .put("compress", randomBoolean())
+                .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+        // Writing an incompatible snapshot can cause this test to fail due to a race condition in repo initialization
+        // by the current master and the former master. It is not causing any issues in a real-life scenario, but
+        // might make this test fail. We are going to complete initialization of the snapshot to prevent these failures.
+        logger.info("--> initializing the repository");
+        assertEquals(SnapshotState.SUCCESS, client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
+            .setWaitForCompletion(true).setIncludeGlobalState(true).setIndices().get().getSnapshotInfo().state());
+
+        final String masterNode1 = internalCluster().getMasterName();
+        Set<String> otherNodes = new HashSet<>();
+        otherNodes.addAll(allMasterEligibleNodes);
+        otherNodes.remove(masterNode1);
+        otherNodes.add(dataNode);
+
+        NetworkDisruption networkDisruption =
+            new NetworkDisruption(new NetworkDisruption.TwoPartitions(Collections.singleton(masterNode1), otherNodes),
+                new NetworkDisruption.NetworkUnresponsive());
+        internalCluster().setDisruptionScheme(networkDisruption);
+
+        ClusterService clusterService = internalCluster().clusterService(masterNode1);
+        CountDownLatch disruptionStarted = new CountDownLatch(1);
+        clusterService.addListener(new ClusterStateListener() {
+            @Override
+            public void clusterChanged(ClusterChangedEvent event) {
+                SnapshotsInProgress snapshots = event.state().custom(SnapshotsInProgress.TYPE);
+                if (snapshots != null && snapshots.entries().size() > 0) {
+                    if (snapshots.entries().get(0).state() == SnapshotsInProgress.State.INIT) {
+                        // The snapshot started, we can start disruption so the INIT state will arrive at another master node
+                        logger.info("--> starting disruption");
+                        networkDisruption.startDisrupting();
+                        clusterService.removeListener(this);
+                        disruptionStarted.countDown();
+                    }
+                }
+            }
+        });
+
+        logger.info("--> starting snapshot");
+        ActionFuture<CreateSnapshotResponse> future = client(masterNode1).admin().cluster()
+            .prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(false).setIndices(idxName).execute();
+
+        logger.info("--> waiting for disruption to start");
+        assertTrue(disruptionStarted.await(1, TimeUnit.MINUTES));
+
+        logger.info("--> wait until the snapshot is done");
+        assertBusy(() -> {
+            SnapshotsInProgress snapshots = dataNodeClient().admin().cluster().prepareState().setLocal(true).get().getState()
+                .custom(SnapshotsInProgress.TYPE);
+            if (snapshots != null && snapshots.entries().size() > 0) {
+                logger.info("Current snapshot state [{}]", snapshots.entries().get(0).state());
+                fail("Snapshot is still running");
+            } else {
+                logger.info("Snapshot is no longer in the cluster state");
+            }
+        }, 1, TimeUnit.MINUTES);
+
+        logger.info("--> verify that snapshot was successful or no longer exists");
+        assertBusy(() -> {
+            try {
+                GetSnapshotsResponse snapshotsStatusResponse = dataNodeClient().admin().cluster().prepareGetSnapshots("test-repo")
+                    .setSnapshots("test-snap-2").get();
+                SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
+                assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
+                assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards());
+                assertEquals(0, snapshotInfo.failedShards());
+                logger.info("--> done verifying");
+            } catch (SnapshotMissingException exception) {
+                logger.info("--> snapshot doesn't exist");
+            }
+        }, 1, TimeUnit.MINUTES);
+
+        logger.info("--> stopping disrupting");
+        networkDisruption.stopDisrupting();
+        ensureStableCluster(4, masterNode1);
+        logger.info("--> done");
+
+        try {
+            future.get();
+        } catch (Exception ex) {
+            logger.info("--> got exception from hung master", ex);
+            Throwable cause = ex.getCause();
+            assertThat(cause, instanceOf(MasterNotDiscoveredException.class));
+            cause = cause.getCause();
+            assertThat(cause, instanceOf(Discovery.FailedToCommitClusterStateException.class));
+        }
+    }
+
+    private void createRandomIndex(String idxName) throws ExecutionException, InterruptedException {
+        assertAcked(prepareCreate(idxName, 0, Settings.builder().put("number_of_shards", between(1, 20))
+            .put("number_of_replicas", 0)));
+        logger.info("--> indexing some data");
+        final int numdocs = randomIntBetween(10, 100);
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
+        for (int i = 0; i < builders.length; i++) {
+            builders[i] = client().prepareIndex(idxName, "type1", Integer.toString(i)).setSource("field1", "bar " + i);
+        }
+        indexRandom(true, builders);
+    }
+}
@@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -87,7 +88,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
                 .bytes(),
             XContentType.JSON));
 
-        assertFieldNames(set("a"), doc);
+        assertFieldNames(Collections.emptySet(), doc);
     }
 
     public void testExplicitEnabled() throws Exception {
@@ -24,6 +24,7 @@ import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.NormsFieldExistsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.elasticsearch.Version;
@@ -113,6 +114,10 @@ public class ExistsQueryBuilderTests extends AbstractQueryTestCase<ExistsQueryBu
             assertThat(constantScoreQuery.getQuery(), instanceOf(DocValuesFieldExistsQuery.class));
             DocValuesFieldExistsQuery dvExistsQuery = (DocValuesFieldExistsQuery) constantScoreQuery.getQuery();
             assertEquals(field, dvExistsQuery.getField());
+        } else if (context.getQueryShardContext().getMapperService().fullName(field).omitNorms() == false) {
+            assertThat(constantScoreQuery.getQuery(), instanceOf(NormsFieldExistsQuery.class));
+            NormsFieldExistsQuery normsExistsQuery = (NormsFieldExistsQuery) constantScoreQuery.getQuery();
+            assertEquals(field, normsExistsQuery.getField());
         } else {
             assertThat(constantScoreQuery.getQuery(), instanceOf(TermQuery.class));
             TermQuery termQuery = (TermQuery) constantScoreQuery.getQuery();
@@ -31,6 +31,7 @@ import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.NormsFieldExistsQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
@@ -45,6 +46,7 @@ import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -803,17 +805,20 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
         QueryShardContext context = createShardContext();
         QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder(STRING_FIELD_NAME + ":*");
         Query query = queryBuilder.toQuery(context);
-        Query expected;
         if (getCurrentTypes().length > 0) {
-            expected = new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME)));
+            if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0)
+                    && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) {
+                assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME))));
+            } else {
+                assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME)))));
+            }
         } else {
-            expected = new MatchNoDocsQuery();
+            assertThat(query, equalTo(new MatchNoDocsQuery()));
         }
-        assertThat(query, equalTo(expected));
 
         queryBuilder = new QueryStringQueryBuilder("*:*");
         query = queryBuilder.toQuery(context);
-        expected = new MatchAllDocsQuery();
+        Query expected = new MatchAllDocsQuery();
         assertThat(query, equalTo(expected));
 
         queryBuilder = new QueryStringQueryBuilder("*");
@@ -26,6 +26,7 @@ import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocValuesFieldExistsQuery;
 import org.apache.lucene.search.IndexOrDocValuesQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.NormsFieldExistsQuery;
 import org.apache.lucene.search.PointRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
@@ -129,6 +130,9 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase<RangeQueryBuil
         if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0)
                 && context.mapperService().fullName(queryBuilder.fieldName()).hasDocValues()) {
             expectedQuery = new ConstantScoreQuery(new DocValuesFieldExistsQuery(queryBuilder.fieldName()));
+        } else if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0)
+                && context.mapperService().fullName(queryBuilder.fieldName()).omitNorms() == false) {
+            expectedQuery = new ConstantScoreQuery(new NormsFieldExistsQuery(queryBuilder.fieldName()));
         } else {
             expectedQuery = new ConstantScoreQuery(new TermQuery(new Term(FieldNamesFieldMapper.NAME, queryBuilder.fieldName())));
         }
@@ -563,6 +563,20 @@ to set the index that the document will be indexed into:
 --------------------------------------------------
 // NOTCONSOLE
 
+Dynamic field names are also supported. This example sets the field named after the
+value of `service` to the value of the field `code`:
+
+[source,js]
+--------------------------------------------------
+{
+  "set": {
+    "field": "{{service}}",
+    "value": "{{code}}"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
 [[handling-failure-in-pipelines]]
 == Handling Failures in Pipelines
 
@@ -156,6 +156,13 @@ discovery.zen.minimum_master_nodes: 2 <1>
 ----------------------------
 <1> Defaults to `1`.
 
+To be able to remain available when one of the master-eligible nodes fails,
+clusters should have at least three master-eligible nodes, with
+`minimum_master_nodes` set accordingly. A <<rolling-upgrades,rolling upgrade>>,
+performed without any downtime, also requires at least three master-eligible
+nodes to avoid the possibility of data loss if a network split occurs while the
+upgrade is in progress.
+
 This setting can also be changed dynamically on a live cluster with the
 <<cluster-update-settings,cluster update settings API>>:
 
@@ -42,6 +42,7 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.geo.ShapeRelation;
 import org.elasticsearch.common.joda.DateMathParser;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
@@ -57,6 +58,7 @@ import org.joda.time.DateTimeZone;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -353,7 +355,8 @@ public class RangeFieldMapper extends FieldMapper {
             range = context.parseExternalValue(Range.class);
         } else {
             XContentParser parser = context.parser();
-            if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+            final XContentParser.Token start = parser.currentToken();
+            if (start == XContentParser.Token.START_OBJECT) {
                 RangeFieldType fieldType = fieldType();
                 RangeType rangeType = fieldType.rangeType;
                 String fieldName = null;
@@ -393,6 +396,8 @@ public class RangeFieldMapper extends FieldMapper {
                     }
                 }
                 range = new Range(rangeType, from, to, includeFrom, includeTo);
+            } else if (fieldType().rangeType == RangeType.IP && start == XContentParser.Token.VALUE_STRING) {
+                range = parseIpRangeFromCidr(parser);
             } else {
                 throw new MapperParsingException("error parsing field ["
                     + name() + "], expected an object but got " + parser.currentName());
@@ -435,6 +440,23 @@ public class RangeFieldMapper extends FieldMapper {
         }
     }
 
+    private static Range parseIpRangeFromCidr(final XContentParser parser) throws IOException {
+        final Tuple<InetAddress, Integer> cidr = InetAddresses.parseCidr(parser.text());
+        // create the lower value by zeroing out the host portion, upper value by filling it with all ones.
+        byte[] lower = cidr.v1().getAddress();
+        byte[] upper = lower.clone();
+        for (int i = cidr.v2(); i < 8 * lower.length; i++) {
+            int m = 1 << 7 - (i & 7);
+            lower[i >> 3] &= ~m;
+            upper[i >> 3] |= m;
+        }
+        try {
+            return new Range(RangeType.IP, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper), true, true);
+        } catch (UnknownHostException bogus) {
+            throw new AssertionError(bogus);
+        }
+    }
+
     /** Enum defining the type of range */
     public enum RangeType {
         IP("ip_range") {
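The new `parseIpRangeFromCidr` turns a CIDR block into an inclusive address range by clearing the host bits for the lower bound and setting them for the upper bound. A standalone, JDK-only sketch of the same bit manipulation (not Elasticsearch code; it assumes a plain `address/prefix` string instead of `InetAddresses.parseCidr`):

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

public class CidrBoundsSketch {
    // Returns {lower, upper}: the inclusive address range covered by a CIDR block.
    static InetAddress[] bounds(String cidr) throws UnknownHostException {
        String[] parts = cidr.split("/");                      // e.g. "192.168.0.0" and "16"
        byte[] lower = InetAddress.getByName(parts[0]).getAddress();
        int prefixLength = Integer.parseInt(parts[1]);
        byte[] upper = lower.clone();
        for (int i = prefixLength; i < 8 * lower.length; i++) {
            int m = 1 << (7 - (i & 7));                        // mask for the host bit inside its byte
            lower[i >> 3] &= ~m;                               // clear the host bit -> lower bound
            upper[i >> 3] |= m;                                // set the host bit  -> upper bound
        }
        return new InetAddress[] { InetAddress.getByAddress(lower), InetAddress.getByAddress(upper) };
    }

    public static void main(String[] args) throws UnknownHostException {
        InetAddress[] b = bounds("192.168.0.0/16");
        // Prints 192.168.0.0 - 192.168.255.255, matching the /16 case in the new IpRangeFieldMapperTests.
        System.out.println(b[0].getHostAddress() + " - " + b[1].getHostAddress());
    }
}
```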
@@ -256,19 +256,19 @@ public class ScaledFloatFieldMapper extends FieldMapper {
             failIfNotIndexed();
             Long lo = null;
             if (lowerTerm != null) {
-                double dValue = parse(lowerTerm);
+                double dValue = parse(lowerTerm) * scalingFactor;
                 if (includeLower == false) {
                     dValue = Math.nextUp(dValue);
                 }
-                lo = Math.round(Math.ceil(dValue * scalingFactor));
+                lo = Math.round(Math.ceil(dValue));
             }
             Long hi = null;
             if (upperTerm != null) {
-                double dValue = parse(upperTerm);
+                double dValue = parse(upperTerm) * scalingFactor;
                 if (includeUpper == false) {
                     dValue = Math.nextDown(dValue);
                 }
-                hi = Math.round(Math.floor(dValue * scalingFactor));
+                hi = Math.round(Math.floor(dValue));
             }
             Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues());
             if (boost() != 1f) {
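The fix multiplies by the scaling factor before applying `Math.nextUp`/`Math.nextDown`, so the one-ulp nudge happens on the scaled value rather than on the raw bound. A standalone sketch (not part of the commit) showing why the order matters for an exclusive upper bound of 0.1 with scaling factor 100, the same inputs exercised by the ScaledFloatFieldTypeTests hunk further down:

```java
public class ScaledBoundsSketch {
    public static void main(String[] args) {
        double scalingFactor = 100.0;
        double upper = 0.1; // exclusive upper bound of the range query

        // Old order: nudge the raw value down first, then scale and floor.
        long hiOld = Math.round(Math.floor(Math.nextDown(upper) * scalingFactor));
        // New order: scale first, then nudge the scaled value down and floor.
        long hiNew = Math.round(Math.floor(Math.nextDown(upper * scalingFactor)));

        System.out.println(hiOld); // 10 -> the scaled value 10 (i.e. 0.1 itself) is wrongly included
        System.out.println(hiNew); // 9  -> 0.1 is correctly excluded
    }
}
```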
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.network.InetAddresses;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+import org.junit.Before;
+
+import static org.hamcrest.Matchers.containsString;
+
+public class IpRangeFieldMapperTests extends ESSingleNodeTestCase {
+
+    private IndexService indexService;
+    private DocumentMapperParser parser;
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return pluginList(MapperExtrasPlugin.class);
+    }
+
+    @Before
+    public void setup() {
+        indexService = createIndex("test");
+        parser = indexService.mapperService().documentMapperParser();
+    }
+
+    public void testStoreCidr() throws Exception {
+        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field").field("type", "ip_range")
+            .field("store", true);
+        mapping = mapping.endObject().endObject().endObject().endObject();
+        DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.string()));
+        assertEquals(mapping.string(), mapper.mappingSource().toString());
+        final Map<String, String> cases = new HashMap<>();
+        cases.put("192.168.0.0/15", "192.169.255.255");
+        cases.put("192.168.0.0/16", "192.168.255.255");
+        cases.put("192.168.0.0/17", "192.168.127.255");
+        for (final Map.Entry<String, String> entry : cases.entrySet()) {
+            ParsedDocument doc =
+                mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder()
+                    .startObject()
+                    .field("field", entry.getKey())
+                    .endObject().bytes(),
+                    XContentType.JSON
+                ));
+            IndexableField[] fields = doc.rootDoc().getFields("field");
+            assertEquals(3, fields.length);
+            IndexableField dvField = fields[0];
+            assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType());
+            IndexableField pointField = fields[1];
+            assertEquals(2, pointField.fieldType().pointDimensionCount());
+            IndexableField storedField = fields[2];
+            assertTrue(storedField.fieldType().stored());
+            String strVal =
+                InetAddresses.toAddrString(InetAddresses.forString("192.168.0.0")) + " : " +
+                InetAddresses.toAddrString(InetAddresses.forString(entry.getValue()));
+            assertThat(storedField.stringValue(), containsString(strVal));
+        }
+    }
+}
@@ -124,6 +124,42 @@ public class ScaledFloatFieldTypeTests extends FieldTypeTestCase {
         IOUtils.close(reader, dir);
     }
 
+    public void testRoundsUpperBoundCorrectly() {
+        ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
+        ft.setName("scaled_float");
+        ft.setScalingFactor(100.0);
+        Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, null);
+        assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, null);
+        assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, null);
+        assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, null);
+        assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, null);
+        assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(null, 0.105, true, true, null);
+        assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
+    }
+
+    public void testRoundsLowerBoundCorrectly() {
+        ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
+        ft.setName("scaled_float");
+        ft.setScalingFactor(100.0);
+        Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, null);
+        assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, null);
+        assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, null);
+        assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, null);
+        assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, null);
+        assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, null);
+        assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
+    }
+
     public void testValueForSearch() {
         ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
         ft.setName("scaled_float");
@@ -39,17 +39,21 @@ final class HdfsBlobStore implements BlobStore {
     private final FileContext fileContext;
     private final HdfsSecurityContext securityContext;
     private final int bufferSize;
+    private final boolean readOnly;
     private volatile boolean closed;
 
-    HdfsBlobStore(FileContext fileContext, String path, int bufferSize) throws IOException {
+    HdfsBlobStore(FileContext fileContext, String path, int bufferSize, boolean readOnly) throws IOException {
         this.fileContext = fileContext;
         this.securityContext = new HdfsSecurityContext(fileContext.getUgi());
         this.bufferSize = bufferSize;
         this.root = execute(fileContext1 -> fileContext1.makeQualified(new Path(path)));
-        try {
-            mkdirs(root);
-        } catch (FileAlreadyExistsException ok) {
-            // behaves like Files.createDirectories
+        this.readOnly = readOnly;
+        if (!readOnly) {
+            try {
+                mkdirs(root);
+            } catch (FileAlreadyExistsException ok) {
+                // behaves like Files.createDirectories
+            }
         }
     }
 
@@ -80,12 +84,14 @@
 
     private Path buildHdfsPath(BlobPath blobPath) {
         final Path path = translateToHdfsPath(blobPath);
-        try {
-            mkdirs(path);
-        } catch (FileAlreadyExistsException ok) {
-            // behaves like Files.createDirectories
-        } catch (IOException ex) {
-            throw new ElasticsearchException("failed to create blob container", ex);
+        if (!readOnly) {
+            try {
+                mkdirs(path);
+            } catch (FileAlreadyExistsException ok) {
+                // behaves like Files.createDirectories
+            } catch (IOException ex) {
+                throw new ElasticsearchException("failed to create blob container", ex);
+            }
         }
         return path;
     }
@@ -106,7 +106,7 @@ public final class HdfsRepository extends BlobStoreRepository {
             SpecialPermission.check();
             FileContext fileContext = AccessController.doPrivileged((PrivilegedAction<FileContext>)
                 () -> createContext(uri, getMetadata().settings()));
-            blobStore = new HdfsBlobStore(fileContext, pathSetting, bufferSize);
+            blobStore = new HdfsBlobStore(fileContext, pathSetting, bufferSize, isReadOnly());
             logger.debug("Using file-system [{}] for URI [{}], path [{}]", fileContext.getDefaultFileSystem(), fileContext.getDefaultFileSystem().getUri(), pathSetting);
         } catch (IOException e) {
             throw new UncheckedIOException(String.format(Locale.ROOT, "Cannot create HDFS repository for uri [%s]", uri), e);
@@ -19,6 +19,20 @@
 
 package org.elasticsearch.repositories.hdfs;
 
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
+
+import javax.security.auth.Subject;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
@@ -29,22 +43,20 @@ import java.security.PrivilegedAction;
 import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collections;
-import javax.security.auth.Subject;
 
-import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.AbstractFileSystem;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
+import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
+import static org.elasticsearch.repositories.ESBlobStoreTestCase.readBlobFully;
+
 
 @ThreadLeakFilters(filters = {HdfsClientThreadLeakFilter.class})
 public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
 
     @Override
     protected BlobStore newBlobStore() throws IOException {
+        return new HdfsBlobStore(createTestContext(), "temp", 1024, false);
+    }
+
+    private FileContext createTestContext() {
         FileContext fileContext;
         try {
             fileContext = AccessController.doPrivileged((PrivilegedExceptionAction<FileContext>)
@@ -52,7 +64,7 @@ public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         } catch (PrivilegedActionException e) {
             throw new RuntimeException(e.getCause());
         }
-        return new HdfsBlobStore(fileContext, "temp", 1024);
+        return fileContext;
     }
 
     @SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)")
@@ -69,7 +81,7 @@ public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
             Class<?> clazz = Class.forName("org.apache.hadoop.security.User");
             ctor = clazz.getConstructor(String.class);
             ctor.setAccessible(true);
-        } catch (ClassNotFoundException | NoSuchMethodException e) {
+        } catch (ClassNotFoundException | NoSuchMethodException e) {
             throw new RuntimeException(e);
         }
 
@@ -98,4 +110,33 @@ public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
             }
         });
     }
+
+    public void testReadOnly() throws Exception {
+        FileContext fileContext = createTestContext();
+        // Constructor will not create dir if read only
+        HdfsBlobStore hdfsBlobStore = new HdfsBlobStore(fileContext, "dir", 1024, true);
+        FileContext.Util util = fileContext.util();
+        Path root = fileContext.makeQualified(new Path("dir"));
+        assertFalse(util.exists(root));
+        BlobPath blobPath = BlobPath.cleanPath().add("path");
+
+        // blobContainer() will not create path if read only
+        hdfsBlobStore.blobContainer(blobPath);
+        Path hdfsPath = root;
+        for (String p : blobPath) {
+            hdfsPath = new Path(hdfsPath, p);
+        }
+        assertFalse(util.exists(hdfsPath));
+
+        // if not read only, directory will be created
+        hdfsBlobStore = new HdfsBlobStore(fileContext, "dir", 1024, false);
+        assertTrue(util.exists(root));
+        BlobContainer container = hdfsBlobStore.blobContainer(blobPath);
+        assertTrue(util.exists(hdfsPath));
+
+        byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+        writeBlob(container, "foo", new BytesArray(data));
+        assertArrayEquals(readBlobFully(container, "foo", data.length), data);
+        assertTrue(container.blobExists("foo"));
+    }
 }
@@ -37,14 +37,14 @@ for (Version version : wireCompatVersions) {
     includePackaged = true
   }
 
-  /* This project runs the core REST tests against a 2 node cluster where one of
+  /* This project runs the core REST tests against a 4 node cluster where two of
      the nodes have a different minor. */
   Object extension = extensions.findByName("${baseName}#mixedClusterTestCluster")
-  configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) {
+  configure(extension) {
     distribution = 'zip'
     numNodes = 4
     numBwcNodes = 2
-    bwcVersion = project.wireCompatVersions[-1]
+    bwcVersion = version
   }
 
   Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {
@@ -192,8 +192,8 @@ public class IndexingIT extends ESRestTestCase {
             int numDocs = 0;
             final int numberOfInitialDocs = 1 + randomInt(5);
             logger.info("indexing [{}] docs initially", numberOfInitialDocs);
-            numDocs += indexDocs(index, numDocs, numberOfInitialDocs);
-            assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
+            numDocs += indexDocs(index, 0, numberOfInitialDocs);
+            assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : 0, newNodeClient);
             logger.info("allowing shards on all nodes");
             updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name"));
             ensureGreen();
@@ -204,7 +204,7 @@ public class IndexingIT extends ESRestTestCase {
             final int numberOfDocsAfterAllowingShardsOnAllNodes = 1 + randomInt(5);
             logger.info("indexing [{}] docs after allowing shards on all nodes", numberOfDocsAfterAllowingShardsOnAllNodes);
             numDocs += indexDocs(index, numDocs, numberOfDocsAfterAllowingShardsOnAllNodes);
-            assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
+            assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : 0, newNodeClient);
             Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get();
             logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName());
             updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName()));
@@ -214,7 +214,7 @@ public class IndexingIT extends ESRestTestCase {
             logger.info("indexing [{}] docs after moving primary", numberOfDocsAfterMovingPrimary);
             numDocsOnNewPrimary += indexDocs(index, numDocs, numberOfDocsAfterMovingPrimary);
             numDocs += numberOfDocsAfterMovingPrimary;
-            assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
+            assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : numDocsOnNewPrimary, newNodeClient);
             /*
              * Dropping the number of replicas to zero, and then increasing it to one triggers a recovery thus exercising any BWC-logic in
              * the recovery code.
@@ -233,7 +233,7 @@ public class IndexingIT extends ESRestTestCase {
             for (Shard shard : buildShards(index, nodes, newNodeClient)) {
                 assertCount(index, "_only_nodes:" + shard.node.nodeName, numDocs);
             }
-            assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
+            assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : numDocsOnNewPrimary, newNodeClient);
         }
     }
 
@@ -142,7 +142,7 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
         }
     }
 
-    private void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException {
+    protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException {
         try (InputStream stream = bytesArray.streamInput()) {
             container.writeBlob(blobName, stream, bytesArray.length());
         }
@@ -78,7 +78,7 @@ public abstract class ESBlobStoreTestCase extends ESTestCase {
         return data;
     }
 
-    private static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException {
+    protected static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException {
         try (InputStream stream = bytesArray.streamInput()) {
             container.writeBlob(blobName, stream, bytesArray.length());
         }