listener) {
requireNonNull(listener, "listener cannot be null");
requireNonNull(location, "location cannot be null");
if (lastRefreshedLocation != null && lastRefreshedLocation.compareTo(location) >= 0) {
// Location already visible, just call the listener
listener.accept(false);
- return;
+ return true;
}
synchronized (this) {
if (refreshListeners == null) {
@@ -85,12 +86,13 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
if (refreshListeners.size() < getMaxRefreshListeners.getAsInt()) {
// We have a free slot so register the listener
refreshListeners.add(new Tuple<>(location, listener));
- return;
+ return false;
}
}
// No free slot so force a refresh and call the listener in this thread
forceRefresh.run();
listener.accept(true);
+ return true;
}
/**
@@ -135,14 +137,14 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
*/
return;
}
- // First check if we've actually moved forward. If not then just bail immediately.
- assert lastRefreshedLocation == null || currentRefreshLocation.compareTo(lastRefreshedLocation) >= 0;
- if (lastRefreshedLocation != null && currentRefreshLocation.compareTo(lastRefreshedLocation) == 0) {
- return;
- }
/*
* Set the lastRefreshedLocation so listeners that come in for locations before that will just execute inline without messing
- * around with refreshListeners or synchronizing at all.
+ * around with refreshListeners or synchronizing at all. Note that it is not safe for us to abort early if we haven't advanced the
+ * position here because we set and read lastRefreshedLocation outside of a synchronized block. We do that so that waiting for a
+ * refresh that has already passed is just a volatile read but the cost is that any check whether or not we've advanced the
+ * position will introduce a race between adding the listener and the position check. We could work around this by moving this
+ * assignment into the synchronized block below and double checking lastRefreshedLocation in addOrNotify's synchronized block but
+ * that doesn't seem worth it given that we already skip this process early if there aren't any listeners to iterate.
*/
lastRefreshedLocation = currentRefreshLocation;
/*
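For context, a minimal sketch of the pattern the comment above describes (assuming a simplified listener type and plain long positions instead of Translog.Location, so this is not the actual RefreshListeners class): a volatile "last refreshed" position serves the lock-free fast path, while registration and firing of waiting listeners happen under the lock.

```java
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

class RefreshWaiters {
    private static final class Waiter {
        final long position;
        final Consumer<Boolean> listener;
        Waiter(long position, Consumer<Boolean> listener) {
            this.position = position;
            this.listener = listener;
        }
    }

    private volatile long lastRefreshedPosition = -1;        // read without locking (fast path)
    private final List<Waiter> waiters = new ArrayList<>();  // guarded by "this"

    /** Returns true if the listener fired inline because the position is already visible. */
    boolean addOrNotify(long position, Consumer<Boolean> listener) {
        if (position <= lastRefreshedPosition) {
            listener.accept(false); // already refreshed, no registration needed
            return true;
        }
        synchronized (this) {
            waiters.add(new Waiter(position, listener));
        }
        return false;
    }

    void afterRefresh(long refreshedUpTo) {
        // Assigning the volatile outside the synchronized block keeps the fast path cheap,
        // at the cost of the race discussed in the comment above.
        lastRefreshedPosition = refreshedUpTo;
        synchronized (this) {
            for (Iterator<Waiter> it = waiters.iterator(); it.hasNext(); ) {
                Waiter w = it.next();
                if (w.position <= refreshedUpTo) {
                    it.remove();
                    w.listener.accept(false); // false: caller did not have to force a refresh
                }
            }
        }
    }
}
```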
diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java
index e22f684637e..e35c95ae1f0 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java
@@ -59,16 +59,16 @@ public final class ShadowIndexShard extends IndexShard {
/**
* In addition to the regular accounting done in
- * {@link IndexShard#updateRoutingEntry(ShardRouting, boolean)},
+ * {@link IndexShard#updateRoutingEntry(ShardRouting)},
* if this shadow replica needs to be promoted to a primary, the shard is
* failed in order to allow a new primary to be re-allocated.
*/
@Override
- public void updateRoutingEntry(ShardRouting newRouting, boolean persistState) throws IOException {
+ public void updateRoutingEntry(ShardRouting newRouting) throws IOException {
if (newRouting.primary() == true) {// becoming a primary
throw new IllegalStateException("can't promote shard to primary");
}
- super.updateRoutingEntry(newRouting, persistState);
+ super.updateRoutingEntry(newRouting);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
index 62173f936c5..dbfcad6048a 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
@@ -131,16 +131,7 @@ final class StoreRecovery {
}
final void addIndices(RecoveryState.Index indexRecoveryStats, Directory target, Directory... sources) throws IOException {
- /*
- * TODO: once we upgraded to Lucene 6.1 use HardlinkCopyDirectoryWrapper to enable hardlinks if possible and enable it
- * in the security.policy:
- *
- * grant codeBase "${codebase.lucene-misc-6.1.0.jar}" {
- * // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
- * permission java.nio.file.LinkPermission "hard";
- * };
- * target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
- */
+ target = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(target, indexRecoveryStats),
new IndexWriterConfig(null)
.setCommitOnClose(false)
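The hard-link path enabled above relies on Lucene's HardlinkCopyDirectoryWrapper from lucene-misc (which needs the LinkPermission "hard" grant mentioned in the removed TODO). Below is a rough, self-contained sketch of what addIndices now does with the wrapped target directory; the paths and standalone setup are illustrative, not the actual StoreRecovery wiring.

```java
import java.nio.file.Paths;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.HardlinkCopyDirectoryWrapper;

public class HardlinkAddIndicesExample {
    public static void main(String[] args) throws Exception {
        try (Directory source = FSDirectory.open(Paths.get("/tmp/source-shard"));
             Directory rawTarget = FSDirectory.open(Paths.get("/tmp/target-shard"))) {
            // Files copied into the wrapper are hard-linked when the filesystem allows it,
            // falling back to a regular copy otherwise.
            Directory target = new HardlinkCopyDirectoryWrapper(rawTarget);
            try (IndexWriter writer = new IndexWriter(target,
                    new IndexWriterConfig(null).setCommitOnClose(false))) {
                writer.addIndexes(source); // links/copies the source segments into the target
                writer.commit();
            }
        }
    }
}
```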
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
index e0032fe503b..94337ecdbc5 100644
--- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
+++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
@@ -23,6 +23,8 @@ import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
@@ -49,7 +51,6 @@ import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.iterable.Iterables;
-import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardRepository;
import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
@@ -458,7 +459,9 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
}
if (latest >= 0) {
try {
- return new Tuple<>(indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest)), latest);
+ final BlobStoreIndexShardSnapshots shardSnapshots =
+ indexShardSnapshotsFormat.read(blobContainer, Integer.toString(latest));
+ return new Tuple<>(shardSnapshots, latest);
} catch (IOException e) {
logger.warn("failed to read index file [{}]", e, SNAPSHOT_INDEX_PREFIX + latest);
}
@@ -503,10 +506,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
*/
public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
super(snapshotId, Version.CURRENT, shardId);
- IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
- store = indexService.getShardOrNull(shardId.id()).store();
this.snapshotStatus = snapshotStatus;
-
+ store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
}
/**
@@ -788,8 +789,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
*/
public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
super(snapshotId, version, shardId, snapshotShardId);
- store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
this.recoveryState = recoveryState;
+ store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
}
/**
@@ -800,6 +801,25 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
try {
logger.debug("[{}] [{}] restoring to [{}] ...", snapshotId, repositoryName, shardId);
BlobStoreIndexShardSnapshot snapshot = loadSnapshot();
+
+ if (snapshot.indexFiles().size() == 1
+ && snapshot.indexFiles().get(0).physicalName().startsWith("segments_")
+ && snapshot.indexFiles().get(0).hasUnknownChecksum()) {
+ // If the shard has no documents, it will only contain a single segments_N file for the
+ // shard's snapshot. If we are restoring a snapshot created by a previous supported version,
+ // it is still possible that in that version, an empty shard has a segments_N file with an unsupported
+ // version (and no checksum), because we don't know the Lucene version to assign segments_N until we
+ // have written some data. Since the segments_N for an empty shard could have an incompatible Lucene
+ // version number and no checksum, even though the index itself is perfectly fine to restore, this
+ // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty
+ // shard anyway, we just create the empty shard here and then exit.
+ IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null)
+ .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+ .setCommitOnClose(true));
+ writer.close();
+ return;
+ }
+
SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
final Store.MetadataSnapshot recoveryTargetMetadata;
try {
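To illustrate the premise of the comment in the hunk above (a hypothetical standalone snippet, not part of the change): a freshly created empty Lucene index holds only a segments_N commit file, which is why the restore can simply recreate the empty shard instead of copying files from the snapshot.

```java
import java.nio.file.Files;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class EmptyShardExample {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Files.createTempDirectory("empty-shard"))) {
            new IndexWriter(dir, new IndexWriterConfig(null)
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
                    .setCommitOnClose(true)).close();
            for (String file : dir.listAll()) {
                System.out.println(file); // typically just "segments_1" (plus a write.lock)
            }
        }
    }
}
```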
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
index 60b7ec2112e..5bb0f728bc1 100644
--- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
+++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java
@@ -22,7 +22,6 @@ package org.elasticsearch.index.snapshots.blobstore;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
@@ -50,6 +49,8 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
* Information about snapshotted file
*/
public static class FileInfo {
+ private static final String UNKNOWN_CHECKSUM = "_na_";
+
private final String name;
private final ByteSizeValue partSize;
private final long partBytes;
@@ -207,27 +208,43 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
* @return true if the file in the store and this file have the same checksum and length
*/
public boolean isSame(FileInfo fileInfo) {
- if (numberOfParts != fileInfo.numberOfParts) return false;
- if (partBytes != fileInfo.partBytes) return false;
- if (!name.equals(fileInfo.name)) return false;
+ if (numberOfParts != fileInfo.numberOfParts) {
+ return false;
+ }
+ if (partBytes != fileInfo.partBytes) {
+ return false;
+ }
+ if (!name.equals(fileInfo.name)) {
+ return false;
+ }
if (partSize != null) {
- if (!partSize.equals(fileInfo.partSize)) return false;
+ if (!partSize.equals(fileInfo.partSize)) {
+ return false;
+ }
} else {
- if (fileInfo.partSize != null) return false;
+ if (fileInfo.partSize != null) {
+ return false;
+ }
}
return metadata.isSame(fileInfo.metadata);
}
- static final class Fields {
- static final String NAME = "name";
- static final String PHYSICAL_NAME = "physical_name";
- static final String LENGTH = "length";
- static final String CHECKSUM = "checksum";
- static final String PART_SIZE = "part_size";
- static final String WRITTEN_BY = "written_by";
- static final String META_HASH = "meta_hash";
+ /**
+ * Checks if the checksum for the file is unknown. This is only possible for an empty shard's
+ * segments_N file that was created by an older Lucene version.
+ */
+ public boolean hasUnknownChecksum() {
+ return metadata.checksum().equals(UNKNOWN_CHECKSUM);
}
+ static final String NAME = "name";
+ static final String PHYSICAL_NAME = "physical_name";
+ static final String LENGTH = "length";
+ static final String CHECKSUM = "checksum";
+ static final String PART_SIZE = "part_size";
+ static final String WRITTEN_BY = "written_by";
+ static final String META_HASH = "meta_hash";
+
/**
* Serializes file info into JSON
*
@@ -237,22 +254,22 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
*/
public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject();
- builder.field(Fields.NAME, file.name);
- builder.field(Fields.PHYSICAL_NAME, file.metadata.name());
- builder.field(Fields.LENGTH, file.metadata.length());
- if (file.metadata.checksum() != null) {
- builder.field(Fields.CHECKSUM, file.metadata.checksum());
+ builder.field(NAME, file.name);
+ builder.field(PHYSICAL_NAME, file.metadata.name());
+ builder.field(LENGTH, file.metadata.length());
+ if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) {
+ builder.field(CHECKSUM, file.metadata.checksum());
}
if (file.partSize != null) {
- builder.field(Fields.PART_SIZE, file.partSize.bytes());
+ builder.field(PART_SIZE, file.partSize.bytes());
}
if (file.metadata.writtenBy() != null) {
- builder.field(Fields.WRITTEN_BY, file.metadata.writtenBy());
+ builder.field(WRITTEN_BY, file.metadata.writtenBy());
}
if (file.metadata.hash() != null && file.metadata().hash().length > 0) {
- builder.field(Fields.META_HASH, file.metadata.hash());
+ builder.field(META_HASH, file.metadata.hash());
}
builder.endObject();
}
@@ -271,6 +288,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
String checksum = null;
ByteSizeValue partSize = null;
Version writtenBy = null;
+ String writtenByStr = null;
BytesRef metaHash = new BytesRef();
if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -278,19 +296,20 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
String currentFieldName = parser.currentName();
token = parser.nextToken();
if (token.isValue()) {
- if ("name".equals(currentFieldName)) {
+ if (NAME.equals(currentFieldName)) {
name = parser.text();
- } else if ("physical_name".equals(currentFieldName)) {
+ } else if (PHYSICAL_NAME.equals(currentFieldName)) {
physicalName = parser.text();
- } else if ("length".equals(currentFieldName)) {
+ } else if (LENGTH.equals(currentFieldName)) {
length = parser.longValue();
- } else if ("checksum".equals(currentFieldName)) {
+ } else if (CHECKSUM.equals(currentFieldName)) {
checksum = parser.text();
- } else if ("part_size".equals(currentFieldName)) {
+ } else if (PART_SIZE.equals(currentFieldName)) {
partSize = new ByteSizeValue(parser.longValue());
- } else if ("written_by".equals(currentFieldName)) {
- writtenBy = Lucene.parseVersionLenient(parser.text(), null);
- } else if ("meta_hash".equals(currentFieldName)) {
+ } else if (WRITTEN_BY.equals(currentFieldName)) {
+ writtenByStr = parser.text();
+ writtenBy = Lucene.parseVersionLenient(writtenByStr, null);
+ } else if (META_HASH.equals(currentFieldName)) {
metaHash.bytes = parser.binaryValue();
metaHash.offset = 0;
metaHash.length = metaHash.bytes.length;
@@ -305,6 +324,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
}
}
}
+
// Verify that file information is complete
if (name == null || Strings.validFileName(name) == false) {
throw new ElasticsearchParseException("missing or invalid file name [" + name + "]");
@@ -312,10 +332,29 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
throw new ElasticsearchParseException("missing or invalid physical file name [" + physicalName + "]");
} else if (length < 0) {
throw new ElasticsearchParseException("missing or invalid file length");
+ } else if (writtenBy == null) {
+ throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]");
+ } else if (checksum == null) {
+ if (physicalName.startsWith("segments_")
+ && writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) {
+ // its possible the checksum is null for segments_N files that belong to a shard with no data,
+ // so we will assign it _na_ for now and try to get the checksum from the file itself later
+ checksum = UNKNOWN_CHECKSUM;
+ } else {
+ throw new ElasticsearchParseException("missing checksum for name [" + name + "]");
+ }
}
return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize);
}
+ @Override
+ public String toString() {
+ return "[name: " + name +
+ ", numberOfParts: " + numberOfParts +
+ ", partSize: " + partSize +
+ ", partBytes: " + partBytes +
+ ", metadata: " + metadata + "]";
+ }
}
private final String snapshot;
@@ -424,26 +463,21 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
return totalSize;
}
- static final class Fields {
- static final String NAME = "name";
- static final String INDEX_VERSION = "index_version";
- static final String START_TIME = "start_time";
- static final String TIME = "time";
- static final String NUMBER_OF_FILES = "number_of_files";
- static final String TOTAL_SIZE = "total_size";
- static final String FILES = "files";
- }
-
- static final class ParseFields {
- static final ParseField NAME = new ParseField("name");
- static final ParseField INDEX_VERSION = new ParseField("index_version", "index-version");
- static final ParseField START_TIME = new ParseField("start_time");
- static final ParseField TIME = new ParseField("time");
- static final ParseField NUMBER_OF_FILES = new ParseField("number_of_files");
- static final ParseField TOTAL_SIZE = new ParseField("total_size");
- static final ParseField FILES = new ParseField("files");
- }
+ private static final String NAME = "name";
+ private static final String INDEX_VERSION = "index_version";
+ private static final String START_TIME = "start_time";
+ private static final String TIME = "time";
+ private static final String NUMBER_OF_FILES = "number_of_files";
+ private static final String TOTAL_SIZE = "total_size";
+ private static final String FILES = "files";
+ private static final ParseField PARSE_NAME = new ParseField("name");
+ private static final ParseField PARSE_INDEX_VERSION = new ParseField("index_version", "index-version");
+ private static final ParseField PARSE_START_TIME = new ParseField("start_time");
+ private static final ParseField PARSE_TIME = new ParseField("time");
+ private static final ParseField PARSE_NUMBER_OF_FILES = new ParseField("number_of_files");
+ private static final ParseField PARSE_TOTAL_SIZE = new ParseField("total_size");
+ private static final ParseField PARSE_FILES = new ParseField("files");
/**
* Serializes shard snapshot metadata info into JSON
@@ -453,13 +487,13 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
*/
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.field(Fields.NAME, snapshot);
- builder.field(Fields.INDEX_VERSION, indexVersion);
- builder.field(Fields.START_TIME, startTime);
- builder.field(Fields.TIME, time);
- builder.field(Fields.NUMBER_OF_FILES, numberOfFiles);
- builder.field(Fields.TOTAL_SIZE, totalSize);
- builder.startArray(Fields.FILES);
+ builder.field(NAME, snapshot);
+ builder.field(INDEX_VERSION, indexVersion);
+ builder.field(START_TIME, startTime);
+ builder.field(TIME, time);
+ builder.field(NUMBER_OF_FILES, numberOfFiles);
+ builder.field(TOTAL_SIZE, totalSize);
+ builder.startArray(FILES);
for (FileInfo fileInfo : indexFiles) {
FileInfo.toXContent(fileInfo, builder, params);
}
@@ -493,24 +527,24 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
String currentFieldName = parser.currentName();
token = parser.nextToken();
if (token.isValue()) {
- if (parseFieldMatcher.match(currentFieldName, ParseFields.NAME)) {
+ if (parseFieldMatcher.match(currentFieldName, PARSE_NAME)) {
snapshot = parser.text();
- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.INDEX_VERSION)) {
+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_INDEX_VERSION)) {
// The index-version is needed for backward compatibility with v 1.0
indexVersion = parser.longValue();
- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.START_TIME)) {
+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_START_TIME)) {
startTime = parser.longValue();
- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TIME)) {
+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_TIME)) {
time = parser.longValue();
- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.NUMBER_OF_FILES)) {
+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_NUMBER_OF_FILES)) {
numberOfFiles = parser.intValue();
- } else if (parseFieldMatcher.match(currentFieldName, ParseFields.TOTAL_SIZE)) {
+ } else if (parseFieldMatcher.match(currentFieldName, PARSE_TOTAL_SIZE)) {
totalSize = parser.longValue();
} else {
throw new ElasticsearchParseException("unknown parameter [{}]", currentFieldName);
}
} else if (token == XContentParser.Token.START_ARRAY) {
- if (parseFieldMatcher.match(currentFieldName, ParseFields.FILES)) {
+ if (parseFieldMatcher.match(currentFieldName, PARSE_FILES)) {
while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
indexFiles.add(FileInfo.fromXContent(parser));
}
@@ -526,6 +560,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
}
}
return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles),
- startTime, time, numberOfFiles, totalSize);
+ startTime, time, numberOfFiles, totalSize);
}
+
}
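A small sketch of the parse-time rule introduced above. The helper name resolveChecksum is hypothetical, and the threshold version is passed in explicitly in place of StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION (Lucene 4.8, the first release to write per-file checksums).

```java
import org.apache.lucene.util.Version;

final class ChecksumResolution {
    static final String UNKNOWN_CHECKSUM = "_na_";

    static String resolveChecksum(String physicalName, Version writtenBy,
                                  String checksumOrNull, Version firstChecksumVersion) {
        if (checksumOrNull != null) {
            return checksumOrNull;
        }
        // Only an empty shard's segments_N written by a pre-checksum Lucene may lack a checksum.
        if (physicalName.startsWith("segments_")
                && writtenBy.onOrAfter(firstChecksumVersion) == false) {
            return UNKNOWN_CHECKSUM; // sentinel; omitted again when the FileInfo is serialized
        }
        throw new IllegalArgumentException("missing checksum for [" + physicalName + "]");
    }
}
```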
diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java
index a720f5cb258..166f978a4db 100644
--- a/core/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/core/src/main/java/org/elasticsearch/index/store/Store.java
@@ -41,7 +41,6 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.Version;
@@ -444,11 +443,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
/**
- * The returned IndexOutput might validate the files checksum if the file has been written with a newer lucene version
- * and the metadata holds the necessary information to detect that it was been written by Lucene 4.8 or newer. If it has only
- * a legacy checksum, returned IndexOutput will not verify the checksum.
+ * The returned IndexOutput validates the files checksum.
*
- * Note: Checksums are calculated nevertheless since lucene does it by default sicne version 4.8.0. This method only adds the
+ * Note: Checksums are calculated by default since version 4.8.0. This method only adds the
* verification against the checksum in the given metadata and does not add any significant overhead.
*/
public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {
@@ -652,17 +649,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
// different in the diff. That's why we have to double check here again if the rest of it matches.
// all is fine this file is just part of a commit or a segment that is different
- final boolean same = local.isSame(remote);
-
- // this check ensures that the two files are consistent ie. if we don't have checksums only the rest needs to match we are just
- // verifying that we are consistent on both ends source and target
- final boolean hashAndLengthEqual = (
- local.checksum() == null
- && remote.checksum() == null
- && local.hash().equals(remote.hash())
- && local.length() == remote.length());
- final boolean consistent = hashAndLengthEqual || same;
- if (consistent == false) {
+ if (local.isSame(remote) == false) {
logger.debug("Files are different on the recovery target: {} ", recoveryDiff);
throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null);
}
@@ -898,18 +885,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}
- /**
- * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
- */
- public static BytesRef hashFile(Directory directory, String file) throws IOException {
- final BytesRefBuilder fileHash = new BytesRefBuilder();
- try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
- hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length());
- }
- return fileHash.get();
- }
-
-
/**
* Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
*/
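A hedged usage sketch of the verifying-output contract described in the updated javadoc above. copyAndVerify is a hypothetical helper, not part of this change; it assumes Store.createVerifyingOutput and the static Store.verify(IndexOutput) behave as documented.

```java
import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;

final class VerifiedCopy {
    /** Copies one file into the store through a verifying output and checks its checksum. */
    static void copyAndVerify(Store store, Directory source, String fileName,
                              StoreFileMetaData expected) throws IOException {
        try (IndexInput input = source.openInput(fileName, IOContext.READONCE);
             IndexOutput output = store.createVerifyingOutput(fileName, expected, IOContext.DEFAULT)) {
            byte[] buffer = new byte[8 * 1024];
            long remaining = input.length();
            while (remaining > 0) {
                int chunk = (int) Math.min(buffer.length, remaining);
                input.readBytes(buffer, 0, chunk);
                output.writeBytes(buffer, 0, chunk);
                remaining -= chunk;
            }
            Store.verify(output); // fails if the footer checksum does not match the metadata
        }
    }
}
```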
diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
index e163b15f60e..2653f01c81d 100644
--- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
+++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java
@@ -21,10 +21,8 @@ package org.elasticsearch.index.store;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;
-import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lucene.Lucene;
@@ -58,14 +56,15 @@ public class StoreFileMetaData implements Writeable {
}
public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
- assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index version less that "
- + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
- Objects.requireNonNull(writtenBy, "writtenBy must not be null");
- Objects.requireNonNull(checksum, "checksum must not be null");
- this.name = name;
+ // it's possible here to have a _na_ checksum or an unsupported writtenBy version, if the
+ // file is a segments_N file, but that is fine in the case of a segments_N file because
+ // we handle that case upstream
+ assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) :
+ "index version less that " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
+ this.name = Objects.requireNonNull(name, "name must not be null");
this.length = length;
- this.checksum = checksum;
- this.writtenBy = writtenBy;
+ this.checksum = Objects.requireNonNull(checksum, "checksum must not be null");
+ this.writtenBy = Objects.requireNonNull(writtenBy, "writtenBy must not be null");
this.hash = hash == null ? new BytesRef() : hash;
}
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
index bd01e7f0183..70b9443e043 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java
@@ -45,6 +45,7 @@ import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Predicate;
public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable {
@@ -52,6 +53,9 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
"indices.queries.cache.size", "10%", Property.NodeScope);
public static final Setting<Integer> INDICES_CACHE_QUERY_COUNT_SETTING = Setting.intSetting(
"indices.queries.cache.count", 10000, 1, Property.NodeScope);
+ // enables caching on all segments instead of only the larger ones, for testing only
+ public static final Setting<Boolean> INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING = Setting.boolSetting(
+ "indices.queries.cache.all_segments", false, Property.NodeScope);
private final LRUQueryCache cache;
private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap();
@@ -69,111 +73,11 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
final int count = INDICES_CACHE_QUERY_COUNT_SETTING.get(settings);
logger.debug("using [node] query cache with size [{}] max filter count [{}]",
size, count);
- cache = new LRUQueryCache(count, size.bytes()) {
-
- private Stats getStats(Object coreKey) {
- final ShardId shardId = shardKeyMap.getShardId(coreKey);
- if (shardId == null) {
- return null;
- }
- return shardStats.get(shardId);
- }
-
- private Stats getOrCreateStats(Object coreKey) {
- final ShardId shardId = shardKeyMap.getShardId(coreKey);
- Stats stats = shardStats.get(shardId);
- if (stats == null) {
- stats = new Stats();
- shardStats.put(shardId, stats);
- }
- return stats;
- }
-
- // It's ok to not protect these callbacks by a lock since it is
- // done in LRUQueryCache
- @Override
- protected void onClear() {
- assert Thread.holdsLock(this);
- super.onClear();
- for (Stats stats : shardStats.values()) {
- // don't throw away hit/miss
- stats.cacheSize = 0;
- stats.ramBytesUsed = 0;
- }
- sharedRamBytesUsed = 0;
- }
-
- @Override
- protected void onQueryCache(Query filter, long ramBytesUsed) {
- assert Thread.holdsLock(this);
- super.onQueryCache(filter, ramBytesUsed);
- sharedRamBytesUsed += ramBytesUsed;
- }
-
- @Override
- protected void onQueryEviction(Query filter, long ramBytesUsed) {
- assert Thread.holdsLock(this);
- super.onQueryEviction(filter, ramBytesUsed);
- sharedRamBytesUsed -= ramBytesUsed;
- }
-
- @Override
- protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
- assert Thread.holdsLock(this);
- super.onDocIdSetCache(readerCoreKey, ramBytesUsed);
- final Stats shardStats = getOrCreateStats(readerCoreKey);
- shardStats.cacheSize += 1;
- shardStats.cacheCount += 1;
- shardStats.ramBytesUsed += ramBytesUsed;
-
- StatsAndCount statsAndCount = stats2.get(readerCoreKey);
- if (statsAndCount == null) {
- statsAndCount = new StatsAndCount(shardStats);
- stats2.put(readerCoreKey, statsAndCount);
- }
- statsAndCount.count += 1;
- }
-
- @Override
- protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
- assert Thread.holdsLock(this);
- super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
- // onDocIdSetEviction might sometimes be called with a number
- // of entries equal to zero if the cache for the given segment
- // was already empty when the close listener was called
- if (numEntries > 0) {
- // We can't use ShardCoreKeyMap here because its core closed
- // listener is called before the listener of the cache which
- // triggers this eviction. So instead we use use stats2 that
- // we only evict when nothing is cached anymore on the segment
- // instead of relying on close listeners
- final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
- final Stats shardStats = statsAndCount.stats;
- shardStats.cacheSize -= numEntries;
- shardStats.ramBytesUsed -= sumRamBytesUsed;
- statsAndCount.count -= numEntries;
- if (statsAndCount.count == 0) {
- stats2.remove(readerCoreKey);
- }
- }
- }
-
- @Override
- protected void onHit(Object readerCoreKey, Query filter) {
- assert Thread.holdsLock(this);
- super.onHit(readerCoreKey, filter);
- final Stats shardStats = getStats(readerCoreKey);
- shardStats.hitCount += 1;
- }
-
- @Override
- protected void onMiss(Object readerCoreKey, Query filter) {
- assert Thread.holdsLock(this);
- super.onMiss(readerCoreKey, filter);
- final Stats shardStats = getOrCreateStats(readerCoreKey);
- shardStats.missCount += 1;
- }
- };
+ if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) {
+ cache = new ElasticsearchLRUQueryCache(count, size.bytes(), context -> true);
+ } else {
+ cache = new ElasticsearchLRUQueryCache(count, size.bytes());
+ }
sharedRamBytesUsed = 0;
}
@@ -316,4 +220,111 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
assert empty(shardStats.get(shardId));
shardStats.remove(shardId);
}
+
+ private class ElasticsearchLRUQueryCache extends LRUQueryCache {
+
+ ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed, Predicate<LeafReaderContext> leavesToCache) {
+ super(maxSize, maxRamBytesUsed, leavesToCache);
+ }
+
+ ElasticsearchLRUQueryCache(int maxSize, long maxRamBytesUsed) {
+ super(maxSize, maxRamBytesUsed);
+ }
+
+ private Stats getStats(Object coreKey) {
+ final ShardId shardId = shardKeyMap.getShardId(coreKey);
+ if (shardId == null) {
+ return null;
+ }
+ return shardStats.get(shardId);
+ }
+
+ private Stats getOrCreateStats(Object coreKey) {
+ final ShardId shardId = shardKeyMap.getShardId(coreKey);
+ Stats stats = shardStats.get(shardId);
+ if (stats == null) {
+ stats = new Stats();
+ shardStats.put(shardId, stats);
+ }
+ return stats;
+ }
+
+ // It's ok to not protect these callbacks by a lock since it is
+ // done in LRUQueryCache
+ @Override
+ protected void onClear() {
+ super.onClear();
+ for (Stats stats : shardStats.values()) {
+ // don't throw away hit/miss
+ stats.cacheSize = 0;
+ stats.ramBytesUsed = 0;
+ }
+ sharedRamBytesUsed = 0;
+ }
+
+ @Override
+ protected void onQueryCache(Query filter, long ramBytesUsed) {
+ super.onQueryCache(filter, ramBytesUsed);
+ sharedRamBytesUsed += ramBytesUsed;
+ }
+
+ @Override
+ protected void onQueryEviction(Query filter, long ramBytesUsed) {
+ super.onQueryEviction(filter, ramBytesUsed);
+ sharedRamBytesUsed -= ramBytesUsed;
+ }
+
+ @Override
+ protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
+ super.onDocIdSetCache(readerCoreKey, ramBytesUsed);
+ final Stats shardStats = getOrCreateStats(readerCoreKey);
+ shardStats.cacheSize += 1;
+ shardStats.cacheCount += 1;
+ shardStats.ramBytesUsed += ramBytesUsed;
+
+ StatsAndCount statsAndCount = stats2.get(readerCoreKey);
+ if (statsAndCount == null) {
+ statsAndCount = new StatsAndCount(shardStats);
+ stats2.put(readerCoreKey, statsAndCount);
+ }
+ statsAndCount.count += 1;
+ }
+
+ @Override
+ protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
+ super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
+ // onDocIdSetEviction might sometimes be called with a number
+ // of entries equal to zero if the cache for the given segment
+ // was already empty when the close listener was called
+ if (numEntries > 0) {
+ // We can't use ShardCoreKeyMap here because its core closed
+ // listener is called before the listener of the cache which
+ // triggers this eviction. So instead we use stats2 that
+ // we only evict when nothing is cached anymore on the segment
+ // instead of relying on close listeners
+ final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
+ final Stats shardStats = statsAndCount.stats;
+ shardStats.cacheSize -= numEntries;
+ shardStats.ramBytesUsed -= sumRamBytesUsed;
+ statsAndCount.count -= numEntries;
+ if (statsAndCount.count == 0) {
+ stats2.remove(readerCoreKey);
+ }
+ }
+ }
+
+ @Override
+ protected void onHit(Object readerCoreKey, Query filter) {
+ super.onHit(readerCoreKey, filter);
+ final Stats shardStats = getStats(readerCoreKey);
+ shardStats.hitCount += 1;
+ }
+
+ @Override
+ protected void onMiss(Object readerCoreKey, Query filter) {
+ super.onMiss(readerCoreKey, filter);
+ final Stats shardStats = getOrCreateStats(readerCoreKey);
+ shardStats.missCount += 1;
+ }
+ }
}
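A usage sketch for the new test-only setting. It assumes the IndicesQueryCache(Settings) constructor is still how the cache is built; in a real node the settings come from the node configuration rather than being constructed directly like this.

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesQueryCache;

public class QueryCacheAllSegmentsExample {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.builder()
            .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
            .build();
        // With the setting enabled, the cache accepts every segment (leavesToCache is "context -> true").
        IndicesQueryCache cache = new IndicesQueryCache(settings);
        cache.close();
    }
}
```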
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index ba512379868..3ae02c7eadd 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.breaker.CircuitBreaker;
@@ -55,6 +56,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.env.NodeEnvironment;
@@ -86,10 +88,14 @@ import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.indices.recovery.RecoveryTargetService;
import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.query.QueryPhase;
@@ -124,7 +130,8 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
/**
*
*/
-public class IndicesService extends AbstractLifecycleComponent<IndicesService> implements Iterable<IndexService>, IndexService.ShardStoreDeleter {
+public class IndicesService extends AbstractLifecycleComponent<IndicesService>
+ implements IndicesClusterStateService.AllocatedIndices<IndexShard, IndexService>, IndexService.ShardStoreDeleter {
public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout";
public static final Setting<TimeValue> INDICES_CACHE_CLEAN_INTERVAL_SETTING =
@@ -296,11 +303,14 @@ public class IndicesService extends AbstractLifecycleComponent i
}
/**
- * Returns true if changes (adding / removing) indices, shards and so on are allowed.
+ * Checks if changes (adding / removing indices, shards, and so on) are allowed.
+ *
+ * @throws IllegalStateException if no changes are allowed.
*/
- public boolean changesAllowed() {
- // we check on stop here since we defined stop when we delete the indices
- return lifecycle.started();
+ private void ensureChangesAllowed() {
+ if (lifecycle.started() == false) {
+ throw new IllegalStateException("Can't make changes to indices service, node is closed");
+ }
}
@Override
@@ -314,10 +324,9 @@ public class IndicesService extends AbstractLifecycleComponent i
/**
* Returns an IndexService for the specified index if it exists, otherwise returns null.
- *
*/
- @Nullable
- public IndexService indexService(Index index) {
+ @Override
+ public @Nullable IndexService indexService(Index index) {
return indices.get(index.getUUID());
}
@@ -339,11 +348,9 @@ public class IndicesService extends AbstractLifecycleComponent i
* @param builtInListeners a list of built-in lifecycle {@link IndexEventListener} that should be used alongside the per-index listeners
* @throws IndexAlreadyExistsException if the index already exists.
*/
+ @Override
public synchronized IndexService createIndex(final NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData, List<IndexEventListener> builtInListeners) throws IOException {
-
- if (!lifecycle.started()) {
- throw new IllegalStateException("Can't create an index [" + indexMetaData.getIndex() + "], node is closed");
- }
+ ensureChangesAllowed();
if (indexMetaData.getIndexUUID().equals(IndexMetaData.INDEX_UUID_NA_VALUE)) {
throw new IllegalArgumentException("index must have a real UUID found value: [" + indexMetaData.getIndexUUID() + "]");
}
@@ -424,14 +431,44 @@ public class IndicesService extends AbstractLifecycleComponent i
}
}
+ @Override
+ public IndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState, RecoveryTargetService recoveryTargetService,
+ RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+ NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure) throws IOException {
+ ensureChangesAllowed();
+ IndexService indexService = indexService(shardRouting.index());
+ IndexShard indexShard = indexService.createShard(shardRouting);
+ indexShard.addShardFailureCallback(onShardFailure);
+ indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService,
+ (type, mapping) -> {
+ assert recoveryState.getType() == RecoveryState.Type.LOCAL_SHARDS :
+ "mapping update consumer only required by local shards recovery";
+ try {
+ nodeServicesProvider.getClient().admin().indices().preparePutMapping()
+ .setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid
+ .setType(type)
+ .setSource(mapping.source().string())
+ .get();
+ } catch (IOException ex) {
+ throw new ElasticsearchException("failed to stringify mapping source", ex);
+ }
+ }, this);
+ return indexShard;
+ }
+
/**
* Removes the given index from this service and releases all associated resources. Persistent parts of the index
* like the shards files, state and transaction logs are kept around in the case of a disaster recovery.
* @param index the index to remove
* @param reason the high level reason causing this removal
*/
+ @Override
public void removeIndex(Index index, String reason) {
- removeIndex(index, reason, false);
+ try {
+ removeIndex(index, reason, false);
+ } catch (Throwable e) {
+ logger.warn("failed to remove index ({})", e, reason);
+ }
}
private void removeIndex(Index index, String reason, boolean delete) {
@@ -516,14 +553,20 @@ public class IndicesService extends AbstractLifecycleComponent i
* @param index the index to delete
* @param reason the high level reason causing this delete
*/
- public void deleteIndex(Index index, String reason) throws IOException {
- removeIndex(index, reason, true);
+ @Override
+ public void deleteIndex(Index index, String reason) {
+ try {
+ removeIndex(index, reason, true);
+ } catch (Throwable e) {
+ logger.warn("failed to delete index ({})", e, reason);
+ }
}
/**
* Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index
* but does not deal with in-memory structures. For those call {@link #deleteIndex(Index, String)}
*/
+ @Override
public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
if (nodeEnv.hasNodeFile()) {
String indexName = metaData.getIndex().getName();
@@ -683,8 +726,8 @@ public class IndicesService extends AbstractLifecycleComponent i
* @param clusterState {@code ClusterState} to ensure the index is not part of it
* @return IndexMetaData for the index loaded from disk
*/
- @Nullable
- public IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState clusterState) {
+ @Override
+ public @Nullable IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState clusterState) {
// this method should only be called when we know the index (name + uuid) is not part of the cluster state
if (clusterState.metaData().index(index) != null) {
throw new IllegalStateException("Cannot delete index [" + index + "], it is still part of the cluster state.");
@@ -839,6 +882,7 @@ public class IndicesService extends AbstractLifecycleComponent i
* @param index the index to process the pending deletes for
* @param timeout the timeout used for processing pending deletes
*/
+ @Override
public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeout) throws IOException, InterruptedException {
logger.debug("{} processing pending deletes", index);
final long startTimeNS = System.nanoTime();
diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index 2c77f863c47..c0bfedc47ca 100644
--- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -19,16 +19,13 @@
package org.elasticsearch.indices.cluster;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.store.LockObtainFailedException;
-import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNode;
@@ -37,7 +34,6 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
@@ -48,17 +44,18 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexComponent;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexShardAlreadyExistsException;
import org.elasticsearch.index.NodeServicesProvider;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardRelocatedException;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
@@ -73,7 +70,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -82,14 +78,13 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
/**
*
*/
public class IndicesClusterStateService extends AbstractLifecycleComponent<IndicesClusterStateService> implements ClusterStateListener {
- private final IndicesService indicesService;
+ final AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService;
private final ClusterService clusterService;
private final ThreadPool threadPool;
private final RecoveryTargetService recoveryTargetService;
@@ -102,11 +97,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
- private final ConcurrentMap<ShardId, ShardRouting> failedShards = ConcurrentCollections.newConcurrentMap();
+ final ConcurrentMap<ShardId, ShardRouting> failedShardsCache = ConcurrentCollections.newConcurrentMap();
private final RestoreService restoreService;
private final RepositoriesService repositoriesService;
- private final Object mutex = new Object();
private final FailedShardHandler failedShardHandler = new FailedShardHandler();
private final boolean sendRefreshMapping;
@@ -120,6 +114,22 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
+ this(settings, (AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>>) indicesService,
+ clusterService, threadPool, recoveryTargetService, shardStateAction,
+ nodeMappingRefreshAction, repositoriesService, restoreService, searchService, syncedFlushService, recoverySource,
+ nodeServicesProvider);
+ }
+
+ // for tests
+ IndicesClusterStateService(Settings settings,
+ AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService,
+ ClusterService clusterService,
+ ThreadPool threadPool, RecoveryTargetService recoveryTargetService,
+ ShardStateAction shardStateAction,
+ NodeMappingRefreshAction nodeMappingRefreshAction,
+ RepositoriesService repositoriesService, RestoreService restoreService,
+ SearchService searchService, SyncedFlushService syncedFlushService,
+ RecoverySource recoverySource, NodeServicesProvider nodeServicesProvider) {
super(settings);
this.buildInIndexListener = Arrays.asList(recoverySource, recoveryTargetService, searchService, syncedFlushService);
this.indicesService = indicesService;
@@ -149,87 +159,97 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
+ for (AllocatedIndex<? extends Shard> indexService : indicesService) {
+ indicesService.removeIndex(indexService.index(), "cleaning index (disabled block persistence)"); // also cleans shards
}
-
- cleanFailedShards(event);
-
- // cleaning up indices that are completely deleted so we won't need to worry about them
- // when checking for shards
- applyDeletedIndices(event);
- applyDeletedShards(event);
- // call after deleted shards so indices with no shards will be cleaned
- applyCleanedIndices(event);
- // make sure that newly created shards use the latest meta data
- applyIndexMetaData(event);
- applyNewIndices(event);
- // apply mappings also updates new indices. TODO: make new indices good to begin with
- applyMappings(event);
- applyNewOrUpdatedShards(event);
- }
- }
-
- private void cleanFailedShards(final ClusterChangedEvent event) {
- RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
- if (routingNode == null) {
- failedShards.clear();
return;
}
- for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
- Map.Entry<ShardId, ShardRouting> entry = iterator.next();
- ShardRouting failedShardRouting = entry.getValue();
- ShardRouting matchedShardRouting = routingNode.getByShardId(failedShardRouting.shardId());
- if (matchedShardRouting == null || matchedShardRouting.isSameAllocation(failedShardRouting) == false) {
+
+ updateFailedShardsCache(state);
+
+ deleteIndices(event); // also deletes shards of deleted indices
+
+ removeUnallocatedIndices(state); // also removes shards of removed indices
+
+ failMissingShards(state);
+
+ removeShards(state);
+
+ updateIndices(event); // can also fail shards, but these are then guaranteed to be in failedShardsCache
+
+ createIndices(state);
+
+ createOrUpdateShards(state);
+ }
+
+ /**
+ * Removes shard entries from the failed shards cache that are no longer allocated to this node by the master.
+ * Sends shard failures for shards that are marked as actively allocated to this node but don't actually exist on the node.
+ * Resends shard failures for shards that are still marked as allocated to this node but previously failed.
+ *
+ * @param state new cluster state
+ */
+ private void updateFailedShardsCache(final ClusterState state) {
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
+ if (localRoutingNode == null) {
+ failedShardsCache.clear();
+ return;
+ }
+
+ DiscoveryNode masterNode = state.nodes().getMasterNode();
+
+ // remove items from cache which are not in our routing table anymore and resend failures that have not executed on master yet
+ for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShardsCache.entrySet().iterator(); iterator.hasNext(); ) {
+ ShardRouting failedShardRouting = iterator.next().getValue();
+ ShardRouting matchedRouting = localRoutingNode.getByShardId(failedShardRouting.shardId());
+ if (matchedRouting == null || matchedRouting.isSameAllocation(failedShardRouting) == false) {
iterator.remove();
+ } else {
+ if (masterNode != null) { // TODO: can we remove this? Is resending shard failures the responsibility of shardStateAction?
+ String message = "master " + masterNode + " has not removed previously failed shard. resending shard failure";
+ logger.trace("[{}] re-sending failed shard [{}], reason [{}]", matchedRouting.shardId(), matchedRouting, message);
+ shardStateAction.shardFailed(matchedRouting, matchedRouting, message, null, SHARD_STATE_ACTION_LISTENER);
+ }
}
}
}
- private void applyDeletedIndices(final ClusterChangedEvent event) {
+ /**
+ * Deletes indices (with shard data).
+ *
+ * @param event cluster change event
+ */
+ private void deleteIndices(final ClusterChangedEvent event) {
final ClusterState previousState = event.previousState();
- final String localNodeId = event.state().nodes().getLocalNodeId();
+ final ClusterState state = event.state();
+ final String localNodeId = state.nodes().getLocalNodeId();
assert localNodeId != null;
for (Index index : event.indicesDeleted()) {
if (logger.isDebugEnabled()) {
logger.debug("[{}] cleaning index, no longer part of the metadata", index);
}
- final IndexService idxService = indicesService.indexService(index);
+ AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
final IndexSettings indexSettings;
- if (idxService != null) {
- indexSettings = idxService.getIndexSettings();
- deleteIndex(index, "index no longer part of the metadata");
+ if (indexService != null) {
+ indexSettings = indexService.getIndexSettings();
+ indicesService.deleteIndex(index, "index no longer part of the metadata");
} else if (previousState.metaData().hasIndex(index.getName())) {
// The deleted index was part of the previous cluster state, but not loaded on the local node
final IndexMetaData metaData = previousState.metaData().index(index);
indexSettings = new IndexSettings(metaData, settings);
- indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, event.state());
+ indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, state);
} else {
// The previous cluster state's metadata also does not contain the index,
// which is what happens on node startup when an index was deleted while the
@@ -255,10 +275,10 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent indexService : indicesService) {
+ Index index = indexService.index();
+ IndexMetaData indexMetaData = event.state().metaData().index(index);
if (indexMetaData == null) {
- assert false : "index" + indexService.index() + " exists locally, doesn't have a metadata but is not part "
+ assert false : "index" + index + " exists locally, doesn't have a metadata but is not part"
+ " of the delete index list. \nprevious state: " + event.previousState().prettyPrint()
+ "\n current state:\n" + event.state().prettyPrint();
- logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing",
- indexService.index());
- deleteIndex(indexService.index(), "isn't part of metadata (explicit check)");
+ logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing", index);
+ indicesService.deleteIndex(index, "isn't part of metadata (explicit check)");
}
}
}
- private void applyDeletedShards(final ClusterChangedEvent event) {
- RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
- if (routingNode == null) {
+ /**
+ * Removes indices that have no shards allocated to this node. This does not delete the shard data as we wait for enough
+ * shard copies to exist in the cluster before deleting shard data (triggered by {@link org.elasticsearch.indices.store.IndicesStore}).
+ *
+ * @param state new cluster state
+ */
+ private void removeUnallocatedIndices(final ClusterState state) {
+ final String localNodeId = state.nodes().getLocalNodeId();
+ assert localNodeId != null;
+
+ Set<Index> indicesWithShards = new HashSet<>();
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId);
+ if (localRoutingNode != null) { // null e.g. if we are not a data node
+ for (ShardRouting shardRouting : localRoutingNode) {
+ indicesWithShards.add(shardRouting.index());
+ }
+ }
+
+ for (AllocatedIndex<? extends Shard> indexService : indicesService) {
+ Index index = indexService.index();
+ if (indicesWithShards.contains(index) == false) {
+ logger.debug("{} removing index, no shards allocated", index);
+ indicesService.removeIndex(index, "removing index (no shards allocated)");
+ }
+ }
+ }
+
+ /**
+ * Notifies master about shards that don't exist but are supposed to be active on this node.
+ *
+ * @param state new cluster state
+ */
+ private void failMissingShards(final ClusterState state) {
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
+ if (localRoutingNode == null) {
return;
}
-
- final Map<Index, Set<String>> shardsByIndex = new HashMap<>();
- for (ShardRouting shard : routingNode) {
- shardsByIndex.computeIfAbsent(shard.index(), k -> new HashSet<>()).add(shard.allocationId().getId());
+ for (final ShardRouting shardRouting : localRoutingNode) {
+ ShardId shardId = shardRouting.shardId();
+ if (shardRouting.initializing() == false &&
+ failedShardsCache.containsKey(shardId) == false &&
+ indicesService.getShardOrNull(shardId) == null) {
+ // the master thinks we are active, but we don't have this shard at all, mark it as failed
+ sendFailShard(shardRouting, "master marked shard as active, but shard has not been created, mark shard as failed", null);
+ }
}
+ }
- for (IndexService indexService : indicesService) {
- Index index = indexService.index();
- IndexMetaData indexMetaData = event.state().metaData().index(index);
- assert indexMetaData != null : "local index doesn't have metadata, should have been cleaned up by applyDeletedIndices: " + index;
- // now, go over and delete shards that needs to get deleted
- Set<String> newShardAllocationIds = shardsByIndex.getOrDefault(index, Collections.emptySet());
- for (IndexShard existingShard : indexService) {
- if (newShardAllocationIds.contains(existingShard.routingEntry().allocationId().getId()) == false) {
- if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
- if (logger.isDebugEnabled()) {
- logger.debug("{} removing shard (index is closed)", existingShard.shardId());
+ /**
+ * Removes shards that are currently loaded by indicesService but have disappeared from the routing table of the current node.
+ * Also removes shards where the recovery source node has changed.
+ * This method does not delete the shard data.
+ *
+ * @param state new cluster state
+ */
+ private void removeShards(final ClusterState state) {
+ final RoutingTable routingTable = state.routingTable();
+ final DiscoveryNodes nodes = state.nodes();
+ final String localNodeId = state.nodes().getLocalNodeId();
+ assert localNodeId != null;
+
+ // remove shards based on routing nodes (no deletion of data)
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId);
+ for (AllocatedIndex<? extends Shard> indexService : indicesService) {
+ for (Shard shard : indexService) {
+ ShardRouting currentRoutingEntry = shard.routingEntry();
+ ShardId shardId = currentRoutingEntry.shardId();
+ ShardRouting newShardRouting = localRoutingNode == null ? null : localRoutingNode.getByShardId(shardId);
+ if (newShardRouting == null || newShardRouting.isSameAllocation(currentRoutingEntry) == false) {
+ // we can just remove the shard without cleaning it locally, since we will clean it in IndicesStore
+ // once all shards are allocated
+ logger.debug("{} removing shard (not allocated)", shardId);
+ indexService.removeShard(shardId.id(), "removing shard (not allocated)");
+ } else {
+ // remove shards where recovery source has changed. This re-initializes shards later in createOrUpdateShards
+ if (newShardRouting.isPeerRecovery()) {
+ RecoveryState recoveryState = shard.recoveryState();
+ final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, newShardRouting);
+ if (recoveryState.getSourceNode().equals(sourceNode) == false) {
+ if (recoveryTargetService.cancelRecoveriesForShard(shardId, "recovery source node changed")) {
+ // getting here means that the shard was still recovering
+ logger.debug("{} removing shard (recovery source changed), current [{}], global [{}])",
+ shardId, currentRoutingEntry, newShardRouting);
+ indexService.removeShard(shardId.id(), "removing shard (recovery source node changed)");
+ }
}
- indexService.removeShard(existingShard.shardId().id(), "removing shard (index is closed)");
- } else {
- // we can just remove the shard, without cleaning it locally, since we will clean it
- // when all shards are allocated in the IndicesStore
- if (logger.isDebugEnabled()) {
- logger.debug("{} removing shard (not allocated)", existingShard.shardId());
- }
- indexService.removeShard(existingShard.shardId().id(), "removing shard (not allocated)");
}
}
}
}
}
- private void applyCleanedIndices(final ClusterChangedEvent event) {
- // handle closed indices, since they are not allocated on a node once they are closed
- // so applyDeletedIndices might not take them into account
- for (IndexService indexService : indicesService) {
- Index index = indexService.index();
- IndexMetaData indexMetaData = event.state().metaData().index(index);
- if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
- for (Integer shardId : indexService.shardIds()) {
- logger.debug("{}[{}] removing shard (index is closed)", index, shardId);
- try {
- indexService.removeShard(shardId, "removing shard (index is closed)");
- } catch (Throwable e) {
- logger.warn("{} failed to remove shard (index is closed)", e, index);
- }
- }
- }
- }
-
- final Set<Index> hasAllocations = new HashSet<>();
- final RoutingNode node = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
- // if no shards are allocated ie. if this node is a master-only node it can return nul
- if (node != null) {
- for (ShardRouting routing : node) {
- hasAllocations.add(routing.index());
- }
- }
- for (IndexService indexService : indicesService) {
- Index index = indexService.index();
- if (hasAllocations.contains(index) == false) {
- assert indexService.shardIds().isEmpty() :
- "no locally assigned shards, but index wasn't emptied by applyDeletedShards."
- + " index " + index + ", shards: " + indexService.shardIds();
- if (logger.isDebugEnabled()) {
- logger.debug("{} cleaning index (no shards allocated)", index);
- }
- // clean the index
- removeIndex(index, "removing index (no shards allocated)");
- }
- }
- }
-
- private void applyIndexMetaData(ClusterChangedEvent event) {
- if (!event.metaDataChanged()) {
- return;
- }
- for (IndexMetaData indexMetaData : event.state().metaData()) {
- if (!indicesService.hasIndex(indexMetaData.getIndex())) {
- // we only create / update here
- continue;
- }
- // if the index meta data didn't change, no need check for refreshed settings
- if (!event.indexMetaDataChanged(indexMetaData)) {
- continue;
- }
- Index index = indexMetaData.getIndex();
- IndexService indexService = indicesService.indexService(index);
- if (indexService == null) {
- // already deleted on us, ignore it
- continue;
- }
- indexService.updateMetaData(indexMetaData);
- }
- }
-
- private void applyNewIndices(final ClusterChangedEvent event) {
+ private void createIndices(final ClusterState state) {
// we only create indices for shards that are allocated
- RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
- if (routingNode == null) {
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
+ if (localRoutingNode == null) {
return;
}
- for (ShardRouting shard : routingNode) {
- if (!indicesService.hasIndex(shard.index())) {
- final IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(shard.index());
- if (logger.isDebugEnabled()) {
- logger.debug("[{}] creating index", indexMetaData.getIndex());
- }
- try {
- indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
- } catch (Throwable e) {
- sendFailShard(shard, "failed to create index", e);
+ // create map of indices to create with shards to fail if index creation fails
+ final Map<Index, List<ShardRouting>> indicesToCreate = new HashMap<>();
+ for (ShardRouting shardRouting : localRoutingNode) {
+ if (failedShardsCache.containsKey(shardRouting.shardId()) == false) {
+ final Index index = shardRouting.index();
+ if (indicesService.indexService(index) == null) {
+ indicesToCreate.computeIfAbsent(index, k -> new ArrayList<>()).add(shardRouting);
}
}
}
- }
- private void applyMappings(ClusterChangedEvent event) {
- // go over and update mappings
- for (IndexMetaData indexMetaData : event.state().metaData()) {
- Index index = indexMetaData.getIndex();
- if (!indicesService.hasIndex(index)) {
- // we only create / update here
- continue;
- }
- boolean requireRefresh = false;
- IndexService indexService = indicesService.indexService(index);
- if (indexService == null) {
- // got deleted on us, ignore (closing the node)
- return;
- }
+ for (Map.Entry<Index, List<ShardRouting>> entry : indicesToCreate.entrySet()) {
+ final Index index = entry.getKey();
+ final IndexMetaData indexMetaData = state.metaData().index(index);
+ logger.debug("[{}] creating index", index);
+
+ AllocatedIndex<? extends Shard> indexService = null;
try {
- MapperService mapperService = indexService.mapperService();
- // go over and add the relevant mappings (or update them)
- for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
- MappingMetaData mappingMd = cursor.value;
- String mappingType = mappingMd.type();
- CompressedXContent mappingSource = mappingMd.source();
- requireRefresh |= processMapping(index.getName(), mapperService, mappingType, mappingSource);
- }
- if (requireRefresh && sendRefreshMapping) {
- nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
- new NodeMappingRefreshAction.NodeMappingRefreshRequest(index.getName(), indexMetaData.getIndexUUID(),
- event.state().nodes().getLocalNodeId())
+ indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, buildInIndexListener);
+ if (indexService.updateMapping(indexMetaData) && sendRefreshMapping) {
+ nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(),
+ new NodeMappingRefreshAction.NodeMappingRefreshRequest(indexMetaData.getIndex().getName(),
+ indexMetaData.getIndexUUID(), state.nodes().getLocalNodeId())
);
}
} catch (Throwable t) {
- // if we failed the mappings anywhere, we need to fail the shards for this index, note, we safeguard
- // by creating the processing the mappings on the master, or on the node the mapping was introduced on,
- // so this failure typically means wrong node level configuration or something similar
- for (IndexShard indexShard : indexService) {
- ShardRouting shardRouting = indexShard.routingEntry();
- failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t);
- }
- }
- }
- }
-
- private boolean processMapping(String index, MapperService mapperService, String mappingType, CompressedXContent mappingSource) throws Throwable {
- // refresh mapping can happen when the parsing/merging of the mapping from the metadata doesn't result in the same
- // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
- // merge version of it, which it does when refreshing the mappings), and warn log it.
- boolean requiresRefresh = false;
- try {
- DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
-
- if (existingMapper == null || mappingSource.equals(existingMapper.mappingSource()) == false) {
- String op = existingMapper == null ? "adding" : "updating";
- if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
- logger.debug("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string());
- } else if (logger.isTraceEnabled()) {
- logger.trace("[{}] {} mapping [{}], source [{}]", index, op, mappingType, mappingSource.string());
+ final String failShardReason;
+ if (indexService == null) {
+ failShardReason = "failed to create index";
} else {
- logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)", index, op, mappingType);
+ failShardReason = "failed to update mapping for index";
+ indicesService.removeIndex(index, "removing index (mapping update failed)");
}
- mapperService.merge(mappingType, mappingSource, MapperService.MergeReason.MAPPING_RECOVERY, true);
- if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
- logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}", index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
- requiresRefresh = true;
+ for (ShardRouting shardRouting : entry.getValue()) {
+ sendFailShard(shardRouting, failShardReason, t);
}
}
- } catch (Throwable e) {
- logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index, mappingType, mappingSource);
- throw e;
}
- return requiresRefresh;
}
-
- private void applyNewOrUpdatedShards(final ClusterChangedEvent event) {
- if (!indicesService.changesAllowed()) {
+ private void updateIndices(ClusterChangedEvent event) {
+ if (!event.metaDataChanged()) {
return;
}
-
- RoutingTable routingTable = event.state().routingTable();
- RoutingNode routingNode = event.state().getRoutingNodes().node(event.state().nodes().getLocalNodeId());
-
- if (routingNode == null) {
- failedShards.clear();
- return;
- }
-
- DiscoveryNodes nodes = event.state().nodes();
- for (final ShardRouting shardRouting : routingNode) {
- final IndexService indexService = indicesService.indexService(shardRouting.index());
- if (indexService == null) {
- // creation failed for some reasons
- assert failedShards.containsKey(shardRouting.shardId()) :
- "index has local allocation but is not created by applyNewIndices and is not failed " + shardRouting;
- continue;
- }
- final IndexMetaData indexMetaData = event.state().metaData().index(shardRouting.index());
- assert indexMetaData != null : "index has local allocation but no meta data. " + shardRouting.index();
-
- final int shardId = shardRouting.id();
-
- if (!indexService.hasShard(shardId) && shardRouting.started()) {
- if (failedShards.containsKey(shardRouting.shardId())) {
- if (nodes.getMasterNode() != null) {
- String message = "master " + nodes.getMasterNode() + " marked shard as started, but shard has previous failed. resending shard failure";
- logger.trace("[{}] re-sending failed shard [{}], reason [{}]", shardRouting.shardId(), shardRouting, message);
- shardStateAction.shardFailed(shardRouting, shardRouting, message, null, SHARD_STATE_ACTION_LISTENER);
+ final ClusterState state = event.state();
+ for (AllocatedIndex<? extends Shard> indexService : indicesService) {
+ final Index index = indexService.index();
+ final IndexMetaData currentIndexMetaData = indexService.getIndexSettings().getIndexMetaData();
+ final IndexMetaData newIndexMetaData = state.metaData().index(index);
+ assert newIndexMetaData != null : "index " + index + " should have been removed by deleteIndices";
+ if (ClusterChangedEvent.indexMetaDataChanged(currentIndexMetaData, newIndexMetaData)) {
+ indexService.updateMetaData(newIndexMetaData);
+ try {
+ if (indexService.updateMapping(newIndexMetaData) && sendRefreshMapping) {
+ nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(),
+ new NodeMappingRefreshAction.NodeMappingRefreshRequest(newIndexMetaData.getIndex().getName(),
+ newIndexMetaData.getIndexUUID(), state.nodes().getLocalNodeId())
+ );
}
- } else {
- // the master thinks we are started, but we don't have this shard at all, mark it as failed
- sendFailShard(shardRouting, "master [" + nodes.getMasterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
- }
- continue;
- }
+ } catch (Throwable t) {
+ indicesService.removeIndex(indexService.index(), "removing index (mapping update failed)");
- IndexShard indexShard = indexService.getShardOrNull(shardId);
- if (indexShard != null) {
- ShardRouting currentRoutingEntry = indexShard.routingEntry();
- // if the current and global routing are initializing, but are still not the same, its a different "shard" being allocated
- // for example: a shard that recovers from one node and now needs to recover to another node,
- // or a replica allocated and then allocating a primary because the primary failed on another node
- boolean shardHasBeenRemoved = false;
- assert currentRoutingEntry.isSameAllocation(shardRouting) :
- "local shard has a different allocation id but wasn't cleaning by applyDeletedShards. "
- + "cluster state: " + shardRouting + " local: " + currentRoutingEntry;
- if (shardRouting.isPeerRecovery()) {
- RecoveryState recoveryState = indexShard.recoveryState();
- final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, shardRouting);
- if (recoveryState.getSourceNode().equals(sourceNode) == false) {
- if (recoveryTargetService.cancelRecoveriesForShard(currentRoutingEntry.shardId(), "recovery source node changed")) {
- // getting here means that the shard was still recovering
- logger.debug("[{}][{}] removing shard (recovery source changed), current [{}], global [{}])", shardRouting.index(), shardRouting.id(), currentRoutingEntry, shardRouting);
- indexService.removeShard(shardRouting.id(), "removing shard (recovery source node changed)");
- shardHasBeenRemoved = true;
+ // fail shards that would be created or updated by createOrUpdateShards
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
+ if (localRoutingNode != null) {
+ for (final ShardRouting shardRouting : localRoutingNode) {
+ if (shardRouting.index().equals(index) && failedShardsCache.containsKey(shardRouting.shardId()) == false) {
+ sendFailShard(shardRouting, "failed to update mapping for index", t);
+ }
}
}
}
-
- if (shardHasBeenRemoved == false) {
- try {
- indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
- } catch (Throwable e) {
- failAndRemoveShard(shardRouting, indexService, true, "failed updating shard routing entry", e);
- }
- }
- }
-
- if (shardRouting.initializing()) {
- applyInitializingShard(event.state(), indexService, shardRouting);
}
}
}
- private void applyInitializingShard(final ClusterState state, IndexService indexService, final ShardRouting shardRouting) {
- final RoutingTable routingTable = state.routingTable();
- final DiscoveryNodes nodes = state.getNodes();
- final int shardId = shardRouting.id();
+ private void createOrUpdateShards(final ClusterState state) {
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
+ if (localRoutingNode == null) {
+ return;
+ }
- if (indexService.hasShard(shardId)) {
- IndexShard indexShard = indexService.getShard(shardId);
- if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) {
- // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting
- // for master to confirm a shard started message (either master failover, or a cluster event before
- // we managed to tell the master we started), mark us as started
- if (logger.isTraceEnabled()) {
- logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started to {}",
- indexShard.shardId(), indexShard.state(), nodes.getMasterNode());
- }
- if (nodes.getMasterNode() != null) {
- shardStateAction.shardStarted(shardRouting,
- "master " + nodes.getMasterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started",
- SHARD_STATE_ACTION_LISTENER);
- }
- return;
- } else {
- if (indexShard.ignoreRecoveryAttempt()) {
- logger.trace("ignoring recovery instruction for an existing shard {} (shard state: [{}])", indexShard.shardId(), indexShard.state());
- return;
+ DiscoveryNodes nodes = state.nodes();
+ RoutingTable routingTable = state.routingTable();
+
+ for (final ShardRouting shardRouting : localRoutingNode) {
+ ShardId shardId = shardRouting.shardId();
+ if (failedShardsCache.containsKey(shardId) == false) {
+ AllocatedIndex<? extends Shard> indexService = indicesService.indexService(shardId.getIndex());
+ assert indexService != null : "index " + shardId.getIndex() + " should have been created by createIndices";
+ Shard shard = indexService.getShardOrNull(shardId.id());
+ if (shard == null) {
+ assert shardRouting.initializing() : shardRouting + " should have been removed by failMissingShards";
+ createShard(nodes, routingTable, shardRouting, indexService);
+ } else {
+ updateShard(nodes, shardRouting, shard);
}
}
}
+ }
+
+ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardRouting shardRouting,
+ AllocatedIndex<? extends Shard> indexService) {
+ assert shardRouting.initializing() : "only allow shard creation for initializing shard but was " + shardRouting;
- // if we're in peer recovery, try to find out the source node now so in case it fails, we will not create the index shard
DiscoveryNode sourceNode = null;
if (shardRouting.isPeerRecovery()) {
sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, shardRouting);
@@ -595,50 +516,49 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
- (type, mapping) -> {
- try {
- nodeServicesProvider.getClient().admin().indices().preparePutMapping()
- .setConcreteIndex(indexService.index()) // concrete index - no name clash, it uses uuid
- .setType(type)
- .setSource(mapping.source().string())
- .get();
- } catch (IOException ex) {
- throw new ElasticsearchException("failed to stringify mapping source", ex);
- }
- }, indicesService);
+ final IndexShardState state = shard.state();
+ if (shardRouting.initializing() && (state == IndexShardState.STARTED || state == IndexShardState.POST_RECOVERY)) {
+ // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting
+ // for master to confirm a shard started message (either master failover, or a cluster event before
+ // we managed to tell the master we started), mark us as started
+ if (logger.isTraceEnabled()) {
+ logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started to {}",
+ shardRouting.shardId(), state, nodes.getMasterNode());
+ }
+ if (nodes.getMasterNode() != null) {
+ shardStateAction.shardStarted(shardRouting, "master " + nodes.getMasterNode() +
+ " marked shard as initializing, but shard state is [" + state + "], mark shard as started",
+ SHARD_STATE_ACTION_LISTENER);
+ }
+ }
}
/**
@@ -646,7 +566,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
+ AllocatedIndex<? extends Shard> indexService = indicesService.indexService(shardRouting.shardId().getIndex());
+ if (indexService != null) {
+ indexService.removeShard(shardRouting.shardId().id(), message);
}
+ } catch (ShardNotFoundException e) {
+ // the node got closed on us, ignore it
+ } catch (Throwable e1) {
+ logger.warn("[{}][{}] failed to remove shard after failure ([{}])", e1, shardRouting.getIndexName(), shardRouting.getId(),
+ message);
}
if (sendShardFailure) {
sendFailShard(shardRouting, message, failure);
@@ -760,23 +683,156 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
private class FailedShardHandler implements Callback<IndexShard.ShardFailure> {
@Override
public void handle(final IndexShard.ShardFailure shardFailure) {
- final IndexService indexService = indicesService.indexService(shardFailure.routing.shardId().getIndex());
final ShardRouting shardRouting = shardFailure.routing;
threadPool.generic().execute(() -> {
- synchronized (mutex) {
- failAndRemoveShard(shardRouting, indexService, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause);
+ synchronized (IndicesClusterStateService.this) {
+ failAndRemoveShard(shardRouting, true, "shard failure, reason [" + shardFailure.reason + "]", shardFailure.cause);
}
});
}
}
+
+ public interface Shard {
+
+ /**
+ * Returns the shard id of this shard.
+ */
+ ShardId shardId();
+
+ /**
+ * Returns the latest cluster routing entry received with this shard.
+ */
+ ShardRouting routingEntry();
+
+ /**
+ * Returns the latest internal shard state.
+ */
+ IndexShardState state();
+
+ /**
+ * Returns the recovery state associated with this shard.
+ */
+ RecoveryState recoveryState();
+
+ /**
+ * Updates the shard's routing entry. This mutates the shard's internal state depending
+ * on the changes that get introduced by the new routing value. This method will persist shard level metadata.
+ *
+ * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted
+ * @throws IOException if shard state could not be persisted
+ */
+ void updateRoutingEntry(ShardRouting shardRouting) throws IOException;
+ }
+
+ public interface AllocatedIndex<T extends Shard> extends Iterable<T>, IndexComponent {
+
+ /**
+ * Returns the index settings of this index.
+ */
+ IndexSettings getIndexSettings();
+
+ /**
+ * Updates the meta data of this index. Changes become visible through {@link #getIndexSettings()}
+ */
+ void updateMetaData(IndexMetaData indexMetaData);
+
+ /**
+ * Checks if index requires refresh from master.
+ */
+ boolean updateMapping(IndexMetaData indexMetaData) throws IOException;
+
+ /**
+ * Returns shard with given id.
+ */
+ @Nullable T getShardOrNull(int shardId);
+
+ /**
+ * Removes shard with given id.
+ */
+ void removeShard(int shardId, String message);
+ }
+
+ public interface AllocatedIndices<T extends Shard, U extends AllocatedIndex<T>> extends Iterable<U> {
+
+ /**
+ * Creates a new {@link IndexService} for the given metadata.
+ * @param indexMetaData the index metadata to create the index for
+ * @param builtInIndexListener a list of built-in lifecycle {@link IndexEventListener} that should be used alongside
+ * the per-index listeners
+ * @throws IndexAlreadyExistsException if the index already exists.
+ */
+ U createIndex(NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData,
+ List<IndexEventListener> builtInIndexListener) throws IOException;
+
+ /**
+ * Verify that the contents on disk for the given index is deleted; if not, delete the contents.
+ * This method assumes that an index is already deleted in the cluster state and/or explicitly
+ * through index tombstones.
+ * @param index {@code Index} to make sure its deleted from disk
+ * @param clusterState {@code ClusterState} to ensure the index is not part of it
+ * @return IndexMetaData for the index loaded from disk
+ */
+ IndexMetaData verifyIndexIsDeleted(Index index, ClusterState clusterState);
+
+ /**
+ * Deletes the given index. Persistent parts of the index
+ * like the shards files, state and transaction logs are removed once all resources are released.
+ *
+ * Equivalent to {@link #removeIndex(Index, String)} but fires
+ * different lifecycle events to ensure pending resources of this index are immediately removed.
+ * @param index the index to delete
+ * @param reason the high level reason causing this delete
+ */
+ void deleteIndex(Index index, String reason);
+
+ /**
+ * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index
+ * but does not deal with in-memory structures. For those call {@link #deleteIndex(Index, String)}
+ */
+ void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState);
+
+ /**
+ * Removes the given index from this service and releases all associated resources. Persistent parts of the index
+ * like the shards files, state and transaction logs are kept around in the case of a disaster recovery.
+ * @param index the index to remove
+ * @param reason the high level reason causing this removal
+ */
+ void removeIndex(Index index, String reason);
+
+ /**
+ * Returns an IndexService for the specified index if it exists, otherwise returns null.
+ */
+ @Nullable U indexService(Index index);
+
+ /**
+ * Creates a shard for the specified shard routing and starts recovery.
+ */
+ T createShard(ShardRouting shardRouting, RecoveryState recoveryState, RecoveryTargetService recoveryTargetService,
+ RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+ NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure) throws IOException;
+
+ /**
+ * Returns the shard for the specified id if it exists, otherwise returns null.
+ */
+ default T getShardOrNull(ShardId shardId) {
+ U indexRef = indexService(shardId.getIndex());
+ if (indexRef != null) {
+ return indexRef.getShardOrNull(shardId.id());
+ }
+ return null;
+ }
+
+ void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) throws IOException, InterruptedException;
+ }
}
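Note on the refactoring above: cluster-state application now goes through the narrow Shard / AllocatedIndex / AllocatedIndices interfaces instead of the concrete IndexService and IndexShard classes, which makes passes such as removeUnallocatedIndices testable against in-memory fakes. Below is a minimal, self-contained sketch of that idea; every type and name is a hypothetical stand-in, not one of the Elasticsearch interfaces.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    interface FakeAllocatedIndex {
        String indexName();
    }

    interface FakeAllocatedIndices extends Iterable<FakeAllocatedIndex> {
        void removeIndex(String indexName, String reason);
    }

    final class RemoveUnallocatedIndicesPass {
        /** Mirrors removeUnallocatedIndices(): drop every index that has no shard assigned to this node. */
        static void apply(FakeAllocatedIndices indicesService, Set<String> indicesWithLocalShards) {
            List<String> toRemove = new ArrayList<>();
            for (FakeAllocatedIndex index : indicesService) {
                if (indicesWithLocalShards.contains(index.indexName()) == false) {
                    toRemove.add(index.indexName()); // collect first so the backing iterator is not disturbed
                }
            }
            for (String indexName : toRemove) {
                indicesService.removeIndex(indexName, "removing index (no shards allocated)");
            }
        }
    }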
diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
index f3cb76199dc..16e4d62a721 100644
--- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
+++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
@@ -114,11 +114,9 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
final ClusterState state = clusterService.state();
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
- int totalNumberOfShards = 0;
int numberOfShards = 0;
for (Index index : concreteIndices) {
final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
- totalNumberOfShards += indexMetaData.getTotalNumberOfShards();
numberOfShards += indexMetaData.getNumberOfShards();
results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
@@ -127,7 +125,6 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
listener.onResponse(new SyncedFlushResponse(results));
return;
}
- final int finalTotalNumberOfShards = totalNumberOfShards;
final CountDown countDown = new CountDown(numberOfShards);
for (final Index concreteIndex : concreteIndices) {
@@ -136,7 +133,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
final int indexNumberOfShards = indexMetaData.getNumberOfShards();
for (int shard = 0; shard < indexNumberOfShards; shard++) {
final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
- attemptSyncedFlush(shardId, new ActionListener<ShardsSyncedFlushResult>() {
+ innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {
@Override
public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
results.get(index).add(syncedFlushResult);
@@ -148,7 +145,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public void onFailure(Throwable e) {
logger.debug("{} unexpected error while executing synced flush", shardId);
- results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage()));
+ final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
+ results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
if (countDown.countDown()) {
listener.onResponse(new SyncedFlushResponse(results));
}
@@ -185,8 +183,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
* Synced flush is a best effort operation. The sync id may be written on all, some or none of the copies.
**/
public void attemptSyncedFlush(final ShardId shardId, final ActionListener<ShardsSyncedFlushResult> actionListener) {
+ innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener);
+ }
+
+ private void innerAttemptSyncedFlush(final ShardId shardId, final ClusterState state, final ActionListener<ShardsSyncedFlushResult> actionListener) {
try {
- final ClusterState state = clusterService.state();
final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state);
final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
final int totalShards = shardRoutingTable.getSize();
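Note on the hunks above: the failure path now sizes a ShardsSyncedFlushResult by the copies of the one affected shard (number of replicas plus one) rather than a total accumulated over all target indices, and the per-shard listeners are still aggregated with a count-down. The snippet below is a simplified stand-in for that count-down pattern, not the org.elasticsearch.common.util.concurrent.CountDown class; the usage names are hypothetical.

    import java.util.concurrent.atomic.AtomicInteger;

    final class SimpleCountDown {
        private final AtomicInteger remaining;

        SimpleCountDown(int count) {
            this.remaining = new AtomicInteger(count);
        }

        /** Returns true exactly once, for the caller that counts down the last outstanding shard. */
        boolean countDown() {
            return remaining.decrementAndGet() == 0;
        }
    }

    // Usage sketch: one countDown() per shard result; whoever gets 'true' sends the aggregated response.
    // SimpleCountDown countDown = new SimpleCountDown(numberOfShards);
    // if (countDown.countDown()) { /* listener.onResponse(new SyncedFlushResponse(results)) */ }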
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
index 8fd08d9f8fb..49cdb737ed3 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryFileChunkRequest.java
@@ -20,7 +20,6 @@
package org.elasticsearch.indices.recovery;
import org.apache.lucene.util.Version;
-import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -76,7 +75,6 @@ public final class RecoveryFileChunkRequest extends TransportRequest {
return position;
}
- @Nullable
public String checksum() {
return metaData.checksum();
}
@@ -105,11 +103,10 @@ public final class RecoveryFileChunkRequest extends TransportRequest {
String name = in.readString();
position = in.readVLong();
long length = in.readVLong();
- String checksum = in.readOptionalString();
+ String checksum = in.readString();
content = in.readBytesReference();
- Version writtenBy = null;
- String versionString = in.readOptionalString();
- writtenBy = Lucene.parseVersionLenient(versionString, null);
+ Version writtenBy = Lucene.parseVersionLenient(in.readString(), null);
+ assert writtenBy != null;
metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
lastChunk = in.readBoolean();
totalTranslogOps = in.readVInt();
@@ -124,9 +121,9 @@ public final class RecoveryFileChunkRequest extends TransportRequest {
out.writeString(metaData.name());
out.writeVLong(position);
out.writeVLong(metaData.length());
- out.writeOptionalString(metaData.checksum());
+ out.writeString(metaData.checksum());
out.writeBytesReference(content);
- out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString());
+ out.writeString(metaData.writtenBy().toString());
out.writeBoolean(lastChunk);
out.writeVInt(totalTranslogOps);
out.writeLong(sourceThrottleTimeInNanos);
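Note on the hunk above: checksum and writtenBy become required wire fields (writeString instead of writeOptionalString), so the reader no longer needs a null branch. Below is a self-contained round-trip sketch of that contract using plain java.io streams as a stand-in for Elasticsearch's StreamInput/StreamOutput; the field values are made up.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class RequiredFieldRoundTrip {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeUTF("chunk-checksum"); // required: the writer fails fast on null instead of encoding "absent"
                out.writeUTF("6.0.0");          // required: the Lucene version the file was written by
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                String checksum = in.readUTF(); // the reader no longer has to handle a missing checksum
                String writtenBy = in.readUTF();
                System.out.println(checksum + " / " + writtenBy);
            }
        }
    }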
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
index 23d390dfcfe..04900705e0a 100644
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
+++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
@@ -115,4 +115,5 @@ public abstract class BlobStoreFormat<T extends ToXContent> {
}
}
+
}
diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java
index 57841466a62..b7a6d714e63 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchService.java
@@ -826,9 +826,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
if (context.scrollContext() == null) {
throw new SearchContextException(context, "`slice` cannot be used outside of a scroll context");
}
- context.sliceFilter(source.slice().toFilter(queryShardContext,
- context.shardTarget().getShardId().getId(),
- queryShardContext.getIndexSettings().getNumberOfShards()));
+ context.sliceBuilder(source.slice());
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
index 38fb0e6a431..2d8ea318fd6 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java
@@ -27,6 +27,8 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.AggregationPath;
import org.elasticsearch.search.aggregations.support.AggregationPath.PathElement;
+import org.elasticsearch.search.profile.Profilers;
+import org.elasticsearch.search.profile.aggregation.ProfilingAggregator;
import java.io.IOException;
import java.util.ArrayList;
@@ -81,7 +83,12 @@ public class AggregatorFactories {
// propagate the fact that only bucket 0 will be collected with single-bucket
// aggs
final boolean collectsFromSingleBucket = false;
- aggregators[i] = factories[i].create(parent, collectsFromSingleBucket);
+ Aggregator factory = factories[i].create(parent, collectsFromSingleBucket);
+ Profilers profilers = factory.context().searchContext().getProfilers();
+ if (profilers != null) {
+ factory = new ProfilingAggregator(factory, profilers.getAggregationProfiler());
+ }
+ aggregators[i] = factory;
}
return aggregators;
}
@@ -92,7 +99,12 @@ public class AggregatorFactories {
for (int i = 0; i < factories.length; i++) {
// top-level aggs only get called with bucket 0
final boolean collectsFromSingleBucket = true;
- aggregators[i] = factories[i].create(null, collectsFromSingleBucket);
+ Aggregator factory = factories[i].create(null, collectsFromSingleBucket);
+ Profilers profilers = factory.context().searchContext().getProfilers();
+ if (profilers != null) {
+ factory = new ProfilingAggregator(factory, profilers.getAggregationProfiler());
+ }
+ aggregators[i] = factory;
}
return aggregators;
}
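Note on the two hunks above: each freshly created Aggregator is wrapped in a ProfilingAggregator when profiling is enabled, which is a plain decorator. The sketch below shows that wrap-if-enabled pattern with hypothetical minimal types, not the Elasticsearch classes.

    interface Agg {
        void collect(int doc);
    }

    final class TimingAgg implements Agg {
        private final Agg delegate;
        private long nanos;

        TimingAgg(Agg delegate) {
            this.delegate = delegate;
        }

        @Override
        public void collect(int doc) {
            long start = System.nanoTime();
            delegate.collect(doc);      // the delegate does the real work
            nanos += System.nanoTime() - start;
        }

        long totalNanos() {
            return nanos;
        }
    }

    final class AggWrapping {
        /** Mirrors the hunks: wrap only when a profiler is present, otherwise return the aggregator unchanged. */
        static Agg maybeProfile(Agg created, boolean profilingEnabled) {
            return profilingEnabled ? new TimingAgg(created) : created;
        }
    }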
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java
index 3fced89a014..854838b7441 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java
@@ -28,13 +28,139 @@ import org.elasticsearch.search.aggregations.InternalAggregation.Type;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
-
import java.io.IOException;
import java.util.List;
import java.util.Map;
public abstract class AggregatorFactory<AF extends AggregatorFactory<AF>> {
+ public static final class MultiBucketAggregatorWrapper extends Aggregator {
+ private final BigArrays bigArrays;
+ private final Aggregator parent;
+ private final AggregatorFactory<?> factory;
+ private final Aggregator first;
+ ObjectArray<Aggregator> aggregators;
+ ObjectArray<LeafBucketCollector> collectors;
+
+ MultiBucketAggregatorWrapper(BigArrays bigArrays, AggregationContext context, Aggregator parent, AggregatorFactory<?> factory,
+ Aggregator first) {
+ this.bigArrays = bigArrays;
+ this.parent = parent;
+ this.factory = factory;
+ this.first = first;
+ context.searchContext().addReleasable(this, Lifetime.PHASE);
+ aggregators = bigArrays.newObjectArray(1);
+ aggregators.set(0, first);
+ collectors = bigArrays.newObjectArray(1);
+ }
+
+ public Class<?> getWrappedClass() {
+ return first.getClass();
+ }
+
+ @Override
+ public String name() {
+ return first.name();
+ }
+
+ @Override
+ public AggregationContext context() {
+ return first.context();
+ }
+
+ @Override
+ public Aggregator parent() {
+ return first.parent();
+ }
+
+ @Override
+ public boolean needsScores() {
+ return first.needsScores();
+ }
+
+ @Override
+ public Aggregator subAggregator(String name) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void preCollection() throws IOException {
+ for (long i = 0; i < aggregators.size(); ++i) {
+ final Aggregator aggregator = aggregators.get(i);
+ if (aggregator != null) {
+ aggregator.preCollection();
+ }
+ }
+ }
+
+ @Override
+ public void postCollection() throws IOException {
+ for (long i = 0; i < aggregators.size(); ++i) {
+ final Aggregator aggregator = aggregators.get(i);
+ if (aggregator != null) {
+ aggregator.postCollection();
+ }
+ }
+ }
+
+ @Override
+ public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx) {
+ for (long i = 0; i < collectors.size(); ++i) {
+ collectors.set(i, null);
+ }
+ return new LeafBucketCollector() {
+ Scorer scorer;
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ this.scorer = scorer;
+ }
+
+ @Override
+ public void collect(int doc, long bucket) throws IOException {
+ collectors = bigArrays.grow(collectors, bucket + 1);
+
+ LeafBucketCollector collector = collectors.get(bucket);
+ if (collector == null) {
+ aggregators = bigArrays.grow(aggregators, bucket + 1);
+ Aggregator aggregator = aggregators.get(bucket);
+ if (aggregator == null) {
+ aggregator = factory.create(parent, true);
+ aggregator.preCollection();
+ aggregators.set(bucket, aggregator);
+ }
+ collector = aggregator.getLeafCollector(ctx);
+ collector.setScorer(scorer);
+ collectors.set(bucket, collector);
+ }
+ collector.collect(doc, 0);
+ }
+
+ };
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long bucket) throws IOException {
+ if (bucket < aggregators.size()) {
+ Aggregator aggregator = aggregators.get(bucket);
+ if (aggregator != null) {
+ return aggregator.buildAggregation(0);
+ }
+ }
+ return buildEmptyAggregation();
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return first.buildEmptyAggregation();
+ }
+
+ @Override
+ public void close() {
+ Releasables.close(aggregators, collectors);
+ }
+ }
+
protected final String name;
protected final Type type;
protected final AggregatorFactory> parent;
@@ -112,120 +238,7 @@ public abstract class AggregatorFactory<AF extends AggregatorFactory<AF>> {
final Aggregator parent) throws IOException {
final Aggregator first = factory.create(parent, true);
final BigArrays bigArrays = context.bigArrays();
- return new Aggregator() {
-
- ObjectArray<Aggregator> aggregators;
- ObjectArray<LeafBucketCollector> collectors;
-
- {
- context.searchContext().addReleasable(this, Lifetime.PHASE);
- aggregators = bigArrays.newObjectArray(1);
- aggregators.set(0, first);
- collectors = bigArrays.newObjectArray(1);
- }
-
- @Override
- public String name() {
- return first.name();
- }
-
- @Override
- public AggregationContext context() {
- return first.context();
- }
-
- @Override
- public Aggregator parent() {
- return first.parent();
- }
-
- @Override
- public boolean needsScores() {
- return first.needsScores();
- }
-
- @Override
- public Aggregator subAggregator(String name) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public void preCollection() throws IOException {
- for (long i = 0; i < aggregators.size(); ++i) {
- final Aggregator aggregator = aggregators.get(i);
- if (aggregator != null) {
- aggregator.preCollection();
- }
- }
- }
-
- @Override
- public void postCollection() throws IOException {
- for (long i = 0; i < aggregators.size(); ++i) {
- final Aggregator aggregator = aggregators.get(i);
- if (aggregator != null) {
- aggregator.postCollection();
- }
- }
- }
-
- @Override
- public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx) {
- for (long i = 0; i < collectors.size(); ++i) {
- collectors.set(i, null);
- }
- return new LeafBucketCollector() {
- Scorer scorer;
-
- @Override
- public void setScorer(Scorer scorer) throws IOException {
- this.scorer = scorer;
- }
-
- @Override
- public void collect(int doc, long bucket) throws IOException {
- aggregators = bigArrays.grow(aggregators, bucket + 1);
- collectors = bigArrays.grow(collectors, bucket + 1);
-
- LeafBucketCollector collector = collectors.get(bucket);
- if (collector == null) {
- Aggregator aggregator = aggregators.get(bucket);
- if (aggregator == null) {
- aggregator = factory.create(parent, true);
- aggregator.preCollection();
- aggregators.set(bucket, aggregator);
- }
- collector = aggregator.getLeafCollector(ctx);
- collector.setScorer(scorer);
- collectors.set(bucket, collector);
- }
- collector.collect(doc, 0);
- }
-
- };
- }
-
- @Override
- public InternalAggregation buildAggregation(long bucket) throws IOException {
- if (bucket < aggregators.size()) {
- Aggregator aggregator = aggregators.get(bucket);
- if (aggregator != null) {
- return aggregator.buildAggregation(0);
- }
- }
- return buildEmptyAggregation();
- }
-
- @Override
- public InternalAggregation buildEmptyAggregation() {
- return first.buildEmptyAggregation();
- }
-
- @Override
- public void close() {
- Releasables.close(aggregators, collectors);
- }
- };
+ return new MultiBucketAggregatorWrapper(bigArrays, context, parent, factory, first);
}
}
\ No newline at end of file
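Note on the refactoring above: the anonymous Aggregator is extracted into the named MultiBucketAggregatorWrapper so the profiler can recognize and unwrap it; its core mechanism is creating one delegate aggregator per bucket ordinal lazily, on the first collect for that ordinal. The sketch below shows that lazy per-ordinal storage with a growable list in place of BigArrays; the names are hypothetical.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Supplier;

    final class PerBucketLazy<T> {
        private final List<T> slots = new ArrayList<>();
        private final Supplier<T> factory;

        PerBucketLazy(Supplier<T> factory) {
            this.factory = factory;
        }

        /** Returns the instance for this bucket ordinal, creating it on first access. */
        T get(int bucket) {
            while (slots.size() <= bucket) {
                slots.add(null);            // grow, like bigArrays.grow(aggregators, bucket + 1)
            }
            T value = slots.get(bucket);
            if (value == null) {
                value = factory.get();      // like factory.create(parent, true) followed by preCollection()
                slots.set(bucket, value);
            }
            return value;
        }
    }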
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java
index b5ed80a0022..0793bacf722 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java
@@ -35,7 +35,6 @@ import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.ObjectArray;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
-import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java
index 192ad6c28dc..ec838e7dd41 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/GeoCentroidAggregator.java
@@ -20,7 +20,7 @@
package org.elasticsearch.search.aggregations.metrics.geocentroid;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.spatial.util.GeoEncodingUtils;
+import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
@@ -82,9 +82,9 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
counts.increment(bucket, valueCount);
// get the previous GeoPoint if a moving avg was computed
if (prevCounts > 0) {
- final GeoPoint centroid = GeoPoint.fromIndexLong(centroids.get(bucket));
- pt[0] = centroid.lon();
- pt[1] = centroid.lat();
+ final long mortonCode = centroids.get(bucket);
+ pt[0] = GeoPointField.decodeLongitude(mortonCode);
+ pt[1] = GeoPointField.decodeLatitude(mortonCode);
}
// update the moving average
for (int i = 0; i < valueCount; ++i) {
@@ -92,7 +92,9 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
pt[0] = pt[0] + (value.getLon() - pt[0]) / ++prevCounts;
pt[1] = pt[1] + (value.getLat() - pt[1]) / prevCounts;
}
- centroids.set(bucket, GeoEncodingUtils.mortonHash(pt[1], pt[0]));
+ // TODO: we do not need to interleave the lat and lon bits here
+ // should we just store them contiguously?
+ centroids.set(bucket, GeoPointField.encodeLatLon(pt[1], pt[0]));
}
}
};
@@ -104,8 +106,10 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
return buildEmptyAggregation();
}
final long bucketCount = counts.get(bucket);
- final GeoPoint bucketCentroid = (bucketCount > 0) ? GeoPoint.fromIndexLong(centroids.get(bucket)) :
- new GeoPoint(Double.NaN, Double.NaN);
+ final long mortonCode = centroids.get(bucket);
+ final GeoPoint bucketCentroid = (bucketCount > 0)
+ ? new GeoPoint(GeoPointField.decodeLatitude(mortonCode), GeoPointField.decodeLongitude(mortonCode))
+ : null;
return new InternalGeoCentroid(name, bucketCentroid , bucketCount, pipelineAggregators(), metaData());
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
index 2798169b699..2bb3056ca66 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroid.java
@@ -19,7 +19,7 @@
package org.elasticsearch.search.aggregations.metrics.geocentroid;
-import org.apache.lucene.spatial.util.GeoEncodingUtils;
+import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -61,6 +61,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
public InternalGeoCentroid(String name, GeoPoint centroid, long count, List<PipelineAggregator>
pipelineAggregators, Map<String, Object> metaData) {
super(name, pipelineAggregators, metaData);
+ assert (centroid == null) == (count == 0);
this.centroid = centroid;
assert count >= 0;
this.count = count;
@@ -68,7 +69,7 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
@Override
public GeoPoint centroid() {
- return (centroid == null || Double.isNaN(centroid.lon()) ? null : centroid);
+ return centroid;
}
@Override
@@ -128,7 +129,8 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
protected void doReadFrom(StreamInput in) throws IOException {
count = in.readVLong();
if (in.readBoolean()) {
- centroid = GeoPoint.fromIndexLong(in.readLong());
+ final long hash = in.readLong();
+ centroid = new GeoPoint(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));
} else {
centroid = null;
}
@@ -139,7 +141,8 @@ public class InternalGeoCentroid extends InternalMetricsAggregation implements G
out.writeVLong(count);
if (centroid != null) {
out.writeBoolean(true);
- out.writeLong(GeoEncodingUtils.mortonHash(centroid.lat(), centroid.lon()));
+ // should we just write lat and lon separately?
+ out.writeLong(GeoPointField.encodeLatLon(centroid.lat(), centroid.lon()));
} else {
out.writeBoolean(false);
}
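Note on the serialization above: a centroid is only present when the count is greater than zero, and the stream keeps the usual presence-flag layout, a boolean followed by the encoded point only when the flag is true. Below is a minimal stand-alone sketch of that optional-field pattern using plain java.io and a made-up long encoding; it is an illustration, not the Elasticsearch stream format.

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class OptionalCentroidCodec {
        static void write(DataOutputStream out, long count, Long encodedCentroid) throws IOException {
            out.writeLong(count);
            out.writeBoolean(encodedCentroid != null);  // presence flag
            if (encodedCentroid != null) {
                out.writeLong(encodedCentroid);         // value only when present
            }
        }

        static Long read(DataInputStream in) throws IOException {
            long count = in.readLong();                 // kept for symmetry with write()
            boolean present = in.readBoolean();
            return present ? in.readLong() : null;      // null centroid when no points were aggregated
        }
    }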
diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
index 36fe562a568..b2ce044e4fc 100644
--- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
+++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
@@ -51,8 +51,9 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
-import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.SearchProfileShardResults;
+import org.elasticsearch.search.profile.query.QueryProfileShardResult;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.suggest.Suggest;
@@ -407,7 +408,7 @@ public class SearchPhaseController extends AbstractComponent {
//Collect profile results
SearchProfileShardResults shardResults = null;
if (!queryResults.isEmpty() && firstResult.profileResults() != null) {
- Map<String, List<ProfileShardResult>> profileResults = new HashMap<>(queryResults.size());
+ Map<String, ProfileShardResult> profileResults = new HashMap<>(queryResults.size());
for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
String key = entry.value.queryResult().shardTarget().toString();
profileResults.put(key, entry.value.queryResult().profileResults());
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java
index 31921457207..f34da5301d5 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java
@@ -175,7 +175,7 @@ public final class InnerHitsContext {
@Override
public boolean equals(Object obj) {
- if (super.equals(obj) == false) {
+ if (sameClassAs(obj) == false) {
return false;
}
NestedChildrenQuery other = (NestedChildrenQuery) obj;
@@ -187,7 +187,7 @@ public final class InnerHitsContext {
@Override
public int hashCode() {
- int hash = super.hashCode();
+ int hash = classHash();
hash = 31 * hash + parentFilter.hashCode();
hash = 31 * hash + childFilter.hashCode();
hash = 31 * hash + docId;
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
index df1007ebc71..50e91e082cd 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
@@ -122,7 +122,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
weight = super.createWeight(query, needsScores);
} finally {
profile.stopAndRecordTime();
- profiler.pollLastQuery();
+ profiler.pollLastElement();
}
return new ProfileWeight(query, weight, profile);
} else {
diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
index 30e994b7656..06df04db8a0 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
@@ -68,6 +68,7 @@ import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -116,7 +117,7 @@ public class DefaultSearchContext extends SearchContext {
private boolean trackScores = false; // when sorting, track scores as well...
private FieldDoc searchAfter;
// filter for sliced scroll
- private Query sliceFilter;
+ private SliceBuilder sliceBuilder;
/**
* The original query as sent by the user without the types and aliases
@@ -212,13 +213,23 @@ public class DefaultSearchContext extends SearchContext {
if (rescoreContext.window() > maxWindow) {
throw new QueryPhaseExecutionException(this, "Rescore window [" + rescoreContext.window() + "] is too large. It must "
+ "be less than [" + maxWindow + "]. This prevents allocating massive heaps for storing the results to be "
- + "rescored. This limit can be set by chaining the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
+ + "rescored. This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey()
+ "] index level setting.");
}
}
}
+ if (sliceBuilder != null) {
+ int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll();
+ int numSlices = sliceBuilder.getMax();
+ if (numSlices > sliceLimit) {
+ throw new QueryPhaseExecutionException(this, "The number of slices [" + numSlices + "] is too large. It must "
+ + "be less than [" + sliceLimit + "]. This limit can be set by changing the [" +
+ IndexSettings.MAX_SLICES_PER_SCROLL.getKey() + "] index level setting.");
+ }
+ }
+
// initialize the filtering alias based on the provided filters
aliasFilter = indexService.aliasFilter(queryShardContext, request.filteringAliases());
@@ -257,9 +268,11 @@ public class DefaultSearchContext extends SearchContext {
@Nullable
public Query searchFilter(String[] types) {
Query typesFilter = createSearchFilter(types, aliasFilter, mapperService().hasNested());
- if (sliceFilter == null) {
+ if (sliceBuilder == null) {
return typesFilter;
}
+ Query sliceFilter = sliceBuilder.toFilter(queryShardContext, shardTarget().getShardId().getId(),
+ queryShardContext.getIndexSettings().getNumberOfShards());
if (typesFilter == null) {
return sliceFilter;
}
@@ -562,8 +575,8 @@ public class DefaultSearchContext extends SearchContext {
return searchAfter;
}
- public SearchContext sliceFilter(Query filter) {
- this.sliceFilter = filter;
+ public SearchContext sliceBuilder(SliceBuilder sliceBuilder) {
+ this.sliceBuilder = sliceBuilder;
return this;
}
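Note on the change above: the SliceBuilder is stored as-is, the slice count is validated against the index-level limit during preProcess, and the actual filter is only built later in searchFilter(). The sketch below shows that validate-then-build-on-demand flow with hypothetical stand-in types and an illustrative hash-based slice filter.

    import java.util.function.IntPredicate;

    final class SliceSketch {
        private final int numSlices; // e.g. what SliceBuilder.getMax() would report

        SliceSketch(int numSlices) {
            this.numSlices = numSlices;
        }

        /** Mirrors the preProcess check: reject the request before any filter is built. */
        void validate(int maxSlicesPerScroll) {
            if (numSlices > maxSlicesPerScroll) {
                throw new IllegalArgumentException("The number of slices [" + numSlices + "] is too large. "
                    + "It must be less than [" + maxSlicesPerScroll + "].");
            }
        }

        /** Built lazily, as in searchFilter(): a document belongs to the slice its hash maps to. */
        IntPredicate toFilter(int sliceId) {
            return docHash -> Math.floorMod(docHash, numSlices) == sliceId;
        }
    }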
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
index 9c33889dc9c..26410cc9680 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
@@ -28,13 +28,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
import java.util.Collections;
-import java.util.List;
import java.util.Map;
import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits;
@@ -99,7 +98,7 @@ public class InternalSearchResponse implements Streamable, ToXContent {
*
* @return Profile results
*/
- public Map<String, List<ProfileShardResult>> profile() {
+ public Map<String, ProfileShardResult> profile() {
if (profileResults == null) {
return Collections.emptyMap();
}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java
new file mode 100644
index 00000000000..31cb3c21237
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.search.profile.query.QueryProfileBreakdown;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Deque;
+import java.util.List;
+import java.util.Map;
+
+public abstract class AbstractInternalProfileTree<PB extends AbstractProfileBreakdown<?>, E> {
+
+ protected ArrayList<PB> timings;
+ /** Maps the Query to it's list of children. This is basically the dependency tree */
+ protected ArrayList<ArrayList<Integer>> tree;
+ /** A list of the original queries, keyed by index position */
+ protected ArrayList<E> elements;
+ /** A list of top-level "roots". Each root can have its own tree of profiles */
+ protected ArrayList<Integer> roots;
+ /** A temporary stack used to record where we are in the dependency tree. */
+ protected Deque<Integer> stack;
+ private int currentToken = 0;
+
+ public AbstractInternalProfileTree() {
+ timings = new ArrayList<>(10);
+ stack = new ArrayDeque<>(10);
+ tree = new ArrayList<>(10);
+ elements = new ArrayList<>(10);
+ roots = new ArrayList<>(10);
+ }
+
+ /**
+ * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those
+ * that are past the rewrite phase and are now being wrapped by createWeight() ) follow
+ * a recursive progression. We can track the dependency tree by a simple stack
+ *
+ * The only hiccup is that the first scoring query will be identical to the last rewritten
+ * query, so we need to take special care to fix that
+ *
+ * @param query The scoring query we wish to profile
+ * @return A ProfileBreakdown for this query
+ */
+ public PB getProfileBreakdown(E query) {
+ int token = currentToken;
+
+ boolean stackEmpty = stack.isEmpty();
+
+ // If the stack is empty, we are a new root query
+ if (stackEmpty) {
+
+ // We couldn't find a rewritten query to attach to, so just add it as a
+ // top-level root. This is just a precaution: it really shouldn't happen.
+ // We would only get here if a top-level query that never rewrites for some reason.
+ roots.add(token);
+
+ // Increment the token since we are adding a new node, but notably, do not
+ // updateParent() because this was added as a root
+ currentToken += 1;
+ stack.add(token);
+
+ return addDependencyNode(query, token);
+ }
+
+ updateParent(token);
+
+ // Increment the token since we are adding a new node
+ currentToken += 1;
+ stack.add(token);
+
+ return addDependencyNode(query, token);
+ }
+
+ /**
+ * Helper method to add a new node to the dependency tree.
+ *
+ * Initializes a new list in the dependency tree, saves the query and
+ * generates a new {@link QueryProfileBreakdown} to track the timings of
+ * this query
+ *
+ * @param element
+ * The element to profile
+ * @param token
+ * The assigned token for this element
+ * @return A ProfileBreakdown to profile this element
+ */
+ private PB addDependencyNode(E element, int token) {
+
+ // Add a new slot in the dependency tree
+ tree.add(new ArrayList<>(5));
+
+ // Save our query for lookup later
+ elements.add(element);
+
+ PB queryTimings = createProfileBreakdown();
+ timings.add(token, queryTimings);
+ return queryTimings;
+ }
+
+ protected abstract PB createProfileBreakdown();
+
+ /**
+ * Removes the last (e.g. most recent) value on the stack
+ */
+ public void pollLast() {
+ stack.pollLast();
+ }
+
+ /**
+ * After the query has been run and profiled, we need to merge the flat timing map
+ * with the dependency graph to build a data structure that mirrors the original
+ * query tree
+ *
+ * @return a hierarchical representation of the profiled query tree
+ */
+ public List<ProfileResult> getTree() {
+ ArrayList<ProfileResult> results = new ArrayList<>(5);
+ for (Integer root : roots) {
+ results.add(doGetTree(root));
+ }
+ return results;
+ }
+
+ /**
+ * Recursive helper to finalize a node in the dependency tree
+ * @param token The node we are currently finalizing
+ * @return A hierarchical representation of the tree inclusive of children at this level
+ */
+ private ProfileResult doGetTree(int token) {
+ E element = elements.get(token);
+ PB breakdown = timings.get(token);
+ Map<String, Long> timings = breakdown.toTimingMap();
+ List<Integer> children = tree.get(token);
+ List<ProfileResult> childrenProfileResults = Collections.emptyList();
+
+ if (children != null) {
+ childrenProfileResults = new ArrayList<>(children.size());
+ for (Integer child : children) {
+ ProfileResult childNode = doGetTree(child);
+ childrenProfileResults.add(childNode);
+ }
+ }
+
+ // TODO this would be better done bottom-up instead of top-down to avoid
+ // calculating the same times over and over...but worth the effort?
+ long nodeTime = getNodeTime(timings, childrenProfileResults);
+ String type = getTypeFromElement(element);
+ String description = getDescriptionFromElement(element);
+ return new ProfileResult(type, description, timings, childrenProfileResults, nodeTime);
+ }
+
+ protected abstract String getTypeFromElement(E element);
+
+ protected abstract String getDescriptionFromElement(E element);
+
+ /**
+ * Internal helper to add a child to the current parent node
+ *
+ * @param childToken The child to add to the current parent
+ */
+ private void updateParent(int childToken) {
+ Integer parent = stack.peekLast();
+ ArrayList<Integer> parentNode = tree.get(parent);
+ parentNode.add(childToken);
+ tree.set(parent, parentNode);
+ }
+
+ /**
+ * Internal helper to calculate the time of a node, inclusive of children
+ *
+ * @param timings
+ * A map of breakdown timing for the node
+ * @param children
+ * All children profile results at this node
+ * @return The total time at this node, inclusive of children
+ */
+ private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) {
+ long nodeTime = 0;
+ for (long time : timings.values()) {
+ nodeTime += time;
+ }
+
+ // Then add up our children
+ for (ProfileResult child : children) {
+ nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren());
+ }
+ return nodeTime;
+ }
+
+}
\ No newline at end of file
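Aside: AbstractInternalProfileTree assigns each profiled element an integer token, records parent/child edges while a stack tracks the current position, and computes a node's total time as its own timing breakdown plus the totals of its children. A toy, self-contained model of that bottom-up accounting (made-up names, not ES code):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Toy model of getNodeTime(): a node's total is the sum of its own timing breakdown
    // plus the already-computed totals of its children.
    final class NodeTimeDemo {
        static long nodeTime(Map<String, Long> selfTimings, List<Long> childTotals) {
            long total = 0;
            for (long t : selfTimings.values()) {
                total += t;
            }
            for (long child : childTotals) {
                total += child;
            }
            return total;
        }

        public static void main(String[] args) {
            Map<String, Long> timings = new HashMap<>();
            timings.put("collect", 5L);
            timings.put("build_aggregation", 2L);
            System.out.println(nodeTime(timings, Arrays.asList(3L, 4L))); // 14
        }
    }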
diff --git a/core/src/main/java/org/elasticsearch/search/profile/AbstractProfiler.java b/core/src/main/java/org/elasticsearch/search/profile/AbstractProfiler.java
new file mode 100644
index 00000000000..a7ccb72785e
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/AbstractProfiler.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import java.util.List;
+
+public class AbstractProfiler<PB extends AbstractProfileBreakdown<?>, E> {
+
+ protected final AbstractInternalProfileTree<PB, E> profileTree;
+
+ public AbstractProfiler(AbstractInternalProfileTree<PB, E> profileTree) {
+ this.profileTree = profileTree;
+ }
+
+ /**
+ * Get the {@link AbstractProfileBreakdown} for the given element in the
+ * tree, potentially creating it if it did not exist.
+ */
+ public PB getQueryBreakdown(E query) {
+ return profileTree.getProfileBreakdown(query);
+ }
+
+ /**
+ * Removes the last (e.g. most recent) element on the stack.
+ */
+ public void pollLastElement() {
+ profileTree.pollLast();
+ }
+
+ /**
+ * @return a hierarchical representation of the profiled tree
+ */
+ public List<ProfileResult> getTree() {
+ return profileTree.getTree();
+ }
+
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java
index 9def3db7582..2a1fb0ba9b1 100644
--- a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java
@@ -22,83 +22,50 @@ package org.elasticsearch.search.profile;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.search.profile.query.CollectorResult;
+import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult;
+import org.elasticsearch.search.profile.query.QueryProfileShardResult;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-/**
- * A container class to hold the profile results for a single shard in the request.
- * Contains a list of query profiles, a collector tree and a total rewrite tree.
- */
-public final class ProfileShardResult implements Writeable, ToXContent {
+public class ProfileShardResult implements Writeable {
- private final List<ProfileResult> queryProfileResults;
+ private final List<QueryProfileShardResult> queryProfileResults;
- private final CollectorResult profileCollector;
+ private final AggregationProfileShardResult aggProfileShardResult;
- private final long rewriteTime;
-
- public ProfileShardResult(List<ProfileResult> queryProfileResults, long rewriteTime,
- CollectorResult profileCollector) {
- assert(profileCollector != null);
- this.queryProfileResults = queryProfileResults;
- this.profileCollector = profileCollector;
- this.rewriteTime = rewriteTime;
+ public ProfileShardResult(List<QueryProfileShardResult> queryProfileResults, AggregationProfileShardResult aggProfileShardResult) {
+ this.aggProfileShardResult = aggProfileShardResult;
+ this.queryProfileResults = Collections.unmodifiableList(queryProfileResults);
}
- /**
- * Read from a stream.
- */
public ProfileShardResult(StreamInput in) throws IOException {
int profileSize = in.readVInt();
- queryProfileResults = new ArrayList<>(profileSize);
- for (int j = 0; j < profileSize; j++) {
- queryProfileResults.add(new ProfileResult(in));
+ List<QueryProfileShardResult> queryProfileResults = new ArrayList<>(profileSize);
+ for (int i = 0; i < profileSize; i++) {
+ QueryProfileShardResult result = new QueryProfileShardResult(in);
+ queryProfileResults.add(result);
}
-
- profileCollector = new CollectorResult(in);
- rewriteTime = in.readLong();
+ this.queryProfileResults = Collections.unmodifiableList(queryProfileResults);
+ this.aggProfileShardResult = new AggregationProfileShardResult(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(queryProfileResults.size());
- for (ProfileResult p : queryProfileResults) {
- p.writeTo(out);
+ for (QueryProfileShardResult queryShardResult : queryProfileResults) {
+ queryShardResult.writeTo(out);
}
- profileCollector.writeTo(out);
- out.writeLong(rewriteTime);
+ aggProfileShardResult.writeTo(out);
}
-
- public List<ProfileResult> getQueryResults() {
- return Collections.unmodifiableList(queryProfileResults);
+ public List<QueryProfileShardResult> getQueryProfileResults() {
+ return queryProfileResults;
}
- public long getRewriteTime() {
- return rewriteTime;
- }
-
- public CollectorResult getCollectorResult() {
- return profileCollector;
- }
-
- @Override
- public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startArray("query");
- for (ProfileResult p : queryProfileResults) {
- p.toXContent(builder, params);
- }
- builder.endArray();
- builder.field("rewrite_time", rewriteTime);
- builder.startArray("collector");
- profileCollector.toXContent(builder, params);
- builder.endArray();
- return builder;
+ public AggregationProfileShardResult getAggregationProfileResults() {
+ return aggProfileShardResult;
}
}
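Aside: the rewritten ProfileShardResult writes a count-prefixed list of query profile results followed by exactly one aggregation profile result, and the stream constructor must read them back in the same order. A toy round-trip of that layout using plain java.io streams in place of StreamOutput/StreamInput:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Toy round-trip mirroring writeTo()/ProfileShardResult(StreamInput): count, query results, agg result.
    final class LayoutDemo {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                List<String> queryResults = Arrays.asList("query-profile-0", "query-profile-1");
                out.writeInt(queryResults.size());   // stands in for out.writeVInt(...)
                for (String q : queryResults) {
                    out.writeUTF(q);                 // stands in for queryShardResult.writeTo(out)
                }
                out.writeUTF("agg-profile");         // stands in for aggProfileShardResult.writeTo(out)
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                int n = in.readInt();
                List<String> queryResults = new ArrayList<>(n);
                for (int i = 0; i < n; i++) {
                    queryResults.add(in.readUTF());  // stands in for new QueryProfileShardResult(in)
                }
                String aggResult = in.readUTF();     // stands in for new AggregationProfileShardResult(in)
                System.out.println(queryResults + " / " + aggResult);
            }
        }
    }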
diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java
index e9e6d88db18..d754be41f6d 100644
--- a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java
+++ b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java
@@ -20,22 +20,25 @@
package org.elasticsearch.search.profile;
import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.profile.aggregation.AggregationProfiler;
import org.elasticsearch.search.profile.query.QueryProfiler;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
-/** Wrapper around several {@link QueryProfiler}s that makes management easier. */
+/** Wrapper around all the profilers that makes management easier. */
public final class Profilers {
private final ContextIndexSearcher searcher;
private final List<QueryProfiler> queryProfilers;
+ private final AggregationProfiler aggProfiler;
/** Sole constructor. This {@link Profilers} instance will initially wrap one {@link QueryProfiler}. */
public Profilers(ContextIndexSearcher searcher) {
this.searcher = searcher;
this.queryProfilers = new ArrayList<>();
+ this.aggProfiler = new AggregationProfiler();
addQueryProfiler();
}
@@ -57,4 +60,9 @@ public final class Profilers {
return Collections.unmodifiableList(queryProfilers);
}
+ /** Return the {@link AggregationProfiler}. */
+ public AggregationProfiler getAggregationProfiler() {
+ return aggProfiler;
+ }
+
}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java
index bf265dd9a7e..6794aa49399 100644
--- a/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java
+++ b/core/src/main/java/org/elasticsearch/search/profile/SearchProfileShardResults.java
@@ -24,6 +24,9 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult;
+import org.elasticsearch.search.profile.aggregation.AggregationProfiler;
+import org.elasticsearch.search.profile.query.QueryProfileShardResult;
import org.elasticsearch.search.profile.query.QueryProfiler;
import java.io.IOException;
@@ -32,7 +35,6 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.stream.Collectors;
/**
* A container class to hold all the profile results across all shards. Internally
@@ -40,17 +42,10 @@ import java.util.stream.Collectors;
*/
public final class SearchProfileShardResults implements Writeable, ToXContent{
- private Map<String, List<ProfileShardResult>> shardResults;
+ private Map<String, ProfileShardResult> shardResults;
- public SearchProfileShardResults(Map<String, List<ProfileShardResult>> shardResults) {
- Map<String, List<ProfileShardResult>> transformed =
- shardResults.entrySet()
- .stream()
- .collect(Collectors.toMap(
- Map.Entry::getKey,
- e -> Collections.unmodifiableList(e.getValue()))
- );
- this.shardResults = Collections.unmodifiableMap(transformed);
+ public SearchProfileShardResults(Map<String, ProfileShardResult> shardResults) {
+ this.shardResults = Collections.unmodifiableMap(shardResults);
}
public SearchProfileShardResults(StreamInput in) throws IOException {
@@ -59,33 +54,22 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{
for (int i = 0; i < size; i++) {
String key = in.readString();
- int shardResultsSize = in.readInt();
-
- List<ProfileShardResult> shardResult = new ArrayList<>(shardResultsSize);
-
- for (int j = 0; j < shardResultsSize; j++) {
- ProfileShardResult result = new ProfileShardResult(in);
- shardResult.add(result);
- }
- shardResults.put(key, Collections.unmodifiableList(shardResult));
+ ProfileShardResult shardResult = new ProfileShardResult(in);
+ shardResults.put(key, shardResult);
}
shardResults = Collections.unmodifiableMap(shardResults);
}
- public Map<String, List<ProfileShardResult>> getShardResults() {
+ public Map<String, ProfileShardResult> getShardResults() {
return this.shardResults;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(shardResults.size());
- for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
+ for (Map.Entry<String, ProfileShardResult> entry : shardResults.entrySet()) {
out.writeString(entry.getKey());
- out.writeInt(entry.getValue().size());
-
- for (ProfileShardResult result : entry.getValue()) {
- result.writeTo(out);
- }
+ entry.getValue().writeTo(out);
}
}
@@ -93,14 +77,18 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("profile").startArray("shards");
- for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
- builder.startObject().field("id",entry.getKey()).startArray("searches");
- for (ProfileShardResult result : entry.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> entry : shardResults.entrySet()) {
+ builder.startObject();
+ builder.field("id", entry.getKey());
+ builder.startArray("searches");
+ for (QueryProfileShardResult result : entry.getValue().getQueryProfileResults()) {
builder.startObject();
result.toXContent(builder, params);
builder.endObject();
}
- builder.endArray().endObject();
+ builder.endArray();
+ entry.getValue().getAggregationProfileResults().toXContent(builder, params);
+ builder.endObject();
}
builder.endArray().endObject();
@@ -112,16 +100,20 @@ public final class SearchProfileShardResults implements Writeable, ToXContent{
* can be serialized to other nodes, emitted as JSON, etc.
*
* @param profilers
- * A list of Profilers to convert into
- * InternalProfileShardResults
- * @return A list of corresponding InternalProfileShardResults
+ * The {@link Profilers} to convert into results
+ * @return A {@link ProfileShardResult} representing the results for this
+ * shard
*/
- public static List<ProfileShardResult> buildShardResults(List<QueryProfiler> profilers) {
- List<ProfileShardResult> results = new ArrayList<>(profilers.size());
- for (QueryProfiler profiler : profilers) {
- ProfileShardResult result = new ProfileShardResult(profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector());
- results.add(result);
+ public static ProfileShardResult buildShardResults(Profilers profilers) {
+ List<QueryProfiler> queryProfilers = profilers.getQueryProfilers();
+ AggregationProfiler aggProfiler = profilers.getAggregationProfiler();
+ List<QueryProfileShardResult> queryResults = new ArrayList<>(queryProfilers.size());
+ for (QueryProfiler queryProfiler : queryProfilers) {
+ QueryProfileShardResult result = new QueryProfileShardResult(queryProfiler.getTree(), queryProfiler.getRewriteTime(),
+ queryProfiler.getCollector());
+ queryResults.add(result);
}
- return results;
+ AggregationProfileShardResult aggResults = new AggregationProfileShardResult(aggProfiler.getTree());
+ return new ProfileShardResult(queryResults, aggResults);
}
}
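Aside: with this change each shard entry in the profile section carries a "searches" array (one object per QueryProfileShardResult) followed by the aggregation profile output. A rough sketch of that per-shard nesting built with XContentBuilder; it assumes the usual XContentFactory.jsonBuilder()/string() helpers and uses placeholder field values:

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // Hedged sketch of the per-shard profile layout produced by toXContent() above.
    final class ProfileShapeDemo {
        public static void main(String[] args) throws Exception {
            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
            builder.startObject();
            builder.field("id", "[node][index][0]");
            builder.startArray("searches");
            builder.startObject().field("placeholder", "query profile").endObject();
            builder.endArray();
            builder.startArray("aggregations");
            builder.startObject().field("placeholder", "aggregation profile").endObject();
            builder.endArray();
            builder.endObject();
            System.out.println(builder.string());
        }
    }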
diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileBreakdown.java
similarity index 64%
rename from modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java
rename to core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileBreakdown.java
index a06bcdf9840..b4cb1efe5d3 100644
--- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Analyzer.java
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileBreakdown.java
@@ -17,21 +17,14 @@
* under the License.
*/
-package org.elasticsearch.painless;
+package org.elasticsearch.search.profile.aggregation;
-import org.elasticsearch.painless.Variables.Reserved;
-import org.elasticsearch.painless.node.SSource;
+import org.elasticsearch.search.profile.AbstractProfileBreakdown;
-/**
- * Runs the analysis phase of compilation using the Painless AST.
- */
-final class Analyzer {
- static Variables analyze(Reserved shortcut, SSource root) {
- Variables variables = new Variables(shortcut);
- root.analyze(variables);
+public class AggregationProfileBreakdown extends AbstractProfileBreakdown<AggregationTimingType> {
- return variables;
+ public AggregationProfileBreakdown() {
+ super(AggregationTimingType.values());
}
- private Analyzer() {}
}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResult.java
new file mode 100644
index 00000000000..df55c5592d6
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfileShardResult.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.aggregation;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.profile.ProfileResult;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * A container class to hold the profile results for a single shard in the request.
+ * Contains a list of query profiles, a collector tree and a total rewrite tree.
+ */
+public final class AggregationProfileShardResult implements Writeable, ToXContent {
+
+ private final List<ProfileResult> aggProfileResults;
+
+ public AggregationProfileShardResult(List<ProfileResult> aggProfileResults) {
+ this.aggProfileResults = aggProfileResults;
+ }
+
+ /**
+ * Read from a stream.
+ */
+ public AggregationProfileShardResult(StreamInput in) throws IOException {
+ int profileSize = in.readVInt();
+ aggProfileResults = new ArrayList<>(profileSize);
+ for (int j = 0; j < profileSize; j++) {
+ aggProfileResults.add(new ProfileResult(in));
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(aggProfileResults.size());
+ for (ProfileResult p : aggProfileResults) {
+ p.writeTo(out);
+ }
+ }
+
+
+ public List<ProfileResult> getProfileResults() {
+ return Collections.unmodifiableList(aggProfileResults);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray("aggregations");
+ for (ProfileResult p : aggProfileResults) {
+ p.toXContent(builder, params);
+ }
+ builder.endArray();
+ return builder;
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfiler.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfiler.java
new file mode 100644
index 00000000000..45d401ccbdc
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationProfiler.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.aggregation;
+
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.profile.AbstractProfiler;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+public class AggregationProfiler extends AbstractProfiler<AggregationProfileBreakdown, Aggregator> {
+
+ private final Map<List<String>, AggregationProfileBreakdown> profileBrakdownLookup = new HashMap<>();
+
+ public AggregationProfiler() {
+ super(new InternalAggregationProfileTree());
+ }
+
+ @Override
+ public AggregationProfileBreakdown getQueryBreakdown(Aggregator agg) {
+ List<String> path = getAggregatorPath(agg);
+ AggregationProfileBreakdown aggregationProfileBreakdown = profileBrakdownLookup.get(path);
+ if (aggregationProfileBreakdown == null) {
+ aggregationProfileBreakdown = super.getQueryBreakdown(agg);
+ profileBrakdownLookup.put(path, aggregationProfileBreakdown);
+ }
+ return aggregationProfileBreakdown;
+ }
+
+ public static List<String> getAggregatorPath(Aggregator agg) {
+ LinkedList<String> path = new LinkedList<>();
+ while (agg != null) {
+ path.addFirst(agg.name());
+ agg = agg.parent();
+ }
+ return path;
+ }
+}
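Aside: getQueryBreakdown() keys breakdowns by the aggregator's path rather than by instance, so wrappers such as MultiBucketAggregatorWrapper that swap delegates still collapse onto a single breakdown. The path is built by walking parent() links and prepending names. A toy version of that walk (not ES code):

    import java.util.LinkedList;
    import java.util.List;

    // Toy model of getAggregatorPath(): prepend each name while walking up the parent chain,
    // yielding the path from the root aggregation down to the profiled one.
    final class AggPathDemo {
        static final class Node {
            final String name;
            final Node parent;
            Node(String name, Node parent) { this.name = name; this.parent = parent; }
        }

        static List<String> path(Node node) {
            LinkedList<String> path = new LinkedList<>();
            while (node != null) {
                path.addFirst(node.name);
                node = node.parent;
            }
            return path;
        }

        public static void main(String[] args) {
            Node root = new Node("authors", null);
            Node child = new Node("avg_score", root);
            System.out.println(path(child)); // [authors, avg_score]
        }
    }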
diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationTimingType.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationTimingType.java
new file mode 100644
index 00000000000..d1c5d3dd538
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/AggregationTimingType.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.aggregation;
+
+import java.util.Locale;
+
+public enum AggregationTimingType {
+ INITIALIZE,
+ COLLECT,
+ BUILD_AGGREGATION,
+ REDUCE;
+
+ @Override
+ public String toString() {
+ return name().toLowerCase(Locale.ROOT);
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java
new file mode 100644
index 00000000000..f367595c84c
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.aggregation;
+
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactory.MultiBucketAggregatorWrapper;
+import org.elasticsearch.search.profile.AbstractInternalProfileTree;
+
+public class InternalAggregationProfileTree extends AbstractInternalProfileTree<AggregationProfileBreakdown, Aggregator> {
+
+ @Override
+ protected AggregationProfileBreakdown createProfileBreakdown() {
+ return new AggregationProfileBreakdown();
+ }
+
+ @Override
+ protected String getTypeFromElement(Aggregator element) {
+ if (element instanceof MultiBucketAggregatorWrapper) {
+ return ((MultiBucketAggregatorWrapper) element).getWrappedClass().getName();
+ }
+ return element.getClass().getName();
+ }
+
+ @Override
+ protected String getDescriptionFromElement(Aggregator element) {
+ return element.name();
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java
new file mode 100644
index 00000000000..2883c2903e8
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.aggregation;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+
+import java.io.IOException;
+
+public class ProfilingAggregator extends Aggregator {
+
+ private final Aggregator delegate;
+ private final AggregationProfiler profiler;
+ private AggregationProfileBreakdown profileBreakdown;
+
+ public ProfilingAggregator(Aggregator delegate, AggregationProfiler profiler) throws IOException {
+ this.profiler = profiler;
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void close() {
+ delegate.close();
+ }
+
+ @Override
+ public boolean needsScores() {
+ return delegate.needsScores();
+ }
+
+ @Override
+ public String name() {
+ return delegate.name();
+ }
+
+ @Override
+ public AggregationContext context() {
+ return delegate.context();
+ }
+
+ @Override
+ public Aggregator parent() {
+ return delegate.parent();
+ }
+
+ @Override
+ public Aggregator subAggregator(String name) {
+ return delegate.subAggregator(name);
+ }
+
+ @Override
+ public InternalAggregation buildAggregation(long bucket) throws IOException {
+ profileBreakdown.startTime(AggregationTimingType.BUILD_AGGREGATION);
+ InternalAggregation result = delegate.buildAggregation(bucket);
+ profileBreakdown.stopAndRecordTime();
+ return result;
+ }
+
+ @Override
+ public InternalAggregation buildEmptyAggregation() {
+ return delegate.buildEmptyAggregation();
+ }
+
+ @Override
+ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
+ return new ProfilingLeafBucketCollector(delegate.getLeafCollector(ctx), profileBreakdown);
+ }
+
+ @Override
+ public void preCollection() throws IOException {
+ this.profileBreakdown = profiler.getQueryBreakdown(delegate);
+ profileBreakdown.startTime(AggregationTimingType.INITIALIZE);
+ delegate.preCollection();
+ profileBreakdown.stopAndRecordTime();
+ profiler.pollLastElement();
+ }
+
+ @Override
+ public void postCollection() throws IOException {
+ delegate.postCollection();
+ }
+
+}
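Aside: ProfilingAggregator is a straight decorator; every call forwards to the wrapped aggregator, and the phases of interest are bracketed by startTime()/stopAndRecordTime() on the breakdown. A minimal sketch of that bracket-and-delegate timing idea (made-up timer, not the ES breakdown implementation):

    // Minimal sketch of the start/stop bracket ProfilingAggregator puts around each phase.
    final class PhaseTimer {
        private long elapsedNanos;
        private long startNanos = -1;

        void start() {
            startNanos = System.nanoTime();                    // like profileBreakdown.startTime(...)
        }

        void stopAndRecord() {
            elapsedNanos += System.nanoTime() - startNanos;    // like profileBreakdown.stopAndRecordTime()
            startNanos = -1;
        }

        long elapsedNanos() {
            return elapsedNanos;
        }

        public static void main(String[] args) {
            PhaseTimer timer = new PhaseTimer();
            timer.start();
            // ... the delegate's buildAggregation(bucket) would run here ...
            timer.stopAndRecord();
            System.out.println(timer.elapsedNanos() + "ns");
        }
    }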
diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java
new file mode 100644
index 00000000000..75c90ded709
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingLeafBucketCollector.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.aggregation;
+
+import org.elasticsearch.search.aggregations.LeafBucketCollector;
+
+import java.io.IOException;
+
+public class ProfilingLeafBucketCollector extends LeafBucketCollector {
+
+ private LeafBucketCollector delegate;
+ private AggregationProfileBreakdown profileBreakdown;
+
+ public ProfilingLeafBucketCollector(LeafBucketCollector delegate, AggregationProfileBreakdown profileBreakdown) {
+ this.delegate = delegate;
+ this.profileBreakdown = profileBreakdown;
+ }
+
+ @Override
+ public void collect(int doc, long bucket) throws IOException {
+ profileBreakdown.startTime(AggregationTimingType.COLLECT);
+ delegate.collect(doc, bucket);
+ profileBreakdown.stopAndRecordTime();
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java
index 5b92ef8b2a9..013b7d3a506 100644
--- a/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java
+++ b/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java
@@ -20,89 +20,33 @@
package org.elasticsearch.search.profile.query;
import org.apache.lucene.search.Query;
+import org.elasticsearch.search.profile.AbstractInternalProfileTree;
import org.elasticsearch.search.profile.ProfileResult;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Deque;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.LinkedBlockingDeque;
-
/**
* This class tracks the dependency tree for queries (scoring and rewriting) and
* generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree
* and returns a list of {@link ProfileResult} that can be serialized back to the client
*/
-final class InternalQueryProfileTree {
-
- private ArrayList<QueryProfileBreakdown> timings;
-
- /** Maps the Query to it's list of children. This is basically the dependency tree */
- private ArrayList<ArrayList<Integer>> tree;
-
- /** A list of the original queries, keyed by index position */
- private ArrayList<Query> queries;
-
- /** A list of top-level "roots". Each root can have its own tree of profiles */
- private ArrayList<Integer> roots;
+final class InternalQueryProfileTree extends AbstractInternalProfileTree<QueryProfileBreakdown, Query> {
/** Rewrite time */
private long rewriteTime;
private long rewriteScratch;
- /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */
- private Deque<Integer> stack;
-
- private int currentToken = 0;
-
- public InternalQueryProfileTree() {
- timings = new ArrayList<>(10);
- stack = new LinkedBlockingDeque<>(10);
- tree = new ArrayList<>(10);
- queries = new ArrayList<>(10);
- roots = new ArrayList<>(10);
+ @Override
+ protected QueryProfileBreakdown createProfileBreakdown() {
+ return new QueryProfileBreakdown();
}
- /**
- * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those
- * that are past the rewrite phase and are now being wrapped by createWeight() ) follow
- * a recursive progression. We can track the dependency tree by a simple stack
- *
- * The only hiccup is that the first scoring query will be identical to the last rewritten
- * query, so we need to take special care to fix that
- *
- * @param query The scoring query we wish to profile
- * @return A ProfileBreakdown for this query
- */
- public QueryProfileBreakdown getQueryBreakdown(Query query) {
- int token = currentToken;
+ @Override
+ protected String getTypeFromElement(Query query) {
+ return query.getClass().getSimpleName();
+ }
- boolean stackEmpty = stack.isEmpty();
-
- // If the stack is empty, we are a new root query
- if (stackEmpty) {
-
- // We couldn't find a rewritten query to attach to, so just add it as a
- // top-level root. This is just a precaution: it really shouldn't happen.
- // We would only get here if a top-level query that never rewrites for some reason.
- roots.add(token);
-
- // Increment the token since we are adding a new node, but notably, do not
- // updateParent() because this was added as a root
- currentToken += 1;
- stack.add(token);
-
- return addDependencyNode(query, token);
- }
-
- updateParent(token);
-
- // Increment the token since we are adding a new node
- currentToken += 1;
- stack.add(token);
-
- return addDependencyNode(query, token);
+ @Override
+ protected String getDescriptionFromElement(Query query) {
+ return query.toString();
}
/**
@@ -128,113 +72,7 @@ final class InternalQueryProfileTree {
return time;
}
- /**
- * Helper method to add a new node to the dependency tree.
- *
- * Initializes a new list in the dependency tree, saves the query and
- * generates a new {@link QueryProfileBreakdown} to track the timings
- * of this query
- *
- * @param query The query to profile
- * @param token The assigned token for this query
- * @return A ProfileBreakdown to profile this query
- */
- private QueryProfileBreakdown addDependencyNode(Query query, int token) {
-
- // Add a new slot in the dependency tree
- tree.add(new ArrayList<>(5));
-
- // Save our query for lookup later
- queries.add(query);
-
- QueryProfileBreakdown queryTimings = new QueryProfileBreakdown();
- timings.add(token, queryTimings);
- return queryTimings;
- }
-
- /**
- * Removes the last (e.g. most recent) value on the stack
- */
- public void pollLast() {
- stack.pollLast();
- }
-
- /**
- * After the query has been run and profiled, we need to merge the flat timing map
- * with the dependency graph to build a data structure that mirrors the original
- * query tree
- *
- * @return a hierarchical representation of the profiled query tree
- */
- public List<ProfileResult> getQueryTree() {
- ArrayList<ProfileResult> results = new ArrayList<>(5);
- for (Integer root : roots) {
- results.add(doGetQueryTree(root));
- }
- return results;
- }
-
- /**
- * Recursive helper to finalize a node in the dependency tree
- * @param token The node we are currently finalizing
- * @return A hierarchical representation of the tree inclusive of children at this level
- */
- private ProfileResult doGetQueryTree(int token) {
- Query query = queries.get(token);
- QueryProfileBreakdown breakdown = timings.get(token);
- Map<String, Long> timings = breakdown.toTimingMap();
- List<Integer> children = tree.get(token);
- List<ProfileResult> childrenProfileResults = Collections.emptyList();
-
- if (children != null) {
- childrenProfileResults = new ArrayList<>(children.size());
- for (Integer child : children) {
- ProfileResult childNode = doGetQueryTree(child);
- childrenProfileResults.add(childNode);
- }
- }
-
- // TODO this would be better done bottom-up instead of top-down to avoid
- // calculating the same times over and over...but worth the effort?
- long nodeTime = getNodeTime(timings, childrenProfileResults);
- String queryDescription = query.getClass().getSimpleName();
- String luceneName = query.toString();
- return new ProfileResult(queryDescription, luceneName, timings, childrenProfileResults, nodeTime);
- }
-
public long getRewriteTime() {
return rewriteTime;
}
-
- /**
- * Internal helper to add a child to the current parent node
- *
- * @param childToken The child to add to the current parent
- */
- private void updateParent(int childToken) {
- Integer parent = stack.peekLast();
- ArrayList<Integer> parentNode = tree.get(parent);
- parentNode.add(childToken);
- tree.set(parent, parentNode);
- }
-
- /**
- * Internal helper to calculate the time of a node, inclusive of children
- *
- * @param timings A map of breakdown timing for the node
- * @param children All children profile results at this node
- * @return The total time at this node, inclusive of children
- */
- private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) {
- long nodeTime = 0;
- for (long time : timings.values()) {
- nodeTime += time;
- }
-
- // Then add up our children
- for (ProfileResult child : children) {
- nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren());
- }
- return nodeTime;
- }
}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java
new file mode 100644
index 00000000000..d5e00aca336
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfileShardResult.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.query;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.profile.ProfileResult;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * A container class to hold the profile results for a single shard in the request.
+ * Contains a list of query profiles, a collector tree and a total rewrite tree.
+ */
+public final class QueryProfileShardResult implements Writeable, ToXContent {
+
+ private final List<ProfileResult> queryProfileResults;
+
+ private final CollectorResult profileCollector;
+
+ private final long rewriteTime;
+
+ public QueryProfileShardResult(List<ProfileResult> queryProfileResults, long rewriteTime,
+ CollectorResult profileCollector) {
+ assert(profileCollector != null);
+ this.queryProfileResults = queryProfileResults;
+ this.profileCollector = profileCollector;
+ this.rewriteTime = rewriteTime;
+ }
+
+ /**
+ * Read from a stream.
+ */
+ public QueryProfileShardResult(StreamInput in) throws IOException {
+ int profileSize = in.readVInt();
+ queryProfileResults = new ArrayList<>(profileSize);
+ for (int j = 0; j < profileSize; j++) {
+ queryProfileResults.add(new ProfileResult(in));
+ }
+
+ profileCollector = new CollectorResult(in);
+ rewriteTime = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(queryProfileResults.size());
+ for (ProfileResult p : queryProfileResults) {
+ p.writeTo(out);
+ }
+ profileCollector.writeTo(out);
+ out.writeLong(rewriteTime);
+ }
+
+
+ public List<ProfileResult> getQueryResults() {
+ return Collections.unmodifiableList(queryProfileResults);
+ }
+
+ public long getRewriteTime() {
+ return rewriteTime;
+ }
+
+ public CollectorResult getCollectorResult() {
+ return profileCollector;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray("query");
+ for (ProfileResult p : queryProfileResults) {
+ p.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.field("rewrite_time", rewriteTime);
+ builder.startArray("collector");
+ profileCollector.toXContent(builder, params);
+ builder.endArray();
+ return builder;
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java
index 57341ee132f..0051356e35a 100644
--- a/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java
+++ b/core/src/main/java/org/elasticsearch/search/profile/query/QueryProfiler.java
@@ -20,9 +20,8 @@
package org.elasticsearch.search.profile.query;
import org.apache.lucene.search.Query;
-import org.elasticsearch.search.profile.ProfileResult;
+import org.elasticsearch.search.profile.AbstractProfiler;
-import java.util.List;
import java.util.Objects;
/**
@@ -36,16 +35,16 @@ import java.util.Objects;
* request may execute two searches (query + global agg). A Profiler just
* represents one of those
*/
-public final class QueryProfiler {
-
- private final InternalQueryProfileTree queryTree = new InternalQueryProfileTree();
+public final class QueryProfiler extends AbstractProfiler<QueryProfileBreakdown, Query> {
/**
* The root Collector used in the search
*/
private InternalProfileCollector collector;
- public QueryProfiler() {}
+ public QueryProfiler() {
+ super(new InternalQueryProfileTree());
+ }
/** Set the collector that is associated with this profiler. */
public void setCollector(InternalProfileCollector collector) {
@@ -55,21 +54,12 @@ public final class QueryProfiler {
this.collector = Objects.requireNonNull(collector);
}
- /**
- * Get the {@link QueryProfileBreakdown} for the given query, potentially creating it if it did not exist.
- * This should only be used for queries that will be undergoing scoring. Do not use it to profile the
- * rewriting phase
- */
- public QueryProfileBreakdown getQueryBreakdown(Query query) {
- return queryTree.getQueryBreakdown(query);
- }
-
/**
* Begin timing the rewrite phase of a request. All rewrites are accumulated together into a
* single metric
*/
public void startRewriteTime() {
- queryTree.startRewriteTime();
+ ((InternalQueryProfileTree) profileTree).startRewriteTime();
}
/**
@@ -79,29 +69,14 @@ public final class QueryProfiler {
* @return cumulative rewrite time
*/
public long stopAndAddRewriteTime() {
- return queryTree.stopAndAddRewriteTime();
- }
-
- /**
- * Removes the last (e.g. most recent) query on the stack. This should only be called for scoring
- * queries, not rewritten queries
- */
- public void pollLastQuery() {
- queryTree.pollLast();
- }
-
- /**
- * @return a hierarchical representation of the profiled query tree
- */
- public List<ProfileResult> getQueryTree() {
- return queryTree.getQueryTree();
+ return ((InternalQueryProfileTree) profileTree).stopAndAddRewriteTime();
}
/**
* @return total time taken to rewrite all queries in this profile
*/
public long getRewriteTime() {
- return queryTree.getRewriteTime();
+ return ((InternalQueryProfileTree) profileTree).getRewriteTime();
}
/**
diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
index df68064f617..174a337f3d2 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -112,8 +112,8 @@ public class QueryPhase implements SearchPhase {
aggregationPhase.execute(searchContext);
if (searchContext.getProfilers() != null) {
- List<ProfileShardResult> shardResults = SearchProfileShardResults
- .buildShardResults(searchContext.getProfilers().getQueryProfilers());
+ ProfileShardResult shardResults = SearchProfileShardResults
+ .buildShardResults(searchContext.getProfilers());
searchContext.queryResult().profileResults(shardResults);
}
}
@@ -385,8 +385,8 @@ public class QueryPhase implements SearchPhase {
queryResult.topDocs(topDocsCallable.call(), sortValueFormats);
if (searchContext.getProfilers() != null) {
- List<ProfileShardResult> shardResults = SearchProfileShardResults
- .buildShardResults(searchContext.getProfilers().getQueryProfilers());
+ ProfileShardResult shardResults = SearchProfileShardResults
+ .buildShardResults(searchContext.getProfilers());
searchContext.queryResult().profileResults(shardResults);
}
diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
index 1408ebe8359..be8c895eecd 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -59,7 +59,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
private Suggest suggest;
private boolean searchTimedOut;
private Boolean terminatedEarly = null;
- private List<ProfileShardResult> profileShardResults;
+ private ProfileShardResult profileShardResults;
public QuerySearchResult() {
@@ -143,7 +143,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
* Returns the profiled results for this search, or potentially null if result was empty
* @return The profiled results, or null
*/
- public @Nullable List<ProfileShardResult> profileResults() {
+ public @Nullable ProfileShardResult profileResults() {
return profileShardResults;
}
@@ -151,7 +151,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
* Sets the finalized profiling results for this query
* @param shardResults The finalized profile
*/
- public void profileResults(List<ProfileShardResult> shardResults) {
+ public void profileResults(ProfileShardResult shardResults) {
this.profileShardResults = shardResults;
}
@@ -237,12 +237,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
terminatedEarly = in.readOptionalBoolean();
if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
- int profileSize = in.readVInt();
- profileShardResults = new ArrayList<>(profileSize);
- for (int i = 0; i < profileSize; i++) {
- ProfileShardResult result = new ProfileShardResult(in);
- profileShardResults.add(result);
- }
+ profileShardResults = new ProfileShardResult(in);
}
}
@@ -296,10 +291,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
- out.writeVInt(profileShardResults.size());
- for (ProfileShardResult shardResult : profileShardResults) {
- shardResult.writeTo(out);
- }
+ profileShardResults.writeTo(out);
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java b/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java
index 0d87b275403..2b8040ebd28 100644
--- a/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java
+++ b/core/src/main/java/org/elasticsearch/search/slice/SliceQuery.java
@@ -61,7 +61,7 @@ public abstract class SliceQuery extends Query {
@Override
public boolean equals(Object o) {
- if (super.equals(o) == false) {
+ if (sameClassAs(o) == false) {
return false;
}
SliceQuery that = (SliceQuery) o;
@@ -70,7 +70,7 @@ public abstract class SliceQuery extends Query {
@Override
public int hashCode() {
- return Objects.hash(super.hashCode(), field, id, max);
+ return Objects.hash(classHash(), field, id, max);
}
@Override
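Aside: the equals()/hashCode() change follows the Lucene upgrade elsewhere in this diff, where Query subclasses are expected to compare class identity via sameClassAs(Object) and mix classHash() into their hash rather than rely on the old base implementations. A self-contained illustration of that contract on a plain class (not a real Lucene Query):

    import java.util.Objects;

    // Illustration of the "same concrete class + per-class hash" equality contract,
    // the plain-Java analogue of Query.sameClassAs()/classHash().
    final class SliceKey {
        private final String field;
        private final int id;
        private final int max;

        SliceKey(String field, int id, int max) {
            this.field = field;
            this.id = id;
            this.max = max;
        }

        @Override
        public boolean equals(Object o) {
            if (o == null || o.getClass() != getClass()) {   // sameClassAs(o)
                return false;
            }
            SliceKey that = (SliceKey) o;
            return field.equals(that.field) && id == that.id && max == that.max;
        }

        @Override
        public int hashCode() {
            return Objects.hash(getClass().hashCode(), field, id, max);   // classHash() analogue
        }
    }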
diff --git a/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java b/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java
index b967a6b6e71..429a3ebe892 100644
--- a/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java
+++ b/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java
@@ -74,11 +74,7 @@ public final class TermsSliceQuery extends SliceQuery {
int hashCode = term.hashCode();
if (contains(hashCode)) {
docsEnum = te.postings(docsEnum, PostingsEnum.NONE);
- int docId = docsEnum.nextDoc();
- while (docId != DocIdSetIterator.NO_MORE_DOCS) {
- builder.add(docId);
- docId = docsEnum.nextDoc();
- }
+ builder.add(docsEnum);
}
}
return builder.build();
diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java
index 91b6bc120ad..03856017c36 100644
--- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java
+++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransportChannel.java
@@ -42,9 +42,6 @@ import org.jboss.netty.channel.ChannelFutureListener;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
-/**
- *
- */
public class NettyTransportChannel implements TransportChannel {
private final NettyTransport transport;
@@ -55,7 +52,7 @@ public class NettyTransportChannel implements TransportChannel {
private final long requestId;
private final String profileName;
private final long reservedBytes;
- private final AtomicBoolean closed = new AtomicBoolean();
+ private final AtomicBoolean released = new AtomicBoolean();
public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel,
long requestId, Version version, String profileName, long reservedBytes) {
@@ -86,7 +83,7 @@ public class NettyTransportChannel implements TransportChannel {
@Override
public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
- close();
+ release();
if (transport.compress) {
options = TransportResponseOptions.builder(options).withCompress(transport.compress).build();
}
@@ -128,7 +125,7 @@ public class NettyTransportChannel implements TransportChannel {
@Override
public void sendResponse(Throwable error) throws IOException {
- close();
+ release();
BytesStreamOutput stream = new BytesStreamOutput();
stream.skip(NettyHeader.HEADER_SIZE);
RemoteTransportException tx = new RemoteTransportException(
@@ -147,10 +144,10 @@ public class NettyTransportChannel implements TransportChannel {
future.addListener(onResponseSentListener);
}
- private void close() {
- // attempt to close once atomically
- if (closed.compareAndSet(false, true) == false) {
- throw new IllegalStateException("Channel is already closed");
+ private void release() {
+ // attempt to release once atomically
+ if (released.compareAndSet(false, true) == false) {
+ throw new IllegalStateException("reserved bytes are already released");
}
transport.inFlightRequestsBreaker().addWithoutBreaking(-reservedBytes);
}
@@ -174,4 +171,5 @@ public class NettyTransportChannel implements TransportChannel {
public Channel getChannel() {
return channel;
}
+
}
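
The rename above is cosmetic, but the underlying idiom is worth spelling out: the channel may hand its reserved bytes back to the in-flight-requests breaker exactly once, which compareAndSet enforces without locking. A standalone sketch (hypothetical class, not part of the patch):

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical holder demonstrating the release-exactly-once idiom used by the channel.
final class ReservedBytes {
    private final AtomicBoolean released = new AtomicBoolean();
    private final long bytes;

    ReservedBytes(long bytes) {
        this.bytes = bytes;
    }

    // Returns the amount to credit back to the breaker; a second call is a programming error.
    long release() {
        if (released.compareAndSet(false, true) == false) {
            throw new IllegalStateException("reserved bytes are already released");
        }
        return bytes;
    }
}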
diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy
index ff0ea773667..b185289b58d 100644
--- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy
+++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy
@@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.0.jar}" {
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.
-grant codeBase "${codebase.lucene-core-6.0.1.jar}" {
+grant codeBase "${codebase.lucene-core-6.1.0-snapshot-3a57bea.jar}" {
// needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
// java 8 package
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
@@ -42,6 +42,11 @@ grant codeBase "${codebase.lucene-core-6.0.1.jar}" {
permission java.lang.RuntimePermission "accessDeclaredMembers";
};
+grant codeBase "${codebase.lucene-misc-6.1.0-snapshot-3a57bea.jar}" {
+ // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
+ permission java.nio.file.LinkPermission "hard";
+};
+
//// Everything else:
grant {
diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
index d4ab6e01ab9..d7faab5eeda 100644
--- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
+++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
@@ -31,9 +31,11 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};
-grant codeBase "${codebase.lucene-test-framework-6.0.1.jar}" {
+grant codeBase "${codebase.lucene-test-framework-6.1.0-snapshot-3a57bea.jar}" {
// needed by RamUsageTester
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+ // needed for testing hardlinks in StoreRecoveryTests since we install MockFS
+ permission java.nio.file.LinkPermission "hard";
};
grant codeBase "${codebase.randomizedtesting-runner-2.3.2.jar}" {
diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java
index 65c91f5daab..862cccab318 100644
--- a/core/src/test/java/org/elasticsearch/VersionTests.java
+++ b/core/src/test/java/org/elasticsearch/VersionTests.java
@@ -270,7 +270,8 @@ public class VersionTests extends ESTestCase {
assertTrue("lucene versions must be " + other + " >= " + version,
other.luceneVersion.onOrAfter(version.luceneVersion));
}
- if (other.major == version.major && other.minor == version.minor) {
+ if (other.isAlpha() == false && version.isAlpha() == false
+ && other.major == version.major && other.minor == version.minor) {
assertEquals(other.luceneVersion.major, version.luceneVersion.major);
assertEquals(other.luceneVersion.minor, version.luceneVersion.minor);
// should we also assert the lucene bugfix version?
diff --git a/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java
new file mode 100644
index 00000000000..be4a7b29703
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionUtils.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support.master;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.cluster.ClusterState;
+
+public class TransportMasterNodeActionUtils {
+
+ /**
+ * Allows to directly call {@link TransportMasterNodeAction#masterOperation(MasterNodeRequest, ClusterState, ActionListener)} which is
+ * a protected method.
+ */
+ public static <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> void runMasterOperation(
+ TransportMasterNodeAction<Request, Response> masterNodeAction, Request request, ClusterState clusterState,
+ ActionListener<Response> actionListener) throws Exception {
+ assert masterNodeAction.checkBlock(request, clusterState) == null;
+ masterNodeAction.masterOperation(request, clusterState, actionListener);
+ }
+}

diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java
index d2ef349625e..a6e74a47706 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/JavaVersionTests.java
@@ -72,4 +72,8 @@ public class JavaVersionTests extends ESTestCase {
assertFalse(JavaVersion.isValid(version));
}
}
+
+ public void testJava8Compat() {
+ assertEquals(JavaVersion.parse("1.8"), JavaVersion.parse("8"));
+ }
}
\ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
index 419104bfe34..494aa7d1095 100644
--- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
+++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
@@ -1,4 +1,3 @@
-/*
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@@ -46,6 +45,7 @@ import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.SortedSet;
@@ -127,6 +127,44 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {
}
}
+ public void testRestoreSnapshotWithMissingChecksum() throws Exception {
+ final String repo = "test_repo";
+ final String snapshot = "test_1";
+ final String indexName = "index-2.3.4";
+ final String repoFileId = "missing-checksum-repo-2.3.4";
+ Path repoFile = getBwcIndicesPath().resolve(repoFileId + ".zip");
+ URI repoFileUri = repoFile.toUri();
+ URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/");
+ logger.info("--> creating repository [{}] for repo file [{}]", repo, repoFileId);
+ assertAcked(client().admin().cluster().preparePutRepository(repo)
+ .setType("url")
+ .setSettings(Settings.builder().put("url", repoJarUri.toString())));
+
+ logger.info("--> get snapshot and check its indices");
+ GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo).setSnapshots(snapshot).get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+ assertThat(snapshotInfo.indices(), equalTo(Arrays.asList(indexName)));
+
+ logger.info("--> restoring snapshot");
+ RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get();
+ assertThat(response.status(), equalTo(RestStatus.OK));
+ RestoreInfo restoreInfo = response.getRestoreInfo();
+ assertThat(restoreInfo.successfulShards(), greaterThan(0));
+ assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards()));
+ assertThat(restoreInfo.failedShards(), equalTo(0));
+ String index = restoreInfo.indices().get(0);
+ assertThat(index, equalTo(indexName));
+
+ logger.info("--> check search");
+ SearchResponse searchResponse = client().prepareSearch(index).get();
+ assertThat(searchResponse.getHits().totalHits(), greaterThan(0L));
+
+ logger.info("--> cleanup");
+ cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()]));
+ cluster().wipeTemplates();
+ }
+
private List<String> repoVersions() throws Exception {
return listRepoVersions("repo");
}
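
The new test registers a read-only "url" repository that points inside the bundled zip via the jar: URI scheme. A tiny sketch of how such a URI is assembled (placeholder paths, hypothetical helper):

import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Path;

final class JarRepoUri {
    // e.g. jar:file:///bwc/missing-checksum-repo-2.3.4.zip!/repo/
    static URI inside(Path zipFile) throws URISyntaxException {
        return new URI("jar:" + zipFile.toUri().toString() + "!/repo/");
    }
}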
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
index 731ecb859ee..7504c778d36 100644
--- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java
@@ -34,6 +34,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;
+import org.junit.BeforeClass;
import java.util.ArrayList;
import java.util.Arrays;
@@ -54,11 +55,17 @@ public class ClusterChangedEventTests extends ESTestCase {
private static final ClusterName TEST_CLUSTER_NAME = new ClusterName("test");
private static final String NODE_ID_PREFIX = "node_";
- private static final String INITIAL_CLUSTER_ID = UUIDs.randomBase64UUID();
- // the initial indices which every cluster state test starts out with
- private static final List<Index> initialIndices = Arrays.asList(new Index("idx1", UUIDs.randomBase64UUID()),
- new Index("idx2", UUIDs.randomBase64UUID()),
- new Index("idx3", UUIDs.randomBase64UUID()));
+ private static String INITIAL_CLUSTER_ID;
+ private static List<Index> initialIndices;
+
+ @BeforeClass
+ public static void beforeClass() {
+ INITIAL_CLUSTER_ID = UUIDs.randomBase64UUID();
+ // the initial indices which every cluster state test starts out with
+ initialIndices = Arrays.asList(new Index("idx1", UUIDs.randomBase64UUID()),
+ new Index("idx2", UUIDs.randomBase64UUID()),
+ new Index("idx3", UUIDs.randomBase64UUID()));
+ }
/**
* Test basic properties of the ClusterChangedEvent class:
@@ -140,24 +147,24 @@ public class ClusterChangedEventTests extends ESTestCase {
*/
public void testIndexMetaDataChange() {
final int numNodesInCluster = 3;
- final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices);
- final ClusterState newState = originalState; // doesn't matter for this test, just need a non-null value
- final ClusterChangedEvent event = new ClusterChangedEvent("_na_", originalState, newState);
+ final ClusterState state = createState(numNodesInCluster, randomBoolean(), initialIndices);
// test when its not the same IndexMetaData
final Index index = initialIndices.get(0);
- final IndexMetaData originalIndexMeta = originalState.metaData().index(index);
+ final IndexMetaData originalIndexMeta = state.metaData().index(index);
// make sure the metadata is actually on the cluster state
assertNotNull("IndexMetaData for " + index + " should exist on the cluster state", originalIndexMeta);
IndexMetaData newIndexMeta = createIndexMetadata(index, originalIndexMeta.getVersion() + 1);
- assertTrue("IndexMetaData with different version numbers must be considered changed", event.indexMetaDataChanged(newIndexMeta));
+ assertTrue("IndexMetaData with different version numbers must be considered changed",
+ ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, newIndexMeta));
// test when it doesn't exist
newIndexMeta = createIndexMetadata(new Index("doesntexist", UUIDs.randomBase64UUID()));
- assertTrue("IndexMetaData that didn't previously exist should be considered changed", event.indexMetaDataChanged(newIndexMeta));
+ assertTrue("IndexMetaData that didn't previously exist should be considered changed",
+ ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, newIndexMeta));
// test when its the same IndexMetaData
- assertFalse("IndexMetaData should be the same", event.indexMetaDataChanged(originalIndexMeta));
+ assertFalse("IndexMetaData should be the same", ClusterChangedEvent.indexMetaDataChanged(originalIndexMeta, originalIndexMeta));
}
/**
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
index 307df91c302..1f39706e4f4 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
@@ -161,7 +161,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
}
}
- private static final class RandomAllocationDecider extends AllocationDecider {
+ public static final class RandomAllocationDecider extends AllocationDecider {
private final Random random;
diff --git a/core/src/test/java/org/elasticsearch/common/UUIDTests.java b/core/src/test/java/org/elasticsearch/common/UUIDTests.java
index f82e1a464d9..d963db2d6f6 100644
--- a/core/src/test/java/org/elasticsearch/common/UUIDTests.java
+++ b/core/src/test/java/org/elasticsearch/common/UUIDTests.java
@@ -20,7 +20,9 @@ package org.elasticsearch.common;
import org.elasticsearch.test.ESTestCase;
+import java.security.SecureRandom;
import java.util.HashSet;
+import java.util.Random;
import java.util.Set;
public class UUIDTests extends ESTestCase {
@@ -41,7 +43,18 @@ public class UUIDTests extends ESTestCase {
}
public void testThreadedRandomUUID() {
- testUUIDThreaded(randomUUIDGen);
+ // we can not use a reproducible source of randomness for this
+ // test, the test explicitly relies on each thread having a
+ // unique source of randomness; thus, we fake what production
+ // code does when using a RandomBasedUUIDGenerator
+ testUUIDThreaded(new RandomBasedUUIDGenerator() {
+ private final SecureRandom sr = SecureRandomHolder.INSTANCE;
+
+ @Override
+ public String getBase64UUID() {
+ return getBase64UUID(sr);
+ }
+ });
}
Set<String> verifyUUIDSet(int count, UUIDGenerator uuidSource) {
@@ -98,6 +111,6 @@ public class UUIDTests extends ESTestCase {
for (UUIDGenRunner runner : runners) {
globalSet.addAll(runner.uuidSet);
}
- assertEquals(count*uuids, globalSet.size());
+ assertEquals(count * uuids, globalSet.size());
}
}
diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java
index ba6b4438aaa..e1135f807cf 100644
--- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java
+++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpRequestSizeLimitIT.java
@@ -54,7 +54,6 @@ public class NettyHttpRequestSizeLimitIT extends ESIntegTestCase {
.build();
}
- @TestLogging("_root:DEBUG,org.elasticsearch.common.breaker:TRACE,org.elasticsearch.test:TRACE,org.elasticsearch.transport:TRACE")
public void testLimitsInFlightRequests() throws Exception {
ensureGreen();
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
index 3c791e72b5f..2b2c9288f17 100644
--- a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
@@ -65,4 +65,22 @@ public class CharFilterTests extends ESTokenStreamTestCase {
// Repeat one more time to make sure that char filter is reinitialized correctly
assertTokenStreamContents(analyzer1.tokenStream("test", "hello!"), new String[]{"hello"});
}
+
+ public void testPatternReplaceCharFilter() throws Exception {
+ Settings settings = Settings.builder()
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put("index.analysis.char_filter.my_mapping.type", "pattern_replace")
+ .put("index.analysis.char_filter.my_mapping.pattern", "ab*")
+ .put("index.analysis.char_filter.my_mapping.replacement", "oo")
+ .put("index.analysis.char_filter.my_mapping.flags", "CASE_INSENSITIVE")
+ .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
+ .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+ .build();
+ IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
+ AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings)).build(idxSettings);
+ NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "faBBbBB aBbbbBf"), new String[]{"foo", "oof"});
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java
index 062774bf2f7..4e4d638d355 100644
--- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractGeoFieldDataTestCase.java
@@ -22,9 +22,9 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
-import org.apache.lucene.spatial.util.GeoUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
import static org.elasticsearch.test.geo.RandomShapeGenerator.randomPoint;
import static org.hamcrest.Matchers.allOf;
@@ -105,8 +105,8 @@ public abstract class AbstractGeoFieldDataTestCase extends AbstractFieldDataImpl
assertThat(docCount, greaterThan(0));
for (int i = 0; i < docCount; ++i) {
final GeoPoint point = values.valueAt(i);
- assertThat(point.lat(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LAT_INCL), lessThanOrEqualTo(GeoUtils.MAX_LAT_INCL)));
- assertThat(point.lon(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LON_INCL), lessThanOrEqualTo(GeoUtils.MAX_LON_INCL)));
+ assertThat(point.lat(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LAT), lessThanOrEqualTo(GeoUtils.MAX_LAT)));
+ assertThat(point.lon(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LON), lessThanOrEqualTo(GeoUtils.MAX_LON)));
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java
index ee19d094a3f..a3909637548 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldMapperTests.java
@@ -302,4 +302,19 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase {
assertEquals(1457654400000L, dvField.numericValue().longValue());
assertFalse(dvField.fieldType().stored());
}
+
+ public void testNullConfigValuesFail() throws MapperParsingException, IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "date")
+ .field("format", (String) null)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Exception e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping)));
+ assertEquals("[format] must not have a [null] value", e.getMessage());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java
index de14f38d6a9..224d512cb53 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TextFieldMapperTests.java
@@ -35,6 +35,7 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.index.mapper.core.TextFieldMapper.TextFieldType;
import org.elasticsearch.index.mapper.ParsedDocument;
@@ -458,4 +459,19 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
assertThat(fieldType.fielddataMaxFrequency(), equalTo((double) Integer.MAX_VALUE));
assertThat(fieldType.fielddataMinSegmentSize(), equalTo(1000));
}
+
+ public void testNullConfigValuesFail() throws MapperParsingException, IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "text")
+ .field("analyzer", (String) null)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Exception e = expectThrows(MapperParsingException.class, () -> parser.parse("type", new CompressedXContent(mapping)));
+ assertEquals("[analyzer] must not have a [null] value", e.getMessage());
+ }
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java
index 177d3b7b0f7..b2e1989454c 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java
@@ -19,7 +19,7 @@
package org.elasticsearch.index.mapper.externalvalues;
-import org.apache.lucene.spatial.util.GeoEncodingUtils;
+import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
@@ -88,7 +88,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(42.0, 51.0)));
+ assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0)));
}
assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
@@ -146,7 +146,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(42.0, 51.0)));
+ assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0)));
}
assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
@@ -208,7 +208,7 @@ public class SimpleExternalMappingTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoEncodingUtils.mortonHash(42.0, 51.0)));
+ assertThat(Long.parseLong(doc.rootDoc().getField("field.point").stringValue()), is(GeoPointField.encodeLatLon(42.0, 51.0)));
}
assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
index a1fdb7ec60f..202afd7a4b1 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.mapper.geo;
import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
@@ -46,7 +47,6 @@ import java.util.List;
import java.util.Map;
import java.lang.NumberFormatException;
-import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonHash;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -90,7 +90,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (indexCreatedBefore22 == true) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -197,7 +197,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("89.0,1.0"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(89.0, 1.0)));
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(89.0, 1.0)));
}
doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
@@ -209,7 +209,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("-89.0,-1.0"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(-89.0, -1.0)));
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(-89.0, -1.0)));
}
doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder()
@@ -221,7 +221,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("-1.0,-179.0"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(-1.0, -179.0)));
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(-1.0, -179.0)));
}
}
@@ -408,7 +408,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -441,7 +441,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.2));
assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.3));
// indexed hash
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
// point field for 2nd value
assertThat(doc.rootDoc().getFields("point.lat")[2].numericValue().doubleValue(), equalTo(1.4));
@@ -450,7 +450,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getFields("point.lat")[3].numericValue().doubleValue(), equalTo(1.4));
assertThat(doc.rootDoc().getFields("point.lon")[3].numericValue().doubleValue(), equalTo(1.5));
// indexed hash
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
} else {
assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
@@ -459,14 +459,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
}
}
}
@@ -491,7 +491,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -517,7 +517,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -559,12 +559,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
}
}
@@ -588,7 +588,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -613,7 +613,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -639,7 +639,7 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -669,14 +669,14 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
}
} else {
assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(4));
@@ -685,12 +685,12 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase {
assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.2));
assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.3));
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[0].stringValue()), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
assertThat(doc.rootDoc().getFields("point.lat")[2].numericValue().doubleValue(), equalTo(1.4));
assertThat(doc.rootDoc().getFields("point.lat")[3].numericValue().doubleValue(), equalTo(1.4));
assertThat(doc.rootDoc().getFields("point.lon")[2].numericValue().doubleValue(), equalTo(1.5));
assertThat(doc.rootDoc().getFields("point.lon")[3].numericValue().doubleValue(), equalTo(1.5));
- assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(mortonHash(1.4, 1.5)));
+ assertThat(Long.parseLong(doc.rootDoc().getFields("point")[1].stringValue()), equalTo(GeoPointField.encodeLatLon(1.4, 1.5)));
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
index 837cef6a17c..90528c9a8f4 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.index.mapper.geo;
+import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
@@ -35,7 +36,6 @@ import org.elasticsearch.test.VersionUtils;
import java.util.Collection;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
-import static org.apache.lucene.spatial.util.GeoEncodingUtils.mortonHash;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
@@ -72,7 +72,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
@@ -96,7 +96,7 @@ public class GeohashMappingGeoPointTests extends ESSingleNodeTestCase {
if (version.before(Version.V_2_2_0)) {
assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
} else {
- assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(mortonHash(1.2, 1.3)));
+ assertThat(Long.parseLong(doc.rootDoc().get("point")), equalTo(GeoPointField.encodeLatLon(1.2, 1.3)));
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java
index 522a35ccd5d..884f52cc0ed 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ip/IpFieldTypeTests.java
@@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.ip;
import java.net.InetAddress;
import org.apache.lucene.document.InetAddressPoint;
-import org.apache.lucene.document.XInetAddressPoint;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.util.BytesRef;
@@ -69,11 +68,11 @@ public class IpFieldTypeTests extends FieldTypeTestCase {
ip = "2001:db8::2:1";
String prefix = ip + "/64";
- assertEquals(XInetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null));
+ assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 64), ft.termQuery(prefix, null));
ip = "192.168.1.7";
prefix = ip + "/16";
- assertEquals(XInetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null));
+ assertEquals(InetAddressPoint.newPrefixQuery("field", InetAddresses.forString(ip), 16), ft.termQuery(prefix, null));
ft.setIndexOptions(IndexOptions.NONE);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
@@ -88,7 +87,7 @@ public class IpFieldTypeTests extends FieldTypeTestCase {
assertEquals(
InetAddressPoint.newRangeQuery("field",
InetAddresses.forString("::"),
- XInetAddressPoint.MAX_VALUE),
+ InetAddressPoint.MAX_VALUE),
ft.rangeQuery(null, null, randomBoolean(), randomBoolean()));
assertEquals(
@@ -106,13 +105,13 @@ public class IpFieldTypeTests extends FieldTypeTestCase {
assertEquals(
InetAddressPoint.newRangeQuery("field",
InetAddresses.forString("2001:db8::"),
- XInetAddressPoint.MAX_VALUE),
+ InetAddressPoint.MAX_VALUE),
ft.rangeQuery("2001:db8::", null, true, randomBoolean()));
assertEquals(
InetAddressPoint.newRangeQuery("field",
InetAddresses.forString("2001:db8::1"),
- XInetAddressPoint.MAX_VALUE),
+ InetAddressPoint.MAX_VALUE),
ft.rangeQuery("2001:db8::", null, false, randomBoolean()));
assertEquals(
@@ -152,7 +151,7 @@ public class IpFieldTypeTests extends FieldTypeTestCase {
assertEquals(
InetAddressPoint.newRangeQuery("field",
InetAddresses.forString("::1:0:0:0"),
- XInetAddressPoint.MAX_VALUE),
+ InetAddressPoint.MAX_VALUE),
// same lo/hi values but inclusive=false so this won't match anything
ft.rangeQuery("255.255.255.255", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", false, true));
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java
index 7780d218b52..387df7ac3ca 100644
--- a/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoDistanceQueryBuilderTests.java
@@ -22,10 +22,10 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.search.GeoPointDistanceQuery;
-import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
import org.elasticsearch.test.AbstractQueryTestCase;
@@ -213,7 +213,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase queryBuilderPoints = queryBuilder.points();
- double[] lats = geoQuery.getLats();
- double[] lons = geoQuery.getLons();
+ assertEquals(1, geoQuery.getPolygons().length);
+ double[] lats = geoQuery.getPolygons()[0].getPolyLats();
+ double[] lons = geoQuery.getPolygons()[0].getPolyLons();
assertThat(lats.length, equalTo(queryBuilderPoints.size()));
assertThat(lons.length, equalTo(queryBuilderPoints.size()));
for (int i=0; i < queryBuilderPoints.size(); ++i) {
@@ -321,8 +322,9 @@ public class GeoPolygonQueryBuilderTests extends AbstractQueryTestCase mappingConsumer = (type, mapping) -> {
@@ -1575,7 +1565,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
}
routing = ShardRoutingHelper.moveToStarted(routing);
- newShard.updateRoutingEntry(routing, true);
+ newShard.updateRoutingEntry(routing);
assertHitCount(client().prepareSearch("index_1").get(), 2);
}
// now check that it's persistent ie. that the added shards are committed
@@ -1587,7 +1577,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.LOCAL_SHARDS, localNode, localNode));
assertTrue(newShard.recoverFromStore());
routing = ShardRoutingHelper.moveToStarted(routing);
- newShard.updateRoutingEntry(routing, true);
+ newShard.updateRoutingEntry(routing);
assertHitCount(client().prepareSearch("index_1").get(), 2);
}
diff --git a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
index 2543668f557..4938f686f60 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
@@ -155,10 +155,12 @@ public class RefreshListenersTests extends ESTestCase {
DummyRefreshListener forcingListener = new DummyRefreshListener();
listeners.addOrNotify(index.getTranslogLocation(), forcingListener);
assertTrue("Forced listener wasn't forced?", forcingListener.forcedRefresh.get());
+ forcingListener.assertNoError();
// That forces all the listeners through. It would be on the listener ThreadPool but we've made all of those execute immediately.
for (DummyRefreshListener listener : nonForcedListeners) {
assertEquals("Expected listener called with unforced refresh!", Boolean.FALSE, listener.forcedRefresh.get());
+ listener.assertNoError();
}
assertFalse(listeners.refreshNeeded());
}
@@ -174,8 +176,9 @@ public class RefreshListenersTests extends ESTestCase {
}
DummyRefreshListener listener = new DummyRefreshListener();
- listeners.addOrNotify(index.getTranslogLocation(), listener);
+ assertTrue(listeners.addOrNotify(index.getTranslogLocation(), listener));
assertFalse(listener.forcedRefresh.get());
+ listener.assertNoError();
}
/**
@@ -192,13 +195,17 @@ public class RefreshListenersTests extends ESTestCase {
});
refresher.start();
try {
- for (int i = 0; i < 100; i++) {
+ for (int i = 0; i < 1000; i++) {
Engine.Index index = index("1");
-
DummyRefreshListener listener = new DummyRefreshListener();
- listeners.addOrNotify(index.getTranslogLocation(), listener);
- assertBusy(() -> assertNotNull(listener.forcedRefresh.get()));
+ boolean immediate = listeners.addOrNotify(index.getTranslogLocation(), listener);
+ if (immediate) {
+ assertNotNull(listener.forcedRefresh.get());
+ } else {
+ assertBusy(() -> assertNotNull(listener.forcedRefresh.get()));
+ }
assertFalse(listener.forcedRefresh.get());
+ listener.assertNoError();
}
} finally {
run.set(false);
@@ -234,6 +241,7 @@ public class RefreshListenersTests extends ESTestCase {
if (threadCount < maxListeners) {
assertFalse(listener.forcedRefresh.get());
}
+ listener.assertNoError();
Engine.Get get = new Engine.Get(false, index.uid());
try (Engine.GetResult getResult = engine.get(get)) {
@@ -281,13 +289,24 @@ public class RefreshListenersTests extends ESTestCase {
/**
* When the listener is called this captures it's only argument.
*/
- private AtomicReference<Boolean> forcedRefresh = new AtomicReference<>();
+ AtomicReference<Boolean> forcedRefresh = new AtomicReference<>();
+ private volatile Throwable error;
@Override
public void accept(Boolean forcedRefresh) {
- assertNotNull(forcedRefresh);
- Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh);
- assertNull("Listener called twice", oldValue);
+ try {
+ assertNotNull(forcedRefresh);
+ Boolean oldValue = this.forcedRefresh.getAndSet(forcedRefresh);
+ assertNull("Listener called twice", oldValue);
+ } catch (Throwable e) {
+ error = e;
+ }
+ }
+
+ public void assertNoError() {
+ if (error != null) {
+ throw new RuntimeException(error);
+ }
}
}
}
diff --git a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java
index ffb64f991cc..f31733dc477 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java
@@ -31,7 +31,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.Version;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.test.ESTestCase;
@@ -74,11 +73,9 @@ public class StoreRecoveryTests extends ESTestCase {
assertEquals(numFiles, targetNumFiles);
assertEquals(indexStats.totalFileCount(), targetNumFiles);
if (hardLinksSupported(createTempDir())) {
- assertEquals("upgrade to HardlinkCopyDirectoryWrapper in Lucene 6.1", Version.LATEST, Version.LUCENE_6_0_1);
- // assertEquals(indexStats.reusedFileCount(), targetNumFiles); -- uncomment this once upgraded to Lucene 6.1
- assertEquals(indexStats.reusedFileCount(), 0);
+ assertEquals(targetNumFiles, indexStats.reusedFileCount());
} else {
- assertEquals(indexStats.reusedFileCount(), 0);
+ assertEquals(0, indexStats.reusedFileCount(), 0);
}
DirectoryReader reader = DirectoryReader.open(target);
SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target);
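
The relaxed assertion above reflects that the copy can now reuse every source file via hard links when the filesystem supports them, using lucene-misc's HardlinkCopyDirectoryWrapper (the same class the new LinkPermission grant is for). A minimal usage sketch, assuming local FSDirectory paths and not taken from this patch:

import java.io.IOException;
import java.nio.file.Path;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.HardlinkCopyDirectoryWrapper;
import org.apache.lucene.store.IOContext;

final class HardlinkCopy {
    // Copies every file from source into target, hard-linking where the filesystem allows it
    // and falling back to a regular byte copy where it does not.
    static void copyAll(Path sourcePath, Path targetPath) throws IOException {
        try (Directory source = FSDirectory.open(sourcePath);
             Directory target = new HardlinkCopyDirectoryWrapper(FSDirectory.open(targetPath))) {
            for (String file : source.listAll()) {
                target.copyFrom(source, file, file, IOContext.DEFAULT);
            }
        }
    }
}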
diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
index 67c431135a0..70eacaafedb 100644
--- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java
@@ -27,7 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.Fields;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.test.ESTestCase;
@@ -105,11 +105,11 @@ public class FileInfoTests extends ESTestCase {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.startObject();
- builder.field(Fields.NAME, name);
- builder.field(Fields.PHYSICAL_NAME, physicalName);
- builder.field(Fields.LENGTH, length);
- builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());
- builder.field(Fields.CHECKSUM, "666");
+ builder.field(FileInfo.NAME, name);
+ builder.field(FileInfo.PHYSICAL_NAME, physicalName);
+ builder.field(FileInfo.LENGTH, length);
+ builder.field(FileInfo.WRITTEN_BY, Version.LATEST.toString());
+ builder.field(FileInfo.CHECKSUM, "666");
builder.endObject();
byte[] xContent = builder.bytes().toBytes();
diff --git a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
index 2722fc9d9d3..1f1b758f349 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java
@@ -455,7 +455,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
assertEquals(1, imc.availableShards().size());
assertTrue(newShard.recoverFromStore());
assertTrue("we should have flushed in IMC at least once but did: " + flushes.get(), flushes.get() >= 1);
- newShard.updateRoutingEntry(routing.moveToStarted(), true);
+ newShard.updateRoutingEntry(routing.moveToStarted());
} finally {
newShard.close("simon says", false);
}
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
index 8d59da7da01..92a411a95de 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
@@ -102,13 +102,13 @@ public class IndicesLifecycleListenerSingleNodeTests extends ESSingleNodeTestCas
newRouting = ShardRoutingHelper.moveToUnassigned(newRouting, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "boom"));
newRouting = ShardRoutingHelper.initialize(newRouting, nodeId);
IndexShard shard = index.createShard(newRouting);
- shard.updateRoutingEntry(newRouting, true);
+ shard.updateRoutingEntry(newRouting);
final DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE,
emptyMap(), emptySet(), Version.CURRENT);
shard.markAsRecovering("store", new RecoveryState(shard.shardId(), newRouting.primary(), RecoveryState.Type.SNAPSHOT, newRouting.restoreSource(), localNode));
shard.recoverFromStore();
newRouting = ShardRoutingHelper.moveToStarted(newRouting);
- shard.updateRoutingEntry(newRouting, true);
+ shard.updateRoutingEntry(newRouting);
} finally {
indicesService.deleteIndex(idx, "simon says");
}
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
index 5a4aa2e6b24..cd94ee0f8e9 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java
@@ -35,6 +35,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesQueryCache;
@@ -54,12 +55,12 @@ public class IndicesQueryCacheTests extends ESTestCase {
@Override
public boolean equals(Object obj) {
- return super.equals(obj) && id == ((DummyQuery) obj).id;
+ return sameClassAs(obj) && id == ((DummyQuery) obj).id;
}
@Override
public int hashCode() {
- return 31 * super.hashCode() + id;
+ return 31 * classHash() + id;
}
@Override
@@ -93,6 +94,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
Settings settings = Settings.builder()
.put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
+ .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
.build();
IndicesQueryCache cache = new IndicesQueryCache(settings);
s.setQueryCache(cache);
@@ -173,6 +175,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
Settings settings = Settings.builder()
.put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
+ .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
.build();
IndicesQueryCache cache = new IndicesQueryCache(settings);
s1.setQueryCache(cache);
@@ -298,6 +301,7 @@ public class IndicesQueryCacheTests extends ESTestCase {
Settings settings = Settings.builder()
.put(IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING.getKey(), 10)
+ .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
.build();
IndicesQueryCache cache = new IndicesQueryCache(settings);
s1.setQueryCache(cache);
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
new file mode 100644
index 00000000000..69bee510710
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java
@@ -0,0 +1,318 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cluster;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.Callback;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.NodeServicesProvider;
+import org.elasticsearch.index.shard.IndexEventListener;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndex;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService.Shard;
+import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.unmodifiableMap;
+import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Abstract base class for tests against {@link IndicesClusterStateService}
+ */
+public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestCase {
+
+
+ protected void failRandomly() {
+ if (rarely()) {
+ throw new RuntimeException("dummy test failure");
+ }
+ }
+
+ /**
+ * Checks if cluster state matches internal state of IndicesClusterStateService instance
+ *
+ * @param state cluster state used for matching
+ */
+ public static void assertClusterStateMatchesNodeState(ClusterState state, IndicesClusterStateService indicesClusterStateService) {
+ AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService =
+ indicesClusterStateService.indicesService;
+ ConcurrentMap<ShardId, ShardRouting> failedShardsCache = indicesClusterStateService.failedShardsCache;
+ RoutingNode localRoutingNode = state.getRoutingNodes().node(state.getNodes().getLocalNodeId());
+ if (localRoutingNode != null) {
+ // check that all shards in local routing nodes have been allocated
+ for (ShardRouting shardRouting : localRoutingNode) {
+ Index index = shardRouting.index();
+ IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
+
+ Shard shard = indicesService.getShardOrNull(shardRouting.shardId());
+ ShardRouting failedShard = failedShardsCache.get(shardRouting.shardId());
+ if (shard == null && failedShard == null) {
+ fail("Shard with id " + shardRouting + " expected but missing in indicesService and failedShardsCache");
+ }
+ if (failedShard != null && failedShard.isSameAllocation(shardRouting) == false) {
+ fail("Shard cache has not been properly cleaned for " + failedShard);
+ }
+
+ if (shard != null) {
+ AllocatedIndex<? extends Shard> indexService = indicesService.indexService(index);
+ assertTrue("Index " + index + " expected but missing in indicesService", indexService != null);
+
+ // index metadata has been updated
+ assertThat(indexService.getIndexSettings().getIndexMetaData(), equalTo(indexMetaData));
+ // shard has been created
+ if (failedShard == null) {
+ assertTrue("Shard with id " + shardRouting + " expected but missing in indexService",
+ shard != null);
+ // shard has latest shard routing
+ assertThat(shard.routingEntry(), equalTo(shardRouting));
+ }
+ }
+ }
+ }
+
+ // all other shards / indices have been cleaned up
+ for (AllocatedIndex<? extends Shard> indexService : indicesService) {
+ assertTrue(state.metaData().getIndexSafe(indexService.index()) != null);
+
+ boolean shardsFound = false;
+ for (Shard shard : indexService) {
+ shardsFound = true;
+ ShardRouting persistedShardRouting = shard.routingEntry();
+ boolean found = false;
+ for (ShardRouting shardRouting : localRoutingNode) {
+ if (persistedShardRouting.equals(shardRouting)) {
+ found = true;
+ }
+ }
+ assertTrue(found);
+ }
+
+ if (shardsFound == false) {
+ // check if we have shards of that index in failedShardsCache
+ // if yes, we might not have cleaned the index as failedShardsCache can be populated by another thread
+ assertFalse(failedShardsCache.keySet().stream().noneMatch(shardId -> shardId.getIndex().equals(indexService.index())));
+ }
+
+ }
+ }
+
+ /**
+ * Mock for {@link IndicesService}
+ */
+ protected class MockIndicesService implements AllocatedIndices<MockIndexShard, MockIndexService> {
+ private volatile Map<String, MockIndexService> indices = emptyMap();
+
+ @Override
+ public synchronized MockIndexService createIndex(NodeServicesProvider nodeServicesProvider, IndexMetaData indexMetaData,
+ List<IndexEventListener> buildInIndexListener) throws IOException {
+ MockIndexService indexService = new MockIndexService(new IndexSettings(indexMetaData, Settings.EMPTY));
+ indices = newMapBuilder(indices).put(indexMetaData.getIndexUUID(), indexService).immutableMap();
+ return indexService;
+ }
+
+ @Override
+ public IndexMetaData verifyIndexIsDeleted(Index index, ClusterState state) {
+ return null;
+ }
+
+ @Override
+ public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
+
+ }
+
+ @Override
+ public synchronized void deleteIndex(Index index, String reason) {
+ if (hasIndex(index) == false) {
+ return;
+ }
+ Map<String, MockIndexService> newIndices = new HashMap<>(indices);
+ newIndices.remove(index.getUUID());
+ indices = unmodifiableMap(newIndices);
+ }
+
+ @Override
+ public synchronized void removeIndex(Index index, String reason) {
+ if (hasIndex(index) == false) {
+ return;
+ }
+ Map<String, MockIndexService> newIndices = new HashMap<>(indices);
+ newIndices.remove(index.getUUID());
+ indices = unmodifiableMap(newIndices);
+ }
+
+ @Override
+ public @Nullable MockIndexService indexService(Index index) {
+ return indices.get(index.getUUID());
+ }
+
+ @Override
+ public MockIndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState,
+ RecoveryTargetService recoveryTargetService,
+ RecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
+ NodeServicesProvider nodeServicesProvider, Callback<IndexShard.ShardFailure> onShardFailure)
+ throws IOException {
+ failRandomly();
+ MockIndexService indexService = indexService(recoveryState.getShardId().getIndex());
+ MockIndexShard indexShard = indexService.createShard(shardRouting);
+ indexShard.recoveryState = recoveryState;
+ return indexShard;
+ }
+
+ @Override
+ public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) throws IOException,
+ InterruptedException {
+
+ }
+
+ private boolean hasIndex(Index index) {
+ return indices.containsKey(index.getUUID());
+ }
+
+ @Override
+ public Iterator<MockIndexService> iterator() {
+ return indices.values().iterator();
+ }
+ }
+
+ /**
+ * Mock for {@link IndexService}
+ */
+ protected class MockIndexService implements AllocatedIndex<MockIndexShard> {
+ private volatile Map<Integer, MockIndexShard> shards = emptyMap();
+
+ private final IndexSettings indexSettings;
+
+ public MockIndexService(IndexSettings indexSettings) {
+ this.indexSettings = indexSettings;
+ }
+
+ @Override
+ public IndexSettings getIndexSettings() {
+ return indexSettings;
+ }
+
+ @Override
+ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException {
+ failRandomly();
+ return false;
+ }
+
+ @Override
+ public void updateMetaData(IndexMetaData indexMetaData) {
+ indexSettings.updateIndexMetaData(indexMetaData);
+ }
+
+ @Override
+ public MockIndexShard getShardOrNull(int shardId) {
+ return shards.get(shardId);
+ }
+
+ public synchronized MockIndexShard createShard(ShardRouting routing) throws IOException {
+ failRandomly();
+ MockIndexShard shard = new MockIndexShard(routing);
+ shards = newMapBuilder(shards).put(routing.id(), shard).immutableMap();
+ return shard;
+ }
+
+ @Override
+ public synchronized void removeShard(int shardId, String reason) {
+ if (shards.containsKey(shardId) == false) {
+ return;
+ }
+ HashMap<Integer, MockIndexShard> newShards = new HashMap<>(shards);
+ MockIndexShard indexShard = newShards.remove(shardId);
+ assert indexShard != null;
+ shards = unmodifiableMap(newShards);
+ }
+
+ @Override
+ public Iterator<MockIndexShard> iterator() {
+ return shards.values().iterator();
+ }
+
+ @Override
+ public Index index() {
+ return indexSettings.getIndex();
+ }
+ }
+
+ /**
+ * Mock for {@link IndexShard}
+ */
+ protected class MockIndexShard implements IndicesClusterStateService.Shard {
+ private volatile ShardRouting shardRouting;
+ private volatile RecoveryState recoveryState;
+
+ public MockIndexShard(ShardRouting shardRouting) {
+ this.shardRouting = shardRouting;
+ }
+
+ @Override
+ public ShardId shardId() {
+ return shardRouting.shardId();
+ }
+
+ @Override
+ public RecoveryState recoveryState() {
+ return recoveryState;
+ }
+
+ @Override
+ public ShardRouting routingEntry() {
+ return shardRouting;
+ }
+
+ @Override
+ public IndexShardState state() {
+ return null;
+ }
+
+ @Override
+ public void updateRoutingEntry(ShardRouting shardRouting) throws IOException {
+ failRandomly();
+ assert this.shardId().equals(shardRouting.shardId());
+ assert this.shardRouting.isSameAllocation(shardRouting);
+ this.shardRouting = shardRouting;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
new file mode 100644
index 00000000000..84e83db6d1d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cluster;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.open.TransportOpenIndexAction;
+import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.support.master.MasterNodeRequest;
+import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.metadata.AliasValidator;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
+import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
+import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
+import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
+import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RandomAllocationDeciderTests;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.IndexScopedSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.NodeServicesProvider;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.shard.IndexEventListener;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
+import static org.elasticsearch.env.Environment.PATH_HOME_SETTING;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyList;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
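+/**
+ * Applies master-level cluster state changes (index creation/deletion, open/close, settings updates, reroutes,
+ * started/failed shards) to a given {@link ClusterState} by running the corresponding transport master node actions
+ * against a mocked {@link ClusterService} and returning the resulting state.
+ */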
+public class ClusterStateChanges {
+
+ private final ClusterService clusterService;
+ private final AllocationService allocationService;
+
+ // transport actions
+ private final TransportCloseIndexAction transportCloseIndexAction;
+ private final TransportOpenIndexAction transportOpenIndexAction;
+ private final TransportDeleteIndexAction transportDeleteIndexAction;
+ private final TransportUpdateSettingsAction transportUpdateSettingsAction;
+ private final TransportClusterRerouteAction transportClusterRerouteAction;
+ private final TransportCreateIndexAction transportCreateIndexAction;
+
+ public ClusterStateChanges() {
+ Settings settings = Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build();
+
+ allocationService = new AllocationService(settings, new AllocationDeciders(settings,
+ new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings),
+ new ReplicaAfterPrimaryActiveAllocationDecider(settings),
+ new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))),
+ NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings),
+ EmptyClusterInfoService.INSTANCE);
+ ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ ActionFilters actionFilters = new ActionFilters(Collections.emptySet());
+ IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
+ DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterSettings);
+ Environment environment = new Environment(settings);
+ ThreadPool threadPool = null; // it's not used
+ Transport transport = null; // it's not used
+
+ // mocks
+ clusterService = mock(ClusterService.class);
+ IndicesService indicesService = mock(IndicesService.class);
+ // MetaDataCreateIndexService creates indices using its IndicesService instance to check mappings -> fake it here
+ try {
+ when(indicesService.createIndex(any(NodeServicesProvider.class), any(IndexMetaData.class), anyList()))
+ .then(invocationOnMock -> {
+ IndexService indexService = mock(IndexService.class);
+ IndexMetaData indexMetaData = (IndexMetaData)invocationOnMock.getArguments()[1];
+ when(indexService.index()).thenReturn(indexMetaData.getIndex());
+ MapperService mapperService = mock(MapperService.class);
+ when(indexService.mapperService()).thenReturn(mapperService);
+ when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList());
+ when(indexService.getIndexEventListener()).thenReturn(new IndexEventListener() {});
+ return indexService;
+ });
+ } catch (IOException e) {
+ throw new IllegalStateException(e);
+ }
+
+ // services
+ TransportService transportService = new TransportService(settings, transport, threadPool, null);
+ MetaDataIndexUpgradeService metaDataIndexUpgradeService = new MetaDataIndexUpgradeService(settings, null, null) {
+ // metaData upgrader should do nothing
+ @Override
+ public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) {
+ return indexMetaData;
+ }
+ };
+ NodeServicesProvider nodeServicesProvider = new NodeServicesProvider(threadPool, null, null, null, null, null, clusterService);
+ MetaDataIndexStateService indexStateService = new MetaDataIndexStateService(settings, clusterService, allocationService,
+ metaDataIndexUpgradeService, nodeServicesProvider, indicesService);
+ MetaDataDeleteIndexService deleteIndexService = new MetaDataDeleteIndexService(settings, clusterService, allocationService);
+ MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(settings, clusterService,
+ allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, new IndexNameExpressionResolver(settings));
+ MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(settings, clusterService, indicesService,
+ allocationService, Version.CURRENT, new AliasValidator(settings), Collections.emptySet(), environment,
+ nodeServicesProvider, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
+
+ transportCloseIndexAction = new TransportCloseIndexAction(settings, transportService, clusterService, threadPool,
+ indexStateService, clusterSettings, actionFilters, indexNameExpressionResolver, destructiveOperations);
+ transportOpenIndexAction = new TransportOpenIndexAction(settings, transportService,
+ clusterService, threadPool, indexStateService, actionFilters, indexNameExpressionResolver, destructiveOperations);
+ transportDeleteIndexAction = new TransportDeleteIndexAction(settings, transportService,
+ clusterService, threadPool, deleteIndexService, actionFilters, indexNameExpressionResolver, destructiveOperations);
+ transportUpdateSettingsAction = new TransportUpdateSettingsAction(settings,
+ transportService, clusterService, threadPool, metaDataUpdateSettingsService, actionFilters, indexNameExpressionResolver);
+ transportClusterRerouteAction = new TransportClusterRerouteAction(settings,
+ transportService, clusterService, threadPool, allocationService, actionFilters, indexNameExpressionResolver);
+ transportCreateIndexAction = new TransportCreateIndexAction(settings,
+ transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver);
+ }
+
+ public ClusterState createIndex(ClusterState state, CreateIndexRequest request) {
+ return execute(transportCreateIndexAction, request, state);
+ }
+
+ public ClusterState closeIndices(ClusterState state, CloseIndexRequest request) {
+ return execute(transportCloseIndexAction, request, state);
+ }
+
+ public ClusterState openIndices(ClusterState state, OpenIndexRequest request) {
+ return execute(transportOpenIndexAction, request, state);
+ }
+
+ public ClusterState deleteIndices(ClusterState state, DeleteIndexRequest request) {
+ return execute(transportDeleteIndexAction, request, state);
+ }
+
+ public ClusterState updateSettings(ClusterState state, UpdateSettingsRequest request) {
+ return execute(transportUpdateSettingsAction, request, state);
+ }
+
+ public ClusterState reroute(ClusterState state, ClusterRerouteRequest request) {
+ return execute(transportClusterRerouteAction, request, state);
+ }
+
+ public ClusterState applyFailedShards(ClusterState clusterState, List<FailedRerouteAllocation.FailedShard> failedShards) {
+ RoutingAllocation.Result rerouteResult = allocationService.applyFailedShards(clusterState, failedShards);
+ return ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+ }
+
+ public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
+ RoutingAllocation.Result rerouteResult = allocationService.applyStartedShards(clusterState, startedShards);
+ return ClusterState.builder(clusterState).routingResult(rerouteResult).build();
+ }
+
+ private <Request extends MasterNodeRequest<Request>, Response extends ActionResponse> ClusterState execute(
+ TransportMasterNodeAction<Request, Response> masterNodeAction, Request request, ClusterState clusterState) {
+ return executeClusterStateUpdateTask(clusterState, () -> {
+ try {
+ TransportMasterNodeActionUtils.runMasterOperation(masterNodeAction, request, clusterState, new PlainActionFuture<>());
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ private ClusterState executeClusterStateUpdateTask(ClusterState state, Runnable runnable) {
+ ClusterState[] result = new ClusterState[1];
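+ // The ClusterService is a mock, so intercept the ClusterStateUpdateTask that the action submits and execute it
+ // synchronously against the supplied state, capturing the resulting cluster state for the caller.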
+ doAnswer(invocationOnMock -> {
+ ClusterStateUpdateTask task = (ClusterStateUpdateTask)invocationOnMock.getArguments()[1];
+ result[0] = task.execute(state);
+ return null;
+ }).when(clusterService).submitStateUpdateTask(anyString(), any(ClusterStateUpdateTask.class));
+ runnable.run();
+ assertThat(result[0], notNullValue());
+ return result[0];
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
new file mode 100644
index 00000000000..8c63c001a1e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cluster;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.FailedShard;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.indices.recovery.RecoveryTargetService;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.Executor;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
+
+ private final ClusterStateChanges cluster = new ClusterStateChanges();
+
+ public void testRandomClusterStateUpdates() {
+ // we have an IndicesClusterStateService per node in the cluster
+ final Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap = new HashMap<>();
+ ClusterState state = randomInitialClusterState(clusterStateServiceMap);
+
+ // each of the following iterations represents a new cluster state update processed on all nodes
+ for (int i = 0; i < 30; i++) {
+ logger.info("Iteration {}", i);
+ final ClusterState previousState = state;
+
+ // calculate new cluster state
+ for (int j = 0; j < randomInt(3); j++) { // multiple iterations to simulate batching of cluster states
+ state = randomlyUpdateClusterState(state, clusterStateServiceMap);
+ }
+
+ // apply cluster state to nodes (incl. master)
+ for (DiscoveryNode node : state.nodes()) {
+ IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
+ ClusterState localState = adaptClusterStateToLocalNode(state, node);
+ ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node);
+ indicesClusterStateService.clusterChanged(new ClusterChangedEvent("simulated change " + i, localState, previousLocalState));
+
+ // check that cluster state has been properly applied to node
+ assertClusterStateMatchesNodeState(localState, indicesClusterStateService);
+ }
+ }
+
+ // TODO: check if we can go to green by starting all shards and finishing all iterations
+ logger.info("Final cluster state: {}", state.prettyPrint());
+ }
+
+ public ClusterState randomInitialClusterState(Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap) {
+ List<DiscoveryNode> allNodes = new ArrayList<>();
+ DiscoveryNode localNode = createNode(DiscoveryNode.Role.MASTER); // local node is the master
+ allNodes.add(localNode);
+ // at least two nodes that have the data role so that we can allocate shards
+ allNodes.add(createNode(DiscoveryNode.Role.DATA));
+ allNodes.add(createNode(DiscoveryNode.Role.DATA));
+ for (int i = 0; i < randomIntBetween(2, 5); i++) {
+ allNodes.add(createNode());
+ }
+ ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()]));
+ // add nodes to clusterStateServiceMap
+ updateNodes(state, clusterStateServiceMap);
+ return state;
+ }
+
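+ // create (and start) an IndicesClusterStateService for every node in the cluster state and drop the services of
+ // nodes that have left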
+ private void updateNodes(ClusterState state, Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap) {
+ for (DiscoveryNode node : state.nodes()) {
+ clusterStateServiceMap.computeIfAbsent(node, discoveryNode -> {
+ IndicesClusterStateService ics = createIndicesClusterStateService();
+ ics.start();
+ return ics;
+ });
+ }
+
+ for (Iterator<Entry<DiscoveryNode, IndicesClusterStateService>> it = clusterStateServiceMap.entrySet().iterator(); it.hasNext(); ) {
+ DiscoveryNode node = it.next().getKey();
+ if (state.nodes().nodeExists(node.getId()) == false) {
+ it.remove();
+ }
+ }
+ }
+
+ public ClusterState randomlyUpdateClusterState(ClusterState state,
+ Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap) {
+ // randomly create new indices (until we have 200 max)
+ for (int i = 0; i < randomInt(5); i++) {
+ if (state.metaData().indices().size() > 200) {
+ break;
+ }
+ String name = "index_" + randomAsciiOfLength(15).toLowerCase(Locale.ROOT);
+ CreateIndexRequest request = new CreateIndexRequest(name, Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3))
+ .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2))
+ .build());
+ state = cluster.createIndex(state, request);
+ assertTrue(state.metaData().hasIndex(name));
+ }
+
+ // randomly delete indices
+ Set<String> indicesToDelete = new HashSet<>();
+ int numberOfIndicesToDelete = randomInt(Math.min(2, state.metaData().indices().size()));
+ for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metaData().indices().keys().toArray(String.class))) {
+ indicesToDelete.add(state.metaData().index(index).getIndex().getName());
+ }
+ if (indicesToDelete.isEmpty() == false) {
+ DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()]));
+ state = cluster.deleteIndices(state, deleteRequest);
+ for (String index : indicesToDelete) {
+ assertFalse(state.metaData().hasIndex(index));
+ }
+ }
+
+ // randomly close indices
+ int numberOfIndicesToClose = randomInt(Math.min(1, state.metaData().indices().size()));
+ for (String index : randomSubsetOf(numberOfIndicesToClose, state.metaData().indices().keys().toArray(String.class))) {
+ CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName());
+ state = cluster.closeIndices(state, closeIndexRequest);
+ }
+
+ // randomly open indices
+ int numberOfIndicesToOpen = randomInt(Math.min(1, state.metaData().indices().size()));
+ for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metaData().indices().keys().toArray(String.class))) {
+ OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName());
+ state = cluster.openIndices(state, openIndexRequest);
+ }
+
+ // randomly update settings
+ Set<String> indicesToUpdate = new HashSet<>();
+ boolean containsClosedIndex = false;
+ int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metaData().indices().size()));
+ for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metaData().indices().keys().toArray(String.class))) {
+ indicesToUpdate.add(state.metaData().index(index).getIndex().getName());
+ if (state.metaData().index(index).getState() == IndexMetaData.State.CLOSE) {
+ containsClosedIndex = true;
+ }
+ }
+ if (indicesToUpdate.isEmpty() == false) {
+ UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(
+ indicesToUpdate.toArray(new String[indicesToUpdate.size()]));
+ Settings.Builder settings = Settings.builder();
+ if (containsClosedIndex == false) {
+ settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
+ }
+ settings.put("index.refresh_interval", randomIntBetween(1, 5) + "s");
+ updateSettingsRequest.settings(settings.build());
+ state = cluster.updateSettings(state, updateSettingsRequest);
+ }
+
+ // randomly reroute
+ if (rarely()) {
+ state = cluster.reroute(state, new ClusterRerouteRequest());
+ }
+
+ // randomly start and fail allocated shards
+ List<ShardRouting> startedShards = new ArrayList<>();
+ List<FailedShard> failedShards = new ArrayList<>();
+ for (DiscoveryNode node : state.nodes()) {
+ IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
+ MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService;
+ for (MockIndexService indexService : indicesService) {
+ for (MockIndexShard indexShard : indexService) {
+ ShardRouting persistedShardRouting = indexShard.routingEntry();
+ if (persistedShardRouting.initializing() && randomBoolean()) {
+ startedShards.add(persistedShardRouting);
+ } else if (rarely()) {
+ failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception()));
+ }
+ }
+ }
+ }
+ state = cluster.applyFailedShards(state, failedShards);
+ state = cluster.applyStartedShards(state, startedShards);
+
+ // randomly add and remove nodes (except current master)
+ if (rarely()) {
+ if (randomBoolean()) {
+ // add node
+ if (state.nodes().getSize() < 10) {
+ DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).put(createNode()).build();
+ state = ClusterState.builder(state).nodes(newNodes).build();
+ state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node join
+ updateNodes(state, clusterStateServiceMap);
+ }
+ } else {
+ // remove node
+ if (state.nodes().getDataNodes().size() > 3) {
+ DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class));
+ if (discoveryNode.equals(state.nodes().getMasterNode()) == false) {
+ DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build();
+ state = ClusterState.builder(state).nodes(newNodes).build();
+ state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node leave
+ updateNodes(state, clusterStateServiceMap);
+ }
+ }
+ }
+ }
+
+ // TODO: go masterless?
+
+ return state;
+ }
+
+ protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) {
+ Set<DiscoveryNode.Role> roles = new HashSet<>(randomSubsetOf(Sets.newHashSet(DiscoveryNode.Role.values())));
+ for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) {
+ roles.add(mustHaveRole);
+ }
+ return new DiscoveryNode("node_" + randomAsciiOfLength(8), DummyTransportAddress.INSTANCE, Collections.emptyMap(), roles,
+ Version.CURRENT);
+ }
+
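+ // every simulated node applies the same cluster state but sees itself as the local node, mirroring how a published
+ // cluster state is applied on each node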
+ private static ClusterState adaptClusterStateToLocalNode(ClusterState state, DiscoveryNode node) {
+ return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build();
+ }
+
+ private IndicesClusterStateService createIndicesClusterStateService() {
+ final ThreadPool threadPool = mock(ThreadPool.class);
+ final Executor executor = mock(Executor.class);
+ when(threadPool.generic()).thenReturn(executor);
+ final MockIndicesService indicesService = new MockIndicesService();
+ final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool, null);
+ final ClusterService clusterService = mock(ClusterService.class);
+ final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService,
+ transportService, null, null);
+ final RecoveryTargetService recoveryTargetService = new RecoveryTargetService(Settings.EMPTY, threadPool,
+ transportService, null, clusterService);
+ final ShardStateAction shardStateAction = mock(ShardStateAction.class);
+ return new IndicesClusterStateService(Settings.EMPTY, indicesService, clusterService,
+ threadPool, recoveryTargetService, shardStateAction, null, repositoriesService, null, null, null, null, null);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
index 3565cf0147d..a4096fde9da 100644
--- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java
@@ -47,6 +47,7 @@ import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.indices.IndicesRequestCache;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.search.sort.SortOrder;
@@ -78,6 +79,7 @@ public class IndexStatsIT extends ESIntegTestCase {
//Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. Thread.sleep for 60s is bad
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
.put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms")
+ .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true)
.build();
}
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java
index 7e01f575822..398ef64bc92 100644
--- a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java
@@ -126,6 +126,8 @@ public class GeoBoundingBoxIT extends ESIntegTestCase {
}
}
+ // norelease
+ @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-7325")
public void testLimitsBoundingBox() throws Exception {
Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
index b8b04a8bc33..0debdb263af 100644
--- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
@@ -30,7 +30,6 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.query.SpatialOperation;
import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
-import org.apache.lucene.spatial.util.GeoProjectionUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.bulk.BulkItemResponse;
@@ -42,6 +41,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
import org.elasticsearch.common.geo.builders.LineStringBuilder;
import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
@@ -540,7 +540,7 @@ public class GeoFilterIT extends ESIntegTestCase {
}
public static double distance(double lat1, double lon1, double lat2, double lon2) {
- return GeoProjectionUtils.SEMIMAJOR_AXIS * DistanceUtils.distHaversineRAD(
+ return GeoUtils.EARTH_SEMI_MAJOR_AXIS * DistanceUtils.distHaversineRAD(
DistanceUtils.toRadians(lat1),
DistanceUtils.toRadians(lon1),
DistanceUtils.toRadians(lat2),
diff --git a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java
index ba378a3c404..e0aec941487 100644
--- a/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java
+++ b/core/src/test/java/org/elasticsearch/search/nested/SimpleNestedIT.java
@@ -318,13 +318,7 @@ public class SimpleNestedIT extends ESIntegTestCase {
assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
Explanation explanation = searchResponse.getHits().hits()[0].explanation();
assertThat(explanation.getValue(), equalTo(2f));
- assertThat(explanation.toString(), startsWith("2.0 = sum of:\n 2.0 = Score based on child doc range from 0 to 1\n"));
- // TODO: Enable when changes from BlockJoinQuery#explain are added to Lucene (Most likely version 4.2)
-// assertThat(explanation.getDetails().length, equalTo(2));
-// assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
-// assertThat(explanation.getDetails()[0].getDescription(), equalTo("Child[0]"));
-// assertThat(explanation.getDetails()[1].getValue(), equalTo(1f));
-// assertThat(explanation.getDetails()[1].getDescription(), equalTo("Child[1]"));
+ assertThat(explanation.toString(), startsWith("2.0 = sum of:\n 2.0 = Score based on 2 child docs in range from 0 to 1"));
}
public void testSimpleNestedSorting() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
new file mode 100644
index 00000000000..848b230b3fa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java
@@ -0,0 +1,403 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile.aggregation;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator;
+import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator;
+import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator;
+import org.elasticsearch.search.aggregations.metrics.max.MaxAggregator;
+import org.elasticsearch.search.profile.ProfileResult;
+import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult;
+import org.elasticsearch.search.profile.aggregation.AggregationTimingType;
+import org.elasticsearch.test.ESIntegTestCase;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.notNullValue;
+
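+/**
+ * Integration tests for the aggregation profiler: each search below asserts that every shard reports a profile tree
+ * with one {@link ProfileResult} per aggregator and a time breakdown where initialize, collect and build_aggregation
+ * are greater than zero while reduce is zero at the shard level.
+ */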
+@ESIntegTestCase.SuiteScopeTestCase
+public class AggregationProfilerIT extends ESIntegTestCase {
+
+
+ private static final String NUMBER_FIELD = "number";
+ private static final String TAG_FIELD = "tag";
+ private static final String STRING_FIELD = "string_field";
+
+ @Override
+ protected int numberOfShards() {
+ return 1;
+ }
+
+ @Override
+ protected void setupSuiteScopeCluster() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("idx")
+ .addMapping("type", STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword").get());
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ String[] randomStrings = new String[randomIntBetween(2, 10)];
+ for (int i = 0; i < randomStrings.length; i++) {
+ randomStrings[i] = randomAsciiOfLength(10);
+ }
+
+ for (int i = 0; i < 5; i++) {
+ builders.add(client().prepareIndex("idx", "type").setSource(
+ jsonBuilder().startObject()
+ .field(STRING_FIELD, randomFrom(randomStrings))
+ .field(NUMBER_FIELD, randomIntBetween(0, 9))
+ .field(TAG_FIELD, randomBoolean() ? "more" : "less")
+ .endObject()));
+ }
+
+ indexRandom(true, builders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ public void testSimpleProfile() {
+ SearchResponse response = client().prepareSearch("idx").setProfile(true)
+ .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)).get();
+ assertSearchResponse(response);
+ Map<String, ProfileShardResult> profileResults = response.getProfileResults();
+ assertThat(profileResults, notNullValue());
+ assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+ for (ProfileShardResult profileShardResult : profileResults.values()) {
+ assertThat(profileShardResult, notNullValue());
+ AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+ assertThat(aggProfileResults, notNullValue());
+ List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+ assertThat(aggProfileResultsList, notNullValue());
+ assertThat(aggProfileResultsList.size(), equalTo(1));
+ ProfileResult histoAggResult = aggProfileResultsList.get(0);
+ assertThat(histoAggResult, notNullValue());
+ assertThat(histoAggResult.getQueryName(), equalTo(HistogramAggregator.class.getName()));
+ assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+ assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0));
+ assertThat(histoAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> breakdown = histoAggResult.getTimeBreakdown();
+ assertThat(breakdown, notNullValue());
+ assertThat(breakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(breakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(breakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(breakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(breakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(breakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(breakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(breakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+
+ }
+ }
+
+ public void testMultiLevelProfile() {
+ SearchResponse response = client().prepareSearch("idx").setProfile(true)
+ .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)
+ .subAggregation(terms("terms").field(TAG_FIELD)
+ .subAggregation(avg("avg").field(NUMBER_FIELD)))).get();
+ assertSearchResponse(response);
+ Map<String, ProfileShardResult> profileResults = response.getProfileResults();
+ assertThat(profileResults, notNullValue());
+ assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+ for (ProfileShardResult profileShardResult : profileResults.values()) {
+ assertThat(profileShardResult, notNullValue());
+ AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+ assertThat(aggProfileResults, notNullValue());
+ List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+ assertThat(aggProfileResultsList, notNullValue());
+ assertThat(aggProfileResultsList.size(), equalTo(1));
+ ProfileResult histoAggResult = aggProfileResultsList.get(0);
+ assertThat(histoAggResult, notNullValue());
+ assertThat(histoAggResult.getQueryName(), equalTo(HistogramAggregator.class.getName()));
+ assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+ assertThat(histoAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
+ assertThat(histoBreakdown, notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(histoAggResult.getProfiledChildren().size(), equalTo(1));
+
+ ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0);
+ assertThat(termsAggResult, notNullValue());
+ assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName()));
+ assertThat(termsAggResult.getLuceneDescription(), equalTo("terms"));
+ assertThat(termsAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> termsBreakdown = termsAggResult.getTimeBreakdown();
+ assertThat(termsBreakdown, notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(termsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(termsAggResult.getProfiledChildren().size(), equalTo(1));
+
+ ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0);
+ assertThat(avgAggResult, notNullValue());
+ assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName()));
+ assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
+ assertThat(avgAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
+ assertThat(avgBreakdown, notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+ }
+ }
+
+ public void testComplexProfile() {
+ SearchResponse response = client().prepareSearch("idx").setProfile(true)
+ .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)
+ .subAggregation(terms("tags").field(TAG_FIELD)
+ .subAggregation(avg("avg").field(NUMBER_FIELD))
+ .subAggregation(max("max").field(NUMBER_FIELD)))
+ .subAggregation(terms("strings").field(STRING_FIELD)
+ .subAggregation(avg("avg").field(NUMBER_FIELD))
+ .subAggregation(max("max").field(NUMBER_FIELD))
+ .subAggregation(terms("tags").field(TAG_FIELD)
+ .subAggregation(avg("avg").field(NUMBER_FIELD))
+ .subAggregation(max("max").field(NUMBER_FIELD)))))
+ .get();
+ assertSearchResponse(response);
+ Map<String, ProfileShardResult> profileResults = response.getProfileResults();
+ assertThat(profileResults, notNullValue());
+ assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries));
+ for (ProfileShardResult profileShardResult : profileResults.values()) {
+ assertThat(profileShardResult, notNullValue());
+ AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults();
+ assertThat(aggProfileResults, notNullValue());
+ List<ProfileResult> aggProfileResultsList = aggProfileResults.getProfileResults();
+ assertThat(aggProfileResultsList, notNullValue());
+ assertThat(aggProfileResultsList.size(), equalTo(1));
+ ProfileResult histoAggResult = aggProfileResultsList.get(0);
+ assertThat(histoAggResult, notNullValue());
+ assertThat(histoAggResult.getQueryName(), equalTo(HistogramAggregator.class.getName()));
+ assertThat(histoAggResult.getLuceneDescription(), equalTo("histo"));
+ assertThat(histoAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> histoBreakdown = histoAggResult.getTimeBreakdown();
+ assertThat(histoBreakdown, notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(histoBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(histoAggResult.getProfiledChildren().size(), equalTo(2));
+
+ ProfileResult tagsAggResult = histoAggResult.getProfiledChildren().get(0);
+ assertThat(tagsAggResult, notNullValue());
+ assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName()));
+ assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags"));
+ assertThat(tagsAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> tagsBreakdown = tagsAggResult.getTimeBreakdown();
+ assertThat(tagsBreakdown, notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2));
+
+ ProfileResult avgAggResult = tagsAggResult.getProfiledChildren().get(0);
+ assertThat(avgAggResult, notNullValue());
+ assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName()));
+ assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
+ assertThat(avgAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> avgBreakdown = avgAggResult.getTimeBreakdown();
+ assertThat(avgBreakdown, notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+
+ ProfileResult maxAggResult = tagsAggResult.getProfiledChildren().get(1);
+ assertThat(maxAggResult, notNullValue());
+ assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName()));
+ assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
+ assertThat(maxAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> maxBreakdown = maxAggResult.getTimeBreakdown();
+ assertThat(maxBreakdown, notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+
+ ProfileResult stringsAggResult = histoAggResult.getProfiledChildren().get(1);
+ assertThat(stringsAggResult, notNullValue());
+ assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName()));
+ assertThat(stringsAggResult.getLuceneDescription(), equalTo("strings"));
+ assertThat(stringsAggResult.getTime(), greaterThan(0L));
+ Map<String, Long> stringsBreakdown = stringsAggResult.getTimeBreakdown();
+ assertThat(stringsBreakdown, notNullValue());
+ assertThat(stringsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(stringsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(stringsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(stringsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(stringsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(stringsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(stringsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(stringsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(stringsAggResult.getProfiledChildren().size(), equalTo(3));
+
+ avgAggResult = stringsAggResult.getProfiledChildren().get(0);
+ assertThat(avgAggResult, notNullValue());
+ assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName()));
+ assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
+ assertThat(avgAggResult.getTime(), greaterThan(0L));
+ avgBreakdown = avgAggResult.getTimeBreakdown();
+ assertThat(avgBreakdown, notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+
+ maxAggResult = stringsAggResult.getProfiledChildren().get(1);
+ assertThat(maxAggResult, notNullValue());
+ assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName()));
+ assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
+ assertThat(maxAggResult.getTime(), greaterThan(0L));
+ maxBreakdown = maxAggResult.getTimeBreakdown();
+ assertThat(maxBreakdown, notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+
+ tagsAggResult = stringsAggResult.getProfiledChildren().get(2);
+ assertThat(tagsAggResult, notNullValue());
+ assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.WithHash.class.getName()));
+ assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags"));
+ assertThat(tagsAggResult.getTime(), greaterThan(0L));
+ tagsBreakdown = tagsAggResult.getTimeBreakdown();
+ assertThat(tagsBreakdown, notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(tagsBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(tagsAggResult.getProfiledChildren().size(), equalTo(2));
+
+ avgAggResult = tagsAggResult.getProfiledChildren().get(0);
+ assertThat(avgAggResult, notNullValue());
+ assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName()));
+ assertThat(avgAggResult.getLuceneDescription(), equalTo("avg"));
+ assertThat(avgAggResult.getTime(), greaterThan(0L));
+ avgBreakdown = avgAggResult.getTimeBreakdown();
+ assertThat(avgBreakdown, notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(avgBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(avgAggResult.getProfiledChildren().size(), equalTo(0));
+
+ maxAggResult = tagsAggResult.getProfiledChildren().get(1);
+ assertThat(maxAggResult, notNullValue());
+ assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName()));
+ assertThat(maxAggResult.getLuceneDescription(), equalTo("max"));
+ assertThat(maxAggResult.getTime(), greaterThan(0L));
+ maxBreakdown = maxAggResult.getTimeBreakdown();
+ assertThat(maxBreakdown, notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.INITIALIZE.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.COLLECT.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.BUILD_AGGREGATION.toString()), greaterThan(0L));
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), notNullValue());
+ assertThat(maxBreakdown.get(AggregationTimingType.REDUCE.toString()), equalTo(0L));
+ assertThat(maxAggResult.getProfiledChildren().size(), equalTo(0));
+ }
+ }
+
+ public void testNoProfile() {
+ SearchResponse response = client().prepareSearch("idx").setProfile(false)
+ .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)
+ .subAggregation(terms("tags").field(TAG_FIELD)
+ .subAggregation(avg("avg").field(NUMBER_FIELD))
+ .subAggregation(max("max").field(NUMBER_FIELD)))
+ .subAggregation(terms("strings").field(STRING_FIELD)
+ .subAggregation(avg("avg").field(NUMBER_FIELD))
+ .subAggregation(max("max").field(NUMBER_FIELD))
+ .subAggregation(terms("tags").field(TAG_FIELD)
+ .subAggregation(avg("avg").field(NUMBER_FIELD))
+ .subAggregation(max("max").field(NUMBER_FIELD)))))
+ .get();
+ assertSearchResponse(response);
+ Map<String, ProfileShardResult> profileResults = response.getProfileResults();
+ assertThat(profileResults, notNullValue());
+ assertThat(profileResults.size(), equalTo(0));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
index 371aaadd3a7..b6935f021d4 100644
--- a/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
+++ b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
@@ -85,8 +85,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
assertNotNull("Profile response element should not be null", resp.getProfileResults());
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shard : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shard.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shard : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
@@ -163,8 +163,9 @@ public class QueryProfilerIT extends ESIntegTestCase {
nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));
}
- assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits ["
- + vanillaResponse.getHits().totalHits() + "]",
+ assertThat(
+ "Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits ["
+ + vanillaResponse.getHits().totalHits() + "]",
vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
@@ -203,12 +204,12 @@ public class QueryProfilerIT extends ESIntegTestCase {
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
- Map<String, List<ProfileShardResult>> p = resp.getProfileResults();
+ Map<String, ProfileShardResult> p = resp.getProfileResults();
assertNotNull(p);
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertEquals(result.getQueryName(), "TermQuery");
assertEquals(result.getLuceneDescription(), "field1:one");
@@ -250,12 +251,12 @@ public class QueryProfilerIT extends ESIntegTestCase {
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
- Map<String, List<ProfileShardResult>> p = resp.getProfileResults();
+ Map<String, ProfileShardResult> p = resp.getProfileResults();
assertNotNull(p);
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertEquals(result.getQueryName(), "BooleanQuery");
assertEquals(result.getLuceneDescription(), "+field1:one +field1:two");
@@ -322,8 +323,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
assertNotNull("Profile response element should not be null", resp.getProfileResults());
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
@@ -374,8 +375,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
assertNotNull("Profile response element should not be null", resp.getProfileResults());
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
@@ -421,8 +422,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
assertNotNull("Profile response element should not be null", resp.getProfileResults());
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
@@ -468,8 +469,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
assertNotNull("Profile response element should not be null", resp.getProfileResults());
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
@@ -514,8 +515,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
assertNotNull("Profile response element should not be null", resp.getProfileResults());
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
@@ -569,8 +570,8 @@ public class QueryProfilerIT extends ESIntegTestCase {
assertNotNull("Profile response element should not be null", resp.getProfileResults());
assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0));
- for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
- for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+ for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
+ for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
diff --git a/core/src/test/java/org/elasticsearch/search/profile/query/ProfileTests.java b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java
similarity index 96%
rename from core/src/test/java/org/elasticsearch/search/profile/query/ProfileTests.java
rename to core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java
index 7488dbceee7..ffad39bc3f2 100644
--- a/core/src/test/java/org/elasticsearch/search/profile/query/ProfileTests.java
+++ b/core/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerTests.java
@@ -51,7 +51,7 @@ import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
-public class ProfileTests extends ESTestCase {
+public class QueryProfilerTests extends ESTestCase {
static Directory dir;
static IndexReader reader;
@@ -90,7 +90,7 @@ public class ProfileTests extends ESTestCase {
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.search(query, 1);
- List<ProfileResult> results = profiler.getQueryTree();
+ List<ProfileResult> results = profiler.getTree();
assertEquals(1, results.size());
Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
@@ -109,7 +109,7 @@ public class ProfileTests extends ESTestCase {
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed
- List<ProfileResult> results = profiler.getQueryTree();
+ List<ProfileResult> results = profiler.getTree();
assertEquals(1, results.size());
Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
@@ -128,7 +128,7 @@ public class ProfileTests extends ESTestCase {
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.count(query); // will use index stats
- List<ProfileResult> results = profiler.getQueryTree();
+ List<ProfileResult> results = profiler.getTree();
assertEquals(0, results.size());
long rewriteTime = profiler.getRewriteTime();
@@ -144,7 +144,7 @@ public class ProfileTests extends ESTestCase {
searcher.setProfiler(profiler);
Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random());
searcher.count(query);
- List<ProfileResult> results = profiler.getQueryTree();
+ List<ProfileResult> results = profiler.getTree();
assertEquals(1, results.size());
Map