Merge branch 'master' into feature/query-refactoring
commit bbbeb46488
@@ -103,4 +103,6 @@ if [ -e "$CONF_FILE" ]; then
    esac
fi

export HOSTNAME=`hostname -s`

exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS -Xmx64m -Xms16m -Delasticsearch -Des.path.home="$ES_HOME" $properties -cp "$ES_HOME/lib/*" org.elasticsearch.plugins.PluginManager $args

@@ -9,6 +9,8 @@ for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI

TITLE Elasticsearch Plugin Manager ${project.version}

SET HOSTNAME=%COMPUTERNAME%

"%JAVA_HOME%\bin\java" %JAVA_OPTS% %ES_JAVA_OPTS% -Xmx64m -Xms16m -Des.path.home="%ES_HOME%" -cp "%ES_HOME%/lib/*;" "org.elasticsearch.plugins.PluginManager" %*
goto finally

@@ -54,13 +54,26 @@ curl 'http://localhost:9200/twitter/_upgrade?pretty&human'
[source,js]
--------------------------------------------------
{
  "twitter": {
    "size": "21gb",
    "size_in_bytes": "21000000000",
    "size_to_upgrade": "10gb",
    "size_to_upgrade_in_bytes": "10000000000",
    "size_to_upgrade_ancient": "1gb",
    "size_to_upgrade_ancient_in_bytes": "1000000000",
    "indices": {
      "twitter": {
        "size": "21gb",
        "size_in_bytes": "21000000000",
        "size_to_upgrade": "10gb",
        "size_to_upgrade_in_bytes": "10000000000",
        "size_to_upgrade_ancient": "1gb",
        "size_to_upgrade_ancient_in_bytes": "1000000000"
      }
    }
  }
}
--------------------------------------------------

The level of detail in the upgrade status command can be controlled by
setting the `level` parameter to `cluster`, `index` (default), or `shard`.
For example, you can run the upgrade status command with `level=shard` to
get detailed upgrade information for each individual shard.
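
For instance, the following request (an illustrative sketch reusing the
`twitter` index from the example above) returns shard-level detail:

[source,sh]
--------------------------------------------------
curl 'http://localhost:9200/twitter/_upgrade?level=shard&pretty&human'
--------------------------------------------------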
@@ -29,6 +29,16 @@
            /^ heap\.current \s+ heap\.percent \s+ heap\.max \s+ \n
             (\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \s+ \n)+ $/

  - do:
      cat.nodes:
        h: heap.*
        v: true

  - match:
      $body: |
            /^ heap\.current \s+ heap\.percent \s+ heap\.max \s+ \n
             (\s+ \d+(\.\d+)?[ptgmk]?b \s+ \d+ \s+ \d+(\.\d+)?[ptgmk]?b \s+ \n)+ $/

  - do:
      cat.nodes:
        h: file_desc.current,file_desc.percent,file_desc.max

@@ -29,18 +29,6 @@
            / #pid id host ip port
            ^ (\d+ \s+ \S{4} \s+ \S+ \s+ (\d{1,3}\.){3}\d{1,3} \s+ (\d+|-) \s+ \n)+ $/


  - do:
      cat.thread_pool:
        h: bulk.m*

  - match:
      $body: |
            /^ bulk.min \s+ bulk.max \s+ \n
             (\s+ \d+ \s+ \d+ \s+ \n)+ $/

            #(\s+ \d+ \s+ \d+ \n)+ $/

  - do:
      cat.thread_pool:
        h: id,ba,fa,gea,ga,ia,maa,ma,oa,pa

@@ -0,0 +1,21 @@
---
"Basic test for upgrade indices":

  - do:
      indices.create:
        index: test_index
        body:
          settings:
            index:
              number_of_replicas: 0


  - do:
      cluster.health:
        wait_for_status: green

  - do:
      indices.upgrade:
        index: test_index

  - match: {upgraded_indices.test_index: '/(\d\.)+\d/'}

@@ -111,6 +111,12 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesActi
import org.elasticsearch.action.admin.indices.template.get.TransportGetIndexTemplatesAction;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.put.TransportPutIndexTemplateAction;
import org.elasticsearch.action.admin.indices.upgrade.get.TransportUpgradeStatusAction;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusAction;
import org.elasticsearch.action.admin.indices.upgrade.post.TransportUpgradeAction;
import org.elasticsearch.action.admin.indices.upgrade.post.TransportUpgradeSettingsAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction;
import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;

@@ -256,6 +262,9 @@ public class ActionModule extends AbstractModule {
        registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
        registerAction(SealIndicesAction.INSTANCE, TransportSealIndicesAction.class);
        registerAction(OptimizeAction.INSTANCE, TransportOptimizeAction.class);
        registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
        registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
        registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
        registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
        registerAction(PutWarmerAction.INSTANCE, TransportPutWarmerAction.class);
        registerAction(DeleteWarmerAction.INSTANCE, TransportDeleteWarmerAction.class);

@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.health;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Maps;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -229,9 +228,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
            }
        }

        if (in.getVersion().onOrAfter(Version.V_1_6_0)) {
            numberOfInFlightFetch = in.readInt();
        }
        numberOfInFlightFetch = in.readInt();
    }

    @Override

@@ -258,9 +255,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
        out.writeString(failure);
        }

        if (out.getVersion().onOrAfter(Version.V_1_6_0)) {
            out.writeInt(numberOfInFlightFetch);
        }
        out.writeInt(numberOfInFlightFetch);
    }

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.node.hotthreads;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -100,12 +99,7 @@ public class NodesHotThreadsRequest extends BaseNodesRequest<NodesHotThreadsRequ
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        threads = in.readInt();
        if (in.getVersion().before(Version.V_1_5_0)) {
            // Pre-1.5.0 did not filter hot threads, so we shouldn't:
            ignoreIdleThreads = false;
        } else {
            ignoreIdleThreads = in.readBoolean();
        }
        ignoreIdleThreads = in.readBoolean();
        type = in.readString();
        interval = TimeValue.readTimeValue(in);
        snapshots = in.readInt();

@@ -115,9 +109,7 @@ public class NodesHotThreadsRequest extends BaseNodesRequest<NodesHotThreadsRequ
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeInt(threads);
        if (out.getVersion().onOrAfter(Version.V_1_5_0)) {
            out.writeBoolean(ignoreIdleThreads);
        }
        out.writeBoolean(ignoreIdleThreads);
        out.writeString(type);
        interval.writeTo(out);
        out.writeInt(snapshots);

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.snapshots.restore;

import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;

@@ -51,29 +50,17 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBo
public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotRequest> {

    private String snapshot;

    private String repository;

    private String[] indices = Strings.EMPTY_ARRAY;

    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();

    private String renamePattern;

    private String renameReplacement;

    private boolean waitForCompletion;

    private boolean includeGlobalState = true;

    private boolean partial = false;

    private boolean includeAliases = true;

    private Settings settings = EMPTY_SETTINGS;

    private Settings indexSettings = EMPTY_SETTINGS;

    private String[] ignoreIndexSettings = Strings.EMPTY_ARRAY;

    RestoreSnapshotRequest() {

@@ -638,10 +625,8 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
        partial = in.readBoolean();
        includeAliases = in.readBoolean();
        settings = readSettingsFromStream(in);
        if (in.getVersion().onOrAfter(Version.V_1_5_0)) {
            indexSettings = readSettingsFromStream(in);
            ignoreIndexSettings = in.readStringArray();
        }
        indexSettings = readSettingsFromStream(in);
        ignoreIndexSettings = in.readStringArray();
    }

    @Override

@@ -658,9 +643,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
        out.writeBoolean(partial);
        out.writeBoolean(includeAliases);
        writeSettingsToStream(settings, out);
        if (out.getVersion().onOrAfter(Version.V_1_5_0)) {
            writeSettingsToStream(indexSettings, out);
            out.writeStringArray(ignoreIndexSettings);
        }
        writeSettingsToStream(indexSettings, out);
        out.writeStringArray(ignoreIndexSettings);
    }
}

@@ -42,15 +42,11 @@ public class OptimizeRequest extends BroadcastRequest<OptimizeRequest> {
        public static final int MAX_NUM_SEGMENTS = -1;
        public static final boolean ONLY_EXPUNGE_DELETES = false;
        public static final boolean FLUSH = true;
        public static final boolean UPGRADE = false;
        public static final boolean UPGRADE_ONLY_ANCIENT_SEGMENTS = false;
    }

    private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS;
    private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES;
    private boolean flush = Defaults.FLUSH;
    private boolean upgrade = Defaults.UPGRADE;
    private boolean upgradeOnlyAncientSegments = Defaults.UPGRADE_ONLY_ANCIENT_SEGMENTS;

    /**
     * Constructs an optimization request over one or more indices.

@@ -114,30 +110,12 @@ public class OptimizeRequest extends BroadcastRequest<OptimizeRequest> {
        return this;
    }

    /**
     * Should the merge upgrade all old segments to the current index format.
     * Defaults to <tt>false</tt>.
     */
    public boolean upgrade() {
        return upgrade;
    }

    /**
     * See {@link #upgrade()}
     */
    public OptimizeRequest upgrade(boolean upgrade) {
        this.upgrade = upgrade;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        maxNumSegments = in.readInt();
        onlyExpungeDeletes = in.readBoolean();
        flush = in.readBoolean();
        upgrade = in.readBoolean();
        upgradeOnlyAncientSegments = in.readBoolean();
    }

    @Override

@@ -146,24 +124,6 @@ public class OptimizeRequest extends BroadcastRequest<OptimizeRequest> {
        out.writeInt(maxNumSegments);
        out.writeBoolean(onlyExpungeDeletes);
        out.writeBoolean(flush);
        out.writeBoolean(upgrade);
        out.writeBoolean(upgradeOnlyAncientSegments);
    }

    /**
     * Should the merge upgrade only the ancient (older major version of Lucene) segments?
     * Defaults to <tt>false</tt>.
     */
    public boolean upgradeOnlyAncientSegments() {
        return upgradeOnlyAncientSegments;
    }

    /**
     * See {@link #upgradeOnlyAncientSegments()}
     */
    public OptimizeRequest upgradeOnlyAncientSegments(boolean upgradeOnlyAncientSegments) {
        this.upgradeOnlyAncientSegments = upgradeOnlyAncientSegments;
        return this;
    }

    @Override

@@ -172,8 +132,6 @@ public class OptimizeRequest extends BroadcastRequest<OptimizeRequest> {
                "maxNumSegments=" + maxNumSegments +
                ", onlyExpungeDeletes=" + onlyExpungeDeletes +
                ", flush=" + flush +
                ", upgrade=" + upgrade +
                ", upgradeOnlyAncientSegments=" + upgradeOnlyAncientSegments +
                '}';
    }
}

@@ -0,0 +1,78 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.get;

import com.google.common.collect.Iterators;
import org.elasticsearch.index.shard.ShardId;

import java.util.Iterator;

public class IndexShardUpgradeStatus implements Iterable<ShardUpgradeStatus> {

    private final ShardId shardId;

    private final ShardUpgradeStatus[] shards;

    IndexShardUpgradeStatus(ShardId shardId, ShardUpgradeStatus[] shards) {
        this.shardId = shardId;
        this.shards = shards;
    }

    public ShardId getShardId() {
        return this.shardId;
    }

    public ShardUpgradeStatus getAt(int i) {
        return shards[i];
    }

    public ShardUpgradeStatus[] getShards() {
        return this.shards;
    }

    @Override
    public Iterator<ShardUpgradeStatus> iterator() {
        return Iterators.forArray(shards);
    }

    public long getTotalBytes() {
        long totalBytes = 0;
        for (ShardUpgradeStatus indexShardUpgradeStatus : shards) {
            totalBytes += indexShardUpgradeStatus.getTotalBytes();
        }
        return totalBytes;
    }

    public long getToUpgradeBytes() {
        long upgradeBytes = 0;
        for (ShardUpgradeStatus indexShardUpgradeStatus : shards) {
            upgradeBytes += indexShardUpgradeStatus.getToUpgradeBytes();
        }
        return upgradeBytes;
    }

    public long getToUpgradeBytesAncient() {
        long upgradeBytesAncient = 0;
        for (ShardUpgradeStatus indexShardUpgradeStatus : shards) {
            upgradeBytesAncient += indexShardUpgradeStatus.getToUpgradeBytesAncient();
        }
        return upgradeBytesAncient;
    }
}

@@ -0,0 +1,95 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.get;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class IndexUpgradeStatus implements Iterable<IndexShardUpgradeStatus> {

    private final String index;

    private final Map<Integer, IndexShardUpgradeStatus> indexShards;

    IndexUpgradeStatus(String index, ShardUpgradeStatus[] shards) {
        this.index = index;

        Map<Integer, List<ShardUpgradeStatus>> tmpIndexShards = Maps.newHashMap();
        for (ShardUpgradeStatus shard : shards) {
            List<ShardUpgradeStatus> lst = tmpIndexShards.get(shard.getShardRouting().id());
            if (lst == null) {
                lst = Lists.newArrayList();
                tmpIndexShards.put(shard.getShardRouting().id(), lst);
            }
            lst.add(shard);
        }
        indexShards = Maps.newHashMap();
        for (Map.Entry<Integer, List<ShardUpgradeStatus>> entry : tmpIndexShards.entrySet()) {
            indexShards.put(entry.getKey(), new IndexShardUpgradeStatus(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardUpgradeStatus[entry.getValue().size()])));
        }
    }

    public String getIndex() {
        return this.index;
    }

    /**
     * A shard id to index shard upgrade status map (note, index shard upgrade status is the replication shard group that maps
     * to the shard id).
     */
    public Map<Integer, IndexShardUpgradeStatus> getShards() {
        return this.indexShards;
    }

    @Override
    public Iterator<IndexShardUpgradeStatus> iterator() {
        return indexShards.values().iterator();
    }

    public long getTotalBytes() {
        long totalBytes = 0;
        for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexShards.values()) {
            totalBytes += indexShardUpgradeStatus.getTotalBytes();
        }
        return totalBytes;
    }

    public long getToUpgradeBytes() {
        long upgradeBytes = 0;
        for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexShards.values()) {
            upgradeBytes += indexShardUpgradeStatus.getToUpgradeBytes();
        }
        return upgradeBytes;
    }

    public long getToUpgradeBytesAncient() {
        long upgradeBytesAncient = 0;
        for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexShards.values()) {
            upgradeBytesAncient += indexShardUpgradeStatus.getToUpgradeBytesAncient();
        }
        return upgradeBytesAncient;
    }

}

@@ -0,0 +1,92 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.get;

import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

import static org.elasticsearch.cluster.routing.ImmutableShardRouting.readShardRoutingEntry;

public class ShardUpgradeStatus extends BroadcastShardResponse {

    private ShardRouting shardRouting;

    private long totalBytes;

    private long toUpgradeBytes;

    private long toUpgradeBytesAncient;

    ShardUpgradeStatus() {
    }

    ShardUpgradeStatus(ShardRouting shardRouting, long totalBytes, long toUpgradeBytes, long upgradeBytesAncient) {
        super(shardRouting.shardId());
        this.shardRouting = shardRouting;
        this.totalBytes = totalBytes;
        this.toUpgradeBytes = toUpgradeBytes;
        this.toUpgradeBytesAncient = upgradeBytesAncient;
    }

    public ShardRouting getShardRouting() {
        return this.shardRouting;
    }

    public long getTotalBytes() {
        return totalBytes;
    }

    public long getToUpgradeBytes() {
        return toUpgradeBytes;
    }

    public long getToUpgradeBytesAncient() {
        return toUpgradeBytesAncient;
    }

    public static ShardUpgradeStatus readShardUpgradeStatus(StreamInput in) throws IOException {
        ShardUpgradeStatus shard = new ShardUpgradeStatus();
        shard.readFrom(in);
        return shard;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shardRouting = readShardRoutingEntry(in);
        totalBytes = in.readLong();
        toUpgradeBytes = in.readLong();
        toUpgradeBytesAncient = in.readLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        shardRouting.writeTo(out);
        out.writeLong(totalBytes);
        out.writeLong(toUpgradeBytes);
        out.writeLong(toUpgradeBytesAncient);
    }
}

@@ -0,0 +1,152 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.get;

import org.elasticsearch.Version;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
 *
 */
public class TransportUpgradeStatusAction extends TransportBroadcastAction<UpgradeStatusRequest, UpgradeStatusResponse, TransportUpgradeStatusAction.IndexShardUpgradeStatusRequest, ShardUpgradeStatus> {

    private final IndicesService indicesService;

    @Inject
    public TransportUpgradeStatusAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
                                        IndicesService indicesService, ActionFilters actionFilters) {
        super(settings, UpgradeStatusAction.NAME, threadPool, clusterService, transportService, actionFilters,
                UpgradeStatusRequest.class, IndexShardUpgradeStatusRequest.class, ThreadPool.Names.MANAGEMENT);
        this.indicesService = indicesService;
    }

    /**
     * Getting upgrade stats from *all* active shards.
     */
    @Override
    protected GroupShardsIterator shards(ClusterState clusterState, UpgradeStatusRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
    }

    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state, UpgradeStatusRequest request) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, UpgradeStatusRequest countRequest, String[] concreteIndices) {
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices);
    }

    @Override
    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
        int successfulShards = 0;
        int failedShards = 0;
        List<ShardOperationFailedException> shardFailures = null;
        final List<ShardUpgradeStatus> shards = newArrayList();
        for (int i = 0; i < shardsResponses.length(); i++) {
            Object shardResponse = shardsResponses.get(i);
            if (shardResponse == null) {
                // simply ignore non active shards
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = newArrayList();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {
                shards.add((ShardUpgradeStatus) shardResponse);
                successfulShards++;
            }
        }
        return new UpgradeStatusResponse(shards.toArray(new ShardUpgradeStatus[shards.size()]), shardsResponses.length(), successfulShards, failedShards, shardFailures);
    }

    @Override
    protected IndexShardUpgradeStatusRequest newShardRequest(int numShards, ShardRouting shard, UpgradeStatusRequest request) {
        return new IndexShardUpgradeStatusRequest(shard.shardId(), request);
    }

    @Override
    protected ShardUpgradeStatus newShardResponse() {
        return new ShardUpgradeStatus();
    }

    @Override
    protected ShardUpgradeStatus shardOperation(IndexShardUpgradeStatusRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
        List<Segment> segments = indexShard.engine().segments(false);
        long total_bytes = 0;
        long to_upgrade_bytes = 0;
        long to_upgrade_bytes_ancient = 0;
        for (Segment seg : segments) {
            total_bytes += seg.sizeInBytes;
            if (seg.version.major != Version.CURRENT.luceneVersion.major) {
                to_upgrade_bytes_ancient += seg.sizeInBytes;
                to_upgrade_bytes += seg.sizeInBytes;
            } else if (seg.version.minor != Version.CURRENT.luceneVersion.minor) {
                // TODO: this comparison is bogus! it would cause us to upgrade even with the same format
                // instead, we should check if the codec has changed
                to_upgrade_bytes += seg.sizeInBytes;
            }
        }

        return new ShardUpgradeStatus(indexShard.routingEntry(), total_bytes, to_upgrade_bytes, to_upgrade_bytes_ancient);
    }

    static class IndexShardUpgradeStatusRequest extends BroadcastShardRequest {

        IndexShardUpgradeStatusRequest() {

        }

        IndexShardUpgradeStatusRequest(ShardId shardId, UpgradeStatusRequest request) {
            super(shardId, request);
        }

    }
}

@@ -0,0 +1,45 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.get;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 */
public class UpgradeStatusAction extends Action<UpgradeStatusRequest, UpgradeStatusResponse, UpgradeStatusRequestBuilder> {

    public static final UpgradeStatusAction INSTANCE = new UpgradeStatusAction();
    public static final String NAME = "indices:monitor/upgrade";

    private UpgradeStatusAction() {
        super(NAME);
    }

    @Override
    public UpgradeStatusResponse newResponse() {
        return new UpgradeStatusResponse();
    }

    @Override
    public UpgradeStatusRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new UpgradeStatusRequestBuilder(client, this);
    }
}

@@ -17,23 +17,23 @@
 * under the License.
 */

package org.elasticsearch.common.lucene.store;
package org.elasticsearch.action.admin.indices.upgrade.get;

import org.apache.lucene.store.IndexInput;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

/**
 *
 */
public class ThreadSafeInputStreamIndexInput extends InputStreamIndexInput {
public class UpgradeStatusRequest extends BroadcastRequest<UpgradeStatusRequest> {

    public ThreadSafeInputStreamIndexInput(IndexInput indexInput, long limit) {
        super(indexInput, limit);
    public UpgradeStatusRequest() {
        this(Strings.EMPTY_ARRAY);
    }

    @Override
    public synchronized int read(byte[] b, int off, int len) throws IOException {
        return super.read(b, off, len);
    public UpgradeStatusRequest(String... indices) {
        super(indices);
    }
}

}

@@ -0,0 +1,33 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.get;

import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 *
 */
public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder<UpgradeStatusRequest, UpgradeStatusResponse, UpgradeStatusRequestBuilder> {

    public UpgradeStatusRequestBuilder(ElasticsearchClient client, UpgradeStatusAction action) {
        super(client, action, new UpgradeStatusRequest());
    }
}

@@ -0,0 +1,191 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.get;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class UpgradeStatusResponse extends BroadcastResponse implements ToXContent {

    private ShardUpgradeStatus[] shards;

    private Map<String, IndexUpgradeStatus> indicesUpgradeStatus;

    UpgradeStatusResponse() {

    }

    UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
        super(totalShards, successfulShards, failedShards, shardFailures);
        this.shards = shards;
    }

    public Map<String, IndexUpgradeStatus> getIndices() {
        if (indicesUpgradeStatus != null) {
            return indicesUpgradeStatus;
        }
        Map<String, IndexUpgradeStatus> indicesUpgradeStats = Maps.newHashMap();

        Set<String> indices = Sets.newHashSet();
        for (ShardUpgradeStatus shard : shards) {
            indices.add(shard.getIndex());
        }

        for (String index : indices) {
            List<ShardUpgradeStatus> shards = Lists.newArrayList();
            for (ShardUpgradeStatus shard : this.shards) {
                if (shard.getShardRouting().index().equals(index)) {
                    shards.add(shard);
                }
            }
            indicesUpgradeStats.put(index, new IndexUpgradeStatus(index, shards.toArray(new ShardUpgradeStatus[shards.size()])));
        }
        this.indicesUpgradeStatus = indicesUpgradeStats;
        return indicesUpgradeStats;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shards = new ShardUpgradeStatus[in.readVInt()];
        for (int i = 0; i < shards.length; i++) {
            shards[i] = ShardUpgradeStatus.readShardUpgradeStatus(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(shards.length);
        for (ShardUpgradeStatus shard : shards) {
            shard.writeTo(out);
        }
    }

    public long getTotalBytes() {
        long totalBytes = 0;
        for (IndexUpgradeStatus indexShardUpgradeStatus : getIndices().values()) {
            totalBytes += indexShardUpgradeStatus.getTotalBytes();
        }
        return totalBytes;
    }

    public long getToUpgradeBytes() {
        long upgradeBytes = 0;
        for (IndexUpgradeStatus indexShardUpgradeStatus : getIndices().values()) {
            upgradeBytes += indexShardUpgradeStatus.getToUpgradeBytes();
        }
        return upgradeBytes;
    }

    public long getToUpgradeBytesAncient() {
        long upgradeBytesAncient = 0;
        for (IndexUpgradeStatus indexShardUpgradeStatus : getIndices().values()) {
            upgradeBytesAncient += indexShardUpgradeStatus.getToUpgradeBytesAncient();
        }
        return upgradeBytesAncient;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {

        builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes());
        builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes());
        builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient());

        String level = params.param("level", "indices");
        boolean outputShards = "shards".equals(level);
        boolean outputIndices = "indices".equals(level) || outputShards;
        if (outputIndices) {
            builder.startObject(Fields.INDICES);
            for (IndexUpgradeStatus indexUpgradeStatus : getIndices().values()) {
                builder.startObject(indexUpgradeStatus.getIndex(), XContentBuilder.FieldCaseConversion.NONE);

                builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, indexUpgradeStatus.getTotalBytes());
                builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, indexUpgradeStatus.getToUpgradeBytes());
                builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, indexUpgradeStatus.getToUpgradeBytesAncient());
                if (outputShards) {
                    builder.startObject(Fields.SHARDS);
                    for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexUpgradeStatus) {
                        builder.startArray(Integer.toString(indexShardUpgradeStatus.getShardId().id()));
                        for (ShardUpgradeStatus shardUpgradeStatus : indexShardUpgradeStatus) {
                            builder.startObject();

                            // emit the per-shard sizes, not the response-wide totals
                            builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, shardUpgradeStatus.getTotalBytes());
                            builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, shardUpgradeStatus.getToUpgradeBytes());
                            builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, shardUpgradeStatus.getToUpgradeBytesAncient());

                            builder.startObject(Fields.ROUTING);
                            builder.field(Fields.STATE, shardUpgradeStatus.getShardRouting().state());
                            builder.field(Fields.PRIMARY, shardUpgradeStatus.getShardRouting().primary());
                            builder.field(Fields.NODE, shardUpgradeStatus.getShardRouting().currentNodeId());
                            if (shardUpgradeStatus.getShardRouting().relocatingNodeId() != null) {
                                builder.field(Fields.RELOCATING_NODE, shardUpgradeStatus.getShardRouting().relocatingNodeId());
                            }
                            builder.endObject();

                            builder.endObject();
                        }
                        builder.endArray();
                    }
                    builder.endObject();
                }

                builder.endObject();
            }

            builder.endObject();
        }
        return builder;
    }

    static final class Fields {
        static final XContentBuilderString INDICES = new XContentBuilderString("indices");
        static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
        static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
        static final XContentBuilderString STATE = new XContentBuilderString("state");
        static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
        static final XContentBuilderString NODE = new XContentBuilderString("node");
        static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
        static final XContentBuilderString SIZE = new XContentBuilderString("size");
        static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
        static final XContentBuilderString SIZE_TO_UPGRADE = new XContentBuilderString("size_to_upgrade");
        static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT = new XContentBuilderString("size_to_upgrade_ancient");
        static final XContentBuilderString SIZE_TO_UPGRADE_IN_BYTES = new XContentBuilderString("size_to_upgrade_in_bytes");
        static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT_IN_BYTES = new XContentBuilderString("size_to_upgrade_ancient_in_bytes");

    }
}

@@ -17,48 +17,44 @@
 * under the License.
 */

package org.elasticsearch.common.unit;
package org.elasticsearch.action.admin.indices.upgrade.post;


import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.io.Serializable;

/**
 *
 */
public class Percent implements Streamable, Serializable {
final class ShardUpgradeRequest extends BroadcastShardRequest {

    private double value;
    private UpgradeRequest request = new UpgradeRequest();

    public Percent(double value) {
        this.value = value;
    ShardUpgradeRequest() {
    }

    public double value() {
        return value;
    }

    @Override
    public String toString() {
        return format(value);
    }

    public static String format(double value) {
        String p = String.valueOf(value * 100.0);
        int ix = p.indexOf(".") + 1;
        return p.substring(0, ix) + p.substring(ix, ix + 1) + "%";
    ShardUpgradeRequest(ShardId shardId, UpgradeRequest request) {
        super(shardId, request);
        this.request = request;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        value = in.readDouble();
        super.readFrom(in);
        request.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeDouble(value);
        super.writeTo(out);
        request.writeTo(out);
    }

    public UpgradeRequest upgradeRequest() {
        return this.request;
    }
}

@@ -0,0 +1,76 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.text.ParseException;

/**
 *
 */
class ShardUpgradeResponse extends BroadcastShardResponse {

    private org.apache.lucene.util.Version version;

    private boolean primary;


    ShardUpgradeResponse() {
    }

    ShardUpgradeResponse(ShardId shardId, boolean primary, org.apache.lucene.util.Version version) {
        super(shardId);
        this.primary = primary;
        this.version = version;
    }

    public org.apache.lucene.util.Version version() {
        return this.version;
    }

    public boolean primary() {
        return primary;
    }


    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        primary = in.readBoolean();
        String versionString = in.readString();
        try {
            version = org.apache.lucene.util.Version.parse(versionString);
        } catch (ParseException ex) {
            // report the string that failed to parse, not the still-unset version field
            throw new IOException("failed to parse lucene version [" + versionString + "]", ex);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(primary);
        out.writeString(version.toString());
    }

}

@ -0,0 +1,214 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.upgrade.post;
|
||||
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.PrimaryMissingActionException;
|
||||
import org.elasticsearch.action.ShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
|
||||
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.*;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicReferenceArray;
|
||||
|
||||
import static com.google.common.collect.Lists.newArrayList;
|
||||
import static com.google.common.collect.Maps.newHashMap;
|
||||
import static com.google.common.collect.Sets.newHashSet;
|
||||
|
||||
/**
|
||||
* Upgrade index/indices action.
|
||||
*/
|
||||
public class TransportUpgradeAction extends TransportBroadcastAction<UpgradeRequest, UpgradeResponse, ShardUpgradeRequest, ShardUpgradeResponse> {
|
||||
|
||||
private final IndicesService indicesService;
|
||||
|
||||
private final TransportUpgradeSettingsAction upgradeSettingsAction;
|
||||
|
||||
@Inject
|
||||
public TransportUpgradeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
|
||||
TransportUpgradeSettingsAction upgradeSettingsAction) {
|
||||
super(settings, UpgradeAction.NAME, threadPool, clusterService, transportService, actionFilters,
|
||||
UpgradeRequest.class, ShardUpgradeRequest.class, ThreadPool.Names.OPTIMIZE);
|
||||
this.indicesService = indicesService;
|
||||
this.upgradeSettingsAction = upgradeSettingsAction;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected UpgradeResponse newResponse(UpgradeRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
|
||||
int successfulShards = 0;
|
||||
int failedShards = 0;
|
||||
List<ShardOperationFailedException> shardFailures = null;
|
||||
Map<String, Integer> successfulPrimaryShards = newHashMap();
|
||||
Map<String, Version> versions = newHashMap();
|
||||
for (int i = 0; i < shardsResponses.length(); i++) {
|
||||
Object shardResponse = shardsResponses.get(i);
|
||||
if (shardResponse == null) {
|
||||
// a non active shard, ignore...
|
||||
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
|
||||
failedShards++;
|
||||
if (shardFailures == null) {
|
||||
shardFailures = newArrayList();
|
||||
}
|
||||
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
|
||||
} else {
|
||||
successfulShards++;
|
||||
ShardUpgradeResponse shardUpgradeResponse = (ShardUpgradeResponse) shardResponse;
|
||||
String index = shardUpgradeResponse.getIndex();
|
||||
if (shardUpgradeResponse.primary()) {
|
||||
Integer count = successfulPrimaryShards.get(index);
|
||||
successfulPrimaryShards.put(index, count == null ? 1 : count + 1);
|
||||
}
|
||||
Version version = versions.get(index);
|
||||
if (version == null || shardUpgradeResponse.version().onOrAfter(version) == false) {
|
||||
versions.put(index, shardUpgradeResponse.version());
|
||||
}
|
||||
}
|
||||
}
|
||||
Map<String, String> updatedVersions = newHashMap();
|
||||
MetaData metaData = clusterState.metaData();
|
||||
for (Map.Entry<String, Version> versionEntry : versions.entrySet()) {
|
||||
String index = versionEntry.getKey();
|
||||
Integer primaryCount = successfulPrimaryShards.get(index);
|
||||
int expectedPrimaryCount = metaData.index(index).getNumberOfShards();
|
||||
            if (primaryCount == metaData.index(index).getNumberOfShards()) {
                updatedVersions.put(index, versionEntry.getValue().toString());
            } else {
                logger.warn("Not updating settings for the index [{}] because upgrade of some primary shards failed - expected[{}], received[{}]", index,
                        expectedPrimaryCount, primaryCount == null ? 0 : primaryCount);
            }
        }

        return new UpgradeResponse(updatedVersions, shardsResponses.length(), successfulShards, failedShards, shardFailures);
    }

    @Override
    protected ShardUpgradeRequest newShardRequest(int numShards, ShardRouting shard, UpgradeRequest request) {
        return new ShardUpgradeRequest(shard.shardId(), request);
    }

    @Override
    protected ShardUpgradeResponse newShardResponse() {
        return new ShardUpgradeResponse();
    }

    @Override
    protected ShardUpgradeResponse shardOperation(ShardUpgradeRequest request) {
        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
        org.apache.lucene.util.Version version = indexShard.upgrade(request.upgradeRequest());
        return new ShardUpgradeResponse(request.shardId(), indexShard.routingEntry().primary(), version);
    }

    /**
     * The upgrade request works against *all* shards.
     */
    @Override
    protected GroupShardsIterator shards(ClusterState clusterState, UpgradeRequest request, String[] concreteIndices) {
        GroupShardsIterator iterator = clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
        Set<String> indicesWithMissingPrimaries = indicesWithMissingPrimaries(clusterState, concreteIndices);
        if (indicesWithMissingPrimaries.isEmpty()) {
            return iterator;
        }
        // If some primary shards are not available the request should fail.
        throw new PrimaryMissingActionException("Cannot upgrade indices because the following indices are missing primary shards " + indicesWithMissingPrimaries);
    }

    /**
     * Finds all indices that do not have all of their primaries available
     */
    private Set<String> indicesWithMissingPrimaries(ClusterState clusterState, String[] concreteIndices) {
        Set<String> indices = newHashSet();
        RoutingTable routingTable = clusterState.routingTable();
        for (String index : concreteIndices) {
            IndexRoutingTable indexRoutingTable = routingTable.index(index);
            if (indexRoutingTable.allPrimaryShardsActive() == false) {
                indices.add(index);
            }
        }
        return indices;
    }

    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state, UpgradeRequest request) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, UpgradeRequest request, String[] concreteIndices) {
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
    }

    @Override
    protected void doExecute(UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
        ActionListener<UpgradeResponse> settingsUpdateListener = new ActionListener<UpgradeResponse>() {
            @Override
            public void onResponse(UpgradeResponse upgradeResponse) {
                try {
                    if (upgradeResponse.versions().isEmpty()) {
                        listener.onResponse(upgradeResponse);
                    } else {
                        updateSettings(upgradeResponse, listener);
                    }
                } catch (Throwable t) {
                    listener.onFailure(t);
                }
            }

            @Override
            public void onFailure(Throwable e) {
                listener.onFailure(e);
            }
        };
        super.doExecute(request, settingsUpdateListener);
    }

    private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener<UpgradeResponse> listener) {
        UpgradeSettingsRequest upgradeSettingsRequest = new UpgradeSettingsRequest(upgradeResponse.versions());
        upgradeSettingsAction.execute(upgradeSettingsRequest, new ActionListener<UpgradeSettingsResponse>() {
            @Override
            public void onResponse(UpgradeSettingsResponse updateSettingsResponse) {
                listener.onResponse(upgradeResponse);
            }

            @Override
            public void onFailure(Throwable e) {
                listener.onFailure(e);
            }
        });
    }

}
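For orientation (not part of the commit): a minimal sketch of driving the action above through the client API added later in this change. The wrapper class and index name are hypothetical; the point is that the caller's listener only fires after the chained settings update.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
import org.elasticsearch.client.Client;

class UpgradeCallSketch {
    static void upgrade(Client client) {
        client.admin().indices().upgrade(new UpgradeRequest("my-old-index"), new ActionListener<UpgradeResponse>() {
            @Override
            public void onResponse(UpgradeResponse response) {
                // Reached only after TransportUpgradeAction has either skipped the
                // settings update (versions() empty) or completed it.
                System.out.println("upgraded: " + response.versions());
            }

            @Override
            public void onFailure(Throwable e) {
                e.printStackTrace();
            }
        });
    }
}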
@@ -0,0 +1,86 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
 *
 */
public class TransportUpgradeSettingsAction extends TransportMasterNodeAction<UpgradeSettingsRequest, UpgradeSettingsResponse> {

    private final MetaDataUpdateSettingsService updateSettingsService;

    @Inject
    public TransportUpgradeSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
                                          MetaDataUpdateSettingsService updateSettingsService, ActionFilters actionFilters) {
        super(settings, UpgradeSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, UpgradeSettingsRequest.class);
        this.updateSettingsService = updateSettingsService;
    }

    @Override
    protected String executor() {
        // we go async right away....
        return ThreadPool.Names.SAME;
    }

    @Override
    protected ClusterBlockException checkBlock(UpgradeSettingsRequest request, ClusterState state) {
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
    }

    @Override
    protected UpgradeSettingsResponse newResponse() {
        return new UpgradeSettingsResponse();
    }

    @Override
    protected void masterOperation(final UpgradeSettingsRequest request, final ClusterState state, final ActionListener<UpgradeSettingsResponse> listener) {
        UpgradeSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpgradeSettingsClusterStateUpdateRequest()
                .ackTimeout(request.timeout())
                .versions(request.versions())
                .masterNodeTimeout(request.masterNodeTimeout());

        updateSettingsService.upgradeIndexSettings(clusterStateUpdateRequest, new ActionListener<ClusterStateUpdateResponse>() {
            @Override
            public void onResponse(ClusterStateUpdateResponse response) {
                listener.onResponse(new UpgradeSettingsResponse(response.isAcknowledged()));
            }

            @Override
            public void onFailure(Throwable t) {
                logger.debug("failed to upgrade minimum compatibility version settings on indices [{}]", t, request.versions().keySet());
                listener.onFailure(t);
            }
        });
    }
}
@@ -0,0 +1,46 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Upgrade index/indices action.
 */
public class UpgradeAction extends Action<UpgradeRequest, UpgradeResponse, UpgradeRequestBuilder> {

    public static final UpgradeAction INSTANCE = new UpgradeAction();
    public static final String NAME = "indices:admin/upgrade";

    private UpgradeAction() {
        super(NAME);
    }

    @Override
    public UpgradeResponse newResponse() {
        return new UpgradeResponse();
    }

    @Override
    public UpgradeRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new UpgradeRequestBuilder(client, this);
    }
}
@@ -0,0 +1,91 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

/**
 * A request to upgrade one or more indices. To upgrade all the indices, pass an empty array or
 * <tt>null</tt> for the indices.
 * <p/>
 * @see org.elasticsearch.client.Requests#upgradeRequest(String...)
 * @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest)
 * @see UpgradeResponse
 */
public class UpgradeRequest extends BroadcastRequest<UpgradeRequest> {

    public static final class Defaults {
        public static final boolean UPGRADE_ONLY_ANCIENT_SEGMENTS = false;
    }

    private boolean upgradeOnlyAncientSegments = Defaults.UPGRADE_ONLY_ANCIENT_SEGMENTS;

    /**
     * Constructs an upgrade request over one or more indices.
     *
     * @param indices The indices to upgrade; passing no indices means all indices will be upgraded.
     */
    public UpgradeRequest(String... indices) {
        super(indices);
    }

    public UpgradeRequest() {

    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        upgradeOnlyAncientSegments = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(upgradeOnlyAncientSegments);
    }

    /**
     * Should the upgrade only touch ancient (older major version of Lucene) segments?
     * Defaults to <tt>false</tt>.
     */
    public boolean upgradeOnlyAncientSegments() {
        return upgradeOnlyAncientSegments;
    }

    /**
     * See {@link #upgradeOnlyAncientSegments()}
     */
    public UpgradeRequest upgradeOnlyAncientSegments(boolean upgradeOnlyAncientSegments) {
        this.upgradeOnlyAncientSegments = upgradeOnlyAncientSegments;
        return this;
    }

    @Override
    public String toString() {
        return "UpgradeRequest{" +
                "upgradeOnlyAncientSegments=" + upgradeOnlyAncientSegments +
                '}';
    }
}
@@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * A request to upgrade one or more indices. To upgrade all the indices, pass an empty array or
 * <tt>null</tt> for the indices.
 */
public class UpgradeRequestBuilder extends BroadcastOperationRequestBuilder<UpgradeRequest, UpgradeResponse, UpgradeRequestBuilder> {

    public UpgradeRequestBuilder(ElasticsearchClient client, UpgradeAction action) {
        super(client, action, new UpgradeRequest());
    }

    /**
     * Should the upgrade only touch ancient (older major version of Lucene) segments?
     */
    public UpgradeRequestBuilder setUpgradeOnlyAncientSegments(boolean upgradeOnlyAncientSegments) {
        request.upgradeOnlyAncientSegments(upgradeOnlyAncientSegments);
        return this;
    }
}
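A hedged usage sketch for this builder (assumed, not shown in the commit): restrict the rewrite to segments written by an older Lucene major and block for the result. The `client` variable and index name are placeholders.

UpgradeResponse response = client.admin().indices()
        .prepareUpgrade("my-old-index")       // hypothetical index
        .setUpgradeOnlyAncientSegments(true)  // skip segments already on the current major
        .execute().actionGet();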
@@ -0,0 +1,76 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Maps.newHashMap;

/**
 * A response for the upgrade action.
 */
public class UpgradeResponse extends BroadcastResponse {

    private Map<String, String> versions;

    UpgradeResponse() {

    }

    UpgradeResponse(Map<String, String> versions, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
        super(totalShards, successfulShards, failedShards, shardFailures);
        this.versions = versions;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        versions = newHashMap();
        for (int i = 0; i < size; i++) {
            String index = in.readString();
            String version = in.readString();
            versions.put(index, version);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(versions.size());
        for (Map.Entry<String, String> entry : versions.entrySet()) {
            out.writeString(entry.getKey());
            out.writeString(entry.getValue());
        }
    }

    public Map<String, String> versions() {
        return versions;
    }
}
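A sketch of consuming this response, assuming the usual BroadcastResponse shard-count accessors; the method wrapper is hypothetical:

static void report(UpgradeResponse response) {
    // versions() maps each index to the oldest Lucene segment version it now
    // carries; the same values end up in the index settings via the chained
    // UpgradeSettingsRequest.
    for (java.util.Map.Entry<String, String> entry : response.versions().entrySet()) {
        System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
    if (response.getFailedShards() > 0) { // inherited from BroadcastResponse
        System.out.println(java.util.Arrays.toString(response.getShardFailures()));
    }
}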
@@ -0,0 +1,45 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 */
public class UpgradeSettingsAction extends Action<UpgradeSettingsRequest, UpgradeSettingsResponse, UpgradeSettingsRequestBuilder> {

    public static final UpgradeSettingsAction INSTANCE = new UpgradeSettingsAction();
    public static final String NAME = "internal:indices/admin/upgrade";

    private UpgradeSettingsAction() {
        super(NAME);
    }

    @Override
    public UpgradeSettingsResponse newResponse() {
        return new UpgradeSettingsResponse();
    }

    @Override
    public UpgradeSettingsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new UpgradeSettingsRequestBuilder(client, this);
    }
}
@@ -0,0 +1,51 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;

import java.util.Map;

/**
 * Cluster state update request that allows changing the minimum compatibility settings for some indices
 */
public class UpgradeSettingsClusterStateUpdateRequest extends ClusterStateUpdateRequest<UpgradeSettingsClusterStateUpdateRequest> {

    private Map<String, String> versions;

    public UpgradeSettingsClusterStateUpdateRequest() {

    }

    /**
     * Returns the index to version map for indices that should be updated
     */
    public Map<String, String> versions() {
        return versions;
    }

    /**
     * Sets the index to version map for indices that should be updated
     */
    public UpgradeSettingsClusterStateUpdateRequest versions(Map<String, String> versions) {
        this.versions = versions;
        return this;
    }
}
@@ -0,0 +1,98 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Map;

import static com.google.common.collect.Maps.newHashMap;
import static org.elasticsearch.action.ValidateActions.addValidationError;

/**
 * Request for an update index settings action
 */
public class UpgradeSettingsRequest extends AcknowledgedRequest<UpgradeSettingsRequest> {

    private Map<String, String> versions;

    UpgradeSettingsRequest() {
    }

    /**
     * Constructs a new request to update minimum compatible version settings for one or more indices
     */
    public UpgradeSettingsRequest(Map<String, String> versions) {
        this.versions = versions;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (versions.isEmpty()) {
            validationException = addValidationError("no indices to update", validationException);
        }
        return validationException;
    }

    Map<String, String> versions() {
        return versions;
    }

    /**
     * Sets the index versions to be updated
     */
    public UpgradeSettingsRequest versions(Map<String, String> versions) {
        this.versions = versions;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        versions = newHashMap();
        for (int i = 0; i < size; i++) {
            String index = in.readString();
            String version = in.readString();
            versions.put(index, version);
        }
        readTimeout(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeVInt(versions.size());
        for (Map.Entry<String, String> entry : versions.entrySet()) {
            out.writeString(entry.getKey());
            out.writeString(entry.getValue());
        }
        writeTimeout(out);
    }
}
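Because the versions map is hand-rolled onto the wire (a vint count, then key/value string pairs, then the timeout), a serialization round trip is the natural sanity check. A sketch under stated assumptions: `BytesStreamOutput` plus `BytesReference#streamInput()` for the read side, and same-package access to the package-private no-arg constructor.

Map<String, String> versions = newHashMap();
versions.put("my-old-index", "4.10.4"); // hypothetical index -> oldest Lucene version

UpgradeSettingsRequest original = new UpgradeSettingsRequest(versions);
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);

UpgradeSettingsRequest copy = new UpgradeSettingsRequest(); // package-private ctor
copy.readFrom(out.bytes().streamInput());
assert copy.versions().equals(versions);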
@@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.upgrade.post;

import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

import java.util.Map;

/**
 * Builder for an update index settings request
 */
public class UpgradeSettingsRequestBuilder extends AcknowledgedRequestBuilder<UpgradeSettingsRequest, UpgradeSettingsResponse, UpgradeSettingsRequestBuilder> {

    public UpgradeSettingsRequestBuilder(ElasticsearchClient client, UpgradeSettingsAction action) {
        super(client, action, new UpgradeSettingsRequest());
    }

    /**
     * Sets the index versions to be updated
     */
    public UpgradeSettingsRequestBuilder setVersions(Map<String, String> versions) {
        request.versions(versions);
        return this;
    }
}
@@ -17,47 +17,35 @@
 * under the License.
 */

package org.elasticsearch.common.collect;
package org.elasticsearch.action.admin.indices.upgrade.post;

import java.util.Collection;
import java.util.Comparator;
import java.util.TreeSet;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

/**
 * A {@link TreeSet} that is bounded by size.
 *
 *
 * A response for an update index settings action
 */
public class BoundedTreeSet<E> extends TreeSet<E> {
public class UpgradeSettingsResponse extends AcknowledgedResponse {

    private final int size;

    public BoundedTreeSet(int size) {
        this.size = size;
    UpgradeSettingsResponse() {
    }

    public BoundedTreeSet(Comparator<? super E> comparator, int size) {
        super(comparator);
        this.size = size;
    UpgradeSettingsResponse(boolean acknowledged) {
        super(acknowledged);
    }

    @Override
    public boolean add(E e) {
        boolean result = super.add(e);
        rebound();
        return result;
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        readAcknowledged(in);
    }

    @Override
    public boolean addAll(Collection<? extends E> c) {
        boolean result = super.addAll(c);
        rebound();
        return result;
    }

    private void rebound() {
        while (size() > size) {
            remove(last());
        }
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        writeAcknowledged(out);
    }
}
@@ -19,7 +19,6 @@
package org.elasticsearch.action.indexedscripts.get;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
@@ -31,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.fetch.source.FetchSourceContext;

import java.io.IOException;
@@ -147,47 +145,19 @@ public class GetIndexedScriptRequest extends ActionRequest<GetIndexedScriptReque
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        if (in.getVersion().before(Version.V_1_4_0_Beta1)) {
            //the index was previously serialized although not needed
            in.readString();
        }
        scriptLang = in.readString();
        id = in.readString();
        if (in.getVersion().before(Version.V_1_5_0)) {
            in.readOptionalString(); //Preference
            in.readBoolean(); //Refresh
            in.readByte(); //Realtime
        }
        this.versionType = VersionType.fromValue(in.readByte());
        this.version = in.readLong();

        if (in.getVersion().before(Version.V_1_5_0)) {
            FetchSourceContext.optionalReadFromStream(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        if (out.getVersion().before(Version.V_1_4_0_Beta1)) {
            //the index was previously serialized although not needed
            out.writeString(ScriptService.SCRIPT_INDEX);
        }
        out.writeString(scriptLang);
        out.writeString(id);
        if (out.getVersion().before(Version.V_1_5_0)) {
            out.writeOptionalString("_local"); //Preference
            out.writeBoolean(true); //Refresh
            out.writeByte((byte) -1); //Realtime
        }

        out.writeByte(versionType.getValue());
        out.writeLong(version);

        if (out.getVersion().before(Version.V_1_5_0)) {
            FetchSourceContext.optionalWriteToStream(null, out);
        }

    }

    @Override
@@ -96,6 +96,12 @@ import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResp
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequestBuilder;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequestBuilder;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
@@ -406,6 +412,53 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     */
    OptimizeRequestBuilder prepareOptimize(String... indices);


    /**
     * Explicitly upgrade one or more indices
     *
     * @param request The upgrade request
     * @return A result future
     * @see org.elasticsearch.client.Requests#upgradeRequest(String...)
     */
    ActionFuture<UpgradeResponse> upgrade(UpgradeRequest request);

    /**
     * Explicitly upgrade one or more indices
     *
     * @param request  The upgrade request
     * @param listener A listener to be notified with a result
     * @see org.elasticsearch.client.Requests#upgradeRequest(String...)
     */
    void upgrade(UpgradeRequest request, ActionListener<UpgradeResponse> listener);

    /**
     * Check upgrade status of one or more indices
     */
    UpgradeStatusRequestBuilder prepareUpgradeStatus(String... indices);

    /**
     * Check upgrade status of one or more indices
     *
     * @param request The upgrade request
     * @return A result future
     * @see org.elasticsearch.client.Requests#upgradeRequest(String...)
     */
    ActionFuture<UpgradeStatusResponse> upgradeStatus(UpgradeStatusRequest request);

    /**
     * Check upgrade status of one or more indices
     *
     * @param request  The upgrade request
     * @param listener A listener to be notified with a result
     * @see org.elasticsearch.client.Requests#upgradeRequest(String...)
     */
    void upgradeStatus(UpgradeStatusRequest request, ActionListener<UpgradeStatusResponse> listener);

    /**
     * Explicitly upgrade one or more indices
     */
    UpgradeRequestBuilder prepareUpgrade(String... indices);

    /**
     * Get the complete mappings of one or more types
     */
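Not from the commit: a sketch of the two call styles the interface offers, one per action; `indicesAdminClient` and the index name are placeholders.

// Future-returning style for the upgrade itself:
UpgradeResponse upgraded = indicesAdminClient
        .upgrade(new UpgradeRequest("my-old-index"))
        .actionGet();

// Builder style for the status check:
UpgradeStatusResponse status = indicesAdminClient
        .prepareUpgradeStatus("my-old-index")
        .execute().actionGet();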
@@ -49,6 +49,7 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.count.CountRequest;
import org.elasticsearch.action.delete.DeleteRequest;
@@ -291,6 +292,17 @@ public class Requests {
        return new OptimizeRequest(indices);
    }

    /**
     * Creates an upgrade request.
     *
     * @param indices The indices to upgrade. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
     * @return The upgrade request
     * @see org.elasticsearch.client.IndicesAdminClient#upgrade(UpgradeRequest)
     */
    public static UpgradeRequest upgradeRequest(String... indices) {
        return new UpgradeRequest(indices);
    }

    /**
     * Creates a clean indices cache request.
     *
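With this factory in place, construction follows the same pattern as the other Requests helpers (sketch; the index name is a placeholder):

import static org.elasticsearch.client.Requests.upgradeRequest;

UpgradeRequest all = upgradeRequest();               // no indices -> all indices
UpgradeRequest one = upgradeRequest("my-old-index")  // single hypothetical index
        .upgradeOnlyAncientSegments(true);           // optional flag from this change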
@@ -196,6 +196,14 @@ import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateActio
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusAction;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequestBuilder;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequestBuilder;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
@@ -1415,6 +1423,36 @@ public abstract class AbstractClient extends AbstractComponent implements Client
        return new OptimizeRequestBuilder(this, OptimizeAction.INSTANCE).setIndices(indices);
    }

    @Override
    public ActionFuture<UpgradeResponse> upgrade(final UpgradeRequest request) {
        return execute(UpgradeAction.INSTANCE, request);
    }

    @Override
    public void upgrade(final UpgradeRequest request, final ActionListener<UpgradeResponse> listener) {
        execute(UpgradeAction.INSTANCE, request, listener);
    }

    @Override
    public UpgradeRequestBuilder prepareUpgrade(String... indices) {
        return new UpgradeRequestBuilder(this, UpgradeAction.INSTANCE).setIndices(indices);
    }

    @Override
    public ActionFuture<UpgradeStatusResponse> upgradeStatus(final UpgradeStatusRequest request) {
        return execute(UpgradeStatusAction.INSTANCE, request);
    }

    @Override
    public void upgradeStatus(final UpgradeStatusRequest request, final ActionListener<UpgradeStatusResponse> listener) {
        execute(UpgradeStatusAction.INSTANCE, request, listener);
    }

    @Override
    public UpgradeStatusRequestBuilder prepareUpgradeStatus(String... indices) {
        return new UpgradeStatusRequestBuilder(this, UpgradeStatusAction.INSTANCE).setIndices(indices);
    }

    @Override
    public ActionFuture<RefreshResponse> refresh(final RefreshRequest request) {
        return execute(RefreshAction.INSTANCE, request);
@@ -49,6 +49,7 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;

import java.io.IOException;
import java.text.ParseException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Locale;
@@ -159,6 +160,7 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
|
||||
public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
|
||||
public static final String SETTING_VERSION_CREATED = "index.version.created";
|
||||
public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded";
|
||||
public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
|
||||
public static final String SETTING_CREATION_DATE = "index.creation_date";
|
||||
public static final String SETTING_UUID = "index.uuid";
|
||||
|
@@ -192,7 +194,8 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
    private final DiscoveryNodeFilters excludeFilters;

    private final Version indexCreatedVersion;
    private final Version indexMinimumCompatibleVersion;
    private final Version indexUpgradedVersion;
    private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
    private final HashFunction routingHashFunction;
    private final boolean useTypeForRouting;
@@ -227,7 +230,17 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
            excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
        }
        indexCreatedVersion = Version.indexCreated(settings);
        indexMinimumCompatibleVersion = settings.getAsVersion(SETTING_VERSION_MINIMUM_COMPATIBLE, indexCreatedVersion);
        indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
        String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
        if (stringLuceneVersion != null) {
            try {
                this.minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
            } catch (ParseException ex) {
                throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE + "] setting", ex);
            }
        } else {
            this.minimumCompatibleLuceneVersion = null;
        }
        final Class<? extends HashFunction> hashFunctionClass = settings.getAsClass(SETTING_LEGACY_ROUTING_HASH_FUNCTION, null);
        if (hashFunctionClass == null) {
            routingHashFunction = MURMUR3_HASH_FUNCTION;
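The constructor above round-trips the minimum-compatible setting through Lucene's version parser. In isolation (the version strings are illustrative, not from the commit), that step looks like:

try {
    org.apache.lucene.util.Version lucene = org.apache.lucene.util.Version.parse("4.10.4");
    // the same comparison MetaDataIndexUpgradeService uses further below
    boolean supported = lucene.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_0_0);
    System.out.println(lucene + " supported=" + supported);
} catch (java.text.ParseException ex) {
    // IndexMetaData wraps this into an IllegalStateException naming the setting
    throw new IllegalStateException("Cannot parse lucene version", ex);
}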
@@ -280,8 +293,6 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
    /**
     * Return the {@link Version} on which this index has been created. This
     * information is typically useful for backward compatibility.
     *
     * Returns null if the index was created before 0.19.0.RC1.
     */
    public Version creationVersion() {
        return indexCreatedVersion;
@@ -292,17 +303,22 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
    }

    /**
     * Return the {@link Version} that created the oldest segment in the index.
     *
     * If the index was created before v1.6 and didn't go through the upgrade API the creation version is returned.
     * Returns null if the index was created before 0.19.0.RC1.
     * Return the {@link Version} on which this index has been upgraded. This
     * information is typically useful for backward compatibility.
     */
    public Version minimumCompatibleVersion() {
        return indexMinimumCompatibleVersion;
    public Version upgradeVersion() {
        return indexUpgradedVersion;
    }

    public Version getMinimumCompatibleVersion() {
        return minimumCompatibleVersion();
    public Version getUpgradeVersion() {
        return upgradeVersion();
    }

    /**
     * Return the {@link org.apache.lucene.util.Version} of the oldest lucene segment in the index
     */
    public org.apache.lucene.util.Version getMinimumCompatibleVersion() {
        return minimumCompatibleLuceneVersion;
    }

    /**
@@ -19,8 +19,6 @@
package org.elasticsearch.cluster.metadata;

import com.google.common.collect.Maps;
import org.elasticsearch.Version;
import org.elasticsearch.action.TimestampParsingException;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.common.Nullable;
@@ -40,10 +38,8 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import static com.google.common.collect.Maps.newHashMap;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;

/**
@@ -571,10 +567,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        out.writeOptionalString(timestamp().path());
        out.writeString(timestamp().format());
        out.writeOptionalString(timestamp().defaultTimestamp());
        // TODO Remove the test in elasticsearch 2.0.0
        if (out.getVersion().onOrAfter(Version.V_1_5_0)) {
            out.writeOptionalBoolean(timestamp().ignoreMissing());
        }
        out.writeOptionalBoolean(timestamp().ignoreMissing());
        out.writeBoolean(hasParentField());
    }
@@ -619,10 +612,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
        String defaultTimestamp = in.readOptionalString();
        Boolean ignoreMissing = null;

        // TODO Remove the test in elasticsearch 2.0.0
        if (in.getVersion().onOrAfter(Version.V_1_5_0)) {
            ignoreMissing = in.readOptionalBoolean();
        }
        ignoreMissing = in.readOptionalBoolean();

        final Timestamp timestamp = new Timestamp(enabled, path, format, defaultTimestamp, ignoreMissing);
        final boolean hasParentField = in.readBoolean();
@@ -28,7 +28,7 @@ import org.elasticsearch.common.settings.Settings;
/**
 * This service is responsible for upgrading legacy index metadata to the current version
 *
 * <p/>
 * Every time an existing index is introduced into the cluster this service should be used
 * to upgrade the existing index metadata to the latest version of the cluster. It typically
 * occurs during cluster upgrade, when dangling indices are imported into the cluster or indices
@@ -64,7 +64,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
            pre20HashFunction = DjbHashFunction.class;
        }
        pre20UseType = settings.getAsBoolean(DEPRECATED_SETTING_ROUTING_USE_TYPE, null);
        if (hasCustomPre20HashFunction|| pre20UseType != null) {
        if (hasCustomPre20HashFunction || pre20UseType != null) {
            logger.warn("Settings [{}] and [{}] are deprecated. Index settings from your old indices have been updated to record the fact that they "
                    + "used some custom routing logic, you can now remove these settings from your `elasticsearch.yml` file", DEPRECATED_SETTING_ROUTING_HASH_FUNCTION, DEPRECATED_SETTING_ROUTING_USE_TYPE);
        }
@@ -72,7 +72,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
    /**
     * Checks that the index can be upgraded to the current version of the master node.
     *
     * <p/>
     * If the index does not need upgrade it returns the index metadata unchanged, otherwise it returns a modified index metadata. If index cannot be
     * updated the method throws an exception.
     */
@@ -101,8 +101,16 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
     * Returns true if this index can be supported by the current version of elasticsearch
     */
    private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
        return indexMetaData.minimumCompatibleVersion() != null &&
                indexMetaData.minimumCompatibleVersion().luceneVersion.onOrAfter(Version.V_0_90_0_Beta1.luceneVersion);
        if (indexMetaData.creationVersion().onOrAfter(Version.V_0_90_0_Beta1)) {
            // The index was created with an elasticsearch version that was already using Lucene 4.0
            return true;
        }
        if (indexMetaData.getMinimumCompatibleVersion() != null &&
                indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_4_0_0)) {
            // The index was upgraded, we can work with it
            return true;
        }
        return false;
    }

    /**
@@ -20,8 +20,10 @@
package org.elasticsearch.cluster.metadata;

import com.google.common.collect.Sets;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
@@ -40,6 +42,8 @@ import org.elasticsearch.index.settings.IndexDynamicSettings;
import java.util.*;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;

/**
 * Service responsible for submitting update index settings requests
 */
@@ -307,4 +311,37 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
            }
        });
    }

    public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {

        clusterService.submitStateUpdateTask("update-index-compatibility-versions", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {

            @Override
            protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
                return new ClusterStateUpdateResponse(acknowledged);
            }

            @Override
            public ClusterState execute(ClusterState currentState) {
                MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
                for (Map.Entry<String, String> entry : request.versions().entrySet()) {
                    String index = entry.getKey();
                    IndexMetaData indexMetaData = metaDataBuilder.get(index);
                    if (indexMetaData != null) {
                        if (Version.CURRENT.equals(indexMetaData.creationVersion()) == false) {
                            // No reason to pollute the settings, we didn't really upgrade anything
                            metaDataBuilder.put(IndexMetaData.builder(indexMetaData)
                                    .settings(settingsBuilder().put(indexMetaData.settings())
                                            .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue())
                                            .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT)
                                    )
                            );
                        }
                    }
                }
                return ClusterState.builder(currentState).metaData(metaDataBuilder).build();
            }
        });
    }
}
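Once this task has run, the two markers written above can be read back from the per-index settings (keys as defined in IndexMetaData in this same change). A sketch, assuming an `IndexMetaData` instance in hand:

Settings settings = indexMetaData.settings();
// Version.CURRENT, unless the index was created on CURRENT and the put was skipped:
String upgradedTo = settings.get(IndexMetaData.SETTING_VERSION_UPGRADED);
// the oldest Lucene segment version reported by the shard-level upgrade:
String minimumCompatible = settings.get(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE);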
@@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.collect.IdentityHashSet;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
@@ -597,7 +596,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
        int secondaryLength = 0;
        int primaryLength = primary.length;
        ArrayUtil.timSort(primary, comparator);
        final Set<ModelNode> throttledNodes = new IdentityHashSet<>();
        final Set<ModelNode> throttledNodes = Collections.newSetFromMap(new IdentityHashMap<ModelNode, Boolean>());
        do {
            for (int i = 0; i < primaryLength; i++) {
                MutableShardRouting shard = primary[i];
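The replacement line relies on the standard JDK idiom for an identity-based set, which is what makes the custom IdentityHashSet (deleted below) redundant. A self-contained sketch of the idiom and the reference-equality semantics it preserves:

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

public class IdentitySetSketch {
    public static void main(String[] args) {
        Set<String> identitySet = Collections.newSetFromMap(new IdentityHashMap<String, Boolean>());
        String a = new String("node-1");
        String b = new String("node-1"); // equals(a) but a different reference
        identitySet.add(a);
        identitySet.add(b);
        // Membership is by reference (==), not equals(), so both are kept:
        System.out.println(identitySet.size()); // prints 2
    }
}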
@@ -18,7 +18,6 @@
 */
package org.elasticsearch.common;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -35,9 +34,6 @@ public final class Priority implements Comparable<Priority> {
    public static void writeTo(Priority priority, StreamOutput output) throws IOException {
        byte b = priority.value;
        if (output.getVersion().before(Version.V_1_1_0)) {
            b = (byte) Math.max(URGENT.value, b);
        }
        output.writeByte(b);
    }
@@ -1,201 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.collect;

import java.util.*;

/**
 *
 */
public class IdentityHashSet<E>
        extends AbstractSet<E>
        implements Set<E>, Cloneable, java.io.Serializable {

    static final long serialVersionUID = -5024744406713321677L;

    private transient IdentityHashMap<E, Object> map;

    // Dummy value to associate with an Object in the backing Map
    private static final Object PRESENT = new Object();

    public IdentityHashSet() {
        map = new IdentityHashMap<>();
    }

    public IdentityHashSet(Collection<? extends E> c) {
        map = new IdentityHashMap<>(Math.max((int) (c.size() / .75f) + 1, 16));
        addAll(c);
    }

    public IdentityHashSet(int expectedSize) {
        map = new IdentityHashMap<>(expectedSize);
    }

    /**
     * Returns an iterator over the elements in this set. The elements
     * are returned in no particular order.
     *
     * @return an Iterator over the elements in this set
     * @see ConcurrentModificationException
     */
    @Override
    public Iterator<E> iterator() {
        return map.keySet().iterator();
    }

    /**
     * Returns the number of elements in this set (its cardinality).
     *
     * @return the number of elements in this set (its cardinality)
     */
    @Override
    public int size() {
        return map.size();
    }

    /**
     * Returns <tt>true</tt> if this set contains no elements.
     *
     * @return <tt>true</tt> if this set contains no elements
     */
    @Override
    public boolean isEmpty() {
        return map.isEmpty();
    }

    /**
     * Returns <tt>true</tt> if this set contains the specified element.
     * More formally, returns <tt>true</tt> if and only if this set
     * contains an element <tt>e</tt> such that
     * <tt>(o==e)</tt>.
     *
     * @param o element whose presence in this set is to be tested
     * @return <tt>true</tt> if this set contains the specified element
     */
    @Override
    public boolean contains(Object o) {
        return map.containsKey(o);
    }

    /**
     * Adds the specified element to this set if it is not already present.
     * More formally, adds the specified element <tt>e</tt> to this set if
     * this set contains no element <tt>e2</tt> such that
     * <tt>(e==e2)</tt>.
     * If this set already contains the element, the call leaves the set
     * unchanged and returns <tt>false</tt>.
     *
     * @param e element to be added to this set
     * @return <tt>true</tt> if this set did not already contain the specified
     *         element
     */
    @Override
    public boolean add(E e) {
        return map.put(e, PRESENT) == null;
    }

    /**
     * Removes the specified element from this set if it is present.
     * More formally, removes an element <tt>e</tt> such that
     * <tt>(o==e)</tt>,
     * if this set contains such an element. Returns <tt>true</tt> if
     * this set contained the element (or equivalently, if this set
     * changed as a result of the call). (This set will not contain the
     * element once the call returns.)
     *
     * @param o object to be removed from this set, if present
     * @return <tt>true</tt> if the set contained the specified element
     */
    @Override
    public boolean remove(Object o) {
        return map.remove(o) == PRESENT;
    }

    /**
     * Removes all of the elements from this set.
     * The set will be empty after this call returns.
     */
    @Override
    public void clear() {
        map.clear();
    }

    /**
     * Returns a shallow copy of this <tt>HashSet</tt> instance: the elements
     * themselves are not cloned.
     *
     * @return a shallow copy of this set
     */
    @Override
    public Object clone() {
        try {
            IdentityHashSet<E> newSet = (IdentityHashSet<E>) super.clone();
            newSet.map = (IdentityHashMap<E, Object>) map.clone();
            return newSet;
        } catch (CloneNotSupportedException e) {
            throw new InternalError();
        }
    }

    /**
     * Save the state of this <tt>HashSet</tt> instance to a stream (that is,
     * serialize it).
     *
     * @serialData The capacity of the backing <tt>HashMap</tt> instance
     *             (int), and its load factor (float) are emitted, followed by
     *             the size of the set (the number of elements it contains)
     *             (int), followed by all of its elements (each an Object) in
     *             no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
            throws java.io.IOException {
        // Write out any hidden serialization magic
        s.defaultWriteObject();

        // Write out size
        s.writeInt(map.size());

        // Write out all elements in the proper order.
        for (Iterator i = map.keySet().iterator(); i.hasNext(); )
            s.writeObject(i.next());
    }

    /**
     * Reconstitute the <tt>HashSet</tt> instance from a stream (that is,
     * deserialize it).
     */
    private void readObject(java.io.ObjectInputStream s)
            throws java.io.IOException, ClassNotFoundException {
        // Read in any hidden serialization magic
        s.defaultReadObject();

        // Read in size
        int size = s.readInt();

        map = new IdentityHashMap<>(size);

        // Read in all elements in the proper order.
        for (int i = 0; i < size; i++) {
            E e = (E) s.readObject();
            map.put(e, PRESENT);
        }
    }
}
@@ -1,376 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.collect;

import com.carrotsearch.hppc.*;
import com.carrotsearch.hppc.cursors.LongCursor;
import com.carrotsearch.hppc.cursors.LongObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.predicates.IntObjectPredicate;
import com.carrotsearch.hppc.predicates.LongObjectPredicate;
import com.carrotsearch.hppc.predicates.LongPredicate;
import com.carrotsearch.hppc.procedures.LongObjectProcedure;
import com.google.common.collect.UnmodifiableIterator;

import java.util.Iterator;
import java.util.Map;

/**
 * An immutable map implementation based on open hash map.
 * <p/>
 * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenLongMap)} (which is an optimized
 * option to copy over existing content and modify it).
 */
public final class ImmutableOpenLongMap<VType> implements Iterable<LongObjectCursor<VType>> {

    private final LongObjectHashMap<VType> map;

    private ImmutableOpenLongMap(LongObjectHashMap<VType> map) {
        this.map = map;
    }

    /**
     * @return Returns the value associated with the given key or the default value
     * for the key type, if the key is not associated with any value.
     * <p/>
     * <b>Important note:</b> For primitive type values, the value returned for a non-existing
     * key may not be the default value of the primitive type (it may be any value previously
     * assigned to that slot).
     */
    public VType get(long key) {
        return map.get(key);
    }

    /**
     * Returns <code>true</code> if this container has an association to a value for
     * the given key.
     */
    public boolean containsKey(long key) {
        return map.containsKey(key);
    }

    /**
     * @return Returns the current size (number of assigned keys) in the container.
     */
    public int size() {
        return map.size();
    }

    /**
     * @return Return <code>true</code> if this hash map contains no assigned keys.
     */
    public boolean isEmpty() {
        return map.isEmpty();
    }

    /**
     * Returns a cursor over the entries (key-value pairs) in this map. The iterator is
     * implemented as a cursor and it returns <b>the same cursor instance</b> on every
     * call to {@link java.util.Iterator#next()}. To read the current key and value use the cursor's
     * public fields. An example is shown below.
     * <pre>
     * for (IntShortCursor c : intShortMap)
     * {
     *     System.out.println("index=" + c.index
     *         + " key=" + c.key
     *         + " value=" + c.value);
     * }
     * </pre>
     * <p/>
     * <p>The <code>index</code> field inside the cursor gives the internal index inside
     * the container's implementation. The interpretation of this index depends on
     * the container.
     */
    @Override
    public Iterator<LongObjectCursor<VType>> iterator() {
        return map.iterator();
    }

    /**
     * Returns a specialized view of the keys of this associated container.
     * The view additionally implements {@link com.carrotsearch.hppc.ObjectLookupContainer}.
     */
    public LongLookupContainer keys() {
        return map.keys();
    }

    /**
     * Returns a direct iterator over the keys.
     */
    public UnmodifiableIterator<Long> keysIt() {
        final Iterator<LongCursor> iterator = map.keys().iterator();
        return new UnmodifiableIterator<Long>() {
            @Override
            public boolean hasNext() {
                return iterator.hasNext();
            }

            @Override
            public Long next() {
                return iterator.next().value;
            }
        };
    }

    /**
     * @return Returns a container with all values stored in this map.
|
||||
*/
|
||||
public ObjectContainer<VType> values() {
|
||||
return map.values();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a direct iterator over the keys.
|
||||
*/
|
||||
public UnmodifiableIterator<VType> valuesIt() {
|
||||
final Iterator<ObjectCursor<VType>> iterator = map.values().iterator();
|
||||
return new UnmodifiableIterator<VType>() {
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return iterator.hasNext();
|
||||
}
|
||||
|
||||
@Override
|
||||
public VType next() {
|
||||
return iterator.next().value;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return map.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
ImmutableOpenLongMap that = (ImmutableOpenLongMap) o;
|
||||
|
||||
if (!map.equals(that.map)) return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return map.hashCode();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static final ImmutableOpenLongMap EMPTY = new ImmutableOpenLongMap(new LongObjectHashMap());
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public static <VType> ImmutableOpenLongMap<VType> of() {
|
||||
return EMPTY;
|
||||
}
|
||||
|
||||
public static <VType> Builder<VType> builder() {
|
||||
return new Builder<>();
|
||||
}
|
||||
|
||||
public static <VType> Builder<VType> builder(int size) {
|
||||
return new Builder<>(size);
|
||||
}
|
||||
|
||||
public static <VType> Builder<VType> builder(ImmutableOpenLongMap<VType> map) {
|
||||
return new Builder<>(map);
|
||||
}
|
||||
|
||||
public static class Builder<VType> implements LongObjectMap<VType> {
|
||||
|
||||
private LongObjectHashMap<VType> map;
|
||||
|
||||
public Builder() {
|
||||
//noinspection unchecked
|
||||
this(EMPTY);
|
||||
}
|
||||
|
||||
public Builder(int size) {
|
||||
this.map = new LongObjectHashMap<>(size);
|
||||
}
|
||||
|
||||
public Builder(ImmutableOpenLongMap<VType> map) {
|
||||
this.map = map.map.clone();
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds a new instance of the
|
||||
*/
|
||||
public ImmutableOpenLongMap<VType> build() {
|
||||
LongObjectHashMap<VType> map = this.map;
|
||||
this.map = null; // nullify the map, so any operation post build will fail! (hackish, but safest)
|
||||
return new ImmutableOpenLongMap<>(map);
|
||||
}
|
||||
|
||||
/**
|
||||
* Puts all the entries in the map to the builder.
|
||||
*/
|
||||
public Builder<VType> putAll(Map<Long, VType> map) {
|
||||
for (Map.Entry<Long, VType> entry : map.entrySet()) {
|
||||
this.map.put(entry.getKey(), entry.getValue());
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* A put operation that can be used in the fluent pattern.
|
||||
*/
|
||||
public Builder<VType> fPut(long key, VType value) {
|
||||
map.put(key, value);
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public VType put(long key, VType value) {
|
||||
return map.put(key, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public VType get(long key) {
|
||||
return map.get(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public VType getOrDefault(long kType, VType vType) {
|
||||
return map.getOrDefault(kType, vType);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove that can be used in the fluent pattern.
|
||||
*/
|
||||
public Builder<VType> fRemove(long key) {
|
||||
map.remove(key);
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public VType remove(long key) {
|
||||
return map.remove(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<LongObjectCursor<VType>> iterator() {
|
||||
return map.iterator();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean containsKey(long key) {
|
||||
return map.containsKey(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
return map.size();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isEmpty() {
|
||||
return map.isEmpty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clear() {
|
||||
map.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int putAll(LongObjectAssociativeContainer<? extends VType> container) {
|
||||
return map.putAll(container);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int putAll(Iterable<? extends LongObjectCursor<? extends VType>> iterable) {
|
||||
return map.putAll(iterable);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int removeAll(LongContainer container) {
|
||||
return map.removeAll(container);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int removeAll(LongPredicate predicate) {
|
||||
return map.removeAll(predicate);
|
||||
}
|
||||
|
||||
@Override
|
||||
public LongCollection keys() {
|
||||
return map.keys();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ObjectContainer<VType> values() {
|
||||
return map.values();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends LongObjectProcedure<? super VType>> T forEach(T procedure) {
|
||||
return map.forEach(procedure);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int indexOf(long key) {
|
||||
return map.indexOf(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean indexExists(int index) {
|
||||
return map.indexExists(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public VType indexGet(int index) {
|
||||
return map.indexGet(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public VType indexReplace(int index, VType newValue) {
|
||||
return map.indexReplace(index, newValue);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void indexInsert(int index, long key, VType value) {
|
||||
map.indexInsert(index, key, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void release() {
|
||||
map.release();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String visualizeKeyDistribution(int characters) {
|
||||
return map.visualizeKeyDistribution(characters);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int removeAll(LongObjectPredicate<? super VType> predicate) {
|
||||
return map.removeAll(predicate);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <T extends LongObjectPredicate<? super VType>> T forEach(T predicate) {
|
||||
return map.forEach(predicate);
|
||||
}
|
||||
}
|
||||
}
|
|
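A brief usage sketch of the builder API from the class above (keys and values are illustrative):

    ImmutableOpenLongMap<String> map = ImmutableOpenLongMap.<String>builder()
            .fPut(1L, "one")
            .fPut(2L, "two")
            .build();
    assert map.containsKey(1L) && "two".equals(map.get(2L));

    // copy-and-modify through the optimized builder(map) overload
    ImmutableOpenLongMap<String> extended =
            ImmutableOpenLongMap.builder(map).fPut(3L, "three").build();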
@ -1,65 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.collect;

import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import com.google.common.collect.UnmodifiableIterator;

import java.util.Comparator;
import java.util.Iterator;

public enum Iterators2 {
    ;

    /** Remove duplicated elements from an iterator over sorted content. */
    public static <T> Iterator<T> deduplicateSorted(Iterator<? extends T> iterator, final Comparator<? super T> comparator) {
        // TODO: infer type once JI-9019884 is fixed
        final PeekingIterator<T> it = Iterators.<T>peekingIterator(iterator);
        return new UnmodifiableIterator<T>() {

            @Override
            public boolean hasNext() {
                return it.hasNext();
            }

            @Override
            public T next() {
                final T ret = it.next();
                while (it.hasNext() && comparator.compare(ret, it.peek()) == 0) {
                    it.next();
                }
                assert !it.hasNext() || comparator.compare(ret, it.peek()) < 0 : "iterator is not sorted: " + ret + " > " + it.peek();
                return ret;
            }

        };
    }

    /** Return a merged view over several iterators, optionally deduplicating equivalent entries. */
    public static <T> Iterator<T> mergeSorted(Iterable<Iterator<? extends T>> iterators, Comparator<? super T> comparator, boolean deduplicate) {
        Iterator<T> it = Iterators.mergeSorted(iterators, comparator);
        if (deduplicate) {
            it = deduplicateSorted(it, comparator);
        }
        return it;
    }

}
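A small sketch of how these helpers compose (the input iterators must already be sorted; the values are illustrative):

    Iterator<Integer> a = Arrays.asList(1, 3, 3, 5).iterator();
    Iterator<Integer> b = Arrays.asList(2, 3, 4).iterator();
    Iterator<Integer> merged = Iterators2.mergeSorted(
            Arrays.<Iterator<? extends Integer>>asList(a, b),
            Ordering.<Integer>natural(),
            true); // deduplicate equivalent entries
    // merged yields: 1, 2, 3, 4, 5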
@ -1,85 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.lucene;

import org.apache.lucene.util.BytesRef;

/**
 * A wrapper around a {@link BytesRef} that also caches its hashCode.
 */
public class HashedBytesRef {

    public BytesRef bytes;
    public int hash;

    public HashedBytesRef() {
    }

    public HashedBytesRef(String bytes) {
        this(new BytesRef(bytes));
    }

    public HashedBytesRef(BytesRef bytes) {
        this(bytes, bytes.hashCode());
    }

    public HashedBytesRef(BytesRef bytes, int hash) {
        this.bytes = bytes;
        this.hash = hash;
    }

    public HashedBytesRef resetHashCode() {
        this.hash = bytes.hashCode();
        return this;
    }

    public HashedBytesRef reset(BytesRef bytes, int hash) {
        this.bytes = bytes;
        this.hash = hash;
        return this;
    }

    @Override
    public int hashCode() {
        return hash;
    }

    @Override
    public boolean equals(Object other) {
        if (other instanceof HashedBytesRef) {
            return bytes.equals(((HashedBytesRef) other).bytes);
        }
        return false;
    }

    @Override
    public String toString() {
        return bytes.toString();
    }

    public HashedBytesRef deepCopy() {
        return deepCopyOf(this);
    }

    public static HashedBytesRef deepCopyOf(HashedBytesRef other) {
        BytesRef copy = BytesRef.deepCopyOf(other.bytes);
        return new HashedBytesRef(copy, other.hash);
    }
}
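A usage sketch for the class above: the point of caching the hash is to pay for BytesRef#hashCode() once when the same key is used in repeated hash-based lookups (the term value is illustrative):

    BytesRef term = new BytesRef("some_term");
    HashedBytesRef key = new HashedBytesRef(term);     // hash computed once
    Map<HashedBytesRef, Integer> counts = new HashMap<>();
    counts.put(key.deepCopy(), 1);                     // deepCopy detaches from shared buffers
    Integer n = counts.get(key);                       // reuses the cached hash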
@ -173,6 +173,28 @@ public class Lucene {
        return SegmentInfos.readCommit(directory, segmentsFileName);
    }

    /**
     * Tries to acquire the {@link IndexWriter#WRITE_LOCK_NAME} on the given directory. The returned lock must be
     * closed in order to release it. If the lock can't be obtained a {@link LockObtainFailedException} is thrown.
     * This method uses the {@link IndexWriterConfig#getDefaultWriteLockTimeout()} as the lock timeout.
     */
    public static Lock acquireWriteLock(Directory directory) throws IOException {
        return acquireLock(directory, IndexWriter.WRITE_LOCK_NAME, IndexWriterConfig.getDefaultWriteLockTimeout());
    }

    /**
     * Tries to acquire a lock on the given directory. The returned lock must be closed in order to release it.
     * If the lock can't be obtained a {@link LockObtainFailedException} is thrown.
     */
    @SuppressForbidden(reason = "this method uses trappy Directory#makeLock API")
    public static Lock acquireLock(Directory directory, String lockName, long timeout) throws IOException {
        final Lock writeLock = directory.makeLock(lockName);
        if (writeLock.obtain(timeout) == false) {
            throw new LockObtainFailedException("failed to obtain lock: " + writeLock);
        }
        return writeLock;
    }
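A minimal sketch of the calling pattern for the new helper (the same pattern the hunks below switch to): try-with-resources closes the lock, and failure to acquire surfaces as a LockObtainFailedException.

    try (Lock writeLock = Lucene.acquireWriteLock(directory)) {
        // exclusive section: safe to mutate the index files here
    } catch (LockObtainFailedException ex) {
        // write.lock is held elsewhere, e.g. by an open IndexWriter
    }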
    /**
     * This method removes all files from the given directory that are not referenced by the given segments file.
     * This method will open an IndexWriter and relies on index file deleter to remove all unreferenced files. Segment files

@ -184,10 +206,7 @@ public class Lucene {
     */
    public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Directory directory) throws IOException {
        final SegmentInfos si = readSegmentInfos(segmentsFileName, directory);
        try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) {
            if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock
                throw new LockObtainFailedException("Index locked for write: " + writeLock);
            }
        try (Lock writeLock = acquireWriteLock(directory)) {
            int foundSegmentFiles = 0;
            for (final String file : directory.listAll()) {
    /**

@ -226,10 +245,7 @@ public class Lucene {
     * this operation fails.
     */
    public static void cleanLuceneIndex(Directory directory) throws IOException {
        try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) {
            if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock
                throw new LockObtainFailedException("Index locked for write: " + writeLock);
            }
        try (Lock writeLock = acquireWriteLock(directory)) {
            for (final String file : directory.listAll()) {
                if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                    directory.deleteFile(file); // remove all segment_N files

@ -25,6 +25,7 @@ import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@ -32,6 +33,7 @@ import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;

@ -84,13 +86,12 @@ public class MultiDataPathUpgrader {
        ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath());
        Files.createDirectories(targetPath.resolveIndex());
        try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) {
            try (final Lock lock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) {
                if (lock.obtain(5000)) {
                    upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths);
                } else {
                    throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex());
                }
            try (final Lock lock = Lucene.acquireWriteLock(directory)) {
                upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths);
            } catch (LockObtainFailedException ex) {
                throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex);
            }

        }

@ -20,7 +20,6 @@
package org.elasticsearch.discovery.zen.fd;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;

@ -428,19 +427,11 @@ public class MasterFaultDetection extends FaultDetection {
        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            if (in.getVersion().onOrBefore(Version.V_1_4_0_Beta1)) {
                // old listedOnMaster
                in.readBoolean();
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            if (out.getVersion().onOrBefore(Version.V_1_4_0_Beta1)) {
                // old listedOnMaster
                out.writeBoolean(true);
            }
        }
    }
}

@ -28,7 +28,9 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.io.stream.*;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.network.MulticastChannel;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;

@ -63,6 +65,8 @@ public class MulticastZenPing extends AbstractLifecycleComponent<ZenPing> implem

    private static final byte[] INTERNAL_HEADER = new byte[]{1, 9, 8, 4};

    private static final int PING_SIZE_ESTIMATE = 150;

    private final String address;
    private final int port;
    private final String group;

@ -247,7 +251,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent<ZenPing> implem

    private void sendPingRequest(int id) {
        try {
            BytesStreamOutput out = new BytesStreamOutput();
            BytesStreamOutput out = new BytesStreamOutput(PING_SIZE_ESTIMATE);
            out.writeBytes(INTERNAL_HEADER);
            // TODO: change to min_required version!
            Version.writeVersion(version, out);

@ -32,6 +32,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;

@ -146,18 +147,17 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {

                try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
                    logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
                    Lock tmpLock = luceneDir.makeLock(NODE_LOCK_FILENAME);
                    boolean obtained = tmpLock.obtain();
                    if (obtained) {
                    try {
                        locks[dirIndex] = Lucene.acquireLock(luceneDir, NODE_LOCK_FILENAME, 0);
                        nodePaths[dirIndex] = new NodePath(dir, environment);
                        locks[dirIndex] = tmpLock;
                        localNodeId = possibleLockId;
                    } else {
                    } catch (LockObtainFailedException ex) {
                        logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
                        // release all the ones that were obtained up until now
                        releaseAndNullLocks(locks);
                        break;
                    }

                } catch (IOException e) {
                    logger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
                    lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);

@ -314,8 +314,9 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
            // open a directory (will be immediately closed) on the shard's location
            dirs[i] = new SimpleFSDirectory(p, FsDirectoryService.buildLockFactory(indexSettings));
            // create a lock for the "write.lock" file
            locks[i] = dirs[i].makeLock(IndexWriter.WRITE_LOCK_NAME);
            if (locks[i].obtain() == false) {
            try {
                locks[i] = Lucene.acquireWriteLock(dirs[i]);
            } catch (IOException ex) {
                throw new ElasticsearchException("unable to acquire " +
                        IndexWriter.WRITE_LOCK_NAME + " for " + p);
            }

@ -319,11 +319,19 @@ public abstract class Engine implements Closeable {
    /**
     * Read the last segments info from the commit pointed to by the searcher manager
     */
    protected static SegmentInfos readLastCommittedSegmentInfos(SearcherManager sm) throws IOException {
    protected static SegmentInfos readLastCommittedSegmentInfos(final SearcherManager sm, final Store store) throws IOException {
        IndexSearcher searcher = sm.acquire();
        try {
            IndexCommit latestCommit = ((DirectoryReader) searcher.getIndexReader()).getIndexCommit();
            return Lucene.readSegmentInfos(latestCommit);
        } catch (IOException e) {
            // Fall back to reading from the store if reading from the commit fails
            try {
                return store.readLastCommittedSegmentsInfo();
            } catch (IOException e2) {
                e2.addSuppressed(e);
                throw e2;
            }
        } finally {
            sm.release(searcher);
        }

@ -275,7 +275,7 @@ public class InternalEngine extends Engine {
        try {
            final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId);
            searcherManager = new SearcherManager(directoryReader, searcherFactory);
            lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager);
            lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
            success = true;
            return searcherManager;
        } catch (IOException e) {

@ -79,7 +79,7 @@ public class ShadowEngine extends Engine {
            if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) {
                reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId);
                this.searcherManager = new SearcherManager(reader, searcherFactory);
                this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager);
                this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
                success = true;
            } else {
                throw new IndexShardException(shardId, "failed to open a shadow engine after" +

@ -148,7 +148,7 @@ public class ShadowEngine extends Engine {
        store.incRef();
        try (ReleasableLock lock = readLock.acquire()) {
            // reread the last committed segment infos
            lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager);
            lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
        } catch (Throwable e) {
            if (isClosed.get() == false) {
                logger.warn("failed to read latest segment infos on flush", e);

@ -478,23 +478,11 @@ public abstract class AbstractFieldMapper implements FieldMapper {

    @Override
    public Query termsQuery(List values, @Nullable QueryParseContext context) {
        switch (values.size()) {
            case 0:
                return Queries.newMatchNoDocsQuery();
            case 1:
                // When there is a single term, it's important to return a term filter so that
                // it can return a DocIdSet that is directly backed by a postings list, instead
                // of loading everything into a bit set and returning an iterator based on the
                // bit set
                return termQuery(values.get(0), context);
            default:
                BytesRef[] bytesRefs = new BytesRef[values.size()];
                for (int i = 0; i < bytesRefs.length; i++) {
                    bytesRefs[i] = indexedValueForSearch(values.get(i));
                }
                return new TermsQuery(names.indexName(), bytesRefs);

        BytesRef[] bytesRefs = new BytesRef[values.size()];
        for (int i = 0; i < bytesRefs.length; i++) {
            bytesRefs[i] = indexedValueForSearch(values.get(i));
        }
        return new TermsQuery(names.indexName(), bytesRefs);
    }

    @Override

@ -188,11 +188,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements RootMapper {
            return super.termQuery(value, context);
        }
        final BytesRef[] uids = Uid.createUidsForTypesAndId(context.queryTypes(), value);
        if (uids.length == 1) {
            return new TermQuery(new Term(UidFieldMapper.NAME, uids[0]));
        } else {
            return new TermsQuery(UidFieldMapper.NAME, uids);
        }
        return new TermsQuery(UidFieldMapper.NAME, uids);
    }

    @Override

@ -18,7 +18,6 @@
 */
package org.elasticsearch.index.percolator.stats;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@ -152,11 +151,6 @@ public class PercolateStats implements Streamable, ToXContent {
        percolateCount = in.readVLong();
        percolateTimeInMillis = in.readVLong();
        current = in.readVLong();
        if (in.getVersion().before(Version.V_1_1_0)) {
            in.readVLong();
        } else {
            in.readLong();
        }
        numQueries = in.readVLong();
    }

@ -165,11 +159,6 @@ public class PercolateStats implements Streamable, ToXContent {
        out.writeVLong(percolateCount);
        out.writeVLong(percolateTimeInMillis);
        out.writeVLong(current);
        if (out.getVersion().before(Version.V_1_1_0)) {
            out.writeVLong(0);
        } else {
            out.writeLong(-1);
        }
        out.writeVLong(numQueries);
    }
}

@ -33,6 +33,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;

@ -717,8 +718,38 @@ public class IndexShard extends AbstractIndexShardComponent {
        if (logger.isTraceEnabled()) {
            logger.trace("optimize with {}", optimize);
        }
        engine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(),
                optimize.upgrade(), optimize.upgradeOnlyAncientSegments());
        engine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), false, false);
    }

    /**
     * Upgrades the shard to the current version of Lucene and returns the minimum segment version
     */
    public org.apache.lucene.util.Version upgrade(UpgradeRequest upgrade) {
        verifyStarted();
        if (logger.isTraceEnabled()) {
            logger.trace("upgrade with {}", upgrade);
        }
        org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion();
        // we just want to upgrade the segments, not actually optimize to a single segment
        engine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable
                Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
                false, true, upgrade.upgradeOnlyAncientSegments());
        org.apache.lucene.util.Version version = minimumCompatibleVersion();
        if (logger.isTraceEnabled()) {
            logger.trace("upgraded segments from version {} to version {}", previousVersion, version);
        }

        return version;
    }

    public org.apache.lucene.util.Version minimumCompatibleVersion() {
        org.apache.lucene.util.Version luceneVersion = Version.LUCENE_3_EMULATION_VERSION;
        for (Segment segment : engine().segments(false)) {
            if (luceneVersion.onOrAfter(segment.getVersion())) {
                luceneVersion = segment.getVersion();
            }
        }
        return luceneVersion;
    }

    public SnapshotIndexCommit snapshotIndex(boolean flushFirst) throws EngineException {
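Callers reach the new IndexShard#upgrade(UpgradeRequest) through the upgrade action; a client-side sketch matching the test code later in this commit (the index name is illustrative):

    UpgradeResponse response = client.admin().indices()
            .prepareUpgrade("my_index")
            .setUpgradeOnlyAncientSegments(false)
            .get();
    // response.versions() maps each index to its minimum segment version after the upgrade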

@ -259,10 +259,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
        metadataLock.writeLock().lock();
        // we make sure that nobody fetches the metadata while we do this rename operation here to ensure we don't
        // get exceptions if files are still open.
        try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) {
            if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock
                throw new LockObtainFailedException("Index locked for write: " + writeLock);
            }
        try (Lock writeLock = Lucene.acquireWriteLock(directory())) {
            for (Map.Entry<String, String> entry : entries) {
                String tempFile = entry.getKey();
                String origFile = entry.getValue();

@ -586,10 +583,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
     */
    public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
        metadataLock.writeLock().lock();
        try (Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME)) {
            if (!writeLock.obtain(IndexWriterConfig.getDefaultWriteLockTimeout())) { // obtain write lock
                throw new LockObtainFailedException("Index locked for write: " + writeLock);
            }
        try (Lock writeLock = Lucene.acquireWriteLock(directory)) {
            final StoreDirectory dir = directory;
            for (String existingFile : dir.listAll()) {
                if (existingFile.equals(IndexWriter.WRITE_LOCK_NAME) || Store.isChecksum(existingFile) || sourceMetaData.contains(existingFile)) {

@ -19,6 +19,7 @@

package org.elasticsearch.node.internal;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Names;
import org.elasticsearch.common.Strings;

@ -27,6 +28,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.FailedToResolveConfigException;

import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.Strings.cleanPath;

@ -37,6 +39,8 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 */
public class InternalSettingsPreparer {

    static final List<String> ALLOWED_SUFFIXES = ImmutableList.of(".yml", ".yaml", ".json", ".properties");

    public static Tuple<Settings, Environment> prepareSettings(Settings pSettings, boolean loadConfigSettings) {
        // ignore these prefixes when getting properties from es. and elasticsearch.
        String[] ignorePrefixes = new String[]{"es.default.", "elasticsearch.default."};

@ -72,22 +76,12 @@ public class InternalSettingsPreparer {
            }
        }
        if (loadFromEnv) {
            try {
                settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.yml"));
            } catch (FailedToResolveConfigException e) {
                // ignore
            } catch (NoClassDefFoundError e) {
                // ignore, no yaml
            }
            try {
                settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.json"));
            } catch (FailedToResolveConfigException e) {
                // ignore
            }
            try {
                settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch.properties"));
            } catch (FailedToResolveConfigException e) {
                // ignore
            for (String allowedSuffix : ALLOWED_SUFFIXES) {
                try {
                    settingsBuilder.loadFromUrl(environment.resolveConfig("elasticsearch" + allowedSuffix));
                } catch (FailedToResolveConfigException e) {
                    // ignore
                }
            }
        }
    }
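With ALLOWED_SUFFIXES the loader now probes elasticsearch.yml, .yaml, .json and .properties in one loop instead of three hand-written blocks. A caller-side sketch (the home path is illustrative):

    Tuple<Settings, Environment> t = InternalSettingsPreparer.prepareSettings(
            settingsBuilder().put("path.home", "/tmp/es-home").build(),
            true); // loadConfigSettings: resolve the elasticsearch.* config files
    Settings settings = t.v1();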

@ -19,17 +19,14 @@

package org.elasticsearch.rest.action.admin.indices.upgrade;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
import org.elasticsearch.action.admin.indices.segments.*;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;

@ -37,7 +34,8 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import java.io.IOException;

import java.util.Map;

import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;

@ -66,72 +64,36 @@ public class RestUpgradeAction extends BaseRestHandler {
        }
    }

    void handleGet(RestRequest request, RestChannel channel, Client client) {
        IndicesSegmentsRequest segsReq = new IndicesSegmentsRequest(Strings.splitStringByCommaToArray(request.param("index")));
        client.admin().indices().segments(segsReq, new RestBuilderListener<IndicesSegmentResponse>(channel) {
            @Override
            public RestResponse buildResponse(IndicesSegmentResponse response, XContentBuilder builder) throws Exception {
                builder.startObject();

                // TODO: getIndices().values() is what IndicesSegmentsResponse uses, but this will produce different orders with jdk8?
                for (IndexSegments indexSegments : response.getIndices().values()) {
                    builder.startObject(indexSegments.getIndex());
                    buildUpgradeStatus(indexSegments, builder);
                    builder.endObject();
                }

                builder.endObject();
                return new BytesRestResponse(OK, builder);
            }
        });
    void handleGet(final RestRequest request, RestChannel channel, Client client) {
        client.admin().indices().prepareUpgradeStatus(Strings.splitStringByCommaToArray(request.param("index")))
                .execute(new RestBuilderListener<UpgradeStatusResponse>(channel) {
                    @Override
                    public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception {
                        builder.startObject();
                        response.toXContent(builder, request);
                        builder.endObject();
                        return new BytesRestResponse(OK, builder);
                    }
                });
    }


    void handlePost(final RestRequest request, RestChannel channel, Client client) {
        OptimizeRequest optimizeReq = new OptimizeRequest(Strings.splitStringByCommaToArray(request.param("index")));
        optimizeReq.flush(true);
        optimizeReq.upgrade(true);
        optimizeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false));
        optimizeReq.maxNumSegments(Integer.MAX_VALUE); // we just want to upgrade the segments, not actually optimize to a single segment
        client.admin().indices().optimize(optimizeReq, new RestBuilderListener<OptimizeResponse>(channel) {
        UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index")));
        upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false));
        client.admin().indices().upgrade(upgradeReq, new RestBuilderListener<UpgradeResponse>(channel) {
            @Override
            public RestResponse buildResponse(OptimizeResponse response, XContentBuilder builder) throws Exception {
            public RestResponse buildResponse(UpgradeResponse response, XContentBuilder builder) throws Exception {
                builder.startObject();
                buildBroadcastShardsHeader(builder, request, response);
                builder.startObject("upgraded_indices");
                for (Map.Entry<String, String> entry : response.versions().entrySet()) {
                    builder.field(entry.getKey(), entry.getValue(), XContentBuilder.FieldCaseConversion.NONE);
                }
                builder.endObject();
                builder.endObject();
                return new BytesRestResponse(OK, builder);
            }
        });
    }

    void buildUpgradeStatus(IndexSegments indexSegments, XContentBuilder builder) throws IOException {
        long total_bytes = 0;
        long to_upgrade_bytes = 0;
        long to_upgrade_bytes_ancient = 0;
        for (IndexShardSegments shard : indexSegments) {
            for (ShardSegments segs : shard.getShards()) {
                for (Segment seg : segs.getSegments()) {
                    total_bytes += seg.sizeInBytes;
                    if (seg.version.major != Version.CURRENT.luceneVersion.major) {
                        to_upgrade_bytes_ancient += seg.sizeInBytes;
                        to_upgrade_bytes += seg.sizeInBytes;
                    } else if (seg.version.minor != Version.CURRENT.luceneVersion.minor) {
                        // TODO: this comparison is bogus! it would cause us to upgrade even with the same format
                        // instead, we should check if the codec has changed
                        to_upgrade_bytes += seg.sizeInBytes;
                    }
                }
            }
        }

        builder.byteSizeField(SIZE_IN_BYTES, SIZE, total_bytes);
        builder.byteSizeField(SIZE_TO_UPGRADE_IN_BYTES, SIZE_TO_UPGRADE, to_upgrade_bytes);
        builder.byteSizeField(SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, SIZE_TO_UPGRADE_ANCIENT, to_upgrade_bytes_ancient);
    }

    static final XContentBuilderString SIZE = new XContentBuilderString("size");
    static final XContentBuilderString SIZE_IN_BYTES = new XContentBuilderString("size_in_bytes");
    static final XContentBuilderString SIZE_TO_UPGRADE = new XContentBuilderString("size_to_upgrade");
    static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT = new XContentBuilderString("size_to_upgrade_ancient");
    static final XContentBuilderString SIZE_TO_UPGRADE_IN_BYTES = new XContentBuilderString("size_to_upgrade_in_bytes");
    static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT_IN_BYTES = new XContentBuilderString("size_to_upgrade_ancient_in_bytes");
}
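A request/response sketch for the rewritten POST handler (index name, shard counts, and version string are illustrative; the _shards header comes from buildBroadcastShardsHeader, and upgraded_indices is the map built above):

    curl -XPOST 'http://localhost:9200/my_index/_upgrade?only_ancient_segments=true'

    {
      "_shards": { "total": 5, "successful": 5, "failed": 0 },
      "upgraded_indices": {
        "my_index": "4.10.4"
      }
    }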

@ -19,7 +19,6 @@

package org.elasticsearch.search.aggregations.metrics.percentiles;

import org.elasticsearch.Version;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -79,12 +78,6 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega
    @Override
    protected void doReadFrom(StreamInput in) throws IOException {
        valueFormatter = ValueFormatterStreams.readOptional(in);
        if (in.getVersion().before(Version.V_1_2_0)) {
            final byte id = in.readByte();
            if (id != 0) {
                throw new IllegalArgumentException("Unexpected percentiles aggregator id [" + id + "]");
            }
        }
        keys = new double[in.readInt()];
        for (int i = 0; i < keys.length; ++i) {
            keys[i] = in.readDouble();

@ -96,9 +89,6 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega
    @Override
    protected void doWriteTo(StreamOutput out) throws IOException {
        ValueFormatterStreams.writeOptional(valueFormatter, out);
        if (out.getVersion().before(Version.V_1_2_0)) {
            out.writeByte((byte) 0);
        }
        out.writeInt(keys.length);
        for (int i = 0; i < keys.length; ++i) {
            out.writeDouble(keys[i]);

@ -133,8 +133,11 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
        }
    }


    @Override
    public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
    public void search(Query query, Collector collector) throws IOException {
        // Wrap the caller's collector with various wrappers e.g. those used to siphon
        // matches off for aggregation or to impose a time-limit on collection.
        final boolean timeoutSet = searchContext.timeoutInMillis() != -1;
        final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;

@ -166,8 +169,13 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
            collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
            }
        }
        super.search(query, collector);
    }

    // we only compute the doc id set once since within a context, we execute the same query always...
    @Override
    public void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
        final boolean timeoutSet = searchContext.timeoutInMillis() != -1;
        final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;
        try {
        if (timeoutSet || terminateAfterSet) {
            try {

@ -106,6 +106,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
            .addAll(UNMODIFIABLE_SETTINGS)
            .add(SETTING_NUMBER_OF_REPLICAS)
            .add(SETTING_AUTO_EXPAND_REPLICAS)
            .add(SETTING_VERSION_UPGRADED)
            .add(SETTING_VERSION_MINIMUM_COMPATIBLE)
            .build();

    private final ClusterService clusterService;

@ -54,7 +54,6 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.test.index.merge.NoMergePolicyProvider;
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
import org.hamcrest.Matchers;
import org.junit.AfterClass;
import org.junit.Before;

@ -67,8 +66,9 @@ import java.nio.file.attribute.BasicFileAttributes;
import java.util.*;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.junit.matchers.JUnitMatchers.containsString;

// needs at least 2 nodes since it bumps replicas to 1
@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)

@ -110,7 +110,6 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
    @Override
    public Settings nodeSettings(int ord) {
        return Settings.builder()
                .put(Node.HTTP_ENABLED, true) // for _upgrade
                .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class) // disable merging so no segments will be upgraded
                .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 30) // increase recovery speed for small files
                .build();

@ -438,13 +437,11 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio
    }

    void assertUpgradeWorks(String indexName, boolean alreadyLatest) throws Exception {
        HttpRequestBuilder httpClient = httpClient();

        if (alreadyLatest == false) {
            UpgradeTest.assertNotUpgraded(httpClient, indexName);
            UpgradeTest.assertNotUpgraded(client(), indexName);
        }
        UpgradeTest.runUpgrade(httpClient, indexName);
        UpgradeTest.assertUpgraded(httpClient, indexName);
        assertNoFailures(client().admin().indices().prepareUpgrade(indexName).get());
        UpgradeTest.assertUpgraded(client(), indexName);
    }

}

@ -1,50 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.collect;

import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.test.ElasticsearchTestCase;

import java.util.Iterator;
import java.util.List;

public class Iterators2Tests extends ElasticsearchTestCase {

    public void testDeduplicateSorted() {
        final List<String> list = Lists.newArrayList();
        for (int i = randomInt(100); i >= 0; --i) {
            final int frequency = randomIntBetween(1, 10);
            final String s = randomAsciiOfLength(randomIntBetween(2, 20));
            for (int j = 0; j < frequency; ++j) {
                list.add(s);
            }
        }
        CollectionUtil.introSort(list);
        final List<String> deduplicated = Lists.newArrayList();
        for (Iterator<String> it = Iterators2.deduplicateSorted(list.iterator(), Ordering.natural()); it.hasNext(); ) {
            deduplicated.add(it.next());
        }
        assertEquals(Lists.newArrayList(Sets.newTreeSet(list)), deduplicated);
    }

}

@ -518,18 +518,4 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest {
        assertTrue(mergeResult.buildConflicts()[0].contains("cannot enable norms"));
    }

    public void testTermsQuery() throws Exception {
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").endObject().endObject()
                .endObject().endObject().string();

        DocumentMapper defaultMapper = parser.parse(mapping);
        FieldMapper mapper = defaultMapper.mappers().getMapper("field");
        assertNotNull(mapper);
        assertTrue(mapper instanceof StringFieldMapper);
        assertEquals(Queries.newMatchNoDocsQuery(), mapper.termsQuery(Collections.emptyList(), null));
        assertEquals(new TermQuery(new Term("field", "value")), mapper.termsQuery(Collections.singletonList("value"), null));
        assertEquals(new TermsQuery(new Term("field", "value1"), new Term("field", "value2")), mapper.termsQuery(Arrays.asList("value1", "value2"), null));
    }

}

@ -61,4 +61,17 @@ public class InternalSettingsPreparerTests extends ElasticsearchTestCase {
        // Should use setting from the system property
        assertThat(tuple.v1().get("node.zone"), equalTo("bar"));
    }

    @Test
    public void testAlternateConfigFileSuffixes() {
        // test that we can read config files with .yaml, .json, and .properties suffixes
        Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder()
                .put("config.ignore_system_properties", true)
                .put("path.home", createTempDir().toString())
                .build(), true);

        assertThat(tuple.v1().get("yaml.config.exists"), equalTo("true"));
        assertThat(tuple.v1().get("json.config.exists"), equalTo("true"));
        assertThat(tuple.v1().get("properties.config.exists"), equalTo("true"));
    }
}

|
|||
package org.elasticsearch.rest.action.admin.indices.upgrade;
|
||||
|
||||
import org.elasticsearch.bwcompat.StaticIndexBackwardCompatibilityTest;
|
||||
import org.elasticsearch.node.Node;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
|
||||
|
||||
public class UpgradeReallyOldIndexTest extends StaticIndexBackwardCompatibilityTest {
|
||||
|
||||
public void testUpgrade_0_90_6() throws Exception {
|
||||
String indexName = "index-0.90.6";
|
||||
loadIndex(indexName, Node.HTTP_ENABLED, true);
|
||||
|
||||
UpgradeTest.assertNotUpgraded(httpClient(), indexName);
|
||||
assertTrue(UpgradeTest.hasAncientSegments(httpClient(), indexName));
|
||||
UpgradeTest.runUpgrade(httpClient(), indexName, "wait_for_completion", "true", "only_ancient_segments", "true");
|
||||
assertFalse(UpgradeTest.hasAncientSegments(httpClient(), "index-0.90.6"));
|
||||
|
||||
loadIndex(indexName);
|
||||
UpgradeTest.assertNotUpgraded(client(), indexName);
|
||||
assertTrue(UpgradeTest.hasAncientSegments(client(), indexName));
|
||||
assertNoFailures(client().admin().indices().prepareUpgrade(indexName).setUpgradeOnlyAncientSegments(true).get());
|
||||
|
||||
assertFalse(UpgradeTest.hasAncientSegments(client(), "index-0.90.6"));
|
||||
// This index has only ancient segments, so it should now be fully upgraded:
|
||||
UpgradeTest.assertUpgraded(httpClient(), indexName);
|
||||
UpgradeTest.assertUpgraded(client(), indexName);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -26,26 +26,26 @@ import org.elasticsearch.action.admin.indices.segments.IndexSegments;
|
|||
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
|
||||
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.engine.Segment;
|
||||
import org.elasticsearch.node.Node;
|
||||
import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
|
||||
import org.elasticsearch.test.rest.client.http.HttpResponse;
|
||||
import org.elasticsearch.test.rest.json.JsonPath;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
|
||||
|
||||
@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) // test scope since we set cluster wide settings
|
||||
public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
|
||||
|
@ -134,20 +134,20 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
        logger.info("--> Nodes upgrade complete");
        logSegmentsState();

        assertNotUpgraded(httpClient(), null);
        assertNotUpgraded(client());
        final String indexToUpgrade = "test" + randomInt(numIndexes - 1);

        // This test fires up another node running an older version of ES, but because wire protocol changes across major ES versions, it
        // means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not):
        assertFalse(hasAncientSegments(httpClient(), indexToUpgrade));
        assertFalse(hasAncientSegments(client(), indexToUpgrade));

        logger.info("--> Running upgrade on index " + indexToUpgrade);
        runUpgrade(httpClient(), indexToUpgrade);
        assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get());
        awaitBusy(new Predicate<Object>() {
            @Override
            public boolean apply(Object o) {
                try {
                    return isUpgraded(httpClient(), indexToUpgrade);
                    return isUpgraded(client(), indexToUpgrade);
                } catch (Exception e) {
                    throw ExceptionsHelper.convertToRuntime(e);
                }
@ -156,48 +156,40 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
        logger.info("--> Single index upgrade complete");

        logger.info("--> Running upgrade on the rest of the indexes");
        runUpgrade(httpClient(), null);
        assertNoFailures(client().admin().indices().prepareUpgrade().get());
        logSegmentsState();
        logger.info("--> Full upgrade complete");
        assertUpgraded(httpClient(), null);
        assertUpgraded(client());
    }

    static String upgradePath(String index) {
        String path = "/_upgrade";
        if (index != null) {
            path = "/" + index + path;
        }
        return path;
    }

    public static void assertNotUpgraded(HttpRequestBuilder httpClient, String index) throws Exception {
        for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) {
            assertTrue("index " + status.indexName + " should not be zero sized", status.totalBytes != 0);
    public static void assertNotUpgraded(Client client, String... index) throws Exception {
        for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
            assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
            // TODO: it would be better for this to be strictly greater, but sometimes an extra flush
            // mysteriously happens after the second round of docs are indexed
            assertTrue("index " + status.indexName + " should have recovered some segments from transaction log",
                    status.totalBytes >= status.toUpgradeBytes);
            assertTrue("index " + status.indexName + " should need upgrading", status.toUpgradeBytes != 0);
            assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log",
                    status.getTotalBytes() >= status.getToUpgradeBytes());
            assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0);
        }
    }

    public static void assertNoAncientSegments(HttpRequestBuilder httpClient, String index) throws Exception {
        for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) {
            assertTrue("index " + status.indexName + " should not be zero sized", status.totalBytes != 0);
    public static void assertNoAncientSegments(Client client, String... index) throws Exception {
        for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
            assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
            // TODO: it would be better for this to be strictly greater, but sometimes an extra flush
            // mysteriously happens after the second round of docs are indexed
            assertTrue("index " + status.indexName + " should not have any ancient segments",
                    status.toUpgradeBytesAncient == 0);
            assertTrue("index " + status.indexName + " should have recovered some segments from transaction log",
                    status.totalBytes >= status.toUpgradeBytes);
            assertTrue("index " + status.indexName + " should need upgrading", status.toUpgradeBytes != 0);
            assertTrue("index " + status.getIndex() + " should not have any ancient segments",
                    status.getToUpgradeBytesAncient() == 0);
            assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log",
                    status.getTotalBytes() >= status.getToUpgradeBytes());
            assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0);
        }
    }

    /** Returns true if there are any ancient segments. */
    public static boolean hasAncientSegments(HttpRequestBuilder httpClient, String index) throws Exception {
        for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) {
            if (status.toUpgradeBytesAncient != 0) {
    public static boolean hasAncientSegments(Client client, String index) throws Exception {
        for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
            if (status.getToUpgradeBytesAncient() != 0) {
                return true;
            }
        }
@ -205,20 +197,20 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
    }

    /** Returns true if there are any old but not ancient segments. */
    public static boolean hasOldButNotAncientSegments(HttpRequestBuilder httpClient, String index) throws Exception {
        for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) {
            if (status.toUpgradeBytes > status.toUpgradeBytesAncient) {
    public static boolean hasOldButNotAncientSegments(Client client, String index) throws Exception {
        for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
            if (status.getToUpgradeBytes() > status.getToUpgradeBytesAncient()) {
                return true;
            }
        }
        return false;
    }

    public static void assertUpgraded(HttpRequestBuilder httpClient, String index) throws Exception {
        for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) {
            assertTrue("index " + status.indexName + " should not be zero sized", status.totalBytes != 0);
            assertEquals("index " + status.indexName + " should be upgraded",
                    0, status.toUpgradeBytes);
    public static void assertUpgraded(Client client, String... index) throws Exception {
        for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
            assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
            assertEquals("index " + status.getIndex() + " should be upgraded",
                    0, status.getToUpgradeBytes());
        }

        // double check using the segments api that all segments are actually upgraded
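The "double check" comment above refers to walking the segments API. A sketch of what such a verification can look like under the same imports, assuming `Segment.getVersion()` reports the Lucene version that wrote the segment (the getter names and wrapper class are assumptions for illustration):

[source,java]
--------------------------------------------------
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.segments.IndexSegments;
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.engine.Segment;

class SegmentsCheckSketch {
    // Walk every segment of every shard copy and report whether each one
    // was written by the current Lucene version.
    static boolean allSegmentsCurrent(Client client) {
        IndicesSegmentResponse rsp = client.admin().indices().prepareSegments().get();
        for (IndexSegments index : rsp.getIndices().values()) {
            for (IndexShardSegments shard : index) {
                for (ShardSegments copy : shard) {
                    for (Segment segment : copy.getSegments()) {
                        if (!Version.CURRENT.luceneVersion.equals(segment.getVersion())) {
                            return false;
                        }
                    }
                }
            }
        }
        return true;
    }
}
--------------------------------------------------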
@ -242,12 +234,12 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
        }
    }

    static boolean isUpgraded(HttpRequestBuilder httpClient, String index) throws Exception {
    static boolean isUpgraded(Client client, String index) throws Exception {
        ESLogger logger = Loggers.getLogger(UpgradeTest.class);
        int toUpgrade = 0;
        for (UpgradeStatus status : getUpgradeStatus(httpClient, upgradePath(index))) {
            logger.info("Index: " + status.indexName + ", total: " + status.totalBytes + ", toUpgrade: " + status.toUpgradeBytes);
            toUpgrade += status.toUpgradeBytes;
        for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
            logger.info("Index: " + status.getIndex() + ", total: " + status.getTotalBytes() + ", toUpgrade: " + status.getToUpgradeBytes());
            toUpgrade += status.getToUpgradeBytes();
        }
        return toUpgrade == 0;
    }
@ -257,7 +249,7 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
        public final int totalBytes;
        public final int toUpgradeBytes;
        public final int toUpgradeBytesAncient;

        public UpgradeStatus(String indexName, int totalBytes, int toUpgradeBytes, int toUpgradeBytesAncient) {
            this.indexName = indexName;
            this.totalBytes = totalBytes;
@ -266,49 +258,11 @@ public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
            assert toUpgradeBytesAncient <= toUpgradeBytes;
        }
    }

    public static void runUpgrade(HttpRequestBuilder httpClient, String index, String... params) throws Exception {
        assert params.length % 2 == 0;
        HttpRequestBuilder builder = httpClient.method("POST").path(upgradePath(index));
        for (int i = 0; i < params.length; i += 2) {
            builder.addParam(params[i], params[i + 1]);
        }
        HttpResponse rsp = builder.execute();
        assertNotNull(rsp);
        assertEquals(200, rsp.getStatusCode());
    }

    @SuppressWarnings("unchecked")
    static List<UpgradeStatus> getUpgradeStatus(HttpRequestBuilder httpClient, String path) throws Exception {
        HttpResponse rsp = httpClient.method("GET").path(path).execute();
        Map<String,Object> data = validateAndParse(rsp);
        List<UpgradeStatus> ret = new ArrayList<>();
        for (String index : data.keySet()) {
            Map<String, Object> status = (Map<String,Object>) data.get(index);
            assertTrue("missing key size_in_bytes for index " + index, status.containsKey("size_in_bytes"));
            Object totalBytes = status.get("size_in_bytes");
            assertTrue("size_in_bytes for index " + index + " is not an integer", totalBytes instanceof Integer);
            assertTrue("missing key size_to_upgrade_in_bytes for index " + index, status.containsKey("size_to_upgrade_in_bytes"));
            Object toUpgradeBytes = status.get("size_to_upgrade_in_bytes");
            assertTrue("size_to_upgrade_in_bytes for index " + index + " is not an integer", toUpgradeBytes instanceof Integer);
            Object toUpgradeBytesAncient = status.get("size_to_upgrade_ancient_in_bytes");
            assertTrue("size_to_upgrade_ancient_in_bytes for index " + index + " is not an integer", toUpgradeBytesAncient instanceof Integer);
            ret.add(new UpgradeStatus(index, (Integer) totalBytes, (Integer) toUpgradeBytes, (Integer) toUpgradeBytesAncient));
        }
        return ret;
    }

    @SuppressWarnings("unchecked")
    static Map<String, Object> validateAndParse(HttpResponse rsp) throws Exception {
        assertNotNull(rsp);
        assertEquals(200, rsp.getStatusCode());
        assertTrue(rsp.hasBody());
        return (Map<String,Object>) new JsonPath(rsp.getBody()).evaluate("");
    }

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
                .put(Node.HTTP_ENABLED, true).build();
    static Collection<IndexUpgradeStatus> getUpgradeStatus(Client client, String... indices) throws Exception {
        UpgradeStatusResponse upgradeStatusResponse = client.admin().indices().prepareUpgradeStatus(indices).get();
        assertNoFailures(upgradeStatusResponse);
        return upgradeStatusResponse.getIndices().values();
    }
}
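With the refactoring, fetching upgrade status is roughly a single transport call instead of an HTTP request plus hand-rolled JSON parsing. A usage sketch built only on the calls shown above (the wrapper class is illustrative, and the `long` return type is an assumption):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus;
import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
import org.elasticsearch.client.Client;

class UpgradeStatusSketch {
    // Sum the bytes still needing an upgrade across the given indices;
    // zero means everything is already on the current segment format.
    static long bytesLeftToUpgrade(Client client, String... indices) {
        UpgradeStatusResponse rsp = client.admin().indices().prepareUpgradeStatus(indices).get();
        long left = 0;
        for (IndexUpgradeStatus status : rsp.getIndices().values()) {
            left += status.getToUpgradeBytes();
        }
        return left;
    }
}
--------------------------------------------------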
@ -63,7 +63,15 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.*;
import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;

/**
 *
@ -228,7 +236,9 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {

    @Test
    public void testBasics() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
        SearchResponse response = client()
                .prepareSearch("idx")
                .setTypes("type")
                .addAggregation(terms("terms")
                        .executionHint(randomExecutionHint())
                        .field(TERMS_AGGS_FIELD)
@ -264,6 +274,65 @@ public class TopHitsTests extends ElasticsearchIntegrationTest {
        }
    }

    @Test
    public void testIssue11119() throws Exception {
        // Test that top_hits aggregation is fed scores if query results size=0
        SearchResponse response = client()
                .prepareSearch("idx")
                .setTypes("field-collapsing")
                .setSize(0)
                .setQuery(matchQuery("text", "x y z"))
                .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits")))
                .get();

        assertSearchResponse(response);

        assertThat(response.getHits().getTotalHits(), equalTo(8l));
        assertThat(response.getHits().hits().length, equalTo(0));
        assertThat(response.getHits().maxScore(), equalTo(0f));
        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(3));

        for (Terms.Bucket bucket : terms.getBuckets()) {
            assertThat(bucket, notNullValue());
            TopHits topHits = bucket.getAggregations().get("hits");
            SearchHits hits = topHits.getHits();
            float bestScore = Float.MAX_VALUE;
            for (int h = 0; h < hits.getHits().length; h++) {
                float score = hits.getAt(h).getScore();
                assertThat(score, lessThanOrEqualTo(bestScore));
                assertThat(score, greaterThan(0f));
                bestScore = hits.getAt(h).getScore();
            }
        }

        // Also check that min_score setting works when size=0
        // (technically not a test of top_hits but implementation details are
        // tied up with the need to feed scores into the agg tree even when
        // users don't want ranked set of query results.)
        response = client()
                .prepareSearch("idx")
                .setTypes("field-collapsing")
                .setSize(0)
                .setMinScore(0.0001f)
                .setQuery(matchQuery("text", "x y z"))
                .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group"))
                .get();

        assertSearchResponse(response);

        assertThat(response.getHits().getTotalHits(), equalTo(8l));
        assertThat(response.getHits().hits().length, equalTo(0));
        assertThat(response.getHits().maxScore(), equalTo(0f));
        terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(3));
    }


    @Test
    public void testBreadthFirst() throws Exception {
        // breadth_first will be ignored since we need scores
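testIssue11119 pins down the contract that scores still flow into the aggregation tree even when the query itself returns no ranked hits (size=0). A trimmed sketch of reading those scores back out of the response, using the same builders as the test (index, field, and agg names are illustrative, and `getKeyAsString` is assumed available on the bucket):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.metrics.tophits.TopHits;

import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
import static org.elasticsearch.search.aggregations.AggregationBuilders.topHits;

class TopHitsScoreSketch {
    // Even with size=0 (no ranked hits in the response itself), each
    // bucket's top_hits should carry real, descending scores.
    static void printBucketScores(Client client) {
        SearchResponse response = client.prepareSearch("idx")
                .setSize(0)
                .setQuery(matchQuery("text", "x y z"))
                .addAggregation(terms("terms").field("group").subAggregation(topHits("hits")))
                .get();
        Terms terms = response.getAggregations().get("terms");
        for (Terms.Bucket bucket : terms.getBuckets()) {
            TopHits topHits = bucket.getAggregations().get("hits");
            for (SearchHit hit : topHits.getHits()) {
                System.out.println(bucket.getKeyAsString() + " -> " + hit.getScore());
            }
        }
    }
}
--------------------------------------------------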
@ -1907,7 +1907,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
                }
                assertThat(count, equalTo(expectedCount));
            }
        });
        }, 1, TimeUnit.MINUTES);
    }

    /**
@ -1847,7 +1847,12 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
     * Returns path to a random directory that can be used to create a temporary file system repo
     */
    public Path randomRepoPath() {
        return randomRepoPath(internalCluster().getDefaultSettings());
        if (currentCluster instanceof InternalTestCluster) {
            return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings());
        } else if (currentCluster instanceof CompositeTestCluster) {
            return randomRepoPath(((CompositeTestCluster) currentCluster).internalCluster().getDefaultSettings());
        }
        throw new UnsupportedOperationException("unsupported cluster type");
    }

    /**
@ -312,7 +312,7 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration
        //skip test if it matches one of the blacklist globs
        for (PathMatcher blacklistedPathMatcher : blacklistPathMatchers) {
            //we need to replace a few characters otherwise the test section name can't be parsed as a path on windows
            String testSection = testCandidate.getTestSection().getName().replace("*", "").replace("\\", "/").replaceAll("\\s+/", "/").trim();
            String testSection = testCandidate.getTestSection().getName().replace("*", "").replace("\\", "/").replaceAll("\\s+/", "/").replace(":", "").trim();
            String testPath = testCandidate.getSuitePath() + "/" + testSection;
            assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher.matches(PathUtils.get(testPath)));
        }
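The added `replace(":", "")` matters because ':' cannot appear in a Windows path component, so building the path for the glob check could fail before the blacklist is ever consulted. A standalone sketch of the same sanitize-then-match step using plain `java.nio.file` (the glob pattern and section name here are illustrative, not taken from the suite):

[source,java]
--------------------------------------------------
import java.nio.file.FileSystems;
import java.nio.file.PathMatcher;
import java.nio.file.Paths;

class BlacklistMatchSketch {
    public static void main(String[] args) {
        // Same sanitizing as the test runner: strip characters that cannot
        // appear in a filesystem path component before matching.
        String testSection = "some test: with chars / windows * dislikes"
                .replace("*", "").replace("\\", "/").replaceAll("\\s+/", "/").replace(":", "").trim();
        String testPath = "suite/path/" + testSection;
        // '**' crosses directory boundaries, so this glob covers the whole suite.
        PathMatcher matcher = FileSystems.getDefault().getPathMatcher("glob:suite/**");
        System.out.println(matcher.matches(Paths.get(testPath))); // true
    }
}
--------------------------------------------------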
@ -0,0 +1,3 @@
{
    "json.config.exists" : "true"
}
@ -0,0 +1,2 @@

properties.config.exists: true
@ -0,0 +1,3 @@

yaml.config.exists: true
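The three one-line fixtures above presumably let the suite assert that JSON, properties, and YAML configuration files are all picked up. A sketch of parsing JSON and YAML sources with the settings builder, assuming `loadFromSource` is available in this vintage of the codebase (the wrapper class is illustrative; the properties variant is typically loaded by file extension instead):

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

class ConfigFormatSketch {
    public static void main(String[] args) {
        // The builder sniffs the format of the source string, so the JSON
        // and YAML variants of a key both parse to flat settings entries.
        Settings fromJson = Settings.builder()
                .loadFromSource("{ \"json.config.exists\" : \"true\" }")
                .build();
        Settings fromYaml = Settings.builder()
                .loadFromSource("yaml.config.exists: true")
                .build();
        System.out.println(fromJson.get("json.config.exists")); // true
        System.out.println(fromYaml.get("yaml.config.exists")); // true
    }
}
--------------------------------------------------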