commit 1ef98ede17
Merge branch 'master' into feature/multi_cluster_search
@@ -55,7 +55,7 @@ dependencies {
   runtime 'org.apache.commons:commons-math3:3.2'
 }
 
-compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked,-processing"
 // enable the JMH's BenchmarkProcessor to generate the final benchmark classes
 // needs to be added separately otherwise Gradle will quote it and javac will fail
 compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
@@ -515,11 +515,9 @@ class BuildPlugin implements Plugin<Project> {
         }
       }
 
-      // System assertions (-esa) are disabled for now because of what looks like a
-      // JDK bug triggered by Groovy on JDK7. We should look at re-enabling system
-      // assertions when we upgrade to a new version of Groovy (currently 2.4.4) or
-      // require JDK8. See https://issues.apache.org/jira/browse/GROOVY-7528.
-      enableSystemAssertions false
+      boolean assertionsEnabled = Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))
+      enableSystemAssertions assertionsEnabled
+      enableAssertions assertionsEnabled
 
       testLogging {
         showNumFailuresAtEnd 25
@@ -72,12 +72,10 @@ class ClusterConfiguration {
     boolean useMinimumMasterNodes = true
 
     @Input
-    String jvmArgs = "-ea" +
-        " " + "-Xms" + System.getProperty('tests.heap.size', '512m') +
+    String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
         " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
         " " + System.getProperty('tests.jvm.argline', '')
 
 
     /**
      * A closure to call which returns the unicast host to connect to for cluster formation.
      *
@@ -151,6 +151,9 @@ class NodeInfo {
         args.addAll("-E", "node.portsfile=true")
         String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
         String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
+        if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) {
+            esJavaOpts += " -ea -esa"
+        }
         env.put('ES_JAVA_OPTS', esJavaOpts)
         for (Map.Entry<String, String> property : System.properties.entrySet()) {
             if (property.key.startsWith('tests.es.')) {
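The NodeInfo hunk above threads the `tests.asserts` flag through to the forked node's `ES_JAVA_OPTS`. As a rough illustration of the resulting value, here is a standalone sketch of the same assembly logic — only the `tests.asserts` property name and the `-ea -esa` flags are fixed by the diff; the class name and the sample properties are invented for the example:

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

// Sketch of the ES_JAVA_OPTS assembly performed in NodeInfo above.
public class EsJavaOptsSketch {
    static String esJavaOpts(Map<String, String> systemProperties, String jvmArgs) {
        String collected = systemProperties.entrySet().stream()
                .map(e -> "-D" + e.getKey() + "=" + e.getValue())
                .collect(Collectors.joining(" "));
        String opts = jvmArgs.isEmpty() ? collected : collected + " " + jvmArgs;
        // assertions are enabled unless -Dtests.asserts=false is passed to the build
        if (Boolean.parseBoolean(System.getProperty("tests.asserts", "true"))) {
            opts += " -ea -esa";
        }
        return opts;
    }

    public static void main(String[] args) {
        Map<String, String> props = new LinkedHashMap<>();
        props.put("es.path.home", "/tmp/es"); // illustrative value
        System.out.println(esJavaOpts(props, "-Xms512m -Xmx512m"));
        // prints: -Des.path.home=/tmp/es -Xms512m -Xmx512m -ea -esa
    }
}
```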
@@ -53,7 +53,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]stats[/\\]TransportClusterStatsAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]tasks[/\\]PendingClusterTasksAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]tasks[/\\]PendingClusterTasksRequestBuilder.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]tasks[/\\]PendingClusterTasksResponse.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]tasks[/\\]TransportPendingClusterTasksAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]validate[/\\]template[/\\]RenderSearchTemplateAction.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]validate[/\\]template[/\\]RenderSearchTemplateRequestBuilder.java" checks="LineLength" />
@@ -103,9 +103,7 @@ public class RestNoopBulkAction extends BaseRestHandler {
             builder.field(Fields.ERRORS, false);
             builder.startArray(Fields.ITEMS);
             for (int idx = 0; idx < bulkRequest.numberOfActions(); idx++) {
-                builder.startObject();
                 ITEM_RESPONSE.toXContent(builder, request);
-                builder.endObject();
             }
             builder.endArray();
             builder.endObject();
@@ -94,6 +94,8 @@ dependencies {
       exclude group: 'org.elasticsearch', module: 'elasticsearch'
     }
   }
+  testCompile 'com.google.jimfs:jimfs:1.1'
+  testCompile 'com.google.guava:guava:18.0'
 }
 
 if (isEclipse) {
@@ -27,7 +27,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.seqno.SequenceNumbersService;
@@ -42,7 +42,7 @@ import java.util.Locale;
 /**
  * A base class for the response of a write operation that involves a single doc
  */
-public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent {
+public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContentObject {
 
     /**
      * An enum that represents the the results of CRUD operations, primarily used to communicate the type of
@@ -244,15 +244,22 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+    public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        innerToXContent(builder, params);
+        builder.endObject();
+        return builder;
+    }
+
+    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
         ReplicationResponse.ShardInfo shardInfo = getShardInfo();
         builder.field("_index", shardId.getIndexName())
-                .field("_type", type)
-                .field("_id", id)
-                .field("_version", version)
-                .field("result", getResult().getLowercase());
+            .field("_type", type)
+            .field("_id", id)
+            .field("_version", version)
+            .field("result", getResult().getLowercase());
         if (forcedRefresh) {
-            builder.field("forced_refresh", forcedRefresh);
+            builder.field("forced_refresh", true);
         }
         shardInfo.toXContent(builder, params);
         if (getSeqNo() >= 0) {
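This DocWriteResponse hunk is the heart of the `ToXContent` → `ToXContentObject` migration: `toXContent` becomes a `final` template method that owns the outer braces, while subclasses contribute fields through `innerToXContent`. The payoff shows up in BulkItemResponse and MultiSearchResponse below, where embedding callers switch to `innerToXContent` so the fields land inside their own object. A minimal self-contained sketch of that shape — `Builder` is a stand-in for `XContentBuilder`, and the class names are invented for illustration:

```java
import java.io.IOException;

// Template-method shape introduced above: the base class emits a complete object,
// subclasses only append fields inside it.
abstract class ObjectResponseSketch {
    // stand-in for XContentBuilder
    static final class Builder {
        final StringBuilder sb = new StringBuilder();
        Builder startObject() { sb.append('{'); return this; }
        Builder endObject()   { sb.append('}'); return this; }
        Builder field(String name, Object value) {
            if (sb.length() > 0 && sb.charAt(sb.length() - 1) != '{') sb.append(',');
            sb.append('"').append(name).append("\":\"").append(value).append('"');
            return this;
        }
    }

    public final Builder toXContent(Builder builder) throws IOException {
        builder.startObject();
        innerToXContent(builder);   // subclass hook
        builder.endObject();
        return builder;
    }

    protected abstract Builder innerToXContent(Builder builder) throws IOException;
}

class DeleteLikeResponse extends ObjectResponseSketch {
    @Override
    protected Builder innerToXContent(Builder builder) throws IOException {
        // mirrors DeleteResponse.innerToXContent: add a field, then delegate upward
        return builder.field("found", false);
    }
}
```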
@@ -24,19 +24,19 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.health.ClusterIndexHealth;
 import org.elasticsearch.cluster.health.ClusterStateHealth;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.Locale;
 import java.util.Map;
 
-public class ClusterHealthResponse extends ActionResponse implements StatusToXContent {
+public class ClusterHealthResponse extends ActionResponse implements StatusToXContentObject {
     private String clusterName;
     private int numberOfPendingTasks = 0;
     private int numberOfInFlightFetch = 0;
@@ -200,18 +200,9 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         taskMaxWaitingTime.writeTo(out);
     }
 
-
     @Override
     public String toString() {
-        try {
-            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
-            builder.startObject();
-            toXContent(builder, EMPTY_PARAMS);
-            builder.endObject();
-            return builder.string();
-        } catch (IOException e) {
-            return "{ \"error\" : \"" + e.getMessage() + "\"}";
-        }
+        return Strings.toString(this);
     }
 
     @Override
@@ -240,6 +231,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.field(CLUSTER_NAME, getClusterName());
         builder.field(STATUS, getStatus().name().toLowerCase(Locale.ROOT));
         builder.field(TIMED_OUT, isTimedOut());
@@ -268,6 +260,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
             }
             builder.endObject();
         }
+        builder.endObject();
         return builder;
     }
 }
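ClusterHealthResponse's `toString` (and several responses below) collapses a hand-rolled `XContentFactory.jsonBuilder()` try/catch into `Strings.toString(this)`. That only works because the class now renders a self-contained object. A sketch of what such a helper boils down to — the interface and names are assumptions for illustration, not the actual `org.elasticsearch.common.Strings` source:

```java
import java.io.IOException;

// Assumed shape of a ToXContentObject-style value for this sketch.
interface ObjectRenderable {
    StringBuilder toXContent(StringBuilder builder) throws IOException;
}

final class StringsSketch {
    // Strings.toString-like helper: no external startObject()/endObject() wrapping,
    // because a ToXContentObject emits its own braces.
    static String toString(ObjectRenderable value) {
        try {
            return value.toXContent(new StringBuilder()).toString();
        } catch (IOException e) {
            return "{ \"error\" : \"" + e.getMessage() + "\"}";
        }
    }
}
```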
@@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.tasks.TaskResult;
 
@@ -34,7 +34,7 @@ import static java.util.Objects.requireNonNull;
 /**
  * Returns the list of tasks currently running on the nodes
  */
-public class GetTaskResponse extends ActionResponse implements ToXContent {
+public class GetTaskResponse extends ActionResponse implements ToXContentObject {
     private TaskResult task;
 
     public GetTaskResponse() {
@@ -65,7 +65,10 @@ public class GetTaskResponse extends ActionResponse implements ToXContent {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        return task.innerToXContent(builder, params);
+        builder.startObject();
+        task.innerToXContent(builder, params);
+        builder.endObject();
+        return builder;
     }
 
     @Override
@@ -27,7 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskInfo;
@@ -43,7 +43,7 @@ import java.util.stream.Collectors;
 /**
  * Returns the list of tasks currently running on the nodes
  */
-public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
+public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject {
 
     private List<TaskInfo> tasks;
 
@@ -187,7 +187,10 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        return toXContentGroupedByParents(builder, params);
+        builder.startObject();
+        toXContentGroupedByParents(builder, params);
+        builder.endObject();
+        return builder;
     }
 
     private void toXContentCommon(XContentBuilder builder, Params params) throws IOException {
@@ -214,6 +217,6 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
 
     @Override
     public String toString() {
-        return Strings.toString(this, true);
+        return Strings.toString(this);
     }
 }
@@ -22,18 +22,18 @@ package org.elasticsearch.action.admin.cluster.repositories.verify;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentHelper;
 
 import java.io.IOException;
 
 /**
  * Unregister repository response
  */
-public class VerifyRepositoryResponse extends ActionResponse implements ToXContent {
+public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject {
 
     private DiscoveryNode[] nodes;
 
@@ -83,6 +83,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startObject(Fields.NODES);
         for (DiscoveryNode node : nodes) {
             builder.startObject(node.getId());
@@ -90,11 +91,12 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
             builder.endObject();
         }
         builder.endObject();
+        builder.endObject();
         return builder;
     }
 
     @Override
     public String toString() {
-        return XContentHelper.toString(this);
+        return Strings.toString(this);
     }
 }
@@ -24,7 +24,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.internal.AliasFilter;
 
@@ -32,7 +32,7 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
-public class ClusterSearchShardsResponse extends ActionResponse implements ToXContent {
+public class ClusterSearchShardsResponse extends ActionResponse implements ToXContentObject {
 
     private ClusterSearchShardsGroup[] groups;
     private DiscoveryNode[] nodes;
@@ -104,6 +104,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startObject("nodes");
         for (DiscoveryNode node : nodes) {
             node.toXContent(builder, params);
@@ -129,7 +130,7 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
             group.toXContent(builder, params);
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
-
 }
@@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.snapshots.SnapshotInfo;
@@ -33,7 +33,7 @@ import java.io.IOException;
 /**
  * Create snapshot response
  */
-public class CreateSnapshotResponse extends ActionResponse implements ToXContent {
+public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject {
 
     @Nullable
     private SnapshotInfo snapshotInfo;
@@ -83,12 +83,14 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         if (snapshotInfo != null) {
             builder.field("snapshot");
             snapshotInfo.toXContent(builder, params);
         } else {
             builder.field("accepted", true);
         }
+        builder.endObject();
         return builder;
     }
 }
@@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.snapshots.SnapshotInfo;
 
@@ -34,7 +35,7 @@ import java.util.List;
 /**
  * Get snapshots response
  */
-public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
+public class GetSnapshotsResponse extends ActionResponse implements ToXContentObject {
 
     private List<SnapshotInfo> snapshots = Collections.emptyList();
 
@@ -76,11 +77,13 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+        builder.startObject();
         builder.startArray("snapshots");
         for (SnapshotInfo snapshotInfo : snapshots) {
             snapshotInfo.toXContent(builder, params);
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
 
@@ -88,7 +88,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
             currentSnapshotIds.add(snapshotId);
         }
         if (isCurrentSnapshotsOnly(request.snapshots()) == false) {
-            for (SnapshotId snapshotId : snapshotsService.snapshotIds(repository)) {
+            for (SnapshotId snapshotId : snapshotsService.getRepositoryData(repository).getAllSnapshotIds()) {
                 allSnapshotIds.put(snapshotId.getName(), snapshotId);
             }
         }
@@ -24,6 +24,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.snapshots.RestoreInfo;
@@ -33,7 +34,7 @@ import java.io.IOException;
 /**
  * Contains information about restores snapshot
  */
-public class RestoreSnapshotResponse extends ActionResponse implements ToXContent {
+public class RestoreSnapshotResponse extends ActionResponse implements ToXContentObject {
 
     @Nullable
     private RestoreInfo restoreInfo;
@@ -75,12 +76,14 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+        builder.startObject();
         if (restoreInfo != null) {
             builder.field("snapshot");
             restoreInfo.toXContent(builder, params);
         } else {
             builder.field("accepted", true);
         }
+        builder.endObject();
         return builder;
     }
 }
@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -33,7 +33,7 @@ import java.util.List;
 /**
  * Snapshot status response
  */
-public class SnapshotsStatusResponse extends ActionResponse implements ToXContent {
+public class SnapshotsStatusResponse extends ActionResponse implements ToXContentObject {
 
     private List<SnapshotStatus> snapshots = Collections.emptyList();
 
@@ -75,11 +75,13 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startArray("snapshots");
         for (SnapshotStatus snapshot : snapshots) {
             snapshot.toXContent(builder, params);
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
 
@@ -36,7 +36,9 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.repositories.RepositoryData;
 import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotException;
 import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.snapshots.SnapshotInfo;
 import org.elasticsearch.snapshots.SnapshotMissingException;
@@ -201,7 +203,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
         final String repositoryName = request.repository();
         if (Strings.hasText(repositoryName) && request.snapshots() != null && request.snapshots().length > 0) {
             final Set<String> requestedSnapshotNames = Sets.newHashSet(request.snapshots());
-            final Map<String, SnapshotId> matchedSnapshotIds = snapshotsService.snapshotIds(repositoryName).stream()
+            final RepositoryData repositoryData = snapshotsService.getRepositoryData(repositoryName);
+            final Map<String, SnapshotId> matchedSnapshotIds = repositoryData.getAllSnapshotIds().stream()
                 .filter(s -> requestedSnapshotNames.contains(s.getName()))
                 .collect(Collectors.toMap(SnapshotId::getName, Function.identity()));
             for (final String snapshotName : request.snapshots()) {
@@ -220,6 +223,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                     } else {
                         throw new SnapshotMissingException(repositoryName, snapshotName);
                     }
+                } else if (repositoryData.getIncompatibleSnapshotIds().contains(snapshotId)) {
+                    throw new SnapshotException(repositoryName, snapshotName, "cannot get the status for an incompatible snapshot");
                 }
                 SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
                 List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
@@ -243,7 +248,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                     default:
                         throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state());
                 }
-                builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotInfo.snapshotId()), state, Collections.unmodifiableList(shardStatusBuilder)));
+                builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotId), state, Collections.unmodifiableList(shardStatusBuilder)));
             }
         }
     }
@@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -31,7 +31,7 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-public class PendingClusterTasksResponse extends ActionResponse implements Iterable<PendingClusterTask>, ToXContent {
+public class PendingClusterTasksResponse extends ActionResponse implements Iterable<PendingClusterTask>, ToXContentObject {
 
     private List<PendingClusterTask> pendingTasks;
 
@@ -63,13 +63,15 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera
         StringBuilder sb = new StringBuilder();
         sb.append("tasks: (").append(pendingTasks.size()).append("):\n");
         for (PendingClusterTask pendingClusterTask : this) {
-            sb.append(pendingClusterTask.getInsertOrder()).append("/").append(pendingClusterTask.getPriority()).append("/").append(pendingClusterTask.getSource()).append("/").append(pendingClusterTask.getTimeInQueue()).append("\n");
+            sb.append(pendingClusterTask.getInsertOrder()).append("/").append(pendingClusterTask.getPriority()).append("/")
+                .append(pendingClusterTask.getSource()).append("/").append(pendingClusterTask.getTimeInQueue()).append("\n");
         }
         return sb.toString();
     }
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startArray(Fields.TASKS);
         for (PendingClusterTask pendingClusterTask : this) {
             builder.startObject();
@@ -82,6 +84,7 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera
             builder.endObject();
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
 
@@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -32,9 +32,9 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken>, ToXContent {
+public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken>, ToXContentObject {
 
-    public static class AnalyzeToken implements Streamable, ToXContent {
+    public static class AnalyzeToken implements Streamable, ToXContentObject {
         private String term;
         private int startOffset;
         private int endOffset;
@@ -154,6 +154,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         if (tokens != null) {
             builder.startArray(Fields.TOKENS);
             for (AnalyzeToken token : tokens) {
@@ -167,6 +168,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
             detail.toXContent(builder, params);
             builder.endObject();
         }
+        builder.endObject();
         return builder;
     }
 
@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.rollover;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -32,7 +32,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
-public final class RolloverResponse extends ActionResponse implements ToXContent {
+public final class RolloverResponse extends ActionResponse implements ToXContentObject {
 
     private static final String NEW_INDEX = "new_index";
     private static final String OLD_INDEX = "old_index";
@@ -157,6 +157,7 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.field(OLD_INDEX, oldIndex);
         builder.field(NEW_INDEX, newIndex);
         builder.field(ROLLED_OVER, rolledOver);
@@ -168,6 +169,7 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
             builder.field(entry.getKey(), entry.getValue());
         }
         builder.endObject();
+        builder.endObject();
         return builder;
     }
 }
@@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -31,7 +32,7 @@ import java.util.List;
 
 import static java.util.Collections.singletonMap;
 
-public class GetIndexTemplatesResponse extends ActionResponse implements ToXContent {
+public class GetIndexTemplatesResponse extends ActionResponse implements ToXContentObject {
 
     private List<IndexTemplateMetaData> indexTemplates;
 
@@ -68,10 +69,11 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         params = new ToXContent.DelegatingMapParams(singletonMap("reduce_mappings", "true"), params);
-
+        builder.startObject();
         for (IndexTemplateMetaData indexTemplateMetaData : getIndexTemplates()) {
             IndexTemplateMetaData.Builder.toXContent(indexTemplateMetaData, builder, params);
         }
+        builder.endObject();
         return builder;
     }
 }
@@ -32,7 +32,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;
@@ -43,7 +43,7 @@ import java.io.IOException;
  * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id
  * of the relevant action, and if it has failed or not (with the failure message incase it failed).
  */
-public class BulkItemResponse implements Streamable, StatusToXContent {
+public class BulkItemResponse implements Streamable, StatusToXContentObject {
 
     @Override
     public RestStatus status() {
@@ -52,9 +52,10 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startObject(opType.getLowercase());
         if (failure == null) {
-            response.toXContent(builder, params);
+            response.innerToXContent(builder, params);
             builder.field(Fields.STATUS, response.status().getStatus());
         } else {
            builder.field(Fields._INDEX, failure.getIndex());
@@ -66,6 +67,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
             builder.endObject();
         }
         builder.endObject();
+        builder.endObject();
         return builder;
     }
 
@@ -179,7 +181,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
 
     @Override
     public String toString() {
-        return Strings.toString(this, true);
+        return Strings.toString(this);
     }
 }
 
@@ -48,9 +48,9 @@ public class DeleteResponse extends DocWriteResponse {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
         builder.field("found", result == Result.DELETED);
-        super.toXContent(builder, params);
+        super.innerToXContent(builder, params);
         return builder;
     }
 
@@ -25,7 +25,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.get.GetField;
@@ -42,7 +42,7 @@ import java.util.Objects;
  * @see GetRequest
 * @see org.elasticsearch.client.Client#get(GetRequest)
 */
-public class GetResponse extends ActionResponse implements Iterable<GetField>, ToXContent {
+public class GetResponse extends ActionResponse implements Iterable<GetField>, ToXContentObject {
 
     GetResult getResult;
 
@@ -194,6 +194,6 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
 
     @Override
     public String toString() {
-        return Strings.toString(this, true);
+        return Strings.toString(this);
     }
 }
@@ -24,14 +24,14 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Iterator;
 
-public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContent {
+public class MultiGetResponse extends ActionResponse implements Iterable<MultiGetItemResponse>, ToXContentObject {
 
     /**
      * Represents a failure.
@@ -128,6 +128,7 @@ public class MultiGetResponse extends ActionResponse implements Iterable<MultiGe
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startArray(Fields.DOCS);
         for (MultiGetItemResponse response : responses) {
             if (response.isFailed()) {
@@ -144,6 +145,7 @@ public class MultiGetResponse extends ActionResponse implements Iterable<MultiGe
             }
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
 
@@ -57,13 +57,13 @@ public class IndexResponse extends DocWriteResponse {
         builder.append(",version=").append(getVersion());
         builder.append(",result=").append(getResult().getLowercase());
         builder.append(",seqNo=").append(getSeqNo());
-        builder.append(",shards=").append(Strings.toString(getShardInfo(), true));
+        builder.append(",shards=").append(Strings.toString(getShardInfo()));
         return builder.append("]").toString();
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        super.toXContent(builder, params);
+    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+        super.innerToXContent(builder, params);
         builder.field("created", result == Result.CREATED);
         return builder;
     }
@@ -22,7 +22,7 @@ package org.elasticsearch.action.ingest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.ingest.PipelineConfiguration;
 import org.elasticsearch.rest.RestStatus;
@@ -31,7 +31,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-public class GetPipelineResponse extends ActionResponse implements StatusToXContent {
+public class GetPipelineResponse extends ActionResponse implements StatusToXContentObject {
 
     private List<PipelineConfiguration> pipelines;
 
@@ -76,9 +76,11 @@ public class GetPipelineResponse extends ActionResponse implements StatusToXCont
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         for (PipelineConfiguration pipeline : pipelines) {
             builder.field(pipeline.getId(), pipeline.getConfigAsMap());
         }
+        builder.endObject();
         return builder;
     }
 }
@@ -22,7 +22,7 @@ package org.elasticsearch.action.ingest;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -30,7 +30,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-public class SimulatePipelineResponse extends ActionResponse implements ToXContent {
+public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject {
     private String pipelineId;
     private boolean verbose;
     private List<SimulateDocumentResult> results;
@@ -88,11 +88,13 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startArray(Fields.DOCUMENTS);
         for (SimulateDocumentResult response : results) {
             response.toXContent(builder, params);
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
 
@@ -22,7 +22,7 @@ package org.elasticsearch.action.search;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;
 
@@ -31,7 +31,7 @@ import java.io.IOException;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;
 
-public class ClearScrollResponse extends ActionResponse implements StatusToXContent {
+public class ClearScrollResponse extends ActionResponse implements StatusToXContentObject {
 
     private boolean succeeded;
     private int numFreed;
@@ -66,8 +66,10 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.field(Fields.SUCCEEDED, succeeded);
         builder.field(Fields.NUMFREED, numFreed);
+        builder.endObject();
         return builder;
     }
 
@@ -89,5 +91,4 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
         static final String SUCCEEDED = "succeeded";
         static final String NUMFREED = "num_freed";
     }
-
 }
@@ -23,12 +23,12 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -37,7 +37,7 @@ import java.util.Iterator;
 /**
  * A multi search response.
  */
-public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContent {
+public class MultiSearchResponse extends ActionResponse implements Iterable<MultiSearchResponse.Item>, ToXContentObject {
 
     /**
      * A search response item, holding the actual search response, or an error message if it failed.
@@ -151,6 +151,7 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startArray(Fields.RESPONSES);
         for (Item item : items) {
             builder.startObject();
@@ -158,32 +159,23 @@ public class MultiSearchResponse extends ActionResponse implements Iterable<Mult
                 ElasticsearchException.renderException(builder, params, item.getFailure());
                 builder.field(Fields.STATUS, ExceptionsHelper.status(item.getFailure()).getStatus());
             } else {
-                item.getResponse().toXContent(builder, params);
+                item.getResponse().innerToXContent(builder, params);
                 builder.field(Fields.STATUS, item.getResponse().status().getStatus());
             }
             builder.endObject();
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
 
     static final class Fields {
         static final String RESPONSES = "responses";
         static final String STATUS = "status";
-        static final String ERROR = "error";
-        static final String ROOT_CAUSE = "root_cause";
     }
 
     @Override
     public String toString() {
-        try {
-            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
-            builder.startObject();
-            toXContent(builder, EMPTY_PARAMS);
-            builder.endObject();
-            return builder.string();
-        } catch (IOException e) {
-            return "{ \"error\" : \"" + e.getMessage() + "\"}";
-        }
+        return Strings.toString(this);
     }
 }
@@ -25,7 +25,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.RestActions;
@@ -44,7 +44,7 @@ import static org.elasticsearch.search.internal.InternalSearchResponse.readInter
 /**
  * A response of a search request.
  */
-public class SearchResponse extends ActionResponse implements StatusToXContent {
+public class SearchResponse extends ActionResponse implements StatusToXContentObject {
 
     private InternalSearchResponse internalResponse;
 
@@ -181,6 +181,13 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        innerToXContent(builder, params);
+        builder.endObject();
+        return builder;
+    }
+
+    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
         if (scrollId != null) {
             builder.field(Fields._SCROLL_ID, scrollId);
         }
@@ -231,6 +238,6 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
 
     @Override
     public String toString() {
-        return Strings.toString(this, true);
+        return Strings.toString(this);
     }
 }
@@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.shard.ShardId;
@@ -250,7 +251,7 @@ public class ReplicationResponse extends ActionResponse {
         return shardInfo;
     }
 
-    public static class Failure implements ShardOperationFailedException, ToXContent {
+    public static class Failure implements ShardOperationFailedException, ToXContentObject {
 
         private static final String _INDEX = "_index";
         private static final String _SHARD = "_shard";
@@ -24,14 +24,14 @@ import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Iterator;
 
-public class MultiTermVectorsResponse extends ActionResponse implements Iterable<MultiTermVectorsItemResponse>, ToXContent {
+public class MultiTermVectorsResponse extends ActionResponse implements Iterable<MultiTermVectorsItemResponse>, ToXContentObject {
 
     /**
      * Represents a failure.
@@ -124,6 +124,7 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.startArray(Fields.DOCS);
         for (MultiTermVectorsItemResponse response : responses) {
             if (response.isFailed()) {
@@ -136,12 +137,11 @@ public class MultiTermVectorsResponse extends ActionResponse implements Iterable
                 builder.endObject();
             } else {
                 TermVectorsResponse getResponse = response.getResponse();
-                builder.startObject();
                 getResponse.toXContent(builder, params);
-                builder.endObject();
             }
         }
         builder.endArray();
+        builder.endObject();
         return builder;
     }
 
@@ -36,7 +36,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 
@@ -46,7 +46,7 @@ import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.Set;
 
-public class TermVectorsResponse extends ActionResponse implements ToXContent {
+public class TermVectorsResponse extends ActionResponse implements ToXContentObject {
 
     private static class FieldStrings {
         // term statistics strings
@@ -174,6 +174,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent {
         assert index != null;
         assert type != null;
         assert id != null;
+        builder.startObject();
         builder.field(FieldStrings._INDEX, index);
         builder.field(FieldStrings._TYPE, type);
         if (!isArtificial()) {
@@ -182,15 +183,15 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent {
         builder.field(FieldStrings._VERSION, docVersion);
         builder.field(FieldStrings.FOUND, isExists());
         builder.field(FieldStrings.TOOK, tookInMillis);
-        if (!isExists()) {
-            return builder;
-        }
-        builder.startObject(FieldStrings.TERM_VECTORS);
-        final CharsRefBuilder spare = new CharsRefBuilder();
-        Fields theFields = getFields();
-        Iterator<String> fieldIter = theFields.iterator();
-        while (fieldIter.hasNext()) {
-            buildField(builder, spare, theFields, fieldIter);
-        }
+        if (isExists()) {
+            builder.startObject(FieldStrings.TERM_VECTORS);
+            final CharsRefBuilder spare = new CharsRefBuilder();
+            Fields theFields = getFields();
+            Iterator<String> fieldIter = theFields.iterator();
+            while (fieldIter.hasNext()) {
+                buildField(builder, spare, theFields, fieldIter);
+            }
+            builder.endObject();
+        }
         builder.endObject();
         return builder;
@@ -87,8 +87,8 @@ public class UpdateResponse extends DocWriteResponse {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        super.toXContent(builder, params);
+    public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
+        super.innerToXContent(builder, params);
         if (getGetResult() != null) {
             builder.startObject(Fields.GET);
             getGetResult().toXContentEmbedded(builder, params);
@@ -30,16 +30,16 @@ import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.StringHelper;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cli.ExitCodes;
 import org.elasticsearch.cli.Terminal;
 import org.elasticsearch.cli.UserException;
 import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.inject.CreationException;
-import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.logging.LogConfigurator;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.IfConfig;
+import org.elasticsearch.common.settings.KeyStoreWrapper;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.env.Environment;
@@ -207,6 +207,9 @@ final class Bootstrap {
             throw new BootstrapException(e);
         }
 
+        // Log ifconfig output before SecurityManager is installed
+        IfConfig.logIfNecessary();
+
         // install SM after natives, shutdown hooks, etc.
         try {
             Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
@@ -224,13 +227,36 @@ final class Bootstrap {
         };
     }
 
-    private static Environment initialEnvironment(boolean foreground, Path pidFile, Settings initialSettings) {
+    private static KeyStoreWrapper loadKeyStore(Environment initialEnv) throws BootstrapException {
+        final KeyStoreWrapper keystore;
+        try {
+            keystore = KeyStoreWrapper.load(initialEnv.configFile());
+        } catch (IOException e) {
+            throw new BootstrapException(e);
+        }
+        if (keystore == null) {
+            return null; // no keystore
+        }
+
+        try {
+            keystore.decrypt(new char[0] /* TODO: read password from stdin */);
+        } catch (Exception e) {
+            throw new BootstrapException(e);
+        }
+        return keystore;
+    }
+
+    private static Environment createEnvironment(boolean foreground, Path pidFile,
+                                                 KeyStoreWrapper keystore, Settings initialSettings) {
         Terminal terminal = foreground ? Terminal.DEFAULT : null;
         Settings.Builder builder = Settings.builder();
         if (pidFile != null) {
             builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
         }
         builder.put(initialSettings);
+        if (keystore != null) {
+            builder.setKeyStore(keystore);
+        }
         return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap());
     }
 
@@ -261,7 +287,7 @@ final class Bootstrap {
             final boolean foreground,
             final Path pidFile,
             final boolean quiet,
-            final Settings initialSettings) throws BootstrapException, NodeValidationException, UserException {
+            final Environment initialEnv) throws BootstrapException, NodeValidationException, UserException {
         // Set the system property before anything has a chance to trigger its use
         initLoggerPrefix();
 
@@ -271,7 +297,8 @@ final class Bootstrap {
 
         INSTANCE = new Bootstrap();
 
-        Environment environment = initialEnvironment(foreground, pidFile, initialSettings);
+        final KeyStoreWrapper keystore = loadKeyStore(initialEnv);
+        Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings());
         try {
             LogConfigurator.configure(environment);
         } catch (IOException e) {
@ -309,6 +336,13 @@ final class Bootstrap {
|
|||
|
||||
INSTANCE.setup(true, environment);
|
||||
|
||||
try {
|
||||
// any secure settings must be read during node construction
|
||||
IOUtils.close(keystore);
|
||||
} catch (IOException e) {
|
||||
throw new BootstrapException(e);
|
||||
}
|
||||
|
||||
INSTANCE.start();
|
||||
|
||||
if (closeStandardStreams) {
|
||||
|
|
|
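Read together, the Bootstrap hunks above reorder startup around the keystore: load and decrypt it first, fold it into the environment, then drop it once the node has been constructed. A condensed sketch of that ordering, using only names from the diff above (not literal code from one place in the file):

        // Condensed sketch of the new bootstrap ordering introduced above.
        final KeyStoreWrapper keystore = loadKeyStore(initialEnv);       // load + decrypt (empty password for now)
        Environment environment = createEnvironment(foreground, pidFile, keystore, initialEnv.settings());
        INSTANCE.setup(true, environment);                               // secure settings are read during node construction
        IOUtils.close(keystore);                                         // afterwards the decrypted material can be dropped
        INSTANCE.start();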
@ -26,7 +26,7 @@ import java.nio.file.Path;
 * during bootstrap should explicitly declare the checked exceptions that they can throw, rather
 * than declaring the top-level checked exception {@link Exception}. This exception exists to wrap
 * these checked exceptions so that
 * {@link Bootstrap#init(boolean, Path, boolean, org.elasticsearch.common.settings.Settings)}
 * {@link Bootstrap#init(boolean, Path, boolean, org.elasticsearch.env.Environment)}
 * does not have to declare all of these checked exceptions.
 */
class BootstrapException extends Exception {

@ -111,16 +111,16 @@ class Elasticsearch extends EnvironmentAwareCommand {
        final boolean quiet = options.has(quietOption);

        try {
            init(daemonize, pidFile, quiet, env.settings());
            init(daemonize, pidFile, quiet, env);
        } catch (NodeValidationException e) {
            throw new UserException(ExitCodes.CONFIG, e.getMessage());
        }
    }

    void init(final boolean daemonize, final Path pidFile, final boolean quiet, Settings initialSettings)
    void init(final boolean daemonize, final Path pidFile, final boolean quiet, Environment initialEnv)
            throws NodeValidationException, UserException {
        try {
            Bootstrap.init(!daemonize, pidFile, quiet, initialSettings);
            Bootstrap.init(!daemonize, pidFile, quiet, initialEnv);
        } catch (BootstrapException | RuntimeException e) {
            // format exceptions to the console in a special way
            // to avoid 2MB stacktraces from guice, etc.

@ -27,6 +27,7 @@ import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.nio.charset.Charset;
import java.util.Locale;

/**
 * A Terminal wraps access to reading input and writing output for a cli.

@ -92,6 +93,26 @@ public abstract class Terminal {
        }
    }

    /**
     * Prompt for a yes or no answer from the user. This method will loop until 'y' or 'n'
     * (or the default empty value) is entered.
     */
    public final boolean promptYesNo(String prompt, boolean defaultYes) {
        String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]";
        while (true) {
            String answer = readText(prompt + answerPrompt).toLowerCase(Locale.ROOT);
            if (answer.isEmpty()) {
                return defaultYes;
            }
            boolean answerYes = answer.equals("y");
            if (answerYes == false && answer.equals("n") == false) {
                println("Did not understand answer '" + answer + "'");
                continue;
            }
            return answerYes;
        }
    }

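promptYesNo loops until it gets 'y', 'n', or an empty answer, so callers get a plain boolean back. A minimal usage sketch from inside a command's execute method, assuming an interactive console; promptYesNo and println are taken from this same change:

        // Sketch: bail out unless the user explicitly confirms (default is 'no').
        if (terminal.promptYesNo("Overwrite the existing file?", false) == false) {
            terminal.println("Aborted.");
            return;
        }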
    private static class ConsoleTerminal extends Terminal {

        private static final Console CONSOLE = System.console();

@ -545,6 +545,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
            throw new IllegalArgumentException("mappings are not allowed when shrinking indices" +
                ", all mappings are copied from the source index");
        }

        if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
            // this method applies all necessary checks, i.e. if the target shards are less than the source shards
            // or if the source shards are divisible by the number of target shards

@ -588,9 +589,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
            .put("index.allocation.max_retries", 1)
            // now copy all similarity / analysis settings - this overrides all settings from the user unless they
            // want to add extra settings
            .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())
            .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion())
            .put(sourceMetaData.getSettings().filter(analysisSimilarityPredicate))
            .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName())
            .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID());
        if (sourceMetaData.getMinimumCompatibleVersion() != null) {
            indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, sourceMetaData.getMinimumCompatibleVersion());
        }
    }

}

@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

@ -857,26 +858,17 @@ public class Strings {
    }

    /**
     * Return a {@link String} that is the json representation of the provided
     * {@link ToXContent}.
     * Return a {@link String} that is the json representation of the provided {@link ToXContent}.
     * Wraps the output into an anonymous object.
     */
    public static String toString(ToXContent toXContent) {
        return toString(toXContent, false);
    }

    /**
     * Return a {@link String} that is the json representation of the provided
     * {@link ToXContent}.
     * @param wrapInObject set this to true if the ToXContent instance expects to be inside an object
     */
    public static String toString(ToXContent toXContent, boolean wrapInObject) {
        try {
            XContentBuilder builder = JsonXContent.contentBuilder();
            if (wrapInObject) {
            if (toXContent.isFragment()) {
                builder.startObject();
            }
            toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
            if (wrapInObject) {
            if (toXContent.isFragment()) {
                builder.endObject();
            }
            return builder.string();
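The wrapInObject flag is gone; wrapping is now decided by the value itself through isFragment(). A small sketch under that assumption (the lambda relies on ToXContent having a single abstract method, which holds for the interface as shown later in this diff):

        ToXContent fragment = (builder, params) -> builder.field("took", 3);
        String json = Strings.toString(fragment);   // fragments get wrapped: {"took":3}
        // A ToXContentObject (isFragment() == false) would be emitted without the extra wrapping.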
@ -34,17 +34,17 @@ import java.util.Locale;
/**
 * Simple class to log {@code ifconfig}-style output at DEBUG logging.
 */
final class IfConfig {
public final class IfConfig {

    private static final Logger logger = Loggers.getLogger(IfConfig.class);
    private static final String INDENT = " ";

    /** log interface configuration at debug level, if it's enabled */
    static void logIfNecessary() {
    public static void logIfNecessary() {
        if (logger.isDebugEnabled()) {
            try {
                doLogging();
            } catch (IOException | SecurityException e) {
            } catch (IOException e) {
                logger.warn("unable to gather network information", e);
            }
        }

@ -90,7 +90,6 @@ public class NetworkService extends AbstractComponent {

    public NetworkService(Settings settings, List<CustomNameResolver> customNameResolvers) {
        super(settings);
        IfConfig.logIfNecessary();
        this.customNameResolvers = customNameResolvers;
    }

@ -0,0 +1,89 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.env.Environment;

/**
 * A subcommand for the keystore cli which adds a string setting.
 */
class AddStringKeyStoreCommand extends EnvironmentAwareCommand {

    private final OptionSpec<Void> stdinOption;
    private final OptionSpec<Void> forceOption;
    private final OptionSpec<String> arguments;

    AddStringKeyStoreCommand() {
        super("Add a string setting to the keystore");
        this.stdinOption = parser.acceptsAll(Arrays.asList("x", "stdin"), "Read setting value from stdin");
        this.forceOption = parser.acceptsAll(Arrays.asList("f", "force"), "Overwrite existing setting without prompting");
        this.arguments = parser.nonOptions("setting name");
    }

    // pkg private so tests can manipulate
    InputStream getStdin() {
        return System.in;
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
        KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile());
        if (keystore == null) {
            throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one.");
        }

        keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */);

        String setting = arguments.value(options);
        if (keystore.getSettings().contains(setting) && options.has(forceOption) == false) {
            if (terminal.promptYesNo("Setting " + setting + " already exists. Overwrite?", false) == false) {
                terminal.println("Exiting without modifying keystore.");
                return;
            }
        }

        final char[] value;
        if (options.has(stdinOption)) {
            BufferedReader stdinReader = new BufferedReader(new InputStreamReader(getStdin(), StandardCharsets.UTF_8));
            value = stdinReader.readLine().toCharArray();
        } else {
            value = terminal.readSecret("Enter value for " + setting + ": ");
        }

        try {
            keystore.setStringSetting(setting, value);
        } catch (IllegalArgumentException e) {
            throw new UserException(ExitCodes.DATA_ERROR, "String value must contain only ASCII");
        }
        keystore.save(env.configFile());
    }
}

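Because getStdin() is package-private "so tests can manipulate", the stdin path can be exercised without a console. A hedged sketch of how a same-package test might drive it; the anonymous subclass and the ByteArrayInputStream wiring are illustrative, not part of this change:

        // Illustrative test-style sketch: feed the value through the overridable stdin hook.
        AddStringKeyStoreCommand command = new AddStringKeyStoreCommand() {
            @Override
            InputStream getStdin() {
                return new ByteArrayInputStream("s3cr3t\n".getBytes(StandardCharsets.UTF_8));
            }
        };
        // Invoking the command with the -x/--stdin flag would then read "s3cr3t" as the value.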
@ -0,0 +1,61 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.nio.file.Files;
import java.nio.file.Path;

import joptsimple.OptionSet;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.env.Environment;

/**
 * A subcommand for the keystore cli to create a new keystore.
 */
class CreateKeyStoreCommand extends EnvironmentAwareCommand {

    CreateKeyStoreCommand() {
        super("Creates a new elasticsearch keystore");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
        Path keystoreFile = KeyStoreWrapper.keystorePath(env.configFile());
        if (Files.exists(keystoreFile)) {
            if (terminal.promptYesNo("An elasticsearch keystore already exists. Overwrite?", false) == false) {
                terminal.println("Exiting without creating keystore.");
                return;
            }
        }

        char[] password = new char[0]; // terminal.readSecret("Enter passphrase (empty for no passphrase): ");
        /* TODO: uncomment when entering passwords on startup is supported
        char[] passwordRepeat = terminal.readSecret("Enter same passphrase again: ");
        if (Arrays.equals(password, passwordRepeat) == false) {
            throw new UserException(ExitCodes.DATA_ERROR, "Passphrases are not equal, exiting.");
        }*/

        KeyStoreWrapper keystore = KeyStoreWrapper.create(password);
        keystore.save(env.configFile());
        terminal.println("Created elasticsearch keystore in " + env.configFile());
    }
}

@ -0,0 +1,41 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import org.elasticsearch.cli.MultiCommand;
import org.elasticsearch.cli.Terminal;

/**
 * A cli tool for managing secrets in the elasticsearch keystore.
 */
public class KeyStoreCli extends MultiCommand {

    private KeyStoreCli() {
        super("A tool for managing settings stored in the elasticsearch keystore");
        subcommands.put("create", new CreateKeyStoreCommand());
        subcommands.put("list", new ListKeyStoreCommand());
        subcommands.put("add", new AddStringKeyStoreCommand());
        subcommands.put("remove", new RemoveSettingKeyStoreCommand());
    }

    public static void main(String[] args) throws Exception {
        exit(new KeyStoreCli().main(args, Terminal.DEFAULT));
    }
}

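The four subcommands registered above are the tool's whole surface area. A hedged sketch of invoking it programmatically; the argument array is illustrative, and note that the static entry point ultimately calls exit():

        // Sketch: equivalent of running the keystore tool with the 'list' subcommand.
        KeyStoreCli.main(new String[] {"list"});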
@ -0,0 +1,278 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.security.auth.DestroyFailedException;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.nio.CharBuffer;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFilePermissions;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Locale;
import java.util.Set;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.BufferedChecksumIndexInput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.SetOnce;

/**
 * A wrapper around a Java KeyStore which supplements the keystore with extra metadata.
 *
 * Loading a keystore has 2 phases. First, call {@link #load(Path)}. Then call
 * {@link #decrypt(char[])} with the keystore password, or an empty char array if
 * {@link #hasPassword()} is {@code false}. Loading and decrypting should happen
 * in a single thread. Once decrypted, keys may be read with the wrapper in
 * multiple threads.
 */
public class KeyStoreWrapper implements Closeable {

    /** The name of the keystore file to read and write. */
    private static final String KEYSTORE_FILENAME = "elasticsearch.keystore";

    /** The version of the metadata written before the keystore data. */
    private static final int FORMAT_VERSION = 1;

    /** The keystore type for a newly created keystore. */
    private static final String NEW_KEYSTORE_TYPE = "PKCS12";

    /** The algorithm used to store password for a newly created keystore. */
    private static final String NEW_KEYSTORE_SECRET_KEY_ALGO = "PBE";//"PBEWithHmacSHA256AndAES_128";

    /** An encoder to check whether string values are ascii. */
    private static final CharsetEncoder ASCII_ENCODER = StandardCharsets.US_ASCII.newEncoder();

    /** True iff the keystore has a password needed to read. */
    private final boolean hasPassword;

    /** The type of the keystore, as passed to {@link java.security.KeyStore#getInstance(String)} */
    private final String type;

    /** A factory necessary for constructing instances of secrets in a {@link KeyStore}. */
    private final SecretKeyFactory secretFactory;

    /** The raw bytes of the encrypted keystore. */
    private final byte[] keystoreBytes;

    /** The loaded keystore. See {@link #decrypt(char[])}. */
    private final SetOnce<KeyStore> keystore = new SetOnce<>();

    /** The password for the keystore. See {@link #decrypt(char[])}. */
    private final SetOnce<KeyStore.PasswordProtection> keystorePassword = new SetOnce<>();

    /** The setting names contained in the loaded keystore. */
    private final Set<String> settingNames = new HashSet<>();

    private KeyStoreWrapper(boolean hasPassword, String type, String secretKeyAlgo, byte[] keystoreBytes) {
        this.hasPassword = hasPassword;
        this.type = type;
        try {
            secretFactory = SecretKeyFactory.getInstance(secretKeyAlgo);
        } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException(e);
        }
        this.keystoreBytes = keystoreBytes;
    }

    /** Returns a path representing the ES keystore in the given config dir. */
    static Path keystorePath(Path configDir) {
        return configDir.resolve(KEYSTORE_FILENAME);
    }

    /** Constructs a new keystore with the given password. */
    static KeyStoreWrapper create(char[] password) throws Exception {
        KeyStoreWrapper wrapper = new KeyStoreWrapper(password.length != 0, NEW_KEYSTORE_TYPE, NEW_KEYSTORE_SECRET_KEY_ALGO, null);
        KeyStore keyStore = KeyStore.getInstance(NEW_KEYSTORE_TYPE);
        keyStore.load(null, null);
        wrapper.keystore.set(keyStore);
        wrapper.keystorePassword.set(new KeyStore.PasswordProtection(password));
        return wrapper;
    }

    /**
     * Loads information about the Elasticsearch keystore from the provided config directory.
     *
     * {@link #decrypt(char[])} must be called before reading or writing any entries.
     * Returns {@code null} if no keystore exists.
     */
    public static KeyStoreWrapper load(Path configDir) throws IOException {
        Path keystoreFile = keystorePath(configDir);
        if (Files.exists(keystoreFile) == false) {
            return null;
        }

        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
        try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) {
            ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput);
            CodecUtil.checkHeader(input, KEYSTORE_FILENAME, FORMAT_VERSION, FORMAT_VERSION);
            byte hasPasswordByte = input.readByte();
            boolean hasPassword = hasPasswordByte == 1;
            if (hasPassword == false && hasPasswordByte != 0) {
                throw new IllegalStateException("hasPassword boolean is corrupt: "
                    + String.format(Locale.ROOT, "%02x", hasPasswordByte));
            }
            String type = input.readString();
            String secretKeyAlgo = input.readString();
            byte[] keystoreBytes = new byte[input.readInt()];
            input.readBytes(keystoreBytes, 0, keystoreBytes.length);
            CodecUtil.checkFooter(input);
            return new KeyStoreWrapper(hasPassword, type, secretKeyAlgo, keystoreBytes);
        }
    }

    /** Returns true iff {@link #decrypt(char[])} has been called. */
    public boolean isLoaded() {
        return keystore.get() != null;
    }

    /** Return true iff calling {@link #decrypt(char[])} requires a non-empty password. */
    public boolean hasPassword() {
        return hasPassword;
    }

    /**
     * Decrypts the underlying java keystore.
     *
     * This may only be called once. The provided password will be zeroed out.
     */
    public void decrypt(char[] password) throws GeneralSecurityException, IOException {
        if (keystore.get() != null) {
            throw new IllegalStateException("Keystore has already been decrypted");
        }
        keystore.set(KeyStore.getInstance(type));
        try (InputStream in = new ByteArrayInputStream(keystoreBytes)) {
            keystore.get().load(in, password);
        } finally {
            Arrays.fill(keystoreBytes, (byte)0);
        }

        keystorePassword.set(new KeyStore.PasswordProtection(password));
        Arrays.fill(password, '\0');

        // convert keystore aliases enum into a set for easy lookup
        Enumeration<String> aliases = keystore.get().aliases();
        while (aliases.hasMoreElements()) {
            settingNames.add(aliases.nextElement());
        }
    }

    /** Write the keystore to the given config directory. */
    void save(Path configDir) throws Exception {
        char[] password = this.keystorePassword.get().getPassword();

        SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
        // write to tmp file first, then overwrite
        String tmpFile = KEYSTORE_FILENAME + ".tmp";
        try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) {
            CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION);
            output.writeByte(password.length == 0 ? (byte)0 : (byte)1);
            output.writeString(type);
            output.writeString(secretFactory.getAlgorithm());

            ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream();
            keystore.get().store(keystoreBytesStream, password);
            byte[] keystoreBytes = keystoreBytesStream.toByteArray();
            output.writeInt(keystoreBytes.length);
            output.writeBytes(keystoreBytes, keystoreBytes.length);
            CodecUtil.writeFooter(output);
        }

        Path keystoreFile = keystorePath(configDir);
        Files.move(configDir.resolve(tmpFile), keystoreFile, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
        PosixFileAttributeView attrs = Files.getFileAttributeView(keystoreFile, PosixFileAttributeView.class);
        if (attrs != null) {
            // don't rely on umask: ensure the keystore has minimal permissions
            attrs.setPermissions(PosixFilePermissions.fromString("rw-------"));
        }
    }

    /** Returns the names of all settings in this keystore. */
    public Set<String> getSettings() {
        return settingNames;
    }

    // TODO: make settings accessible only to code that registered the setting
    /** Retrieve a string setting. The {@link SecureString} should be closed once it is used. */
    SecureString getStringSetting(String setting) throws GeneralSecurityException {
        KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get());
        if (entry instanceof KeyStore.SecretKeyEntry == false) {
            throw new IllegalStateException("Secret setting " + setting + " is not a string");
        }
        // TODO: only allow getting a setting once?
        KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry;
        PBEKeySpec keySpec = (PBEKeySpec) secretFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class);
        SecureString value = new SecureString(keySpec.getPassword());
        keySpec.clearPassword();
        return value;
    }

    /**
     * Set a string setting.
     *
     * @throws IllegalArgumentException if the value is not ASCII
     */
    void setStringSetting(String setting, char[] value) throws GeneralSecurityException {
        if (ASCII_ENCODER.canEncode(CharBuffer.wrap(value)) == false) {
            throw new IllegalArgumentException("Value must be ascii");
        }
        SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec(value));
        keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get());
        settingNames.add(setting);
    }

    /** Remove the given setting from the keystore. */
    void remove(String setting) throws KeyStoreException {
        keystore.get().deleteEntry(setting);
        settingNames.remove(setting);
    }

    @Override
    public void close() throws IOException {
        try {
            if (keystorePassword.get() != null) {
                keystorePassword.get().destroy();
            }
        } catch (DestroyFailedException e) {
            throw new IOException(e);
        }
    }
}

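The class javadoc above prescribes a two-phase lifecycle: load, then decrypt, in a single thread. A minimal well-behaved caller, sketched under the assumption of a password-less keystore; the config path is hypothetical and the surrounding method is assumed to declare the checked exceptions:

        // Sketch: load metadata, decrypt with the empty password, read names, then wipe.
        Path configDir = Paths.get("/etc/elasticsearch");      // hypothetical location
        KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir);
        if (keystore != null) {
            keystore.decrypt(new char[0]);                     // empty password when hasPassword() == false
            for (String name : keystore.getSettings()) {
                System.out.println(name);                      // names only; values stay encrypted entries
            }
            keystore.close();                                  // destroys the in-memory password
        }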
@ -0,0 +1,58 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import joptsimple.OptionSet;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.env.Environment;

/**
 * A subcommand for the keystore cli to list all settings in the keystore.
 */
class ListKeyStoreCommand extends EnvironmentAwareCommand {

    ListKeyStoreCommand() {
        super("List entries in the keystore");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
        KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile());
        if (keystore == null) {
            throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one.");
        }

        keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */);

        List<String> sortedEntries = new ArrayList<>(keystore.getSettings());
        Collections.sort(sortedEntries);
        for (String entry : sortedEntries) {
            terminal.println(entry);
        }
    }
}

@ -0,0 +1,66 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.util.List;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.env.Environment;

/**
 * A subcommand for the keystore cli to remove a setting.
 */
class RemoveSettingKeyStoreCommand extends EnvironmentAwareCommand {

    private final OptionSpec<String> arguments;

    RemoveSettingKeyStoreCommand() {
        super("Remove a setting from the keystore");
        arguments = parser.nonOptions("setting names");
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
        List<String> settings = arguments.values(options);
        if (settings.isEmpty()) {
            throw new UserException(ExitCodes.USAGE, "Must supply at least one setting to remove");
        }

        KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile());
        if (keystore == null) {
            throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one.");
        }

        keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */);

        for (String setting : arguments.values(options)) {
            if (keystore.getSettings().contains(setting) == false) {
                throw new UserException(ExitCodes.CONFIG, "Setting [" + setting + "] does not exist in the keystore.");
            }
            keystore.remove(setting);
        }
        keystore.save(env.configFile());
    }
}

@ -0,0 +1,112 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.security.GeneralSecurityException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

/**
 * A secure setting.
 *
 * This class allows access to settings from the Elasticsearch keystore.
 */
public abstract class SecureSetting<T> extends Setting<T> {
    private static final Set<Property> ALLOWED_PROPERTIES = new HashSet<>(
        Arrays.asList(Property.Deprecated, Property.Shared)
    );

    private SecureSetting(String key, Setting.Property... properties) {
        super(key, (String)null, null, properties);
        assert assertAllowedProperties(properties);
    }

    private boolean assertAllowedProperties(Setting.Property... properties) {
        for (Setting.Property property : properties) {
            if (ALLOWED_PROPERTIES.contains(property) == false) {
                return false;
            }
        }
        return true;
    }

    @Override
    public String getDefaultRaw(Settings settings) {
        throw new UnsupportedOperationException("secure settings are not strings");
    }

    @Override
    public T getDefault(Settings settings) {
        throw new UnsupportedOperationException("secure settings are not strings");
    }

    @Override
    public String getRaw(Settings settings) {
        throw new UnsupportedOperationException("secure settings are not strings");
    }

    @Override
    public T get(Settings settings) {
        checkDeprecation(settings);
        final KeyStoreWrapper keystore = Objects.requireNonNull(settings.getKeyStore());
        if (keystore.getSettings().contains(getKey()) == false) {
            return getFallback(settings);
        }
        try {
            return getSecret(keystore);
        } catch (GeneralSecurityException e) {
            throw new RuntimeException("failed to read secure setting " + getKey(), e);
        }
    }

    /** Returns the secret setting from the keystore. */
    abstract T getSecret(KeyStoreWrapper keystore) throws GeneralSecurityException;

    /** Returns the value from a fallback setting. Returns null if no fallback exists. */
    abstract T getFallback(Settings settings);

    // TODO: override toXContent

    /**
     * A setting which contains a sensitive string.
     *
     * This may be any sensitive string, e.g. a username, a password, an auth token, etc.
     */
    public static SecureSetting<SecureString> stringSetting(String name, Setting<String> fallback, Property... properties) {
        return new SecureSetting<SecureString>(name, properties) {
            @Override
            protected SecureString getSecret(KeyStoreWrapper keystore) throws GeneralSecurityException {
                return keystore.getStringSetting(getKey());
            }
            @Override
            SecureString getFallback(Settings settings) {
                if (fallback != null) {
                    return new SecureString(fallback.get(settings).toCharArray());
                }
                return null;
            }
        };
    }

}

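Declaring and reading a secure setting end to end: a sketch combining stringSetting with the Settings/keystore wiring shown elsewhere in this change. Setting.simpleString and the loadedKeystore variable are assumptions, and the keystore must already have been loaded and decrypted:

        Setting<String> insecure = Setting.simpleString("cloud.secret.insecure");        // assumed fallback setting
        SecureSetting<SecureString> secret = SecureSetting.stringSetting("cloud.secret", insecure);

        Settings.Builder builder = Settings.builder();
        builder.setKeyStore(loadedKeystore);      // a KeyStoreWrapper that has been load()ed and decrypt()ed
        try (SecureString value = secret.get(builder.build())) {
            // use value; closing it zeroes the underlying chars
        }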
@ -0,0 +1,105 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.util.Arrays;
import java.util.Objects;

/**
 * A String implementation which allows clearing the underlying char array.
 */
public final class SecureString implements CharSequence, AutoCloseable {

    private char[] chars;

    /**
     * Constructs a new SecureString which controls the passed in char array.
     *
     * Note: When this instance is closed, the array will be zeroed out.
     */
    public SecureString(char[] chars) {
        this.chars = Objects.requireNonNull(chars);
    }

    /** Constant time equality to avoid potential timing attacks. */
    @Override
    public synchronized boolean equals(Object o) {
        ensureNotClosed();
        if (this == o) return true;
        if (o == null || o instanceof CharSequence == false) return false;
        CharSequence that = (CharSequence) o;
        if (chars.length != that.length()) {
            return false;
        }

        int equals = 0;
        for (int i = 0; i < chars.length; i++) {
            equals |= chars[i] ^ that.charAt(i);
        }

        return equals == 0;
    }

    @Override
    public synchronized int hashCode() {
        return Arrays.hashCode(chars);
    }

    @Override
    public synchronized int length() {
        ensureNotClosed();
        return chars.length;
    }

    @Override
    public synchronized char charAt(int index) {
        ensureNotClosed();
        return chars[index];
    }

    @Override
    public SecureString subSequence(int start, int end) {
        throw new UnsupportedOperationException("Cannot get subsequence of SecureString");
    }

    /**
     * Convert to a {@link String}. This should only be used with APIs that do not take {@link CharSequence}.
     */
    @Override
    public synchronized String toString() {
        return new String(chars);
    }

    /**
     * Closes the string by clearing the underlying char array.
     */
    @Override
    public synchronized void close() {
        Arrays.fill(chars, '\0');
        chars = null;
    }

    /** Throw an exception if this string has been closed, indicating something is trying to access the data after being closed. */
    private void ensureNotClosed() {
        if (chars == null) {
            throw new IllegalStateException("SecureString has already been closed");
        }
    }
}

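Because SecureString is AutoCloseable and close() zeroes the array, try-with-resources gives the intended hygiene; note that equals() deliberately runs over the whole length so a comparison never leaks where it diverged. A short sketch (authenticate is a hypothetical consumer of a CharSequence):

        try (SecureString password = new SecureString("p4ssw0rd".toCharArray())) {
            authenticate(password);               // hypothetical; any CharSequence-taking API works
        } // close() wipes the chars; later reads throw IllegalStateException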
@ -274,7 +274,7 @@ public class Setting<T> extends ToXContentToBytes {
     * Returns the default value string representation for this setting.
     * @param settings a settings object for settings that has a default value depending on another setting if available
     */
    public final String getDefaultRaw(Settings settings) {
    public String getDefaultRaw(Settings settings) {
        return defaultValue.apply(settings);
    }

@ -282,7 +282,7 @@ public class Setting<T> extends ToXContentToBytes {
     * Returns the default value for this setting.
     * @param settings a settings object for settings that has a default value depending on another setting if available
     */
    public final T getDefault(Settings settings) {
    public T getDefault(Settings settings) {
        return parser.apply(getDefaultRaw(settings));
    }

@ -290,7 +290,7 @@ public class Setting<T> extends ToXContentToBytes {
     * Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
     */
    public boolean exists(Settings settings) {
        return settings.get(getKey()) != null;
        return settings.contains(getKey());
    }

    /**

@ -330,14 +330,19 @@ public class Setting<T> extends ToXContentToBytes {
     * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value.
     */
    public String getRaw(Settings settings) {
        checkDeprecation(settings);
        return settings.get(getKey(), defaultValue.apply(settings));
    }

    /** Logs a deprecation warning if the setting is deprecated and used. */
    protected void checkDeprecation(Settings settings) {
        // They're using the setting, so we need to tell them to stop
        if (this.isDeprecated() && this.exists(settings)) {
            // It would be convenient to show its replacement key, but replacement is often not so simple
            final DeprecationLogger deprecationLogger = new DeprecationLogger(Loggers.getLogger(getClass()));
            deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " +
                "See the breaking changes lists in the documentation for details", getKey());
                    "See the breaking changes lists in the documentation for details", getKey());
        }
        return settings.get(getKey(), defaultValue.apply(settings));
    }

    /**
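The extracted checkDeprecation hook fires whenever a setting is both marked deprecated and actually present. An illustrative sketch; simpleString is assumed from the wider Setting API rather than shown in this hunk:

        Setting<String> legacy = Setting.simpleString("node.legacy_name", Setting.Property.Deprecated);
        Settings settings = Settings.builder().put("node.legacy_name", "x").build();
        legacy.get(settings);   // getRaw() -> checkDeprecation() -> DeprecationLogger.deprecated(...)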
@ -19,6 +19,7 @@

package org.elasticsearch.common.settings;

import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;

@ -76,10 +77,29 @@ public final class Settings implements ToXContent {
    public static final Settings EMPTY = new Builder().build();
    private static final Pattern ARRAY_PATTERN = Pattern.compile("(.*)\\.\\d+$");

    /** The raw settings from the full key to raw string value. */
    private Map<String, String> settings;

    Settings(Map<String, String> settings) {
        this.settings = Collections.unmodifiableMap(settings);
    /** The keystore storage associated with these settings. */
    private KeyStoreWrapper keystore;

    Settings(Map<String, String> settings, KeyStoreWrapper keystore) {
        // we use a sorted map for consistent serialization when using getAsMap()
        this.settings = Collections.unmodifiableSortedMap(new TreeMap<>(settings));
        this.keystore = keystore;
    }

    /**
     * Retrieve the keystore that contains secure settings.
     */
    KeyStoreWrapper getKeyStore() {
        // pkg private so it can only be accessed by local subclasses of SecureSetting
        return keystore;
    }

    /** Returns true if the setting exists, false otherwise. */
    public boolean contains(String key) {
        return settings.containsKey(key) || keystore != null && keystore.getSettings().contains(key);
    }

    /**
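Note that contains() now also answers for keystore-only keys, which is what lets Setting#exists (changed above) treat secure and plain settings uniformly. A sketch, where loadedKeystore stands for a decrypted KeyStoreWrapper assumed to hold "secure.key":

        Settings.Builder builder = Settings.builder().put("plain.key", "value");
        builder.setKeyStore(loadedKeystore);                  // assumed: decrypted, holds "secure.key"
        Settings settings = builder.build();
        assert settings.contains("plain.key");
        assert settings.contains("secure.key");               // answered by keystore.getSettings()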
@ -185,16 +205,18 @@ public final class Settings implements ToXContent {

    /**
     * A settings object that is filtered (and the key prefix is removed) by the specified prefix.
     * Secure settings may not be accessed through the prefixed settings.
     */
    public Settings getByPrefix(String prefix) {
        return new Settings(new FilteredMap(this.settings, (k) -> k.startsWith(prefix), prefix));
        return new Settings(new FilteredMap(this.settings, (k) -> k.startsWith(prefix), prefix), null);
    }

    /**
     * Returns a new settings object that contains all setting of the current one filtered by the given settings key predicate.
     * Secure settings may not be accessed through a filter.
     */
    public Settings filter(Predicate<String> predicate) {
        return new Settings(new FilteredMap(this.settings, predicate, null));
        return new Settings(new FilteredMap(this.settings, predicate, null), null);
    }

    /**

@ -456,7 +478,7 @@ public final class Settings implements ToXContent {
        }
        Map<String, Settings> retVal = new LinkedHashMap<>();
        for (Map.Entry<String, Map<String, String>> entry : map.entrySet()) {
            retVal.put(entry.getKey(), new Settings(Collections.unmodifiableMap(entry.getValue())));
            retVal.put(entry.getKey(), new Settings(Collections.unmodifiableMap(entry.getValue()), keystore));
        }
        return Collections.unmodifiableMap(retVal);
    }

@ -591,6 +613,8 @@ public final class Settings implements ToXContent {
        // we use a sorted map for consistent serialization when using getAsMap()
        private final Map<String, String> map = new TreeMap<>();

        private SetOnce<KeyStoreWrapper> keystore = new SetOnce<>();

        private Builder() {

        }

@ -613,6 +637,14 @@ public final class Settings implements ToXContent {
            return map.get(key);
        }

        /** Sets the secret store for these settings. */
        public void setKeyStore(KeyStoreWrapper keystore) {
            if (keystore.isLoaded() == false) {
                throw new IllegalStateException("The keystore wrapper must already be loaded");
            }
            this.keystore.set(keystore);
        }

        /**
         * Puts tuples of key value pairs of settings. Simplified version instead of repeating calling
         * put for each one.

@ -1019,7 +1051,7 @@ public final class Settings implements ToXContent {
     * set on this builder.
     */
    public Settings build() {
        return new Settings(map);
        return new Settings(map, keystore.get());
    }
}

@ -24,7 +24,7 @@ import org.elasticsearch.rest.RestStatus;
 * Objects that can both render themselves as json/yaml/etc and can provide a {@link RestStatus} for their response. Usually should be
 * implemented by top level responses sent back to users from REST endpoints.
 */
public interface StatusToXContent extends ToXContent {
public interface StatusToXContentObject extends ToXContentObject {

    /**
     * Returns the REST status to make sure it is returned correctly
@ -26,6 +26,8 @@ import java.util.Map;

/**
 * An interface allowing to transfer an object to "XContent" using an {@link XContentBuilder}.
 * The output may or may not be a value object. Objects implementing {@link ToXContentObject} output a valid value
 * but those that don't may or may not require emitting a startObject and an endObject.
 */
public interface ToXContent {

@ -126,4 +128,8 @@ public interface ToXContent {
    }

    XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException;

    default boolean isFragment() {
        return true;
    }
}

@ -0,0 +1,34 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.xcontent;

/**
 * An interface allowing to transfer an object to "XContent" using an {@link XContentBuilder}.
 * The difference between {@link ToXContent} and {@link ToXContentObject} is that the former may output a fragment that
 * requires to start and end a new anonymous object externally, while the latter guarantees that what gets printed
 * out is fully valid syntax without any external addition.
 */
public interface ToXContentObject extends ToXContent {

    @Override
    default boolean isFragment() {
        return false;
    }
}

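The fragment/object split is the crux of this refactoring: ToXContent defaults isFragment() to true, ToXContentObject flips it to false, and serializers branch on it instead of a caller-supplied flag. A minimal implementer, sketched (the Health class and its field are illustrative):

        class Health implements ToXContentObject {
            @Override
            public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
                return builder.startObject().field("status", "green").endObject();
            }
        }
        // isFragment() is inherited as false, so XContentHelper.toXContent (below) emits it unwrapped.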
@ -378,19 +378,18 @@ public class XContentHelper {

    /**
     * Returns the bytes that represent the XContent output of the provided {@link ToXContent} object, using the provided
     * {@link XContentType}. Wraps the output into a new anonymous object depending on the value of the wrapInObject argument.
     * {@link XContentType}. Wraps the output into a new anonymous object.
     */
    public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, boolean wrapInObject) throws IOException {
    public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType) throws IOException {
        try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
            if (wrapInObject) {
            if (toXContent.isFragment()) {
                builder.startObject();
            }
            toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
            if (wrapInObject) {
            if (toXContent.isFragment()) {
                builder.endObject();
            }
            return builder.bytes();
        }
    }

}

@ -25,7 +25,7 @@ import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

@ -46,7 +46,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpect
import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
import static org.elasticsearch.index.get.GetField.readGetField;

public class GetResult implements Streamable, Iterable<GetField>, ToXContent {
public class GetResult implements Streamable, Iterable<GetField>, ToXContentObject {

    private static final String _INDEX = "_index";
    private static final String _TYPE = "_type";

@ -19,6 +19,7 @@

package org.elasticsearch.index.refresh;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@ -27,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

public class RefreshStats implements Streamable, ToXContent {

@ -34,18 +36,19 @@ public class RefreshStats implements Streamable, ToXContent {

    private long totalTimeInMillis;

    /**
     * Number of waiting refresh listeners.
     */
    private int listeners;

    public RefreshStats() {

    }

    public RefreshStats(long total, long totalTimeInMillis) {
    public RefreshStats(long total, long totalTimeInMillis, int listeners) {
        this.total = total;
        this.totalTimeInMillis = totalTimeInMillis;
    }

    public void add(long total, long totalTimeInMillis) {
        this.total += total;
        this.totalTimeInMillis += totalTimeInMillis;
        this.listeners = listeners;
    }

    public void add(RefreshStats refreshStats) {

@ -58,6 +61,7 @@ public class RefreshStats implements Streamable, ToXContent {
        }
        this.total += refreshStats.total;
        this.totalTimeInMillis += refreshStats.totalTimeInMillis;
        this.listeners += refreshStats.listeners;
    }

    /**

@ -81,31 +85,56 @@ public class RefreshStats implements Streamable, ToXContent {
        return new TimeValue(totalTimeInMillis);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.REFRESH);
        builder.field(Fields.TOTAL, total);
        builder.timeValueField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, totalTimeInMillis);
        builder.endObject();
        return builder;
    /**
     * The number of waiting refresh listeners.
     */
    public int getListeners() {
        return listeners;
    }

    static final class Fields {
        static final String REFRESH = "refresh";
        static final String TOTAL = "total";
        static final String TOTAL_TIME = "total_time";
        static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis";
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject("refresh");
        builder.field("total", total);
        builder.timeValueField("total_time_in_millis", "total_time", totalTimeInMillis);
        builder.field("listeners", listeners);
        builder.endObject();
        return builder;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        total = in.readVLong();
        totalTimeInMillis = in.readVLong();
        if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
            listeners = in.readVInt();
        } else {
            listeners = 0;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(total);
        out.writeVLong(totalTimeInMillis);
        if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
            out.writeVInt(listeners);
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != RefreshStats.class) {
            return false;
        }
        RefreshStats rhs = (RefreshStats) obj;
        return total == rhs.total
            && totalTimeInMillis == rhs.totalTimeInMillis
            && listeners == rhs.listeners;
    }

    @Override
    public int hashCode() {
        return Objects.hash(total, totalTimeInMillis, listeners);
    }
}

@@ -661,7 +661,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }
 
     public RefreshStats refreshStats() {
-        return new RefreshStats(refreshMetric.count(), TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()));
+        // Null refreshListeners means this shard doesn't support them so there can't be any.
+        int listeners = refreshListeners == null ? 0 : refreshListeners.pendingCount();
+        return new RefreshStats(refreshMetric.count(), TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()), listeners);
     }
 
     public FlushStats flushStats() {

@@ -932,8 +934,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 if (engine != null && flushEngine) {
                     engine.flushAndClose();
                 }
-            } finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times
-                IOUtils.close(engine);
+            } finally {
+                // playing safe here and close the engine even if the above succeeds - close can be called multiple times
+                // Also closing refreshListeners to prevent us from accumulating any more listeners
+                IOUtils.close(engine, refreshListeners);
                 indexShardOperationsLock.close();
             }
         }
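The refreshStats() change maps an absent feature to a zero count once, instead of forcing a null check on every caller. A minimal sketch of that null-tolerant accounting, with illustrative names only:

    class ShardStatsSketch {
        interface Listeners {
            int pendingCount();
        }

        private final Listeners refreshListeners; // null when the shard doesn't support them

        ShardStatsSketch(Listeners refreshListeners) {
            this.refreshListeners = refreshListeners;
        }

        int pendingRefreshListeners() {
            // "feature absent" is reported the same as "none pending"
            return refreshListeners == null ? 0 : refreshListeners.pendingCount();
        }
    }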
@@ -24,6 +24,7 @@ import org.apache.lucene.search.ReferenceManager;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.index.translog.Translog;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;

@@ -35,18 +36,26 @@ import static java.util.Objects.requireNonNull;
 
 /**
  * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from
  * {@link IndexShard} but kept here so it can be tested without standing up the entire thing.
+ *
+ * When {@link Closeable#close()}d it will no longer accept listeners and flush any existing listeners.
  */
-public final class RefreshListeners implements ReferenceManager.RefreshListener {
+public final class RefreshListeners implements ReferenceManager.RefreshListener, Closeable {
     private final IntSupplier getMaxRefreshListeners;
     private final Runnable forceRefresh;
     private final Executor listenerExecutor;
     private final Logger logger;
 
+    /**
+     * Is this closed? If true then we won't add more listeners and have flushed all pending listeners.
+     */
+    private volatile boolean closed = false;
     /**
      * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed
      * from it, rather, it is nulled and rebuilt when needed again. The (hopefully) rare entries that didn't make the current refresh cycle
      * are just added back to the new list. Both the reference and the contents are always modified while synchronized on {@code this}.
+     *
+     * We never set this to non-null while closed it {@code true}.
      */
     private volatile List<Tuple<Translog.Location, Consumer<Boolean>>> refreshListeners = null;
     /**

@@ -80,12 +89,17 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
             return true;
         }
         synchronized (this) {
-            if (refreshListeners == null) {
-                refreshListeners = new ArrayList<>();
+            List<Tuple<Translog.Location, Consumer<Boolean>>> listeners = refreshListeners;
+            if (listeners == null) {
+                if (closed) {
+                    throw new IllegalStateException("can't wait for refresh on a closed index");
+                }
+                listeners = new ArrayList<>();
+                refreshListeners = listeners;
             }
-            if (refreshListeners.size() < getMaxRefreshListeners.getAsInt()) {
+            if (listeners.size() < getMaxRefreshListeners.getAsInt()) {
                 // We have a free slot so register the listener
-                refreshListeners.add(new Tuple<>(location, listener));
+                listeners.add(new Tuple<>(location, listener));
                 return false;
             }
         }

@@ -95,12 +109,34 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
         return true;
     }
 
+    @Override
+    public void close() throws IOException {
+        List<Tuple<Translog.Location, Consumer<Boolean>>> oldListeners;
+        synchronized (this) {
+            oldListeners = refreshListeners;
+            refreshListeners = null;
+            closed = true;
+        }
+        // Fire any listeners we might have had
+        fireListeners(oldListeners);
+    }
+
     /**
      * Returns true if there are pending listeners.
      */
     public boolean refreshNeeded() {
-        // No need to synchronize here because we're doing a single volatile read
-        return refreshListeners != null;
+        // A null list doesn't need a refresh. If we're closed we don't need a refresh either.
+        return refreshListeners != null && false == closed;
+    }
+
+    /**
+     * The number of pending listeners.
+     */
+    public int pendingCount() {
+        List<Tuple<Translog.Location, Consumer<Boolean>>> listeners = refreshListeners;
+        // A null list means we haven't accumulated any listeners. Otherwise we need the size.
+        return listeners == null ? 0 : listeners.size();
     }
 
     /**

@@ -125,33 +161,25 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
 
     @Override
     public void afterRefresh(boolean didRefresh) throws IOException {
-        /*
-         * We intentionally ignore didRefresh here because our timing is a little off. It'd be a useful flag if we knew everything that made
+        /* We intentionally ignore didRefresh here because our timing is a little off. It'd be a useful flag if we knew everything that made
          * it into the refresh, but the way we snapshot the translog position before the refresh, things can sneak into the refresh that we
-         * don't know about.
-         */
+         * don't know about. */
         if (null == currentRefreshLocation) {
-            /*
-             * The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This
-             * usually happens during recovery. The next refresh cycle out to pick up this refresh.
-             */
+            /* The translog had an empty last write location at the start of the refresh so we can't alert anyone to anything. This
+             * usually happens during recovery. The next refresh cycle out to pick up this refresh. */
             return;
         }
-        /*
-         * Set the lastRefreshedLocation so listeners that come in for locations before that will just execute inline without messing
+        /* Set the lastRefreshedLocation so listeners that come in for locations before that will just execute inline without messing
          * around with refreshListeners or synchronizing at all. Note that it is not safe for us to abort early if we haven't advanced the
          * position here because we set and read lastRefreshedLocation outside of a synchronized block. We do that so that waiting for a
          * refresh that has already passed is just a volatile read but the cost is that any check whether or not we've advanced the
          * position will introduce a race between adding the listener and the position check. We could work around this by moving this
          * assignment into the synchronized block below and double checking lastRefreshedLocation in addOrNotify's synchronized block but
-         * that doesn't seem worth it given that we already skip this process early if there aren't any listeners to iterate.
-         */
+         * that doesn't seem worth it given that we already skip this process early if there aren't any listeners to iterate. */
         lastRefreshedLocation = currentRefreshLocation;
-        /*
-         * Grab the current refresh listeners and replace them with null while synchronized. Any listeners that come in after this won't be
+        /* Grab the current refresh listeners and replace them with null while synchronized. Any listeners that come in after this won't be
          * in the list we iterate over and very likely won't be candidates for refresh anyway because we've already moved the
-         * lastRefreshedLocation.
-         */
+         * lastRefreshedLocation. */
         List<Tuple<Translog.Location, Consumer<Boolean>>> candidates;
         synchronized (this) {
             candidates = refreshListeners;

@@ -162,16 +190,15 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
             refreshListeners = null;
         }
         // Iterate the list of listeners, copying the listeners to fire to one list and those to preserve to another list.
-        List<Consumer<Boolean>> listenersToFire = null;
+        List<Tuple<Translog.Location, Consumer<Boolean>>> listenersToFire = null;
         List<Tuple<Translog.Location, Consumer<Boolean>>> preservedListeners = null;
         for (Tuple<Translog.Location, Consumer<Boolean>> tuple : candidates) {
             Translog.Location location = tuple.v1();
-            Consumer<Boolean> listener = tuple.v2();
             if (location.compareTo(currentRefreshLocation) <= 0) {
                 if (listenersToFire == null) {
                     listenersToFire = new ArrayList<>();
                 }
-                listenersToFire.add(listener);
+                listenersToFire.add(tuple);
             } else {
                 if (preservedListeners == null) {
                     preservedListeners = new ArrayList<>();

@@ -179,27 +206,36 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener
                 preservedListeners.add(tuple);
             }
         }
-        /*
-         * Now add any preserved listeners back to the running list of refresh listeners while under lock. We'll try them next time. While
-         * we were iterating the list of listeners new listeners could have come in. That means that adding all of our preserved listeners
-         * might push our list of listeners above the maximum number of slots allowed. This seems unlikely because we expect few listeners
-         * to be preserved. And the next listener while we're full will trigger a refresh anyway.
-         */
+        /* Now deal with the listeners that it isn't time yet to fire. We need to do this under lock so we don't miss a concurrent close or
+         * newly registered listener. If we're not closed we just add the listeners to the list of listeners we check next time. If we are
+         * closed we fire the listeners even though it isn't time for them. */
         if (preservedListeners != null) {
             synchronized (this) {
                 if (refreshListeners == null) {
-                    refreshListeners = new ArrayList<>();
+                    if (closed) {
+                        listenersToFire.addAll(preservedListeners);
+                    } else {
+                        refreshListeners = preservedListeners;
+                    }
+                } else {
+                    assert closed == false : "Can't be closed and have non-null refreshListeners";
+                    refreshListeners.addAll(preservedListeners);
                 }
-                refreshListeners.addAll(preservedListeners);
             }
         }
         // Lastly, fire the listeners that are ready on the listener thread pool
+        fireListeners(listenersToFire);
+    }
+
+    /**
+     * Fire some listeners. Does nothing if the list of listeners is null.
+     */
+    private void fireListeners(List<Tuple<Translog.Location, Consumer<Boolean>>> listenersToFire) {
        if (listenersToFire != null) {
-            final List<Consumer<Boolean>> finalListenersToFire = listenersToFire;
            listenerExecutor.execute(() -> {
-                for (Consumer<Boolean> listener : finalListenersToFire) {
+                for (Tuple<Translog.Location, Consumer<Boolean>> listener : listenersToFire) {
                    try {
-                        listener.accept(false);
+                        listener.v2().accept(false);
                    } catch (Exception e) {
                        logger.warn("Error firing refresh listener", e);
                    }
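The Closeable work in this file comes down to two invariants: a closed registry rejects new listeners, and close() flushes whatever is pending so no caller waits forever. A stripped-down sketch of that contract (names are illustrative; the real class additionally tracks translog locations and fires listeners on a dedicated executor):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    class ClosableListenerRegistry implements AutoCloseable {
        private List<Consumer<Boolean>> listeners; // built on demand, null when empty
        private boolean closed;

        public synchronized void add(Consumer<Boolean> listener) {
            if (closed) {
                throw new IllegalStateException("can't register a listener after close");
            }
            if (listeners == null) {
                listeners = new ArrayList<>();
            }
            listeners.add(listener);
        }

        public synchronized int pendingCount() {
            return listeners == null ? 0 : listeners.size();
        }

        @Override
        public void close() {
            List<Consumer<Boolean>> toFire;
            synchronized (this) {
                toFire = listeners;
                listeners = null;
                closed = true;
            }
            if (toFire != null) {
                // fire outside the lock; false signals "the refresh may not have happened"
                for (Consumer<Boolean> listener : toFire) {
                    listener.accept(false);
                }
            }
        }
    }

The null-out-then-fire shape matters: the list is detached under the lock so a concurrent add() can never land on a list that is about to be flushed, then the callbacks run outside the lock to avoid holding it across user code.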
@@ -329,7 +329,7 @@ public class RecoverySourceHandler {
             }
         }
 
-        prepareTargetForTranslog(translogView.totalOperations());
+        prepareTargetForTranslog(translogView.totalOperations(), shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp());
 
         logger.trace("[{}][{}] recovery [phase1] to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
         response.phase1Time = stopWatch.totalTime().millis();

@@ -341,15 +341,14 @@ public class RecoverySourceHandler {
     }
 
-    protected void prepareTargetForTranslog(final int totalTranslogOps) throws IOException {
+    protected void prepareTargetForTranslog(final int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
         StopWatch stopWatch = new StopWatch().start();
         logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog", request.shardId(), request.targetNode());
         final long startEngineStart = stopWatch.totalTime().millis();
         // Send a request preparing the new shard's translog to receive
         // operations. This ensures the shard engine is started and disables
         // garbage collection (not the JVM's GC!) of tombstone deletes
-        cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps,
-            shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp()));
+        cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp));
         stopWatch.stop();
 
         response.startTime = stopWatch.totalTime().millis() - startEngineStart;
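The signature change to prepareTargetForTranslog is a hoisting refactor: the value derived from the engine is captured by the caller while the engine is guaranteed open, then passed down as a plain parameter. This is what makes the shared-filesystem path in the next file safe, where the engine may be closed before the helper runs. A sketch of the pattern under those assumptions (all names here are illustrative):

    class RecoverySketch {
        interface Engine extends AutoCloseable {
            long maxUnsafeAutoIdTimestamp();

            @Override
            void close();
        }

        // Before: the helper read from the engine itself, which is unsafe if the
        // engine was already closed. After: the caller snapshots the value while
        // the engine is known to be open and hands it down.
        void recover(Engine engine, boolean closeEngineFirst) {
            long maxUnsafeAutoIdTimestamp = engine.maxUnsafeAutoIdTimestamp(); // capture early
            if (closeEngineFirst) {
                engine.close(); // e.g. primary relocation on a shared filesystem
            }
            prepareTargetForTranslog(0, maxUnsafeAutoIdTimestamp); // uses the captured value
        }

        void prepareTargetForTranslog(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) {
            // ships both values to the recovery target; no engine access needed here
        }
    }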
@@ -50,6 +50,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
         boolean engineClosed = false;
         try {
             logger.trace("{} recovery [phase1] to {}: skipping phase 1 for shared filesystem", request.shardId(), request.targetNode());
+            long maxUnsafeAutoIdTimestamp = shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp();
             if (request.isPrimaryRelocation()) {
                 logger.debug("[phase1] closing engine on primary for shared filesystem recovery");
                 try {

@@ -62,7 +63,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
                     shard.failShard("failed to close engine (phase1)", e);
                 }
             }
-            prepareTargetForTranslog(0);
+            prepareTargetForTranslog(0, maxUnsafeAutoIdTimestamp);
             finalizeRecovery();
             return response;
         } catch (Exception e) {
@@ -19,14 +19,6 @@
 
 package org.elasticsearch.node.internal;
 
-import org.elasticsearch.cli.Terminal;
-import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsException;
-import org.elasticsearch.env.Environment;
-
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;

@@ -39,6 +31,14 @@ import java.util.Set;
 import java.util.function.Function;
 import java.util.function.Predicate;
 
+import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsException;
+import org.elasticsearch.env.Environment;
+
 import static org.elasticsearch.common.Strings.cleanPath;
 
 public class InternalSettingsPreparer {
@@ -42,7 +42,7 @@ import java.util.stream.Collectors;
  * A class that represents the data in a repository, as captured in the
  * repository's index blob.
  */
-public final class RepositoryData implements ToXContent {
+public final class RepositoryData {
 
     /**
      * The generation value indicating the repository has no index generational files.

@@ -51,7 +51,8 @@ public final class RepositoryData implements ToXContent {
     /**
      * An instance initialized for an empty repository.
      */
-    public static final RepositoryData EMPTY = new RepositoryData(EMPTY_REPO_GEN, Collections.emptyList(), Collections.emptyMap());
+    public static final RepositoryData EMPTY =
+        new RepositoryData(EMPTY_REPO_GEN, Collections.emptyList(), Collections.emptyMap(), Collections.emptyList());
 
     /**
      * The generational id of the index file from which the repository data was read.

@@ -69,25 +70,24 @@ public final class RepositoryData implements ToXContent {
      * The snapshots that each index belongs to.
      */
     private final Map<IndexId, Set<SnapshotId>> indexSnapshots;
+    /**
+     * The snapshots that are no longer compatible with the current cluster ES version.
+     */
+    private final List<SnapshotId> incompatibleSnapshotIds;
 
-    private RepositoryData(long genId, List<SnapshotId> snapshotIds, Map<IndexId, Set<SnapshotId>> indexSnapshots) {
+    public RepositoryData(long genId, List<SnapshotId> snapshotIds, Map<IndexId, Set<SnapshotId>> indexSnapshots,
+                          List<SnapshotId> incompatibleSnapshotIds) {
         this.genId = genId;
         this.snapshotIds = Collections.unmodifiableList(snapshotIds);
         this.indices = Collections.unmodifiableMap(indexSnapshots.keySet()
                                                        .stream()
                                                        .collect(Collectors.toMap(IndexId::getName, Function.identity())));
         this.indexSnapshots = Collections.unmodifiableMap(indexSnapshots);
-    }
-
-    /**
-     * Creates an instance of {@link RepositoryData} on a fresh repository (one that has no index-N files).
-     */
-    public static RepositoryData initRepositoryData(List<SnapshotId> snapshotIds, Map<IndexId, Set<SnapshotId>> indexSnapshots) {
-        return new RepositoryData(EMPTY_REPO_GEN, snapshotIds, indexSnapshots);
+        this.incompatibleSnapshotIds = Collections.unmodifiableList(incompatibleSnapshotIds);
     }
 
     protected RepositoryData copy() {
-        return new RepositoryData(genId, snapshotIds, indexSnapshots);
+        return new RepositoryData(genId, snapshotIds, indexSnapshots, incompatibleSnapshotIds);
     }
 
     /**

@@ -104,6 +104,25 @@ public final class RepositoryData implements ToXContent {
         return snapshotIds;
     }
 
+    /**
+     * Returns an immutable collection of the snapshot ids in the repository that are incompatible with the
+     * current ES version.
+     */
+    public List<SnapshotId> getIncompatibleSnapshotIds() {
+        return incompatibleSnapshotIds;
+    }
+
+    /**
+     * Returns an immutable collection of all the snapshot ids in the repository, both active and
+     * incompatible snapshots.
+     */
+    public List<SnapshotId> getAllSnapshotIds() {
+        List<SnapshotId> allSnapshotIds = new ArrayList<>(snapshotIds.size() + incompatibleSnapshotIds.size());
+        allSnapshotIds.addAll(snapshotIds);
+        allSnapshotIds.addAll(incompatibleSnapshotIds);
+        return Collections.unmodifiableList(allSnapshotIds);
+    }
+
     /**
      * Returns an unmodifiable map of the index names to {@link IndexId} in the repository.
      */

@@ -139,7 +158,7 @@ public final class RepositoryData implements ToXContent {
                 allIndexSnapshots.put(indexId, ids);
             }
         }
-        return new RepositoryData(genId, snapshots, allIndexSnapshots);
+        return new RepositoryData(genId, snapshots, allIndexSnapshots, incompatibleSnapshotIds);
     }
 
     /**

@@ -168,7 +187,21 @@ public final class RepositoryData implements ToXContent {
             indexSnapshots.put(indexId, set);
         }
 
-        return new RepositoryData(genId, newSnapshotIds, indexSnapshots);
+        return new RepositoryData(genId, newSnapshotIds, indexSnapshots, incompatibleSnapshotIds);
+    }
+
+    /**
+     * Returns a new {@link RepositoryData} instance containing the same snapshot data as the
+     * invoking instance, with the given incompatible snapshots added to the new instance.
+     */
+    public RepositoryData addIncompatibleSnapshots(final List<SnapshotId> incompatibleSnapshotIds) {
+        List<SnapshotId> newSnapshotIds = new ArrayList<>(this.snapshotIds);
+        List<SnapshotId> newIncompatibleSnapshotIds = new ArrayList<>(this.incompatibleSnapshotIds);
+        for (SnapshotId snapshotId : incompatibleSnapshotIds) {
+            newSnapshotIds.remove(snapshotId);
+            newIncompatibleSnapshotIds.add(snapshotId);
+        }
+        return new RepositoryData(this.genId, newSnapshotIds, this.indexSnapshots, newIncompatibleSnapshotIds);
     }
 
     /**

@@ -182,6 +215,13 @@ public final class RepositoryData implements ToXContent {
         return snapshotIds;
     }
 
+    /**
+     * Initializes the indices in the repository metadata; returns a new instance.
+     */
+    public RepositoryData initIndices(final Map<IndexId, Set<SnapshotId>> indexSnapshots) {
+        return new RepositoryData(genId, snapshotIds, indexSnapshots, incompatibleSnapshotIds);
+    }
+
     @Override
     public boolean equals(Object obj) {
         if (this == obj) {

@@ -193,12 +233,13 @@ public final class RepositoryData implements ToXContent {
         @SuppressWarnings("unchecked") RepositoryData that = (RepositoryData) obj;
         return snapshotIds.equals(that.snapshotIds)
                    && indices.equals(that.indices)
-                   && indexSnapshots.equals(that.indexSnapshots);
+                   && indexSnapshots.equals(that.indexSnapshots)
+                   && incompatibleSnapshotIds.equals(that.incompatibleSnapshotIds);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(snapshotIds, indices, indexSnapshots);
+        return Objects.hash(snapshotIds, indices, indexSnapshots, incompatibleSnapshotIds);
     }
 
     /**

@@ -247,11 +288,15 @@ public final class RepositoryData implements ToXContent {
     }
 
     private static final String SNAPSHOTS = "snapshots";
+    private static final String INCOMPATIBLE_SNAPSHOTS = "incompatible-snapshots";
     private static final String INDICES = "indices";
     private static final String INDEX_ID = "id";
 
-    @Override
-    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+    /**
+     * Writes the snapshots metadata and the related indices metadata to x-content, omitting the
+     * incompatible snapshots.
+     */
+    public XContentBuilder snapshotsToXContent(final XContentBuilder builder, final ToXContent.Params params) throws IOException {
         builder.startObject();
         // write the snapshots list
         builder.startArray(SNAPSHOTS);

@@ -278,7 +323,10 @@ public final class RepositoryData implements ToXContent {
         return builder;
     }
 
-    public static RepositoryData fromXContent(final XContentParser parser, final long genId) throws IOException {
+    /**
+     * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata.
+     */
+    public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException {
         List<SnapshotId> snapshots = new ArrayList<>();
         Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
         if (parser.nextToken() == XContentParser.Token.START_OBJECT) {

@@ -327,7 +375,51 @@ public final class RepositoryData implements ToXContent {
         } else {
             throw new ElasticsearchParseException("start object expected");
         }
-        return new RepositoryData(genId, snapshots, indexSnapshots);
+        return new RepositoryData(genId, snapshots, indexSnapshots, Collections.emptyList());
+    }
+
+    /**
+     * Writes the incompatible snapshot ids to x-content.
+     */
+    public XContentBuilder incompatibleSnapshotsToXContent(final XContentBuilder builder, final ToXContent.Params params)
+        throws IOException {
+
+        builder.startObject();
+        // write the incompatible snapshots list
+        builder.startArray(INCOMPATIBLE_SNAPSHOTS);
+        for (final SnapshotId snapshot : getIncompatibleSnapshotIds()) {
+            snapshot.toXContent(builder, params);
+        }
+        builder.endArray();
+        builder.endObject();
+        return builder;
+    }
+
+    /**
+     * Reads the incompatible snapshot ids from x-content, loading them into a new instance of {@link RepositoryData}
+     * that is created from the invoking instance, plus the incompatible snapshots that are read from x-content.
+     */
+    public RepositoryData incompatibleSnapshotsFromXContent(final XContentParser parser) throws IOException {
+        List<SnapshotId> incompatibleSnapshotIds = new ArrayList<>();
+        if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
+            while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+                String currentFieldName = parser.currentName();
+                if (INCOMPATIBLE_SNAPSHOTS.equals(currentFieldName)) {
+                    if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
+                        while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+                            incompatibleSnapshotIds.add(SnapshotId.fromXContent(parser));
+                        }
+                    } else {
+                        throw new ElasticsearchParseException("expected array for [" + currentFieldName + "]");
+                    }
+                } else {
+                    throw new ElasticsearchParseException("unknown field name [" + currentFieldName + "]");
+                }
+            }
+        } else {
+            throw new ElasticsearchParseException("start object expected");
+        }
+        return new RepositoryData(this.genId, this.snapshotIds, this.indexSnapshots, incompatibleSnapshotIds);
+    }
+
 }
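RepositoryData stays immutable throughout: every "mutator" returns a new instance, and marking a snapshot incompatible removes it from the active list in the same step, so the two lists never overlap while names remain reserved. A simplified sketch of that copy-with-modification style, with snapshot ids reduced to plain strings:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    final class RepoDataSketch {
        private final List<String> snapshotIds;
        private final List<String> incompatibleSnapshotIds;

        RepoDataSketch(List<String> snapshotIds, List<String> incompatibleSnapshotIds) {
            this.snapshotIds = Collections.unmodifiableList(snapshotIds);
            this.incompatibleSnapshotIds = Collections.unmodifiableList(incompatibleSnapshotIds);
        }

        // Returns a new instance; the receiver is never mutated.
        RepoDataSketch addIncompatibleSnapshots(List<String> toMark) {
            List<String> newActive = new ArrayList<>(snapshotIds);
            List<String> newIncompatible = new ArrayList<>(incompatibleSnapshotIds);
            for (String id : toMark) {
                newActive.remove(id);      // no longer listed as usable
                newIncompatible.add(id);   // but still known, so its name stays reserved
            }
            return new RepoDataSketch(newActive, newIncompatible);
        }

        List<String> allSnapshotIds() {
            List<String> all = new ArrayList<>(snapshotIds);
            all.addAll(incompatibleSnapshotIds);
            return Collections.unmodifiableList(all);
        }
    }

Keeping the type immutable is what lets callers like the snapshot-name-collision check read a consistent pair of lists without any locking.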
@@ -128,8 +128,9 @@ import static java.util.Collections.unmodifiableMap;
  * <pre>
  * {@code
  *   STORE_ROOT
- *   |- index-N           - list of all snapshot name as JSON array, N is the generation of the file
+ *   |- index-N           - list of all snapshot ids and the indices belonging to each snapshot, N is the generation of the file
  *   |- index.latest      - contains the numeric value of the latest generation of the index file (i.e. N from above)
+ *   |- incompatible-snapshots - list of all snapshot ids that are no longer compatible with the current version of the cluster
  *   |- snap-20131010 - JSON serialized Snapshot for snapshot "20131010"
  *   |- meta-20131010.dat - JSON serialized MetaData for snapshot "20131010" (includes only global metadata)
  *   |- snap-20131011 - JSON serialized Snapshot for snapshot "20131011"

@@ -181,6 +182,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
 
     private static final String INDEX_LATEST_BLOB = "index.latest";
 
+    private static final String INCOMPATIBLE_SNAPSHOTS_BLOB = "incompatible-snapshots";
+
     private static final String TESTS_FILE = "tests-";
 
     private static final String METADATA_NAME_FORMAT = "meta-%s.dat";

@@ -232,11 +235,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB));
         restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB));
         readOnly = metadata.settings().getAsBoolean("readonly", false);
 
         indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT,
             BlobStoreIndexShardSnapshot::fromXContent, namedXContentRegistry, isCompress());
         indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT,
             BlobStoreIndexShardSnapshots::fromXContent, namedXContentRegistry, isCompress());
 
     }

@@ -305,7 +308,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         try {
             final String snapshotName = snapshotId.getName();
             // check if the snapshot name already exists in the repository
-            if (getSnapshots().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
+            final RepositoryData repositoryData = getRepositoryData();
+            if (repositoryData.getAllSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
                 throw new SnapshotCreationException(metadata.name(), snapshotId, "snapshot with the same name already exists");
             }
             if (snapshotFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) {

@@ -480,10 +484,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         }
     }
 
-    public List<SnapshotId> getSnapshots() {
-        return getRepositoryData().getSnapshotIds();
-    }
-
     @Override
     public MetaData getSnapshotMetaData(SnapshotInfo snapshot, List<IndexId> indices) throws IOException {
         return readSnapshotMetaData(snapshot.snapshotId(), snapshot.version(), indices, false);

@@ -491,6 +491,15 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
 
     @Override
     public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) {
+        if (getRepositoryData().getIncompatibleSnapshotIds().contains(snapshotId)) {
+            // an incompatible snapshot - cannot read its snapshot metadata file, just return
+            // a SnapshotInfo indicating its incompatible
+            return SnapshotInfo.incompatible(snapshotId);
+        }
+        return getSnapshotInfoInternal(snapshotId);
+    }
+
+    private SnapshotInfo getSnapshotInfoInternal(final SnapshotId snapshotId) {
         try {
             return snapshotFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
         } catch (NoSuchFileException ex) {

@@ -633,9 +642,21 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 Streams.copy(blob, out);
                 // EMPTY is safe here because RepositoryData#fromXContent calls namedObject
                 try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
-                    repositoryData = RepositoryData.fromXContent(parser, indexGen);
+                    repositoryData = RepositoryData.snapshotsFromXContent(parser, indexGen);
                 }
             }
+
+            // now load the incompatible snapshot ids, if they exist
+            try (InputStream blob = snapshotsBlobContainer.readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) {
+                BytesStreamOutput out = new BytesStreamOutput();
+                Streams.copy(blob, out);
+                try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
+                    repositoryData = repositoryData.incompatibleSnapshotsFromXContent(parser);
+                }
+            } catch (NoSuchFileException e) {
+                logger.debug("[{}] Incompatible snapshots blob [{}] does not exist, the likely reason is that " +
+                    "there are no incompatible snapshots in the repository", metadata.name(), INCOMPATIBLE_SNAPSHOTS_BLOB);
+            }
             return repositoryData;
         } catch (NoSuchFileException ex) {
             // repository doesn't have an index blob, its a new blank repo

@@ -674,7 +695,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         try (BytesStreamOutput bStream = new BytesStreamOutput()) {
             try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
                 XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
-                repositoryData.toXContent(builder, ToXContent.EMPTY_PARAMS);
+                repositoryData.snapshotsToXContent(builder, ToXContent.EMPTY_PARAMS);
                 builder.close();
             }
             snapshotsBytes = bStream.bytes();

@@ -687,10 +708,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             if (snapshotsBlobContainer.blobExists(oldSnapshotIndexFile)) {
                 snapshotsBlobContainer.deleteBlob(oldSnapshotIndexFile);
             }
-            // delete the old index file (non-generational) if it exists
-            if (snapshotsBlobContainer.blobExists(SNAPSHOTS_FILE)) {
-                snapshotsBlobContainer.deleteBlob(SNAPSHOTS_FILE);
-            }
         }
 
         // write the current generation to the index-latest file

@@ -705,6 +722,26 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         writeAtomic(INDEX_LATEST_BLOB, genBytes);
     }
 
+    /**
+     * Writes the incompatible snapshot ids list to the `incompatible-snapshots` blob in the repository.
+     *
+     * Package private for testing.
+     */
+    void writeIncompatibleSnapshots(RepositoryData repositoryData) throws IOException {
+        assert isReadOnly() == false; // can not write to a read only repository
+        final BytesReference bytes;
+        try (BytesStreamOutput bStream = new BytesStreamOutput()) {
+            try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
+                XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+                repositoryData.incompatibleSnapshotsToXContent(builder, ToXContent.EMPTY_PARAMS);
+                builder.close();
+            }
+            bytes = bStream.bytes();
+        }
+        // write the incompatible snapshots blob
+        writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes);
+    }
+
     /**
      * Get the latest snapshot index blob id. Snapshot index blobs are named index-N, where N is
      * the next version number from when the index blob was written. Each individual index-N blob is
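The new read path treats the incompatible-snapshots blob as optional: most repositories never write it, so a missing blob means "no incompatible snapshots" rather than an error. A sketch of the same tolerate-missing-file read, using plain java.nio.file in place of the blob container abstraction:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;

    class OptionalBlobRead {
        static byte[] readOptionalBlob(Path blob) throws IOException {
            try {
                return Files.readAllBytes(blob);
            } catch (NoSuchFileException e) {
                // expected on repositories that never wrote this blob
                return null;
            }
        }
    }

Catching the narrow NoSuchFileException (rather than IOException) is deliberate: only the absent-file case is benign, while genuine read failures should still propagate.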
@@ -18,7 +18,7 @@
  */
 package org.elasticsearch.rest.action;
 
-import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestChannel;

@@ -30,7 +30,7 @@ import java.util.function.Function;
 /**
  * Content listener that extracts that {@link RestStatus} from the response.
  */
-public class RestStatusToXContentListener<Response extends StatusToXContent> extends RestResponseListener<Response> {
+public class RestStatusToXContentListener<Response extends StatusToXContentObject> extends RestToXContentListener<Response> {
     private final Function<Response, String> extractLocation;
 
     /**

@@ -52,15 +52,10 @@ public class RestStatusToXContentListener<Response extends StatusToXContent> ext
     }
 
     @Override
-    public final RestResponse buildResponse(Response response) throws Exception {
-        return buildResponse(response, channel.newBuilder());
-    }
-
-    public final RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception {
-        builder.startObject();
+    public RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception {
+        assert response.isFragment() == false; //would be nice if we could make default methods final
         response.toXContent(builder, channel.request());
-        builder.endObject();
-        BytesRestResponse restResponse = new BytesRestResponse(response.status(), builder);
+        RestResponse restResponse = new BytesRestResponse(response.status(), builder);
         if (RestStatus.CREATED == restResponse.status()) {
             String location = extractLocation.apply(response);
             if (location != null) {
@@ -20,6 +20,7 @@
 package org.elasticsearch.rest.action;
 
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestChannel;

@@ -30,7 +31,7 @@ import org.elasticsearch.rest.RestStatus;
  * A REST based action listener that assumes the response is of type {@link ToXContent} and automatically
  * builds an XContent based response (wrapping the toXContent in startObject/endObject).
  */
-public class RestToXContentListener<Response extends ToXContent> extends RestResponseListener<Response> {
+public class RestToXContentListener<Response extends ToXContentObject> extends RestResponseListener<Response> {
 
     public RestToXContentListener(RestChannel channel) {
         super(channel);

@@ -41,24 +42,12 @@ public class RestToXContentListener<Response extends ToXContent> extends RestRes
         return buildResponse(response, channel.newBuilder());
     }
 
-    public final RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception {
-        if (wrapInObject()) {
-            builder.startObject();
-        }
+    public RestResponse buildResponse(Response response, XContentBuilder builder) throws Exception {
+        assert response.isFragment() == false; //would be nice if we could make default methods final
         response.toXContent(builder, channel.request());
-        if (wrapInObject()) {
-            builder.endObject();
-        }
         return new BytesRestResponse(getStatus(response), builder);
     }
 
-    protected boolean wrapInObject() {
-        //Ideally, the toXContent method starts with startObject and ends with endObject.
-        //In practice, we have many places where toXContent produces a json fragment that's not valid by itself. We will
-        //migrate those step by step, so that we never have to start objects here, and we can remove this method.
-        return true;
-    }
-
     protected RestStatus getStatus(Response response) {
         return RestStatus.OK;
     }
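The two listener rewrites above shift responsibility for the outer JSON object from the listener to the response type: a ToXContentObject promises to emit a complete object, so startObject()/endObject() and the wrapInObject() escape hatch disappear from the listener. A toy sketch of that contract, with simplified stand-ins for the ToXContent interfaces:

    interface Fragment {
        StringBuilder toXContent(StringBuilder builder); // may emit a bare fragment
    }

    interface CompleteObject extends Fragment {
        default boolean isFragment() {
            return false; // promises "{ ... }" around its own output
        }
    }

    class ResponseRenderer {
        // Only complete objects are accepted, so no wrapping logic is needed here.
        static String render(CompleteObject response) {
            assert !response.isFragment();
            return response.toXContent(new StringBuilder()).toString();
        }
    }

Encoding the promise in the type system is what allows the RestBulkAction and DocWriteResponseTests hunks further down to delete their manual startObject()/endObject() calls.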
@@ -260,6 +260,9 @@ public class RestIndicesAction extends AbstractCatAction {
         table.addCell("refresh.time", "sibling:pri;alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
         table.addCell("pri.refresh.time", "default:false;text-align:right;desc:time spent in refreshes");
 
+        table.addCell("refresh.listeners", "sibling:pri;alias:rli,refreshListeners;default:false;text-align:right;desc:number of pending refresh listeners");
+        table.addCell("pri.refresh.listeners", "default:false;text-align:right;desc:number of pending refresh listeners");
+
         table.addCell("search.fetch_current", "sibling:pri;alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
         table.addCell("pri.search.fetch_current", "default:false;text-align:right;desc:current fetch phase ops");

@@ -475,6 +478,9 @@ public class RestIndicesAction extends AbstractCatAction {
         table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getTotalTime());
         table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getTotalTime());
 
+        table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getListeners());
+        table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getListeners());
+
         table.addCell(indexStats == null ? null : indexStats.getTotal().getSearch().getTotal().getFetchCurrent());
         table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSearch().getTotal().getFetchCurrent());
@@ -193,6 +193,8 @@ public class RestNodesAction extends AbstractCatAction {
 
         table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
         table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
+        table.addCell("refresh.listeners", "alias:rli,refreshListeners;default:false;text-align:right;"
+                + "desc:number of pending refresh listeners");
 
         table.addCell("script.compilations", "alias:scrcc,scriptCompilations;default:false;text-align:right;desc:script compilations");
         table.addCell("script.cache_evictions",

@@ -346,6 +348,7 @@ public class RestNodesAction extends AbstractCatAction {
         RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh();
         table.addCell(refreshStats == null ? null : refreshStats.getTotal());
         table.addCell(refreshStats == null ? null : refreshStats.getTotalTime());
+        table.addCell(refreshStats == null ? null : refreshStats.getListeners());
 
         ScriptStats scriptStats = stats == null ? null : stats.getScriptStats();
         table.addCell(scriptStats == null ? null : scriptStats.getCompilations());
@@ -144,6 +144,7 @@ public class RestShardsAction extends AbstractCatAction {
 
         table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
         table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
+        table.addCell("refresh.listeners", "alias:rli,refreshListeners;default:false;text-align:right;desc:number of pending refresh listeners");
 
         table.addCell("search.fetch_current", "alias:sfc,searchFetchCurrent;default:false;text-align:right;desc:current fetch phase ops");
         table.addCell("search.fetch_time", "alias:sfti,searchFetchTime;default:false;text-align:right;desc:time spent in fetch phase");

@@ -290,6 +291,7 @@ public class RestShardsAction extends AbstractCatAction {
 
         table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotal());
         table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotalTime());
+        table.addCell(commonStats == null ? null : commonStats.getRefresh().getListeners());
 
         table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getFetchCurrent());
         table.addCell(commonStats == null ? null : commonStats.getSearch().getTotal().getFetchTime());
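All three _cat handlers follow the same convention: every header cell added in the table-building method must be matched, in order, by a value cell in the row-building method, which is why each of these diffs touches two places per file. A toy illustration of why the pairing matters (the Table below is a simplified stand-in for the real one):

    import java.util.ArrayList;
    import java.util.List;

    class CatTableSketch {
        static class Table {
            final List<String> headers = new ArrayList<>();
            final List<Object> row = new ArrayList<>();
            void addHeader(String name) { headers.add(name); }
            void addCell(Object value) { row.add(value); }
        }

        static Table build(long refreshTotal, int refreshListeners) {
            Table table = new Table();
            // headers and cells are position-matched: adding a header without the
            // corresponding cell (or vice versa) would misalign every later column
            table.addHeader("refresh.total");
            table.addHeader("refresh.listeners");
            table.addCell(refreshTotal);
            table.addCell(refreshListeners);
            return table;
        }
    }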
@@ -108,9 +108,7 @@ public class RestBulkAction extends BaseRestHandler {
                 builder.field(Fields.ERRORS, response.hasFailures());
                 builder.startArray(Fields.ITEMS);
                 for (BulkItemResponse itemResponse : response) {
-                    builder.startObject();
                     itemResponse.toXContent(builder, request);
-                    builder.endObject();
                 }
                 builder.endArray();
@@ -75,11 +75,6 @@ public class RestGetAction extends BaseRestHandler {
         getRequest.fetchSourceContext(FetchSourceContext.parseFromRestRequest(request));
 
         return channel -> client.get(getRequest, new RestToXContentListener<GetResponse>(channel) {
-            @Override
-            protected boolean wrapInObject() {
-                return false;
-            }
-
             @Override
             protected RestStatus getStatus(GetResponse response) {
                 return response.isExists() ? OK : NOT_FOUND;
@@ -27,7 +27,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
-import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentParser;

@@ -46,7 +46,7 @@ import java.util.Objects;
  * compile and execute a script from the {@link ScriptService}
  * based on the {@link ScriptType}.
  */
-public final class Script implements ToXContent, Writeable {
+public final class Script implements ToXContentObject, Writeable {
 
     /**
      * The name of the of the default scripting language.
@@ -176,6 +176,11 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp
                 // Read snapshot info and metadata from the repository
                 Repository repository = repositoriesService.repository(request.repositoryName);
                 final RepositoryData repositoryData = repository.getRepositoryData();
+                final Optional<SnapshotId> incompatibleSnapshotId =
+                    repositoryData.getIncompatibleSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
+                if (incompatibleSnapshotId.isPresent()) {
+                    throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "cannot restore incompatible snapshot");
+                }
                 final Optional<SnapshotId> matchingSnapshotId = repositoryData.getSnapshotIds().stream()
                     .filter(s -> request.snapshotName.equals(s.getName())).findFirst();
                 if (matchingSnapshotId.isPresent() == false) {
@@ -21,6 +21,7 @@ package org.elasticsearch.snapshots;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;

@@ -67,6 +68,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
     private static final String TOTAL_SHARDS = "total_shards";
     private static final String SUCCESSFUL_SHARDS = "successful_shards";
 
+    private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0_UNRELEASED;
+
     private final SnapshotId snapshotId;
 
     private final SnapshotState state;

@@ -83,6 +86,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
 
     private final int successfulShards;
 
+    @Nullable
     private final Version version;
 
     private final List<SnapshotShardFailure> shardFailures;

@@ -138,7 +142,21 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
         } else {
             shardFailures = Collections.emptyList();
         }
-        version = Version.readVersion(in);
+        if (in.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) {
+            version = Version.readVersion(in);
+        } else {
+            version = in.readBoolean() ? Version.readVersion(in) : null;
+        }
     }
 
+    /**
+     * Gets a new {@link SnapshotInfo} instance for a snapshot that is incompatible with the
+     * current version of the cluster.
+     */
+    public static SnapshotInfo incompatible(SnapshotId snapshotId) {
+        return new SnapshotInfo(snapshotId, Collections.emptyList(), SnapshotState.INCOMPATIBLE,
+                                "the snapshot is incompatible with the current version of Elasticsearch and its exact version is unknown",
+                                null, 0L, 0L, 0, 0, Collections.emptyList());
+    }
+
     /**

@@ -234,10 +252,12 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
     }
 
     /**
-     * Returns the version of elasticsearch that the snapshot was created with
+     * Returns the version of elasticsearch that the snapshot was created with. Will only
+     * return {@code null} if {@link #state()} returns {@link SnapshotState#INCOMPATIBLE}.
      *
      * @return version of elasticsearch that the snapshot was created with
     */
+    @Nullable
     public Version version() {
         return version;
     }

@@ -305,8 +325,12 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
         builder.startObject();
         builder.field(SNAPSHOT, snapshotId.getName());
         builder.field(UUID, snapshotId.getUUID());
-        builder.field(VERSION_ID, version.id);
-        builder.field(VERSION, version.toString());
+        if (version != null) {
+            builder.field(VERSION_ID, version.id);
+            builder.field(VERSION, version.toString());
+        } else {
+            builder.field(VERSION, "unknown");
+        }
         builder.startArray(INDICES);
         for (String index : indices) {
             builder.value(index);

@@ -345,6 +369,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
         builder.startObject(SNAPSHOT);
         builder.field(NAME, snapshotId.getName());
         builder.field(UUID, snapshotId.getUUID());
+        assert version != null : "version must always be known when writing a snapshot metadata blob";
         builder.field(VERSION_ID, version.id);
         builder.startArray(INDICES);
         for (String index : indices) {

@@ -471,7 +496,11 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
         for (String index : indices) {
             out.writeString(index);
         }
-        out.writeByte(state.value());
+        if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED) && state == SnapshotState.INCOMPATIBLE) {
+            out.writeByte(SnapshotState.FAILED.value());
+        } else {
+            out.writeByte(state.value());
+        }
         out.writeOptionalString(reason);
         out.writeVLong(startTime);
         out.writeVLong(endTime);

@@ -481,7 +510,20 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
         for (SnapshotShardFailure failure : shardFailures) {
             failure.writeTo(out);
         }
-        Version.writeVersion(version, out);
+        if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED)) {
+            Version versionToWrite = version;
+            if (versionToWrite == null) {
+                versionToWrite = Version.CURRENT;
+            }
+            Version.writeVersion(versionToWrite, out);
+        } else {
+            if (version != null) {
+                out.writeBoolean(true);
+                Version.writeVersion(version, out);
+            } else {
+                out.writeBoolean(false);
+            }
+        }
     }
 
     private static SnapshotState snapshotState(final String reason, final List<SnapshotShardFailure> shardFailures) {
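The SnapshotInfo serialization hunks combine two backward-compatibility tricks: a now-nullable version is written behind a presence flag for new peers but substituted with a concrete value for old peers that require one, and the new INCOMPATIBLE state is downgraded to FAILED on old wires. A self-contained sketch of both, where the class, version ids, and constants are illustrative stand-ins:

    import java.io.DataOutputStream;
    import java.io.IOException;

    class OptionalOnTheWire {
        static final int GATE = 5_020_099;   // hypothetical version introducing the change
        static final byte FAILED = 2, INCOMPATIBLE = 4;

        static void writeState(DataOutputStream out, int peerVersion, byte state) throws IOException {
            if (peerVersion < GATE && state == INCOMPATIBLE) {
                out.writeByte(FAILED);  // old peers would reject the unknown constant
            } else {
                out.writeByte(state);
            }
        }

        static void writeVersion(DataOutputStream out, int peerVersion, Integer version) throws IOException {
            if (peerVersion < GATE) {
                // old wire format has no "absent" encoding; substitute a real value
                out.writeInt(version != null ? version : GATE);
            } else {
                out.writeBoolean(version != null); // presence flag
                if (version != null) {
                    out.writeInt(version);
                }
            }
        }
    }

The reader mirrors the writer exactly: peers before the gate read a bare value, peers at or after it read the boolean first, so both sides always consume the same number of bytes.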
@@ -39,7 +39,11 @@ public enum SnapshotState {
     /**
      * Snapshot was partial successful
     */
-    PARTIAL((byte) 3, true, true);
+    PARTIAL((byte) 3, true, true),
+    /**
+     * Snapshot is incompatible with the current version of the cluster
+     */
+    INCOMPATIBLE((byte) 4, true, false);
 
     private byte value;
 

@@ -47,7 +51,7 @@ public enum SnapshotState {
 
     private boolean restorable;
 
-    private SnapshotState(byte value, boolean completed, boolean restorable) {
+    SnapshotState(byte value, boolean completed, boolean restorable) {
         this.value = value;
         this.completed = completed;
         this.restorable = restorable;

@@ -97,6 +101,8 @@ public enum SnapshotState {
                 return FAILED;
             case 3:
                 return PARTIAL;
+            case 4:
+                return INCOMPATIBLE;
             default:
                 throw new IllegalArgumentException("No snapshot state for value [" + value + "]");
         }
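Adding an enum constant that crosses the wire means extending both directions of the byte mapping, hence the paired INCOMPATIBLE((byte) 4, ...) and case 4 changes. A compact sketch of that round-trip invariant (illustrative enum, not the real one):

    enum StateSketch {
        SUCCESS((byte) 1), FAILED((byte) 2), INCOMPATIBLE((byte) 4);

        final byte value;

        StateSketch(byte value) {
            this.value = value;
        }

        // every constant must be reachable from its own byte, or deserialization breaks
        static StateSketch fromValue(byte value) {
            for (StateSketch s : values()) {
                if (s.value == value) {
                    return s;
                }
            }
            throw new IllegalArgumentException("No state for value [" + value + "]");
        }
    }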
@@ -131,15 +131,15 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
     }
 
     /**
-     * Retrieves list of snapshot ids that are present in a repository
+     * Gets the {@link RepositoryData} for the given repository.
      *
      * @param repositoryName repository name
-     * @return list of snapshot ids
+     * @return repository data
     */
-    public List<SnapshotId> snapshotIds(final String repositoryName) {
+    public RepositoryData getRepositoryData(final String repositoryName) {
         Repository repository = repositoriesService.repository(repositoryName);
         assert repository != null; // should only be called once we've validated the repository exists
-        return repository.getRepositoryData().getSnapshotIds();
+        return repository.getRepositoryData();
     }
 
     /**

@@ -1004,6 +1004,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
         // First, look for the snapshot in the repository
         final Repository repository = repositoriesService.repository(repositoryName);
         final RepositoryData repositoryData = repository.getRepositoryData();
+        final Optional<SnapshotId> incompatibleSnapshotId =
+            repositoryData.getIncompatibleSnapshotIds().stream().filter(s -> snapshotName.equals(s.getName())).findFirst();
+        if (incompatibleSnapshotId.isPresent()) {
+            throw new SnapshotException(repositoryName, snapshotName, "cannot delete incompatible snapshot");
+        }
         Optional<SnapshotId> matchedEntry = repositoryData.getSnapshotIds()
             .stream()
             .filter(s -> s.getName().equals(snapshotName))
@@ -99,18 +99,14 @@ public class DocWriteResponseTests extends ESTestCase {
         response.setShardInfo(new ShardInfo(1, 1));
         response.setForcedRefresh(false);
         try (XContentBuilder builder = JsonXContent.contentBuilder()) {
-            builder.startObject();
             response.toXContent(builder, ToXContent.EMPTY_PARAMS);
-            builder.endObject();
             try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes())) {
                 assertThat(parser.map(), not(hasKey("forced_refresh")));
             }
         }
         response.setForcedRefresh(true);
         try (XContentBuilder builder = JsonXContent.contentBuilder()) {
-            builder.startObject();
             response.toXContent(builder, ToXContent.EMPTY_PARAMS);
-            builder.endObject();
             try (XContentParser parser = createParser(JsonXContent.jsonXContent, builder.bytes())) {
                 assertThat(parser.map(), hasEntry("forced_refresh", true));
             }
@@ -21,26 +21,19 @@ package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;

@@ -53,7 +46,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_WAIT_FOR_
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;

@@ -63,6 +55,7 @@ import static org.hamcrest.core.IsNull.notNullValue;
@ClusterScope(scope = Scope.TEST)
public class CreateIndexIT extends ESIntegTestCase {

public void testCreationDateGivenFails() {
try {
prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4L)).get();

@@ -288,192 +281,6 @@ public class CreateIndexIT extends ESIntegTestCase {
ensureGreen("test");
}

public void testCreateShrinkIndexToN() {
int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
int[] shardSplits = randomFrom(possibleShardSplits);
assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]);
assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]);
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get();
for (int i = 0; i < 20; i++) {
client().prepareIndex("source", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
.getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();
// now merge source into a 4 shard index
assertAcked(client().admin().indices().prepareShrinkIndex("source", "first_shrink")
.setSettings(Settings.builder()
.put("index.number_of_replicas", 0)
.put("index.number_of_shards", shardSplits[1]).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

for (int i = 0; i < 20; i++) { // now update
client().prepareIndex("first_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
flushAndRefresh();
assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("first_shrink")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();
// now merge source into a 2 shard index
assertAcked(client().admin().indices().prepareShrinkIndex("first_shrink", "second_shrink")
.setSettings(Settings.builder()
.put("index.number_of_replicas", 0)
.put("index.number_of_shards", shardSplits[2]).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
// let it be allocated anywhere and bump replicas
client().admin().indices().prepareUpdateSettings("second_shrink")
.setSettings(Settings.builder()
.putNull("index.routing.allocation.include._id")
.put("index.number_of_replicas", 1)).get();
ensureGreen();
assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

for (int i = 0; i < 20; i++) { // now update
client().prepareIndex("second_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
flushAndRefresh();
assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}

public void testCreateShrinkIndex() {
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7))).get();
for (int i = 0; i < 20; i++) {
client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
.getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();
// now merge source into a single shard index

final boolean createWithReplicas = randomBoolean();
assertAcked(client().admin().indices().prepareShrinkIndex("source", "target")
.setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

if (createWithReplicas == false) {
// bump replicas
client().admin().indices().prepareUpdateSettings("target")
.setSettings(Settings.builder()
.put("index.number_of_replicas", 1)).get();
ensureGreen();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}

for (int i = 20; i < 40; i++) {
client().prepareIndex("target", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
flushAndRefresh();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 40);
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

}
/**
* Tests that we can manually recover from a failed allocation due to shards being moved away etc.
*/
public void testCreateShrinkIndexFails() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
.put("number_of_shards", randomIntBetween(2, 7))
.put("number_of_replicas", 0)).get();
for (int i = 0; i < 20; i++) {
client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
.getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
String spareNode = discoveryNodes[0].getName();
String mergeNode = discoveryNodes[1].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();

// now merge source into a single shard index
client().admin().indices().prepareShrinkIndex("source", "target")
.setSettings(Settings.builder()
.put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to forcefully fuck it up
.put("index.number_of_replicas", 0)
.put("index.allocation.max_retries", 1).build()).get();

// now we move all shards away from the merge node
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode)
.put("index.blocks.write", true)).get();
ensureGreen("source");

client().admin().indices().prepareUpdateSettings("target") // erase the forcefully fuckup!
.setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")).get();
// wait until it fails
assertBusy(() -> {
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
RoutingTable routingTables = clusterStateResponse.getState().routingTable();
assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned());
assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED,
routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason());
assertEquals(1,
routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());
});
client().admin().indices().prepareUpdateSettings("source") // now relocate them all to the right node
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)).get();
ensureGreen("source");

final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class,
internalCluster().getMasterName());
infoService.refresh();
// kick off a retry and wait until it's done!
ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target")
.shard(0).getShards().get(0).getExpectedShardSize();
// we support the expected shard size in the allocator to sum up over the source index shards
assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);
ensureGreen();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}

/**
* This test ensures that index creation adheres to the {@link IndexMetaData#SETTING_WAIT_FOR_ACTIVE_SHARDS}.
*/

@@ -0,0 +1,246 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.admin.indices.create;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;

import java.util.Arrays;
import java.util.Collection;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

public class ShrinkIndexIT extends ESIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(InternalSettingsPlugin.class);
}

public void testCreateShrinkIndexToN() {
int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
int[] shardSplits = randomFrom(possibleShardSplits);
assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]);
assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]);
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get();
for (int i = 0; i < 20; i++) {
client().prepareIndex("source", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
.getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();
// now merge source into a 4 shard index
assertAcked(client().admin().indices().prepareShrinkIndex("source", "first_shrink")
.setSettings(Settings.builder()
.put("index.number_of_replicas", 0)
.put("index.number_of_shards", shardSplits[1]).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

for (int i = 0; i < 20; i++) { // now update
client().prepareIndex("first_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
flushAndRefresh();
assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("first_shrink")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();
// now merge source into a 2 shard index
assertAcked(client().admin().indices().prepareShrinkIndex("first_shrink", "second_shrink")
.setSettings(Settings.builder()
.put("index.number_of_replicas", 0)
.put("index.number_of_shards", shardSplits[2]).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
// let it be allocated anywhere and bump replicas
client().admin().indices().prepareUpdateSettings("second_shrink")
.setSettings(Settings.builder()
.putNull("index.routing.allocation.include._id")
.put("index.number_of_replicas", 1)).get();
ensureGreen();
assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

for (int i = 0; i < 20; i++) { // now update
client().prepareIndex("second_shrink", "t1", Integer.toString(i)).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
flushAndRefresh();
assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}

public void testCreateShrinkIndex() {
internalCluster().ensureAtLeastNumDataNodes(2);
Version version = VersionUtils.randomVersion(random());
prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
.put("number_of_shards", randomIntBetween(2, 7))
.put("index.version.created", version)
).get();
for (int i = 0; i < 20; i++) {
client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
.getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();
// now merge source into a single shard index

final boolean createWithReplicas = randomBoolean();
assertAcked(client().admin().indices().prepareShrinkIndex("source", "target")
.setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);

if (createWithReplicas == false) {
// bump replicas
client().admin().indices().prepareUpdateSettings("target")
.setSettings(Settings.builder()
.put("index.number_of_replicas", 1)).get();
ensureGreen();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}

for (int i = 20; i < 40; i++) {
client().prepareIndex("target", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
flushAndRefresh();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 40);
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get();
assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));
}
/**
* Tests that we can manually recover from a failed allocation due to shards being moved away etc.
*/
public void testCreateShrinkIndexFails() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
.put("number_of_shards", randomIntBetween(2, 7))
.put("number_of_replicas", 0)).get();
for (int i = 0; i < 20; i++) {
client().prepareIndex("source", randomFrom("t1", "t2", "t3")).setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}").get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes()
.getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
String spareNode = discoveryNodes[0].getName();
String mergeNode = discoveryNodes[1].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode)
.put("index.blocks.write", true)).get();
ensureGreen();

// now merge source into a single shard index
client().admin().indices().prepareShrinkIndex("source", "target")
.setWaitForActiveShards(ActiveShardCount.NONE)
.setSettings(Settings.builder()
.put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to forcefully fuck it up
.put("index.number_of_replicas", 0)
.put("index.allocation.max_retries", 1).build()).get();
client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get();

// now we move all shards away from the merge node
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode)
.put("index.blocks.write", true)).get();
ensureGreen("source");

client().admin().indices().prepareUpdateSettings("target") // erase the forcefully fuckup!
.setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")).get();
// wait until it fails
assertBusy(() -> {
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
RoutingTable routingTables = clusterStateResponse.getState().routingTable();
assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned());
assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED,
routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason());
assertEquals(1,
routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());
});
client().admin().indices().prepareUpdateSettings("source") // now relocate them all to the right node
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", mergeNode)).get();
ensureGreen("source");

final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class,
internalCluster().getMasterName());
infoService.refresh();
// kick off a retry and wait until it's done!
ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target")
.shard(0).getShards().get(0).getExpectedShardSize();
// we support the expected shard size in the allocator to sum up over the source index shards
assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);
ensureGreen();
assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}
}

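The shrink flow these tests exercise reduces to three steps: co-locate every shard of the source index on one node and block writes, issue the shrink request, then relax the allocation filter on the target. A minimal sketch using the same client API as above ("source", "target", and the node name are placeholders):

// 1. route all shards of "source" to one node and make the index read-only
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", "node-1")
.put("index.blocks.write", true)).get();
// 2. shrink into "target"; the target shard count must evenly divide the source's
assertAcked(client().admin().indices().prepareShrinkIndex("source", "target")
.setSettings(Settings.builder().put("index.number_of_shards", 1).build()).get());
// 3. let the target's shards allocate anywhere again
client().admin().indices().prepareUpdateSettings("target")
.setSettings(Settings.builder().putNull("index.routing.allocation.require._name")).get();
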
@@ -19,7 +19,11 @@
package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.engine.CommitStats;

@@ -111,6 +115,32 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
}
}

public void testRefreshListeners() throws Exception {
// Create an index without automatic refreshes
createIndex("test", Settings.builder().put("refresh_interval", -1).build());

// Index a document asynchronously so the request will only return when document is refreshed
ListenableActionFuture<IndexResponse> index = client().prepareIndex("test", "test", "test").setSource("test", "test")
.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).execute();

// Wait for the refresh listener to appear in the stats
assertBusy(() -> {
IndicesStatsResponse stats = client().admin().indices().prepareStats("test").clear().setRefresh(true).get();
CommonStats common = stats.getIndices().get("test").getTotal();
assertEquals(1, common.refresh.getListeners());
});

// Refresh the index and wait for the request to come back
client().admin().indices().prepareRefresh("test").get();
index.get();

// The document should appear in the statistics and the refresh listener should be gone
IndicesStatsResponse stats = client().admin().indices().prepareStats("test").clear().setRefresh(true).setDocs(true).get();
CommonStats common = stats.getIndices().get("test").getTotal();
assertEquals(1, common.docs.getCount());
assertEquals(0, common.refresh.getListeners());
}

/**
* Gives access to package private IndicesStatsResponse constructor for test purpose.
**/

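The testRefreshListeners addition leans on RefreshPolicy.WAIT_UNTIL: the indexing request only completes once a refresh has made the document visible, and while it waits it is counted in the refresh stats as a listener. The pattern in isolation (index, type, and id names are placeholders):

// the future completes only after a refresh makes the document searchable
ListenableActionFuture<IndexResponse> future = client().prepareIndex("test", "doc", "1")
.setSource("field", "value")
.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL)
.execute();
client().admin().indices().prepareRefresh("test").get(); // trigger the refresh
IndexResponse response = future.get(); // now returns promptly
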
@@ -46,7 +46,7 @@ public class GetResponseTests extends ESTestCase {
Tuple<GetResult, GetResult> tuple = randomGetResult(xContentType);
GetResponse getResponse = new GetResponse(tuple.v1());
GetResponse expectedGetResponse = new GetResponse(tuple.v2());
BytesReference originalBytes = toXContent(getResponse, xContentType, false);
BytesReference originalBytes = toXContent(getResponse, xContentType);
//test that we can parse what we print out
GetResponse parsedGetResponse;
try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {

@@ -55,11 +55,10 @@ public class GetResponseTests extends ESTestCase {
}
assertEquals(expectedGetResponse, parsedGetResponse);
//print the parsed object out and test that the output is the same as the original output
BytesReference finalBytes = toXContent(parsedGetResponse, xContentType, false);
BytesReference finalBytes = toXContent(parsedGetResponse, xContentType);
assertToXContentEquivalent(originalBytes, finalBytes, xContentType);
//check that the source stays unchanged, no shuffling of keys nor anything like that
assertEquals(expectedGetResponse.getSourceAsString(), parsedGetResponse.getSourceAsString());

}

public void testToXContent() throws IOException {

@@ -67,13 +66,13 @@ public class GetResponseTests extends ESTestCase {
GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", 1, true, new BytesArray("{ \"field1\" : " +
"\"value1\", \"field2\":\"value2\"}"), Collections.singletonMap("field1", new GetField("field1",
Collections.singletonList("value1")))));
String output = Strings.toString(getResponse, false);
String output = Strings.toString(getResponse);
assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"_version\":1,\"found\":true,\"_source\":{ \"field1\" " +
": \"value1\", \"field2\":\"value2\"},\"fields\":{\"field1\":[\"value1\"]}}", output);
}
{
GetResponse getResponse = new GetResponse(new GetResult("index", "type", "id", 1, false, null, null));
String output = Strings.toString(getResponse, false);
String output = Strings.toString(getResponse);
assertEquals("{\"_index\":\"index\",\"_type\":\"type\",\"_id\":\"id\",\"found\":false}", output);
}
}

@@ -25,13 +25,9 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryParser;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
import org.elasticsearch.search.SearchRequestParsers;

@@ -144,11 +140,6 @@ public class MultiSearchRequestTests extends ESTestCase {
new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz"))
});

XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
response.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();

assertEquals("{\"responses\":["
+ "{"
+ "\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"}],"

@@ -159,7 +150,7 @@ public class MultiSearchRequestTests extends ESTestCase {
+ "\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"},\"status\":500"
+ "}"
+ "]}",
builder.string());
Strings.toString(response));
}

public void testMaxConcurrentSearchRequests() {

@@ -134,7 +134,7 @@ public class ReplicationResponseTests extends ESTestCase {
final XContentType xContentType = randomFrom(XContentType.values());

final ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(5, 3);
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType, true);
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType);

// Expected JSON is {"_shards":{"total":5,"successful":3,"failed":0}}
try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) {

@@ -164,7 +164,7 @@ public class ReplicationResponseTests extends ESTestCase {
final XContentType xContentType = randomFrom(XContentType.values());

final ReplicationResponse.ShardInfo shardInfo = new ReplicationResponse.ShardInfo(randomIntBetween(1, 5), randomIntBetween(1, 5));
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType, true);
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType);

ReplicationResponse.ShardInfo parsedShardInfo;
try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) {

@@ -177,7 +177,7 @@ public class ReplicationResponseTests extends ESTestCase {
// We can use assertEquals because the shardInfo doesn't have a failure (and exceptions)
assertEquals(shardInfo, parsedShardInfo);

BytesReference parsedShardInfoBytes = XContentHelper.toXContent(parsedShardInfo, xContentType, true);
BytesReference parsedShardInfoBytes = XContentHelper.toXContent(parsedShardInfo, xContentType);
assertEquals(shardInfoBytes, parsedShardInfoBytes);
}

@@ -185,7 +185,7 @@ public class ReplicationResponseTests extends ESTestCase {
final XContentType xContentType = randomFrom(XContentType.values());

final ReplicationResponse.ShardInfo shardInfo = randomShardInfo();
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType, true);
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType);

try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) {
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());

@@ -226,7 +226,7 @@ public class ReplicationResponseTests extends ESTestCase {
final XContentType xContentType = randomFrom(XContentType.values());

final ReplicationResponse.ShardInfo shardInfo = randomShardInfo();
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType, true);
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfo, xContentType);

ReplicationResponse.ShardInfo parsedShardInfo;
try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) {

@@ -267,7 +267,7 @@ public class ReplicationResponseTests extends ESTestCase {
final XContentType xContentType = randomFrom(XContentType.values());

final ReplicationResponse.ShardInfo.Failure shardInfoFailure = randomFailure();
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfoFailure, xContentType, false);
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfoFailure, xContentType);

try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) {
assertFailure(parser, shardInfoFailure);

@@ -278,7 +278,7 @@ public class ReplicationResponseTests extends ESTestCase {
final XContentType xContentType = randomFrom(XContentType.values());

final ReplicationResponse.ShardInfo.Failure shardInfoFailure = randomFailure();
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfoFailure, xContentType, false);
final BytesReference shardInfoBytes = XContentHelper.toXContent(shardInfoFailure, xContentType);

ReplicationResponse.ShardInfo.Failure parsedFailure;
try (XContentParser parser = createParser(xContentType.xContent(), shardInfoBytes)) {

@@ -116,7 +116,7 @@ public class TransportWriteActionTests extends ESTestCase {
Result result = action.apply(new TestAction(), request, indexShard);
CapturingActionListener<Response> listener = new CapturingActionListener<>();
responder.accept(result, listener);
assertNull(listener.response); // Haven't reallresponded yet
assertNull(listener.response); // Haven't responded yet

@SuppressWarnings({ "unchecked", "rawtypes" })
ArgumentCaptor<Consumer<Boolean>> refreshListener = ArgumentCaptor.forClass((Class) Consumer.class);

@@ -137,9 +137,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
assertThat(iterator.next(), Matchers.nullValue());

XContentBuilder xBuilder = XContentFactory.jsonBuilder();
xBuilder.startObject();
response.toXContent(xBuilder, null);
xBuilder.endObject();
String utf8 = xBuilder.bytes().utf8ToString().replaceFirst("\"took\":\\d+,", "");
String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ i

@@ -193,9 +191,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
assertThat(iterator.next(), Matchers.nullValue());

XContentBuilder xBuilder = XContentFactory.jsonBuilder();
xBuilder.startObject();
response.toXContent(xBuilder, null);
xBuilder.endObject();
String utf8 = xBuilder.bytes().utf8ToString().replaceFirst("\"took\":\\d+,", "");
String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ i

@@ -252,9 +248,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
assertThat(iterator.next(), Matchers.nullValue());

XContentBuilder xBuilder = XContentFactory.jsonBuilder();
xBuilder.startObject();
response.toXContent(xBuilder, ToXContent.EMPTY_PARAMS);
xBuilder.endObject();
String utf8 = xBuilder.bytes().utf8ToString().replaceFirst("\"took\":\\d+,", "");
String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ i

@@ -82,7 +82,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(actionGet.getIndex(), equalTo("test"));
assertThat(actionGet.isExists(), equalTo(false));
// check response is nevertheless serializable to json
actionGet.toXContent(jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS);
actionGet.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS);
}
}

@@ -152,8 +152,8 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
ExitCodes.OK,
true,
output -> {},
(foreground, pidFile, quiet, esSettings) -> {
Map<String, String> settings = esSettings.getAsMap();
(foreground, pidFile, quiet, env) -> {
Map<String, String> settings = env.settings().getAsMap();
assertThat(settings, hasEntry("foo", "bar"));
assertThat(settings, hasEntry("baz", "qux"));
},

@@ -46,6 +46,33 @@ public class TerminalTests extends ESTestCase {
assertPrinted(terminal, Terminal.Verbosity.NORMAL, "This message contains percent like %20n");
}

public void testPromptYesNoDefault() throws Exception {
MockTerminal terminal = new MockTerminal();
terminal.addTextInput("");
assertTrue(terminal.promptYesNo("Answer?", true));
terminal.addTextInput("");
assertFalse(terminal.promptYesNo("Answer?", false));
}

public void testPromptYesNoReprompt() throws Exception {
MockTerminal terminal = new MockTerminal();
terminal.addTextInput("blah");
terminal.addTextInput("y");
assertTrue(terminal.promptYesNo("Answer? [Y/n]\nDid not understand answer 'blah'\nAnswer? [Y/n]", true));
}

public void testPromptYesNoCase() throws Exception {
MockTerminal terminal = new MockTerminal();
terminal.addTextInput("Y");
assertTrue(terminal.promptYesNo("Answer?", false));
terminal.addTextInput("y");
assertTrue(terminal.promptYesNo("Answer?", false));
terminal.addTextInput("N");
assertFalse(terminal.promptYesNo("Answer?", true));
terminal.addTextInput("n");
assertFalse(terminal.promptYesNo("Answer?", true));
}

private void assertPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception {
logTerminal.println(verbosity, text);
String output = logTerminal.getOutput();

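These prompt tests pin down the promptYesNo contract: an empty answer selects the supplied default, unrecognized input triggers a re-prompt, and y/Y and n/N are accepted in either case. A condensed usage sketch (the prompt text is a placeholder):

MockTerminal terminal = new MockTerminal();
terminal.addTextInput(""); // simulate the user just hitting enter
boolean overwrite = terminal.promptYesNo("Overwrite?", false); // empty input -> default -> false
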
@@ -38,13 +38,16 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.gateway.TestGatewayAllocator;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;

import static java.util.Collections.emptyMap;
import static java.util.Collections.min;
import static org.hamcrest.Matchers.endsWith;

public class MetaDataCreateIndexServiceTests extends ESTestCase {

@@ -150,11 +153,20 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
public void testShrinkIndexSettings() {
String indexName = randomAsciiOfLength(10);
List<Version> versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()),
VersionUtils.randomVersion(random()));
versions.sort((l, r) -> Long.compare(l.id, r.id));
Version version = versions.get(0);
Version minCompat = versions.get(1);
Version upgraded = versions.get(2);
// create one that won't fail
ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0,
Settings.builder()
.put("index.blocks.write", true)
.put("index.similarity.default.type", "BM25")
.put("index.version.created", version)
.put("index.version.upgraded", upgraded)
.put("index.version.minimum_compatible", minCompat.luceneVersion)
.put("index.analysis.analyzer.my_analyzer.tokenizer", "keyword")
.build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();

@@ -177,6 +189,10 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
"keyword", builder.build().get("index.analysis.analyzer.my_analyzer.tokenizer"));
assertEquals("node1", builder.build().get("index.routing.allocation.initial_recovery._id"));
assertEquals("1", builder.build().get("index.allocation.max_retries"));
assertEquals(version, builder.build().getAsVersion("index.version.created", null));
assertEquals(upgraded, builder.build().getAsVersion("index.version.upgraded", null));
assertEquals(minCompat.luceneVersion.toString(), builder.build().get("index.version.minimum_compatible", null));

}

private DiscoveryNode newNode(String nodeId) {

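The added assertions spell out which settings the shrink path carries over to the target index: similarity and analysis settings, the created/upgraded version markers and the minimum compatible Lucene version, plus the generated index.routing.allocation.initial_recovery._id and index.allocation.max_retries values. A sketch of reading them back with the same Settings API (the builder here stands in for the shrunken index's settings builder from the test):

Settings target = builder.build();
// the version-typed accessor parses the stored version id back into a Version
Version created = target.getAsVersion("index.version.created", null);
Version upgraded = target.getAsVersion("index.version.upgraded", null);
String minCompatLucene = target.get("index.version.minimum_compatible", null);
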
@@ -56,7 +56,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase {
AllocationService strategy = createAllocationService();
clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState, "reroute").routingTable()).build();

String clusterStateString = Strings.toString(clusterState, true);
String clusterStateString = Strings.toString(clusterState);
assertNotNull(clusterStateString);

assertThat(clusterStateString, containsString("test_idx"));

@@ -21,11 +21,9 @@ package org.elasticsearch.common;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

import static org.hamcrest.Matchers.containsString;

public class StringsTests extends ESTestCase {

@@ -58,21 +56,36 @@ public class StringsTests extends ESTestCase {
assertEquals("", Strings.cleanTruncate("foo", 0));
}

public void testEvilToString() {
ToXContent needsEnclosingObject = new ToXContent() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.field("ok", "here").field("catastrophe", "");
public void testToStringToXContent() {
final ToXContent toXContent;
final boolean error;
if (randomBoolean()) {
if (randomBoolean()) {
error = false;
toXContent = (builder, params) -> builder.field("ok", "here").field("catastrophe", "");
} else {
error = true;
toXContent = (builder, params) ->
builder.startObject().field("ok", "here").field("catastrophe", "").endObject();
}
};
String toString = Strings.toString(needsEnclosingObject);
assertThat(toString, containsString("Error building toString out of XContent"));
assertThat(toString, containsString("Can not write a field name, expecting a value"));
} else {
if (randomBoolean()) {
error = false;
toXContent = (ToXContentObject) (builder, params) ->
builder.startObject().field("ok", "here").field("catastrophe", "").endObject();
} else {
error = true;
toXContent = (ToXContentObject) (builder, params) -> builder.field("ok", "here").field("catastrophe", "");
}
}

// We can salvage it!
toString = Strings.toString(needsEnclosingObject, true);
assertThat(toString, containsString("\"ok\":\"here\""));
assertThat(toString, containsString("\"catastrophe\":\"\""));
String toString = Strings.toString(toXContent);
if (error) {
assertThat(toString, containsString("Error building toString out of XContent"));
} else {
assertThat(toString, containsString("\"ok\":\"here\""));
assertThat(toString, containsString("\"catastrophe\":\"\""));
}
}

public void testSplitStringToSet() {

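The rewritten test turns on the fragment/object split: a plain ToXContent may emit bare fields and relies on the caller to supply the enclosing object, while a ToXContentObject must start and end a complete object itself. Strings.toString has to wrap the former and pass the latter through, and the test drives a succeeding and a failing lambda for each shape. The two shapes side by side (field names are the same placeholders the test uses):

// fragment: the caller provides the enclosing object
ToXContent fragment = (builder, params) -> builder.field("ok", "here");
// self-contained object: starts and ends its own object
ToXContentObject object = (builder, params) -> builder.startObject().field("ok", "here").endObject();
String wrapped = Strings.toString(fragment); // Strings.toString supplies the object wrapper
String direct = Strings.toString(object); // emitted as-is
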
@@ -0,0 +1,133 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.settings;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.env.Environment;

import static org.hamcrest.Matchers.containsString;

public class AddStringKeyStoreCommandTests extends KeyStoreCommandTestCase {
InputStream input;

@Override
protected Command newCommand() {
return new AddStringKeyStoreCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
return env;
}
@Override
InputStream getStdin() {
return input;
}
};
}

public void testMissing() throws Exception {
UserException e = expectThrows(UserException.class, this::execute);
assertEquals(ExitCodes.DATA_ERROR, e.exitCode);
assertThat(e.getMessage(), containsString("keystore not found"));
}

public void testOverwritePromptDefault() throws Exception {
createKeystore("", "foo", "bar");
terminal.addTextInput("");
execute("foo");
assertSecureString("foo", "bar");
}

public void testOverwritePromptExplicitNo() throws Exception {
createKeystore("", "foo", "bar");
terminal.addTextInput("n"); // explicit no
execute("foo");
assertSecureString("foo", "bar");
}

public void testOverwritePromptExplicitYes() throws Exception {
createKeystore("", "foo", "bar");
terminal.addTextInput("y");
terminal.addSecretInput("newvalue");
execute("foo");
assertSecureString("foo", "newvalue");
}

public void testOverwriteForceShort() throws Exception {
createKeystore("", "foo", "bar");
terminal.addSecretInput("newvalue");
execute("-f", "foo"); // force
assertSecureString("foo", "newvalue");
}

public void testOverwriteForceLong() throws Exception {
createKeystore("", "foo", "bar");
terminal.addSecretInput("and yet another secret value");
execute("--force", "foo"); // force
assertSecureString("foo", "and yet another secret value");
}

public void testForceNonExistent() throws Exception {
createKeystore("");
terminal.addSecretInput("value");
execute("--force", "foo"); // force
assertSecureString("foo", "value");
}

public void testPromptForValue() throws Exception {
KeyStoreWrapper.create(new char[0]).save(env.configFile());
terminal.addSecretInput("secret value");
execute("foo");
assertSecureString("foo", "secret value");
}

public void testStdinShort() throws Exception {
KeyStoreWrapper.create(new char[0]).save(env.configFile());
setInput("secret value 1");
execute("-x", "foo");
assertSecureString("foo", "secret value 1");
}

public void testStdinLong() throws Exception {
KeyStoreWrapper.create(new char[0]).save(env.configFile());
setInput("secret value 2");
execute("--stdin", "foo");
assertSecureString("foo", "secret value 2");
}

public void testNonAsciiValue() throws Exception {
KeyStoreWrapper.create(new char[0]).save(env.configFile());
terminal.addSecretInput("non-äsčîï");
UserException e = expectThrows(UserException.class, () -> execute("foo"));
assertEquals(ExitCodes.DATA_ERROR, e.exitCode);
assertEquals("String value must contain only ASCII", e.getMessage());
}

void setInput(String inputStr) {
input = new ByteArrayInputStream(inputStr.getBytes(StandardCharsets.UTF_8));
}
}

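Read together, these tests document the add-string behavior end to end: an interactive prompt (with the overwrite confirmation defaulting to keeping the existing value), -f/--force to skip the confirmation, -x/--stdin to read the value from standard input, and a hard ASCII-only restriction on values. The overwrite interaction in one place, exactly as the tests drive it:

createKeystore("", "foo", "bar"); // keystore with empty password and setting foo=bar
terminal.addTextInput("y"); // confirm the overwrite prompt
terminal.addSecretInput("newvalue"); // value typed at the secret prompt
execute("foo"); // runs AddStringKeyStoreCommand against setting "foo"
assertSecureString("foo", "newvalue");
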
@@ -0,0 +1,73 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.settings;

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;

import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.env.Environment;

public class CreateKeyStoreCommandTests extends KeyStoreCommandTestCase {

@Override
protected Command newCommand() {
return new CreateKeyStoreCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
return env;
}
};
}

public void testPosix() throws Exception {
execute();
Path configDir = env.configFile();
assertNotNull(KeyStoreWrapper.load(configDir));
}

public void testNotPosix() throws Exception {
setupEnv(false);
execute();
Path configDir = env.configFile();
assertNotNull(KeyStoreWrapper.load(configDir));
}

public void testOverwrite() throws Exception {
Path keystoreFile = KeyStoreWrapper.keystorePath(env.configFile());
byte[] content = "not a keystore".getBytes(StandardCharsets.UTF_8);
Files.write(keystoreFile, content);

terminal.addTextInput(""); // default is no
execute();
assertArrayEquals(content, Files.readAllBytes(keystoreFile));

terminal.addTextInput("n"); // explicit no
execute();
assertArrayEquals(content, Files.readAllBytes(keystoreFile));

terminal.addTextInput("y");
execute();
assertNotNull(KeyStoreWrapper.load(env.configFile()));
}
}

@ -0,0 +1,97 @@
|
|||
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.io.IOException;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;

import com.google.common.jimfs.Configuration;
import com.google.common.jimfs.Jimfs;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.cli.CommandTestCase;
import org.elasticsearch.common.io.PathUtilsForTesting;
import org.elasticsearch.env.Environment;
import org.junit.After;
import org.junit.Before;

/**
 * Base test case for manipulating the ES keystore.
 */
@LuceneTestCase.SuppressFileSystems("*") // we do our own mocking
public abstract class KeyStoreCommandTestCase extends CommandTestCase {

    Environment env;

    List<FileSystem> fileSystems = new ArrayList<>();

    @After
    public void closeMockFileSystems() throws IOException {
        IOUtils.close(fileSystems);
    }

    @Before
    public void setupEnv() throws IOException {
        setupEnv(true); // default to posix, but tests may call setupEnv(false) to override
    }

    void setupEnv(boolean posix) throws IOException {
        final Configuration configuration;
        if (posix) {
            configuration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build();
        } else {
            configuration = Configuration.unix();
        }
        FileSystem fs = Jimfs.newFileSystem(configuration);
        fileSystems.add(fs);
        PathUtilsForTesting.installMock(fs); // restored by restoreFileSystem in ESTestCase
        Path home = fs.getPath("/", "test-home");
        Files.createDirectories(home.resolve("config"));
        env = new Environment(Settings.builder().put("path.home", home).build());
    }

    KeyStoreWrapper createKeystore(String password, String... settings) throws Exception {
        KeyStoreWrapper keystore = KeyStoreWrapper.create(password.toCharArray());
        assertEquals(0, settings.length % 2);
        for (int i = 0; i < settings.length; i += 2) {
            keystore.setStringSetting(settings[i], settings[i + 1].toCharArray());
        }
        keystore.save(env.configFile());
        return keystore;
    }

    KeyStoreWrapper loadKeystore(String password) throws Exception {
        KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile());
        keystore.decrypt(password.toCharArray());
        return keystore;
    }

    void assertSecureString(String setting, String value) throws Exception {
        assertSecureString(loadKeystore(""), setting, value);
    }

    void assertSecureString(KeyStoreWrapper keystore, String setting, String value) throws Exception {
        assertEquals(value, keystore.getStringSetting(setting).toString());
    }
}
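The base class above is where the filesystem mocking happens: Jimfs supplies an in-memory filesystem, and the posix/non-posix split in setupEnv controls whether chmod-style permission attributes are available. A standalone sketch of the same pattern, using only the Jimfs API calls seen above:

    import java.nio.file.FileSystem;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import com.google.common.jimfs.Configuration;
    import com.google.common.jimfs.Jimfs;

    public class JimfsSketch {
        public static void main(String[] args) throws Exception {
            // The "posix" and "unix" attribute views add owner/permission support;
            // plain Configuration.unix() omits them, which is the non-posix case.
            Configuration configuration = Configuration.unix().toBuilder()
                    .setAttributeViews("basic", "owner", "posix", "unix")
                    .build();
            try (FileSystem fs = Jimfs.newFileSystem(configuration)) { // in-memory, nothing touches disk
                Path home = fs.getPath("/", "test-home");
                Files.createDirectories(home.resolve("config"));
                System.out.println(Files.exists(home.resolve("config"))); // true
            }
        }
    }

Because the filesystem lives only in memory, closing it (here via try-with-resources, in the test via IOUtils.close in the @After hook) discards all state between tests.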
@@ -0,0 +1,67 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.util.Map;

import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.env.Environment;

import static org.hamcrest.Matchers.containsString;

public class ListKeyStoreCommandTests extends KeyStoreCommandTestCase {

    @Override
    protected Command newCommand() {
        return new ListKeyStoreCommand() {
            @Override
            protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
                return env;
            }
        };
    }

    public void testMissing() throws Exception {
        UserException e = expectThrows(UserException.class, this::execute);
        assertEquals(ExitCodes.DATA_ERROR, e.exitCode);
        assertThat(e.getMessage(), containsString("keystore not found"));
    }

    public void testEmpty() throws Exception {
        createKeystore("");
        execute();
        assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty());
    }

    public void testOne() throws Exception {
        createKeystore("", "foo", "bar");
        execute();
        assertEquals("foo\n", terminal.getOutput());
    }

    public void testMultiple() throws Exception {
        createKeystore("", "foo", "1", "baz", "2", "bar", "3");
        execute();
        assertEquals("bar\nbaz\nfoo\n", terminal.getOutput()); // sorted
    }
}
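Note that testMultiple pins down an ordering guarantee: entries come back sorted by name regardless of insertion order. A presumed sketch of a listing loop that would produce exactly "bar\nbaz\nfoo\n" (not the shipped ListKeyStoreCommand; getSettings() returning a Set<String> is taken from the removal tests below, and java.util.Collections is assumed):

    // Presumed shape of the list command's output (sketch, not the shipped code).
    List<String> names = new ArrayList<>(keystore.getSettings()); // Set<String>, unordered
    Collections.sort(names);                                      // hence "bar", "baz", "foo"
    for (String name : names) {
        terminal.println(name);                                   // one trailing '\n' per entry
    }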
@@ -0,0 +1,83 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import java.util.Map;
import java.util.Set;

import org.elasticsearch.cli.Command;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.env.Environment;

import static org.hamcrest.Matchers.containsString;

public class RemoveSettingKeyStoreCommandTests extends KeyStoreCommandTestCase {

    @Override
    protected Command newCommand() {
        return new RemoveSettingKeyStoreCommand() {
            @Override
            protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
                return env;
            }
        };
    }

    public void testMissing() throws Exception {
        UserException e = expectThrows(UserException.class, () -> execute("foo"));
        assertEquals(ExitCodes.DATA_ERROR, e.exitCode);
        assertThat(e.getMessage(), containsString("keystore not found"));
    }

    public void testNoSettings() throws Exception {
        createKeystore("");
        UserException e = expectThrows(UserException.class, this::execute);
        assertEquals(ExitCodes.USAGE, e.exitCode);
        assertThat(e.getMessage(), containsString("Must supply at least one setting"));
    }

    public void testNonExistentSetting() throws Exception {
        createKeystore("");
        UserException e = expectThrows(UserException.class, () -> execute("foo"));
        assertEquals(ExitCodes.CONFIG, e.exitCode);
        assertThat(e.getMessage(), containsString("[foo] does not exist"));
    }

    public void testOne() throws Exception {
        createKeystore("", "foo", "bar");
        execute("foo");
        assertFalse(loadKeystore("").getSettings().contains("foo"));
    }

    public void testMany() throws Exception {
        createKeystore("", "foo", "1", "bar", "2", "baz", "3");
        execute("foo", "baz");
        Set<String> settings = loadKeystore("").getSettings();
        assertFalse(settings.contains("foo"));
        assertFalse(settings.contains("baz"));
        assertTrue(settings.contains("bar"));
        assertEquals(1, settings.size());
    }
}
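Taken together, the three failure tests document the command's error contract: a missing keystore is DATA_ERROR, no arguments is USAGE, and an unknown setting name is CONFIG. A presumed sketch of validation that satisfies all three assertions (not the shipped RemoveSettingKeyStoreCommand; `arguments` is a hypothetical List<String> of parsed positional args, keystore.remove is a hypothetical removal method, and the full message strings are illustrative beyond the fragments the tests check):

    // Presumed validation order (sketch): each branch matches one failure test above.
    if (arguments.isEmpty()) {
        throw new UserException(ExitCodes.USAGE, "Must supply at least one setting to remove");
    }
    KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile());
    if (keystore == null) {
        throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found");
    }
    keystore.decrypt(new char[0]); // the tests use an empty passphrase throughout
    for (String setting : arguments) {
        if (keystore.getSettings().contains(setting) == false) {
            throw new UserException(ExitCodes.CONFIG, "Setting [" + setting + "] does not exist in the keystore");
        }
        keystore.remove(setting); // hypothetical
    }
    keystore.save(env.configFile());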