[PURIFY] remove all trace of x-pack graph (#13)

This commit removes all traces of the Elastic-licensed graph feature

Signed-off-by: Peter Nied <petern@amazon.com>
This commit is contained in:
Nick Knize 2021-01-30 00:25:41 -06:00 committed by Peter Nied
parent 125958eb2c
commit 1cdbc63917
18 changed files with 0 additions and 1998 deletions

View File

@ -29,7 +29,6 @@
documentation that are so wide that they scroll.
-->
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]documentation[/\\]ClusterClientDocumentationIT.java" id="SnippetLength" />
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]documentation[/\\]GraphDocumentationIT.java" id="SnippetLength" />
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]documentation[/\\]CRUDDocumentationIT.java" id="SnippetLength" />
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]documentation[/\\]IndicesClientDocumentationIT.java" id="SnippetLength" />
<suppress files="client[/\\]rest-high-level[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]documentation[/\\]IngestClientDocumentationIT.java" id="SnippetLength" />

View File

@ -1,64 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.graph.GraphExploreRequest;
import org.elasticsearch.client.graph.GraphExploreResponse;
import java.io.IOException;
import static java.util.Collections.emptySet;
/**
 * A wrapper for the {@link RestHighLevelClient} that provides synchronous and
 * asynchronous access to the Graph explore API.
 */
public class GraphClient {

    private final RestHighLevelClient restHighLevelClient;

    GraphClient(RestHighLevelClient restHighLevelClient) {
        this.restHighLevelClient = restHighLevelClient;
    }

    /**
     * Executes an exploration request using the Graph API and blocks until the
     * parsed response is available.
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">Graph API
     * on elastic.co</a>.
     */
    public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest,
                                              RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(
            graphExploreRequest,
            GraphRequestConverters::explore,
            options,
            GraphExploreResponse::fromXContent,
            emptySet());
    }

    /**
     * Asynchronously executes an exploration request using the Graph API,
     * notifying the given listener with the parsed response or a failure.
     *
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">Graph API
     * on elastic.co</a>.
     * @return cancellable that may be used to cancel the request
     */
    public final Cancellable exploreAsync(GraphExploreRequest graphExploreRequest,
                                          RequestOptions options,
                                          ActionListener<GraphExploreResponse> listener) {
        return restHighLevelClient.performRequestAsyncAndParseEntity(
            graphExploreRequest,
            GraphRequestConverters::explore,
            options,
            GraphExploreResponse::fromXContent,
            listener,
            emptySet());
    }
}

View File

@ -1,37 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.client.graph.GraphExploreRequest;
import java.io.IOException;
/**
 * Utility conversions from high-level graph request objects to low-level
 * {@link Request}s understood by the REST client. Not instantiable.
 */
final class GraphRequestConverters {

    private GraphRequestConverters() {}

    /**
     * Builds a GET request against the {@code _graph/explore} endpoint of the
     * indices/types named in {@code exploreRequest}, with the request body
     * serialized as the entity.
     */
    static Request explore(GraphExploreRequest exploreRequest) throws IOException {
        final String endpoint = RequestConverters.endpoint(exploreRequest.indices(), exploreRequest.types(), "_graph/explore");
        final Request request = new Request(HttpGet.METHOD_NAME, endpoint);
        request.setEntity(RequestConverters.createEntity(exploreRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE));
        return request;
    }
}

View File

@ -261,7 +261,6 @@ public class RestHighLevelClient implements Closeable {
private final SnapshotClient snapshotClient = new SnapshotClient(this);
private final TasksClient tasksClient = new TasksClient(this);
private final WatcherClient watcherClient = new WatcherClient(this);
private final GraphClient graphClient = new GraphClient(this);
private final MigrationClient migrationClient = new MigrationClient(this);
private final MachineLearningClient machineLearningClient = new MachineLearningClient(this);
private final SecurityClient securityClient = new SecurityClient(this);
@ -380,16 +379,6 @@ public class RestHighLevelClient implements Closeable {
*/
public WatcherClient watcher() { return watcherClient; }
/**
* Provides methods for accessing the Elastic Licensed Graph explore API that
* is shipped with the default distribution of Elasticsearch. All of
* these APIs will 404 if run against the OSS distribution of Elasticsearch.
* <p>
* See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/graph-explore-api.html">
* Graph API on elastic.co</a> for more information.
*/
public GraphClient graph() { return graphClient; }
/**
* A wrapper for the {@link RestHighLevelClient} that provides methods for
* accessing the Elastic Index Lifecycle APIs.

View File

@ -1,210 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.graph;
import com.carrotsearch.hppc.ObjectIntHashMap;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.graph.Vertex.VertexId;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
 * A Connection links exactly two {@link Vertex} objects. The basis of a
 * connection is one or more documents have been found that contain
 * this pair of terms and the strength of the connection is recorded
 * as a weight.
 */
public class Connection {
    private Vertex from;
    private Vertex to;
    private double weight;
    private long docCount;

    public Connection(Vertex from, Vertex to, double weight, long docCount) {
        this.from = from;
        this.to = to;
        this.weight = weight;
        this.docCount = docCount;
    }

    // No-arg constructor kept for deserialization paths.
    Connection() {
    }

    /** @return a unique identifier built from the two endpoint vertex ids */
    public ConnectionId getId() {
        return new ConnectionId(from.getId(), to.getId());
    }

    public Vertex getFrom() {
        return from;
    }

    public Vertex getTo() {
        return to;
    }

    /**
     * @return a measure of the relative connectedness between a pair of {@link Vertex} objects
     */
    public double getWeight() {
        return weight;
    }

    /**
     * @return the number of documents in the sampled set that contained this
     * pair of {@link Vertex} objects.
     */
    public long getDocCount() {
        return docCount;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Connection other = (Connection) obj;
        // Double.compare keeps equals consistent with hashCode (which hashes the
        // weight via Double.hashCode/doubleToLongBits): a NaN weight now compares
        // equal to itself, unlike the primitive == operator which broke
        // reflexivity for NaN and made equal hashes disagree with equals.
        return docCount == other.docCount &&
            Double.compare(weight, other.weight) == 0 &&
            Objects.equals(to, other.to) &&
            Objects.equals(from, other.from);
    }

    @Override
    public int hashCode() {
        return Objects.hash(docCount, weight, from, to);
    }

    private static final ParseField SOURCE = new ParseField("source");
    private static final ParseField TARGET = new ParseField("target");
    private static final ParseField WEIGHT = new ParseField("weight");
    private static final ParseField DOC_COUNT = new ParseField("doc_count");

    /**
     * Serializes this connection, referencing its endpoints by the integer
     * indices assigned to each vertex in {@code vertexNumbers} rather than by
     * value.
     */
    void toXContent(XContentBuilder builder, Params params, ObjectIntHashMap<Vertex> vertexNumbers) throws IOException {
        builder.field(SOURCE.getPreferredName(), vertexNumbers.get(from));
        builder.field(TARGET.getPreferredName(), vertexNumbers.get(to));
        builder.field(WEIGHT.getPreferredName(), weight);
        builder.field(DOC_COUNT.getPreferredName(), docCount);
    }

    // When deserializing from XContent we need to wait for all vertices to be loaded before
    // Connection objects can be created that reference them. This class provides the interim
    // state for connections.
    static class UnresolvedConnection {
        int fromIndex;
        int toIndex;
        double weight;
        long docCount;

        UnresolvedConnection(int fromIndex, int toIndex, double weight, long docCount) {
            super();
            this.fromIndex = fromIndex;
            this.toIndex = toIndex;
            this.weight = weight;
            this.docCount = docCount;
        }

        /** Resolves the stored indices against the fully-parsed vertex list. */
        public Connection resolve(List<Vertex> vertices) {
            return new Connection(vertices.get(fromIndex), vertices.get(toIndex), weight, docCount);
        }

        private static final ConstructingObjectParser<UnresolvedConnection, Void> PARSER = new ConstructingObjectParser<>(
            "ConnectionParser", true,
            args -> {
                int source = (Integer) args[0];
                int target = (Integer) args[1];
                double weight = (Double) args[2];
                long docCount = (Long) args[3];
                return new UnresolvedConnection(source, target, weight, docCount);
            });
        static {
            PARSER.declareInt(constructorArg(), SOURCE);
            PARSER.declareInt(constructorArg(), TARGET);
            PARSER.declareDouble(constructorArg(), WEIGHT);
            PARSER.declareLong(constructorArg(), DOC_COUNT);
        }

        static UnresolvedConnection fromXContent(XContentParser parser) throws IOException {
            return PARSER.apply(parser, null);
        }
    }

    /**
     * An identifier (implements hashcode and equals) that represents a
     * unique key for a {@link Connection}
     */
    public static class ConnectionId {
        private final VertexId source;
        private final VertexId target;

        public ConnectionId(VertexId source, VertexId target) {
            this.source = source;
            this.target = target;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            ConnectionId other = (ConnectionId) o;
            // Objects.equals is null-safe and replaces the hand-rolled
            // null-check chains without changing the result.
            return Objects.equals(source, other.source) && Objects.equals(target, other.target);
        }

        @Override
        public int hashCode() {
            return Objects.hash(source, target);
        }

        public VertexId getSource() {
            return source;
        }

        public VertexId getTarget() {
            return target;
        }

        @Override
        public String toString() {
            return getSource() + "->" + getTarget();
        }
    }
}

View File

@ -1,354 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.graph;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Validatable;
import org.elasticsearch.client.ValidationException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.SignificantTerms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
/**
 * Holds the criteria required to guide the exploration of connected terms which
 * can be returned as a graph.
 */
public class GraphExploreRequest implements IndicesRequest.Replaceable, ToXContentObject, Validatable {
    public static final String NO_HOPS_ERROR_MESSAGE = "Graph explore request must have at least one hop";
    public static final String NO_VERTICES_ERROR_MESSAGE = "Graph explore hop must have at least one VertexRequest";

    // Target indices; empty array means "all indices".
    private String[] indices = Strings.EMPTY_ARRAY;
    // ignoreUnavailable=false, allowNoIndices=false, expandToOpen=true, expandToClosed=false.
    private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
    private String[] types = Strings.EMPTY_ARRAY;
    private String routing;
    private TimeValue timeout;

    // Shard-level sample size for each hop; see sampleSize(int) for guidance.
    private int sampleSize = SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE;
    private String sampleDiversityField;
    private int maxDocsPerDiversityValue;
    // true = term selection via significance, false = popularity; see useSignificance(boolean).
    private boolean useSignificance = true;
    private boolean returnDetailedInfo;

    // Ordered exploration stages; each hop after the first links to its parent.
    private List<Hop> hops = new ArrayList<>();

    public GraphExploreRequest() {
    }

    /**
     * Constructs a new graph request to run against the provided indices. No
     * indices means it will run against all indices.
     */
    public GraphExploreRequest(String... indices) {
        this.indices = indices;
    }

    /**
     * Validates that at least one hop is present and that every hop has (or
     * inherits) at least one vertex request.
     *
     * @return empty when valid, otherwise the accumulated validation errors
     */
    @Override
    public Optional<ValidationException> validate() {
        ValidationException validationException = new ValidationException();
        if (hops.size() == 0) {
            validationException.addValidationError(NO_HOPS_ERROR_MESSAGE);
        }
        for (Hop hop : hops) {
            hop.validate(validationException);
        }
        return validationException.validationErrors().isEmpty() ? Optional.empty() : Optional.of(validationException);
    }

    @Override
    public String[] indices() {
        return this.indices;
    }

    @Override
    public GraphExploreRequest indices(String... indices) {
        this.indices = indices;
        return this;
    }

    @Override
    public IndicesOptions indicesOptions() {
        return indicesOptions;
    }

    public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) {
        if (indicesOptions == null) {
            throw new IllegalArgumentException("IndicesOptions must not be null");
        }
        this.indicesOptions = indicesOptions;
        return this;
    }

    /**
     * The document types to execute the explore against. Defaults to be executed against
     * all types.
     *
     * @deprecated Types are in the process of being removed. Instead of using a type, prefer to
     * filter on a field on the document.
     */
    @Deprecated
    public String[] types() {
        return this.types;
    }

    /**
     * The document types to execute the explore request against. Defaults to be executed against
     * all types.
     *
     * @deprecated Types are in the process of being removed. Instead of using a type, prefer to
     * filter on a field on the document.
     */
    @Deprecated
    public GraphExploreRequest types(String... types) {
        this.types = types;
        return this;
    }

    public String routing() {
        return this.routing;
    }

    public GraphExploreRequest routing(String routing) {
        this.routing = routing;
        return this;
    }

    /** Sets routing as a comma-delimited join of the given values. */
    public GraphExploreRequest routing(String... routings) {
        this.routing = Strings.arrayToCommaDelimitedString(routings);
        return this;
    }

    public TimeValue timeout() {
        return timeout;
    }

    /**
     * Graph exploration can be set to timeout after the given period. Search
     * operations involved in each hop are limited to the remaining time
     * available but can still overrun due to the nature of their "best efforts"
     * timeout support. When a timeout occurs partial results are returned.
     *
     * @param timeout
     *            a {@link TimeValue} object which determines the maximum length
     *            of time to spend exploring
     */
    public GraphExploreRequest timeout(TimeValue timeout) {
        if (timeout == null) {
            throw new IllegalArgumentException("timeout must not be null");
        }
        this.timeout = timeout;
        return this;
    }

    /** Parses the string (e.g. "5s") and delegates to {@link #timeout(TimeValue)}. */
    public GraphExploreRequest timeout(String timeout) {
        timeout(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".timeout"));
        return this;
    }

    @Override
    public String toString() {
        return "graph explore [" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "]";
    }

    /**
     * The number of top-matching documents that are considered during each hop
     * (default is {@link SamplerAggregationBuilder#DEFAULT_SHARD_SAMPLE_SIZE}
     * Very small values (less than 50) may not provide sufficient
     * weight-of-evidence to identify significant connections between terms.
     * <p>
     * Very large values (many thousands) are not recommended with loosely
     * defined queries (fuzzy queries or those with many OR clauses). This is
     * because any useful signals in the best documents are diluted with
     * irrelevant noise from low-quality matches. Performance is also typically
     * better with smaller samples as there are less look-ups required for
     * background frequencies of terms found in the documents
     * </p>
     *
     * @param maxNumberOfDocsPerHop
     *            shard-level sample size in documents
     */
    public void sampleSize(int maxNumberOfDocsPerHop) {
        sampleSize = maxNumberOfDocsPerHop;
    }

    public int sampleSize() {
        return sampleSize;
    }

    /**
     * Optional choice of single-value field on which to diversify sampled
     * search results
     */
    public void sampleDiversityField(String name) {
        sampleDiversityField = name;
    }

    public String sampleDiversityField() {
        return sampleDiversityField;
    }

    /**
     * Optional number of permitted docs with same value in sampled search
     * results. Must also declare which field using sampleDiversityField
     */
    public void maxDocsPerDiversityValue(int maxDocs) {
        this.maxDocsPerDiversityValue = maxDocs;
    }

    public int maxDocsPerDiversityValue() {
        return maxDocsPerDiversityValue;
    }

    /**
     * Controls the choice of algorithm used to select interesting terms. The
     * default value is true which means terms are selected based on
     * significance (see the {@link SignificantTerms} aggregation) rather than
     * popularity (using the {@link TermsAggregator}).
     *
     * @param value
     *            true if the significant_terms algorithm should be used.
     */
    public void useSignificance(boolean value) {
        this.useSignificance = value;
    }

    public boolean useSignificance() {
        return useSignificance;
    }

    /**
     * Return detailed information about vertex frequencies as part of JSON
     * results - defaults to false
     *
     * @param value
     *            true if detailed information is required in JSON responses
     */
    public void returnDetailedInfo(boolean value) {
        this.returnDetailedInfo = value;
    }

    public boolean returnDetailedInfo() {
        return returnDetailedInfo;
    }

    /**
     * Add a stage in the graph exploration. Each hop represents a stage of
     * querying elasticsearch to identify terms which can then be connected to
     * other terms in a subsequent hop.
     *
     * @param guidingQuery
     *            optional choice of query which influences which documents are
     *            considered in this stage
     * @return a {@link Hop} object that holds settings for a stage in the graph
     *         exploration
     */
    public Hop createNextHop(QueryBuilder guidingQuery) {
        // Each new hop is chained to the previously-added one (null for the first),
        // which is how inheritance of vertex requests between stages is wired up.
        Hop parent = null;
        if (hops.size() > 0) {
            parent = hops.get(hops.size() - 1);
        }
        Hop newHop = new Hop(parent);
        newHop.guidingQuery = guidingQuery;
        hops.add(newHop);
        return newHop;
    }

    public int getHopNumbers() {
        return hops.size();
    }

    public Hop getHop(int hopNumber) {
        return hops.get(hopNumber);
    }

    /** A term with an associated positive boost factor. */
    public static class TermBoost {
        String term;
        float boost;

        public TermBoost(String term, float boost) {
            super();
            this.term = term;
            if (boost <= 0) {
                throw new IllegalArgumentException("Boosts must be a positive non-zero number");
            }
            this.boost = boost;
        }

        TermBoost() {
        }

        public String getTerm() {
            return term;
        }

        public float getBoost() {
            return boost;
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.startObject("controls");
        {
            // Only emit values that differ from their defaults.
            if (sampleSize != SamplerAggregationBuilder.DEFAULT_SHARD_SAMPLE_SIZE) {
                builder.field("sample_size", sampleSize);
            }
            if (sampleDiversityField != null) {
                builder.startObject("sample_diversity");
                builder.field("field", sampleDiversityField);
                builder.field("max_docs_per_value", maxDocsPerDiversityValue);
                builder.endObject();
            }
            builder.field("use_significance", useSignificance);
            if (returnDetailedInfo) {
                builder.field("return_detailed_stats", returnDetailedInfo);
            }
        }
        builder.endObject();
        // Hops render as progressively-nested "connections" objects: every hop
        // after the first (parentHop != null) opens one here...
        for (Hop hop : hops) {
            if (hop.parentHop != null) {
                builder.startObject("connections");
            }
            hop.toXContent(builder, params);
        }
        // ...and this second pass emits one matching endObject() per non-root
        // hop, unwinding the nesting opened above. The two loops must stay
        // separate and in this order for the objects to balance.
        for (Hop hop : hops) {
            if (hop.parentHop != null) {
                builder.endObject();
            }
        }
        builder.endObject();
        return builder;
    }
}

View File

@ -1,211 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.graph;
import com.carrotsearch.hppc.ObjectIntHashMap;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.client.graph.Connection.ConnectionId;
import org.elasticsearch.client.graph.Connection.UnresolvedConnection;
import org.elasticsearch.client.graph.Vertex.VertexId;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
 * Graph explore response holds a graph of {@link Vertex} and {@link Connection} objects
 * (nodes and edges in common graph parlance).
 *
 * @see GraphExploreRequest
 */
public class GraphExploreResponse implements ToXContentObject {
    private long tookInMillis;
    private boolean timedOut = false;
    private ShardOperationFailedException[] shardFailures = ShardSearchFailure.EMPTY_ARRAY;
    // Graph nodes keyed by vertex id, edges keyed by (source, target) pair.
    private Map<VertexId, Vertex> vertices;
    private Map<ConnectionId, Connection> connections;
    private boolean returnDetailedInfo;

    // Param name passed down to Vertex serialization so it knows whether to
    // include detailed frequency stats.
    static final String RETURN_DETAILED_INFO_PARAM = "returnDetailedInfo";

    public GraphExploreResponse() {
    }

    public GraphExploreResponse(long tookInMillis, boolean timedOut, ShardOperationFailedException[] shardFailures,
            Map<VertexId, Vertex> vertices, Map<ConnectionId, Connection> connections, boolean returnDetailedInfo) {
        this.tookInMillis = tookInMillis;
        this.timedOut = timedOut;
        this.shardFailures = shardFailures;
        this.vertices = vertices;
        this.connections = connections;
        this.returnDetailedInfo = returnDetailedInfo;
    }

    public TimeValue getTook() {
        return new TimeValue(tookInMillis);
    }

    public long getTookInMillis() {
        return tookInMillis;
    }

    /**
     * @return true if the time stated in {@link GraphExploreRequest#timeout(TimeValue)} was exceeded
     * (not all hops may have been completed in this case)
     */
    public boolean isTimedOut() {
        return this.timedOut;
    }

    public ShardOperationFailedException[] getShardFailures() {
        return shardFailures;
    }

    public Collection<Connection> getConnections() {
        return connections.values();
    }

    public Collection<ConnectionId> getConnectionIds() {
        return connections.keySet();
    }

    public Connection getConnection(ConnectionId connectionId) {
        return connections.get(connectionId);
    }

    public Collection<Vertex> getVertices() {
        return vertices.values();
    }

    public Collection<VertexId> getVertexIds() {
        return vertices.keySet();
    }

    public Vertex getVertex(VertexId id) {
        return vertices.get(id);
    }

    public boolean isReturnDetailedInfo() {
        return returnDetailedInfo;
    }

    private static final ParseField TOOK = new ParseField("took");
    private static final ParseField TIMED_OUT = new ParseField("timed_out");
    private static final ParseField VERTICES = new ParseField("vertices");
    private static final ParseField CONNECTIONS = new ParseField("connections");
    private static final ParseField FAILURES = new ParseField("failures");

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(TOOK.getPreferredName(), tookInMillis);
        builder.field(TIMED_OUT.getPreferredName(), timedOut);

        builder.startArray(FAILURES.getPreferredName());
        if (shardFailures != null) {
            for (ShardOperationFailedException shardFailure : shardFailures) {
                shardFailure.toXContent(builder, params);
            }
        }
        builder.endArray();

        // Assign each vertex a sequential number in serialization order;
        // connections then refer to their endpoints by these numbers rather
        // than repeating the vertex data.
        ObjectIntHashMap<Vertex> vertexNumbers = new ObjectIntHashMap<>(vertices.size());

        Map<String, String> extraParams = new HashMap<>();
        extraParams.put(RETURN_DETAILED_INFO_PARAM, Boolean.toString(returnDetailedInfo));
        Params extendedParams = new DelegatingMapParams(extraParams, params);

        builder.startArray(VERTICES.getPreferredName());
        for (Vertex vertex : vertices.values()) {
            builder.startObject();
            vertexNumbers.put(vertex, vertexNumbers.size());
            vertex.toXContent(builder, extendedParams);
            builder.endObject();
        }
        builder.endArray();

        builder.startArray(CONNECTIONS.getPreferredName());
        for (Connection connection : connections.values()) {
            builder.startObject();
            connection.toXContent(builder, extendedParams, vertexNumbers);
            builder.endObject();
        }
        builder.endArray();
        builder.endObject();
        return builder;
    }

    private static final ConstructingObjectParser<GraphExploreResponse, Void> PARSER = new ConstructingObjectParser<>(
        "GraphExploreResponsenParser", true,
        args -> {
            GraphExploreResponse result = new GraphExploreResponse();
            result.vertices = new HashMap<>();
            result.connections = new HashMap<>();
            result.tookInMillis = (Long) args[0];
            result.timedOut = (Boolean) args[1];

            @SuppressWarnings("unchecked")
            List<Vertex> vertices = (List<Vertex>) args[2];
            @SuppressWarnings("unchecked")
            // Connections are parsed as UnresolvedConnection (index-based) and
            // resolved below once the full vertex list is available.
            List<UnresolvedConnection> unresolvedConnections = (List<UnresolvedConnection>) args[3];
            @SuppressWarnings("unchecked")
            List<ShardSearchFailure> failures = (List<ShardSearchFailure>) args[4];
            for (Vertex vertex : vertices) {
                // reverse-engineer if detailed stats were requested -
                // mainly here for testing framework's equality tests
                result.returnDetailedInfo = result.returnDetailedInfo || vertex.getFg() > 0;
                result.vertices.put(vertex.getId(), vertex);
            }
            for (UnresolvedConnection unresolvedConnection : unresolvedConnections) {
                Connection resolvedConnection = unresolvedConnection.resolve(vertices);
                result.connections.put(resolvedConnection.getId(), resolvedConnection);
            }
            if (failures.size() > 0) {
                result.shardFailures = failures.toArray(new ShardSearchFailure[failures.size()]);
            }
            return result;
        });
    static {
        PARSER.declareLong(constructorArg(), TOOK);
        PARSER.declareBoolean(constructorArg(), TIMED_OUT);
        PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Vertex.fromXContent(p), VERTICES);
        PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> UnresolvedConnection.fromXContent(p), CONNECTIONS);
        PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES);
    }

    public static GraphExploreResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }
}

View File

@ -1,142 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.graph;
import org.elasticsearch.client.ValidationException;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * A Hop represents one of potentially many stages in a graph exploration.
 * Each Hop identifies one or more fields in which it will attempt to find
 * terms that are significantly connected to the previous Hop. Each field is identified
 * using a {@link VertexRequest}
 *
 * <p>An example series of Hops on webserver logs would be:
 * <ol>
 * <li>an initial Hop to find
 * the top ten IPAddresses trying to access urls containing the word "admin"</li>
 * <li>a secondary Hop to see which other URLs those IPAddresses were trying to access</li>
 * </ol>
 *
 * <p>
 * Optionally, each hop can contain a "guiding query" that further limits the set of documents considered.
 * In our weblog example above we might choose to constrain the second hop to only look at log records that
 * had a response code of 404.
 * </p>
 * <p>
 * If absent, the list of {@link VertexRequest}s is inherited from the prior Hop's list to avoid repeating
 * the fields that will be examined at each stage.
 * </p>
 *
 */
public class Hop implements ToXContentFragment {
    final Hop parentHop;
    List<VertexRequest> vertices = null;
    QueryBuilder guidingQuery = null;

    public Hop(Hop parent) {
        this.parentHop = parent;
    }

    /**
     * Records a validation error if this hop has no vertex requests of its own
     * and none to inherit from an ancestor.
     */
    public void validate(ValidationException validationException) {
        if (getEffectiveVertexRequests().isEmpty()) {
            validationException.addValidationError(GraphExploreRequest.NO_VERTICES_ERROR_MESSAGE);
        }
    }

    public Hop getParentHop() {
        return parentHop;
    }

    /** @return the configured guiding query, or a match-all query when none was set */
    public QueryBuilder guidingQuery() {
        return guidingQuery == null ? QueryBuilders.matchAllQuery() : guidingQuery;
    }

    /**
     * Add a field in which this {@link Hop} will look for terms that are highly linked to
     * previous hops and optionally the guiding query.
     *
     * @param fieldName a field in the chosen index
     */
    public VertexRequest addVertexRequest(String fieldName) {
        if (vertices == null) {
            vertices = new ArrayList<>();
        }
        VertexRequest vertexRequest = new VertexRequest();
        vertexRequest.fieldName(fieldName);
        vertices.add(vertexRequest);
        return vertexRequest;
    }

    /**
     * An optional parameter that focuses the exploration on documents that
     * match the given query.
     *
     * @param queryBuilder any query
     */
    public void guidingQuery(QueryBuilder queryBuilder) {
        guidingQuery = queryBuilder;
    }

    /**
     * @return this hop's own vertex requests if set, otherwise the nearest
     * ancestor's (walking up the parent chain); empty when none exist anywhere.
     */
    protected List<VertexRequest> getEffectiveVertexRequests() {
        if (vertices != null) {
            return vertices;
        }
        return parentHop == null ? Collections.emptyList() : parentHop.getEffectiveVertexRequests();
    }

    public int getNumberVertexRequests() {
        return getEffectiveVertexRequests().size();
    }

    public VertexRequest getVertexRequest(int requestNumber) {
        return getEffectiveVertexRequests().get(requestNumber);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        // Note: only explicitly-set vertices are serialized; inherited ones are
        // reconstructed server-side from the parent hop.
        if (guidingQuery != null) {
            builder.field("query");
            guidingQuery.toXContent(builder, params);
        }
        if (vertices != null && !vertices.isEmpty()) {
            builder.startArray("vertices");
            for (VertexRequest vertexRequest : vertices) {
                vertexRequest.toXContent(builder, params);
            }
            builder.endArray();
        }
        return builder;
    }
}

View File

@ -1,261 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.graph;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* A vertex in a graph response represents a single term (a field and value pair)
* which appears in one or more documents found as part of the graph exploration.
*
* A vertex term could be a bank account number, an email address, a hashtag or any
* other term that appears in documents and is interesting to represent in a network.
*/
public class Vertex implements ToXContentFragment {

    private final String field;
    private final String term;
    // mutable: accumulated as connections are discovered during exploration
    private double weight;
    private final int depth;
    // background count: docs in the whole index containing this term (0 unless detailed info requested)
    private final long bg;
    // foreground count: docs in the sample of best-matching docs containing this term (0 unless detailed info requested)
    private long fg;

    private static final ParseField FIELD = new ParseField("field");
    private static final ParseField TERM = new ParseField("term");
    private static final ParseField WEIGHT = new ParseField("weight");
    private static final ParseField DEPTH = new ParseField("depth");
    private static final ParseField FG = new ParseField("fg");
    private static final ParseField BG = new ParseField("bg");

    public Vertex(String field, String term, double weight, int depth, long bg, long fg) {
        super();
        this.field = field;
        this.term = term;
        this.weight = weight;
        this.depth = depth;
        this.bg = bg;
        this.fg = fg;
    }

    @Override
    public int hashCode() {
        return Objects.hash(field, term, weight, depth, bg, fg);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Vertex other = (Vertex) obj;
        // Double.compare (rather than ==) keeps equals consistent with hashCode:
        // Objects.hash boxes weight, and Double.hashCode follows doubleToLongBits
        // semantics (NaN equals NaN, -0.0 differs from 0.0).
        return depth == other.depth
            && Double.compare(weight, other.weight) == 0
            && bg == other.bg
            && fg == other.fg
            && Objects.equals(field, other.field)
            && Objects.equals(term, other.term);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        boolean returnDetailedInfo = params.paramAsBoolean(GraphExploreResponse.RETURN_DETAILED_INFO_PARAM, false);
        builder.field(FIELD.getPreferredName(), field);
        builder.field(TERM.getPreferredName(), term);
        builder.field(WEIGHT.getPreferredName(), weight);
        builder.field(DEPTH.getPreferredName(), depth);
        if (returnDetailedInfo) {
            builder.field(FG.getPreferredName(), fg);
            builder.field(BG.getPreferredName(), bg);
        }
        return builder;
    }

    private static final ConstructingObjectParser<Vertex, Void> PARSER = new ConstructingObjectParser<>(
        "VertexParser", true,
        args -> {
            String field = (String) args[0];
            String term = (String) args[1];
            double weight = (Double) args[2];
            int depth = (Integer) args[3];
            Long optionalBg = (Long) args[4];
            Long optionalFg = (Long) args[5];
            // fg/bg are only present when detailed info was requested; default to 0
            long bg = optionalBg == null ? 0 : optionalBg;
            long fg = optionalFg == null ? 0 : optionalFg;
            return new Vertex(field, term, weight, depth, bg, fg);
        });

    static {
        PARSER.declareString(constructorArg(), FIELD);
        PARSER.declareString(constructorArg(), TERM);
        PARSER.declareDouble(constructorArg(), WEIGHT);
        PARSER.declareInt(constructorArg(), DEPTH);
        PARSER.declareLong(optionalConstructorArg(), BG);
        PARSER.declareLong(optionalConstructorArg(), FG);
    }

    static Vertex fromXContent(XContentParser parser) throws IOException {
        return PARSER.apply(parser, null);
    }

    /**
     * @return a {@link VertexId} object that uniquely identifies this Vertex
     */
    public VertexId getId() {
        return createId(field, term);
    }

    /**
     * A convenience method for creating a {@link VertexId}
     * @param field the field
     * @param term the term
     * @return a {@link VertexId} that can be used for looking up vertices
     */
    public static VertexId createId(String field, String term) {
        return new VertexId(field, term);
    }

    @Override
    public String toString() {
        return getId().toString();
    }

    public String getField() {
        return field;
    }

    public String getTerm() {
        return term;
    }

    /**
     * The weight of a vertex is an accumulation of all of the {@link Connection}s
     * that are linked to this {@link Vertex} as part of a graph exploration.
     * It is used internally to identify the most interesting vertices to be returned.
     * @return a measure of the {@link Vertex}'s relative importance.
     */
    public double getWeight() {
        return weight;
    }

    public void setWeight(final double weight) {
        this.weight = weight;
    }

    /**
     * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default)
     * this statistic is available.
     * @return the number of documents in the index that contain this term (see bg_count in
     * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html">
     * the significant_terms aggregation</a>)
     */
    public long getBg() {
        return bg;
    }

    /**
     * If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default)
     * this statistic is available.
     * Together with {@link #getBg()} these numbers are used to derive the significance of a term.
     * @return the number of documents in the sample of best matching documents that contain this term (see fg_count in
     * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html">
     * the significant_terms aggregation</a>)
     */
    public long getFg() {
        return fg;
    }

    public void setFg(final long fg) {
        this.fg = fg;
    }

    /**
     * @return the sequence number in the series of hops where this Vertex term was first encountered
     */
    public int getHopDepth() {
        return depth;
    }

    /**
     * An identifier (implements hashcode and equals) that represents a
     * unique key for a {@link Vertex}
     */
    public static class VertexId {
        private final String field;
        private final String term;

        public VertexId(String field, String term) {
            this.field = field;
            this.term = term;
        }

        public String getField() {
            return field;
        }

        public String getTerm() {
            return term;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            VertexId vertexId = (VertexId) o;
            return Objects.equals(field, vertexId.field) && Objects.equals(term, vertexId.term);
        }

        @Override
        public int hashCode() {
            return Objects.hash(field, term);
        }

        @Override
        public String toString() {
            return field + ":" + term;
        }
    }
}

View File

@ -1,195 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.graph;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.client.graph.GraphExploreRequest.TermBoost;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* A request to identify terms from a choice of field as part of a {@link Hop}.
* Optionally, a set of terms can be provided that are used as an exclusion or
* inclusion list to filter which terms are considered.
*
*/
public class VertexRequest implements ToXContentObject {

    /** Default maximum number of terms returned per field. */
    public static final int DEFAULT_SIZE = 5;
    /** Default minimum number of documents (across all shards) that must contain a term. */
    public static final int DEFAULT_MIN_DOC_COUNT = 3;
    /** Default minimum number of documents on a single shard that must contain a term. */
    public static final int DEFAULT_SHARD_MIN_DOC_COUNT = 2;

    private String fieldName;
    private int size = DEFAULT_SIZE;
    // includes and excludes are mutually exclusive; at most one of the two is ever non-null
    private Map<String, TermBoost> includes;
    private Set<String> excludes;
    private int minDocCount = DEFAULT_MIN_DOC_COUNT;
    private int shardMinDocCount = DEFAULT_SHARD_MIN_DOC_COUNT;

    public VertexRequest() {
    }

    public String fieldName() {
        return fieldName;
    }

    public VertexRequest fieldName(String fieldName) {
        this.fieldName = fieldName;
        return this;
    }

    public int size() {
        return size;
    }

    /**
     * @param size The maximum number of terms that should be returned from this field as part of this {@link Hop}
     */
    public VertexRequest size(int size) {
        this.size = size;
        return this;
    }

    public boolean hasIncludeClauses() {
        return includes != null && includes.isEmpty() == false;
    }

    public boolean hasExcludeClauses() {
        return excludes != null && excludes.isEmpty() == false;
    }

    /**
     * Adds a term that should be excluded from results
     * @param term A term to be excluded
     */
    public void addExclude(String term) {
        if (includes != null) {
            throw new IllegalArgumentException("Cannot have both include and exclude clauses");
        }
        if (excludes == null) {
            excludes = new HashSet<>();
        }
        excludes.add(term);
    }

    /**
     * Adds a term to the set of allowed values - the boost defines the relative
     * importance when pursuing connections in subsequent {@link Hop}s. The boost value
     * appears as part of the query.
     * @param term a required term
     * @param boost an optional boost
     */
    public void addInclude(String term, float boost) {
        if (excludes != null) {
            throw new IllegalArgumentException("Cannot have both include and exclude clauses");
        }
        if (includes == null) {
            includes = new HashMap<>();
        }
        includes.put(term, new TermBoost(term, boost));
    }

    /**
     * @return the configured include clauses; empty if none were added
     */
    public TermBoost[] includeValues() {
        if (includes == null) {
            return new TermBoost[0];
        }
        return includes.values().toArray(new TermBoost[0]);
    }

    /**
     * @return the terms of the configured include clauses; empty if none were added
     */
    public String[] includeValuesAsStringArray() {
        if (includes == null) {
            return new String[0];
        }
        String[] result = new String[includes.size()];
        int i = 0;
        for (TermBoost tb : includes.values()) {
            result[i++] = tb.term;
        }
        return result;
    }

    /**
     * @return the configured exclude terms; empty if none were added
     */
    public String[] excludesAsArray() {
        if (excludes == null) {
            return new String[0];
        }
        return excludes.toArray(new String[0]);
    }

    public int minDocCount() {
        return minDocCount;
    }

    /**
     * A "certainty" threshold which defines the weight-of-evidence required before
     * a term found in this field is identified as a useful connection
     *
     * @param value The minimum number of documents that contain this term found in the samples used across all shards
     */
    public VertexRequest minDocCount(int value) {
        minDocCount = value;
        return this;
    }

    public int shardMinDocCount() {
        // the per-shard threshold can never usefully exceed the overall threshold
        return Math.min(shardMinDocCount, minDocCount);
    }

    /**
     * A "certainty" threshold which defines the weight-of-evidence required before
     * a term found in this field is identified as a useful connection
     *
     * @param value The minimum number of documents that contain this term found in the samples used across all shards
     */
    public VertexRequest shardMinDocCount(int value) {
        shardMinDocCount = value;
        return this;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("field", fieldName);
        // defaults are omitted from the serialized form
        if (size != DEFAULT_SIZE) {
            builder.field("size", size);
        }
        if (minDocCount != DEFAULT_MIN_DOC_COUNT) {
            builder.field("min_doc_count", minDocCount);
        }
        if (shardMinDocCount != DEFAULT_SHARD_MIN_DOC_COUNT) {
            builder.field("shard_min_doc_count", shardMinDocCount);
        }
        if (includes != null) {
            builder.startArray("include");
            for (TermBoost tb : includes.values()) {
                builder.startObject();
                builder.field("term", tb.term);
                builder.field("boost", tb.boost);
                builder.endObject();
            }
            builder.endArray();
        }
        if (excludes != null) {
            builder.startArray("exclude");
            for (String value : excludes) {
                builder.value(value);
            }
            builder.endArray();
        }
        builder.endObject();
        return builder;
    }
}

View File

@ -1,24 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Request and Response objects for the default distribution's Graph
* APIs.
*/
package org.elasticsearch.client.graph;

View File

@ -1,66 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.client.graph.GraphExploreRequest;
import org.elasticsearch.client.graph.Hop;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.test.ESTestCase;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.is;
// NOTE(review): the class name is missing an 'h' ("Grap"); it is kept as-is because a
// public class name must match its file name — rename file and class together if fixed.
public class GrapRequestConvertersTests extends ESTestCase {

    /**
     * Verifies that a populated {@link GraphExploreRequest} converts to the expected
     * low-level {@link Request}: GET method, explore endpoint, no query parameters,
     * JSON content type, and a body matching the request's XContent.
     */
    public void testGraphExplore() throws Exception {
        Map<String, String> expectedParams = new HashMap<>();

        GraphExploreRequest exploreRequest = new GraphExploreRequest();
        exploreRequest.sampleDiversityField("diversity");
        exploreRequest.indices("index1", "index2");
        exploreRequest.types("type1", "type2");
        exploreRequest.timeout(TimeValue.timeValueMillis(randomIntBetween(10000, 20000)));
        exploreRequest.useSignificance(randomBoolean());

        int numHops = randomIntBetween(1, 5);
        for (int hopNumber = 1; hopNumber <= numHops; hopNumber++) {
            QueryBuilder guidingQuery = randomBoolean()
                ? new TermQueryBuilder("field" + hopNumber, "value" + hopNumber)
                : null;
            Hop hop = exploreRequest.createNextHop(guidingQuery);
            hop.addVertexRequest("field" + hopNumber);
            hop.getVertexRequest(0).addInclude("value" + hopNumber, hopNumber);
        }

        Request request = GraphRequestConverters.explore(exploreRequest);
        assertEquals(HttpGet.METHOD_NAME, request.getMethod());
        assertEquals("/index1,index2/type1,type2/_graph/explore", request.getEndpoint());
        assertEquals(expectedParams, request.getParameters());
        assertThat(request.getEntity().getContentType().getValue(), is(XContentType.JSON.mediaTypeWithoutParameters()));
        RequestConvertersTests.assertToXContentBody(exploreRequest, request.getEntity());
    }
}

View File

@ -1,139 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.client.graph.GraphExploreRequest;
import org.elasticsearch.client.graph.GraphExploreResponse;
import org.elasticsearch.client.graph.Hop;
import org.elasticsearch.client.graph.Vertex;
import org.elasticsearch.client.graph.VertexRequest;
import org.hamcrest.Matchers;
import org.junit.Before;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
public class GraphIT extends ESRestHighLevelClientTestCase {

    @Before
    public void indexDocuments() throws IOException {
        // Create chain of doc IDs across indices 1->2->3
        Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/_doc/1");
        doc1.setJsonEntity("{ \"num\":[1], \"const\":\"start\"}");
        client().performRequest(doc1);
        Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/_doc/1");
        doc2.setJsonEntity("{\"num\":[1,2], \"const\":\"foo\"}");
        client().performRequest(doc2);
        Request doc3 = new Request(HttpPut.METHOD_NAME, "/index2/_doc/2");
        doc3.setJsonEntity("{\"num\":[2,3], \"const\":\"foo\"}");
        client().performRequest(doc3);
        // The first write maps "num" as a text field in index_no_field_data, which is what
        // makes that index fail graph exploration later.
        Request doc4 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/_doc/2");
        doc4.setJsonEntity("{\"num\":\"string\", \"const\":\"foo\"}");
        client().performRequest(doc4);
        // NOTE(review): doc5 targets the same URL as doc4 and therefore overwrites it —
        // confirm this is intended rather than a copy/paste slip in the doc id.
        Request doc5 = new Request(HttpPut.METHOD_NAME, "/index_no_field_data/_doc/2");
        doc5.setJsonEntity("{\"num\":[2,4], \"const\":\"foo\"}");
        client().performRequest(doc5);
        client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
    }

    /**
     * Builds a three-hop explore request over the given indices: the first hop is anchored
     * on const.keyword == "start", later hops are optionally guided by const.keyword == "foo",
     * and every hop collects terms from the "num" field.
     */
    private GraphExploreRequest buildThreeHopRequest(String... indices) {
        GraphExploreRequest graphExploreRequest = new GraphExploreRequest();
        graphExploreRequest.indices(indices);
        graphExploreRequest.useSignificance(false);
        int numHops = 3;
        for (int i = 0; i < numHops; i++) {
            QueryBuilder guidingQuery = null;
            if (i == 0) {
                guidingQuery = new TermQueryBuilder("const.keyword", "start");
            } else if (randomBoolean()) {
                guidingQuery = new TermQueryBuilder("const.keyword", "foo");
            }
            Hop hop = graphExploreRequest.createNextHop(guidingQuery);
            VertexRequest vr = hop.addVertexRequest("num");
            vr.minDocCount(1);
        }
        return graphExploreRequest;
    }

    /** Collects each returned vertex's term mapped to the hop depth at which it was found. */
    private static Map<String, Integer> termsAndDepths(GraphExploreResponse exploreResponse) {
        Map<String, Integer> actualTermsAndDepths = new HashMap<>();
        Collection<Vertex> v = exploreResponse.getVertices();
        for (Vertex vertex : v) {
            actualTermsAndDepths.put(vertex.getTerm(), vertex.getHopDepth());
        }
        return actualTermsAndDepths;
    }

    /** The doc-id chain 1->2->3 should be discovered across three hops with no shard failures. */
    public void testCleanExplore() throws Exception {
        GraphExploreRequest graphExploreRequest = buildThreeHopRequest("index1", "index2");
        Map<String, Integer> expectedTermsAndDepths = new HashMap<>();
        expectedTermsAndDepths.put("1", 0);
        expectedTermsAndDepths.put("2", 1);
        expectedTermsAndDepths.put("3", 2);
        GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT);
        assertEquals(expectedTermsAndDepths, termsAndDepths(exploreResponse));
        assertThat(exploreResponse.isTimedOut(), Matchers.is(false));
        ShardOperationFailedException[] failures = exploreResponse.getShardFailures();
        assertThat(failures.length, Matchers.equalTo(0));
    }

    public void testBadExplore() throws Exception {
        // Explore indices where lack of fielddata=true on one index leads to partial failures
        GraphExploreRequest graphExploreRequest = buildThreeHopRequest("index1", "index2", "index_no_field_data");
        Map<String, Integer> expectedTermsAndDepths = new HashMap<>();
        expectedTermsAndDepths.put("1", 0);
        expectedTermsAndDepths.put("2", 1);
        expectedTermsAndDepths.put("3", 2);
        GraphExploreResponse exploreResponse = highLevelClient().graph().explore(graphExploreRequest, RequestOptions.DEFAULT);
        // the healthy indices still yield the full expected chain despite the failing index
        assertEquals(expectedTermsAndDepths, termsAndDepths(exploreResponse));
        assertThat(exploreResponse.isTimedOut(), Matchers.is(false));
        ShardOperationFailedException[] failures = exploreResponse.getShardFailures();
        assertThat(failures.length, Matchers.equalTo(1));
        assertTrue(failures[0].reason().contains("Text fields are not optimised for operations that require per-document field data"));
    }
}

View File

@ -70,10 +70,6 @@ public class PingAndInfoIT extends ESRestHighLevelClientTestCase {
assertEquals("trial", info.getLicenseInfo().getMode());
assertEquals(LicenseStatus.ACTIVE, info.getLicenseInfo().getStatus());
FeatureSet graph = info.getFeatureSetsInfo().getFeatureSets().get("graph");
assertTrue(graph.available());
assertTrue(graph.enabled());
assertNull(graph.nativeCodeInfo());
FeatureSet monitoring = info.getFeatureSetsInfo().getFeatureSets().get("monitoring");
assertTrue(monitoring.available());
assertTrue(monitoring.enabled());

View File

@ -1,125 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.documentation;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.client.graph.Connection;
import org.elasticsearch.client.graph.GraphExploreRequest;
import org.elasticsearch.client.graph.GraphExploreResponse;
import org.elasticsearch.client.graph.Hop;
import org.elasticsearch.client.graph.Vertex;
import org.elasticsearch.client.graph.VertexRequest;
import org.junit.Before;
import java.io.IOException;
import java.util.Collection;
/**
 * Documentation tests for the Graph explore API. The code between the
 * {@code // tag::...} and {@code // end::...} markers is extracted verbatim into the
 * published high-level-client docs, so nothing (including comments) should be added
 * inside those regions.
 */
public class GraphDocumentationIT extends ESRestHighLevelClientTestCase {

    // Seeds two messages: doc1 mentions "projectx" and an attachment hash; doc2 links
    // participant 2 to participants 3 and 4, giving the explore calls something to find.
    @Before
    public void indexDocuments() throws IOException {
        // Create chain of doc IDs across indices 1->2->3
        Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/_doc/1");
        doc1.setJsonEntity("{ \"participants\":[1,2], \"text\":\"let's start projectx\", \"attachment_md5\":\"324FHDGHFDG4564\"}");
        client().performRequest(doc1);
        Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/_doc/2");
        doc2.setJsonEntity("{\"participants\":[2,3,4], \"text\":\"got something you both may be interested in\"}");
        client().performRequest(doc2);
        client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
    }

    // A documentation example, not an assertion-based test: it exercises a two-hop
    // explore, prints the result, then issues a follow-up "expand" request seeded from
    // the first response's vertices.
    @SuppressForbidden(reason = "system out is ok for a documentation example")
    public void testExplore() throws Exception {
        RestHighLevelClient client = highLevelClient();

        // tag::x-pack-graph-explore-request
        GraphExploreRequest request = new GraphExploreRequest();
        request.indices("index1", "index2");
        request.useSignificance(false);
        TermQueryBuilder startingQuery = new TermQueryBuilder("text", "projectx");
        Hop hop1 = request.createNextHop(startingQuery); // <1>
        VertexRequest people = hop1.addVertexRequest("participants"); // <2>
        people.minDocCount(1);
        VertexRequest files = hop1.addVertexRequest("attachment_md5");
        files.minDocCount(1);
        Hop hop2 = request.createNextHop(null); // <3>
        VertexRequest vr2 = hop2.addVertexRequest("participants");
        vr2.minDocCount(5);
        GraphExploreResponse exploreResponse = client.graph().explore(request, RequestOptions.DEFAULT); // <4>
        // end::x-pack-graph-explore-request

        // tag::x-pack-graph-explore-response
        Collection<Vertex> v = exploreResponse.getVertices();
        Collection<Connection> c = exploreResponse.getConnections();
        for (Vertex vertex : v) {
            System.out.println(vertex.getField() + ":" + vertex.getTerm() + // <1>
                " discovered at hop depth " + vertex.getHopDepth());
        }
        for (Connection link : c) {
            System.out.println(link.getFrom() + " -> " + link.getTo() // <2>
                + " evidenced by " + link.getDocCount() + " docs");
        }
        // end::x-pack-graph-explore-response

        Collection<Vertex> initialVertices = exploreResponse.getVertices();

        // tag::x-pack-graph-explore-expand
        GraphExploreRequest expandRequest = new GraphExploreRequest();
        expandRequest.indices("index1", "index2");
        Hop expandHop1 = expandRequest.createNextHop(null); // <1>
        VertexRequest fromPeople = expandHop1.addVertexRequest("participants"); // <2>
        for (Vertex vertex : initialVertices) {
            if (vertex.getField().equals("participants")) {
                fromPeople.addInclude(vertex.getTerm(), 1f);
            }
        }
        Hop expandHop2 = expandRequest.createNextHop(null);
        VertexRequest newPeople = expandHop2.addVertexRequest("participants"); // <3>
        for (Vertex vertex : initialVertices) {
            if (vertex.getField().equals("participants")) {
                newPeople.addExclude(vertex.getTerm());
            }
        }
        GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT);
        // end::x-pack-graph-explore-expand
    }
}

View File

@ -1,152 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.graph;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.AbstractResponseTestCase;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.protocol.xpack.graph.Connection.ConnectionId;
import org.junit.Assert;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
public class GraphExploreResponseTests extends
AbstractResponseTestCase<org.elasticsearch.protocol.xpack.graph.GraphExploreResponse, GraphExploreResponse> {
@Override
protected org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createServerTestInstance(XContentType xContentType) {
return createInstance(randomIntBetween(1, 128));
}
private static org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createInstance(int numFailures) {
int numItems = randomIntBetween(4, 128);
boolean timedOut = randomBoolean();
boolean showDetails = randomBoolean();
long overallTookInMillis = randomNonNegativeLong();
Map<org.elasticsearch.protocol.xpack.graph.Vertex.VertexId, org.elasticsearch.protocol.xpack.graph.Vertex> vertices =
new HashMap<>();
Map<ConnectionId,
org.elasticsearch.protocol.xpack.graph.Connection> connections = new HashMap<>();
ShardOperationFailedException [] failures = new ShardOperationFailedException [numFailures];
for (int i = 0; i < failures.length; i++) {
failures[i] = new ShardSearchFailure(new ElasticsearchException("an error"));
}
//Create random set of vertices
for (int i = 0; i < numItems; i++) {
org.elasticsearch.protocol.xpack.graph.Vertex v = new org.elasticsearch.protocol.xpack.graph.Vertex("field1",
randomAlphaOfLength(5), randomDouble(), 0,
showDetails? randomIntBetween(100, 200):0,
showDetails? randomIntBetween(1, 100):0);
vertices.put(v.getId(), v);
}
//Wire up half the vertices randomly
org.elasticsearch.protocol.xpack.graph.Vertex[] vs =
vertices.values().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[vertices.size()]);
for (int i = 0; i < numItems/2; i++) {
org.elasticsearch.protocol.xpack.graph.Vertex v1 = vs[randomIntBetween(0, vs.length-1)];
org.elasticsearch.protocol.xpack.graph.Vertex v2 = vs[randomIntBetween(0, vs.length-1)];
if(v1 != v2) {
org.elasticsearch.protocol.xpack.graph.Connection conn = new org.elasticsearch.protocol.xpack.graph.Connection(v1, v2,
randomDouble(), randomLongBetween(1, 10));
connections.put(conn.getId(), conn);
}
}
return new org.elasticsearch.protocol.xpack.graph.GraphExploreResponse(overallTookInMillis, timedOut, failures,
vertices, connections, showDetails);
}
/**
 * Creates a randomized response guaranteed to carry shard failures (between 1 and 128).
 */
private static org.elasticsearch.protocol.xpack.graph.GraphExploreResponse createTestInstanceWithFailures() {
    final int failureCount = randomIntBetween(1, 128);
    return createInstance(failureCount);
}
/**
 * Verifies that the client-side response is equivalent to the server-side instance it was
 * parsed from. Connections and vertices are compared after sorting by their string ids,
 * since map insertion order can differ between the two representations.
 */
@Override
protected void assertInstances(org.elasticsearch.protocol.xpack.graph.GraphExploreResponse serverTestInstance,
                               GraphExploreResponse clientInstance) {
    // Use the statically imported assertThat consistently (Assert.assertThat is deprecated)
    assertThat(serverTestInstance.getTook(), equalTo(clientInstance.getTook()));
    assertThat(serverTestInstance.isTimedOut(), equalTo(clientInstance.isTimedOut()));
    // Compare connections by string id since server and client use different classes
    Comparator<org.elasticsearch.protocol.xpack.graph.Connection> serverComparator =
        Comparator.comparing(o -> o.getId().toString());
    org.elasticsearch.protocol.xpack.graph.Connection[] serverConns =
        serverTestInstance.getConnections().toArray(new org.elasticsearch.protocol.xpack.graph.Connection[0]);
    Comparator<Connection> clientComparator =
        Comparator.comparing(o -> o.getId().toString());
    Connection[] clientConns =
        clientInstance.getConnections().toArray(new Connection[0]);
    Arrays.sort(serverConns, serverComparator);
    Arrays.sort(clientConns, clientComparator);
    assertThat(serverConns.length, equalTo(clientConns.length));
    for (int i = 0; i < clientConns.length; i++) {
        org.elasticsearch.protocol.xpack.graph.Connection serverConn = serverConns[i];
        Connection clientConn = clientConns[i];
        // String rep since they are different classes
        assertThat(serverConn.getId().toString(), equalTo(clientConn.getId().toString()));
        assertVertex(serverConn.getTo(), clientConn.getTo());
        assertThat(serverConn.getDocCount(), equalTo(clientConn.getDocCount()));
        assertVertex(serverConn.getFrom(), clientConn.getFrom());
        assertThat(serverConn.getWeight(), equalTo(clientConn.getWeight()));
    }
    // Sort the vertices lists before equality test (map insertion sequences can cause order differences)
    Comparator<org.elasticsearch.protocol.xpack.graph.Vertex> serverVertexComparator = Comparator.comparing(o -> o.getId().toString());
    org.elasticsearch.protocol.xpack.graph.Vertex[] serverVertices =
        serverTestInstance.getVertices().toArray(new org.elasticsearch.protocol.xpack.graph.Vertex[0]);
    Comparator<Vertex> clientVertexComparator = Comparator.comparing(o -> o.getId().toString());
    Vertex[] clientVertices = clientInstance.getVertices().toArray(new Vertex[0]);
    Arrays.sort(serverVertices, serverVertexComparator);
    Arrays.sort(clientVertices, clientVertexComparator);
    assertThat(serverVertices.length, equalTo(clientVertices.length));
    for (int i = 0; i < serverVertices.length; i++) {
        org.elasticsearch.protocol.xpack.graph.Vertex serverVertex = serverVertices[i];
        Vertex clientVertex = clientVertices[i];
        assertVertex(serverVertex, clientVertex);
    }
    // Only the failure count is compared; the failure contents differ in representation
    ShardOperationFailedException[] newFailures = serverTestInstance.getShardFailures();
    ShardOperationFailedException[] expectedFailures = clientInstance.getShardFailures();
    Assert.assertEquals(expectedFailures.length, newFailures.length);
}
/**
 * Asserts that a client-side vertex carries the same data as its server-side counterpart.
 * Ids are compared through their string form because the two sides use different classes.
 */
private void assertVertex(org.elasticsearch.protocol.xpack.graph.Vertex expected, Vertex actual) {
    assertThat(actual.getId().toString(), equalTo(expected.getId().toString()));
    assertThat(actual.getTerm(), equalTo(expected.getTerm()));
    assertThat(actual.getField(), equalTo(expected.getField()));
    assertThat(actual.getHopDepth(), equalTo(expected.getHopDepth()));
    assertThat(actual.getFg(), equalTo(expected.getFg()));
    assertThat(actual.getBg(), equalTo(expected.getBg()));
    assertThat(actual.getWeight(), equalTo(expected.getWeight()));
}
/**
 * Parses the given x-content into the client-side {@link GraphExploreResponse}.
 *
 * @throws IOException if the underlying parser fails
 */
@Override
protected GraphExploreResponse doParseToClientInstance(XContentParser parser) throws IOException {
    final GraphExploreResponse parsed = GraphExploreResponse.fromXContent(parser);
    return parsed;
}
}

View File

@ -125,7 +125,6 @@ def projectPathsToExclude = [
':x-pack:plugin:core',
':x-pack:plugin:deprecation',
':x-pack:plugin:frozen-indices',
':x-pack:plugin:graph',
':x-pack:plugin:identity-provider',
':x-pack:plugin:ilm',
':x-pack:plugin:mapper-constant-keyword',

View File

@ -27,7 +27,6 @@ testClusters {
String licenseType = System.getProperty("run.license_type", "basic")
if (licenseType == 'trial') {
setting 'xpack.ml.enabled', 'true'
setting 'xpack.graph.enabled', 'true'
setting 'xpack.watcher.enabled', 'true'
setting 'xpack.license.self_generated.type', 'trial'
} else if (licenseType != 'basic') {