Migrate from Trove to Hppc.

Martijn van Groningen 2013-09-13 23:54:04 +02:00
parent 373e64b3eb
commit 088e05b368
98 changed files with 1124 additions and 1141 deletions

pom.xml

@ -170,9 +170,9 @@
</dependency>
<dependency>
<groupId>net.sf.trove4j</groupId>
<artifactId>trove4j</artifactId>
<version>3.0.3</version>
<groupId>com.carrotsearch</groupId>
<artifactId>hppc</artifactId>
<version>0.5.2</version>
</dependency>
<dependency>
@ -439,7 +439,7 @@
<artifactSet>
<includes>
<include>com.google.guava:guava</include>
<include>net.sf.trove4j:trove4j</include>
<include>com.carrotsearch:hppc</include>
<include>org.mvel:mvel2</include>
<include>com.fasterxml.jackson.core:jackson-core</include>
<include>com.fasterxml.jackson.dataformat:jackson-dataformat-smile</include>
@ -455,8 +455,8 @@
<shadedPattern>org.elasticsearch.common</shadedPattern>
</relocation>
<relocation>
<pattern>gnu.trove</pattern>
<shadedPattern>org.elasticsearch.common.trove</shadedPattern>
<pattern>com.carrotsearch.hppc</pattern>
<shadedPattern>org.elasticsearch.common.hppc</shadedPattern>
</relocation>
<relocation>
<pattern>jsr166y</pattern>


@ -755,7 +755,10 @@ public class MapperQueryParser extends QueryParser {
private void applyBoost(String field, Query q) {
if (settings.boosts() != null) {
float boost = settings.boosts().get(field);
float boost = 1f;
if (settings.boosts().containsKey(field)) {
boost = settings.boosts().lget();
}
q.setBoost(boost);
}
}
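
The hunk above shows the central API shift of this migration: Trove maps return a configurable no-entry value from get(), while HPPC's ObjectFloatOpenHashMap returns the primitive default (0f) for missing keys and instead offers the containsKey()/lget() pair, where lget() reads the value of the key just found by containsKey() without a second lookup. A minimal standalone sketch of the new idiom, assuming HPPC 0.5.x as added by this commit (class and variable names are illustrative):

import com.carrotsearch.hppc.ObjectFloatOpenHashMap;

public class BoostLookupSketch {
    public static void main(String[] args) {
        ObjectFloatOpenHashMap<String> fieldBoosts = new ObjectFloatOpenHashMap<String>();
        fieldBoosts.put("title", 2.0f);

        // HPPC idiom: containsKey() positions an internal cursor, lget() reads it.
        float boost = 1f;                      // default when no explicit boost is set
        if (fieldBoosts.containsKey("body")) { // false here, so the default is kept
            boost = fieldBoosts.lget();
        }
        System.out.println(boost);             // prints 1.0
    }
}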


@ -19,7 +19,7 @@
package org.apache.lucene.queryparser.classic;
import gnu.trove.map.hash.TObjectFloatHashMap;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MultiTermQuery;
@ -63,7 +63,7 @@ public class QueryParserSettings {
List<String> fields = null;
Collection<String> queryTypes = null;
TObjectFloatHashMap<String> boosts = null;
ObjectFloatOpenHashMap<String> boosts = null;
float tieBreaker = 0.0f;
boolean useDisMax = true;
@ -272,11 +272,11 @@ public class QueryParserSettings {
this.queryTypes = queryTypes;
}
public TObjectFloatHashMap<String> boosts() {
public ObjectFloatOpenHashMap<String> boosts() {
return boosts;
}
public void boosts(TObjectFloatHashMap<String> boosts) {
public void boosts(ObjectFloatOpenHashMap<String> boosts) {
this.boosts = boosts;
}


@ -18,7 +18,7 @@
*/
package org.apache.lucene.search.suggest.analyzing;
import gnu.trove.map.hash.TObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.TokenStreamToAutomaton;
@ -33,6 +33,7 @@ import org.apache.lucene.util.fst.*;
import org.apache.lucene.util.fst.FST.BytesReader;
import org.apache.lucene.util.fst.PairOutputs.Pair;
import org.apache.lucene.util.fst.Util.MinResult;
import org.elasticsearch.common.hppc.HppcMaps;
import java.io.File;
import java.io.IOException;
@ -917,7 +918,7 @@ public class XAnalyzingSuggester extends Lookup {
private BytesRef analyzed = new BytesRef();
private final SurfaceFormAndPayload[] surfaceFormsAndPayload;
private int count;
private TObjectIntHashMap<BytesRef> seenSurfaceForms = new TObjectIntHashMap<BytesRef>(256, 0.75f, -1);
private ObjectIntOpenHashMap<BytesRef> seenSurfaceForms = HppcMaps.Object.Integer.ensureNoNullKeys(256, 0.75f);
public XBuilder(int maxSurfaceFormsPerAnalyzedForm, boolean hasPayloads) {
this.outputs = new PairOutputs<Long, BytesRef>(PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton());
@ -969,7 +970,8 @@ public class XAnalyzingSuggester extends Lookup {
return;
}
BytesRef surfaceCopy;
if (count > 0 && (surfaceIndex = seenSurfaceForms.get(surface)) >= 0) {
if (count > 0 && seenSurfaceForms.containsKey(surface)) {
surfaceIndex = seenSurfaceForms.lget();
SurfaceFormAndPayload surfaceFormAndPayload = surfaceFormsAndPayload[surfaceIndex];
if (encodedWeight >= surfaceFormAndPayload.weight) {
return;
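
The old Trove map here was built with a no-entry value of -1 so that get() >= 0 doubled as a presence check. HPPC returns 0 for absent keys, and 0 is a valid slot index, so the check becomes containsKey()/lget(); the HppcMaps.Object.Integer.ensureNoNullKeys(...) factory added later in this commit additionally rejects null keys, which HPPC would otherwise accept. A condensed sketch of the de-duplication idiom, assuming HPPC 0.5.x; the index assignment is simplified and the names are illustrative:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class SurfaceFormDedupSketch {
    public static void main(String[] args) {
        // Plain map for brevity; the suggester wraps it to forbid null keys.
        ObjectIntOpenHashMap<String> seenSurfaceForms = new ObjectIntOpenHashMap<String>(256, 0.75f);

        String surface = "foo";
        int surfaceIndex;
        if (seenSurfaceForms.containsKey(surface)) {
            surfaceIndex = seenSurfaceForms.lget();      // reuse the previously assigned slot
        } else {
            surfaceIndex = seenSurfaceForms.size();      // simplified: next free slot
            seenSurfaceForms.put(surface, surfaceIndex); // 0 is valid, so no -1 sentinel is needed
        }
        System.out.println(surfaceIndex);
    }
}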


@ -19,7 +19,6 @@
package org.elasticsearch.action.deletebyquery;
import gnu.trove.set.hash.THashSet;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest;
import org.elasticsearch.common.Nullable;
@ -30,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@ -102,7 +102,7 @@ public class IndexDeleteByQueryRequest extends IndexReplicationOperationRequest<
}
int routingSize = in.readVInt();
if (routingSize > 0) {
routing = new THashSet<String>(routingSize);
routing = new HashSet<String>(routingSize);
for (int i = 0; i < routingSize; i++) {
routing.add(in.readString());
}


@ -19,7 +19,6 @@
package org.elasticsearch.action.deletebyquery;
import gnu.trove.set.hash.THashSet;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest;
import org.elasticsearch.common.Nullable;
@ -31,6 +30,7 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@ -101,7 +101,7 @@ public class ShardDeleteByQueryRequest extends ShardReplicationOperationRequest<
types = in.readStringArray();
int routingSize = in.readVInt();
if (routingSize > 0) {
routing = new THashSet<String>(routingSize);
routing = new HashSet<String>(routingSize);
for (int i = 0; i < routingSize; i++) {
routing.add(in.readString());
}


@ -19,8 +19,8 @@
package org.elasticsearch.action.get;
import gnu.trove.list.array.TIntArrayList;
import gnu.trove.list.array.TLongArrayList;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.LongArrayList;
import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
@ -39,11 +39,11 @@ public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetSh
Boolean realtime;
boolean refresh;
TIntArrayList locations;
IntArrayList locations;
List<String> types;
List<String> ids;
List<String[]> fields;
TLongArrayList versions;
LongArrayList versions;
List<VersionType> versionTypes;
List<FetchSourceContext> fetchSourceContexts;
@ -54,11 +54,11 @@ public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetSh
MultiGetShardRequest(String index, int shardId) {
super(index);
this.shardId = shardId;
locations = new TIntArrayList();
locations = new IntArrayList();
types = new ArrayList<String>();
ids = new ArrayList<String>();
fields = new ArrayList<String[]>();
versions = new TLongArrayList();
versions = new LongArrayList();
versionTypes = new ArrayList<VersionType>();
fetchSourceContexts = new ArrayList<FetchSourceContext>();
}
@ -113,11 +113,11 @@ public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetSh
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
locations = new TIntArrayList(size);
locations = new IntArrayList(size);
types = new ArrayList<String>(size);
ids = new ArrayList<String>(size);
fields = new ArrayList<String[]>(size);
versions = new TLongArrayList(size);
versions = new LongArrayList(size);
versionTypes = new ArrayList<VersionType>(size);
fetchSourceContexts = new ArrayList<FetchSourceContext>(size);
for (int i = 0; i < size; i++) {


@ -19,7 +19,7 @@
package org.elasticsearch.action.get;
import gnu.trove.list.array.TIntArrayList;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -30,12 +30,12 @@ import java.util.List;
public class MultiGetShardResponse extends ActionResponse {
TIntArrayList locations;
IntArrayList locations;
List<GetResponse> responses;
List<MultiGetResponse.Failure> failures;
MultiGetShardResponse() {
locations = new TIntArrayList();
locations = new IntArrayList();
responses = new ArrayList<GetResponse>();
failures = new ArrayList<MultiGetResponse.Failure>();
}
@ -56,7 +56,7 @@ public class MultiGetShardResponse extends ActionResponse {
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
locations = new TIntArrayList(size);
locations = new IntArrayList(size);
responses = new ArrayList<GetResponse>(size);
failures = new ArrayList<MultiGetResponse.Failure>(size);
for (int i = 0; i < size; i++) {


@ -19,7 +19,7 @@
package org.elasticsearch.action.percolate;
import gnu.trove.list.array.TIntArrayList;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.*;
@ -76,7 +76,7 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
final List<Object> percolateRequests = new ArrayList<Object>(request.requests().size());
// Can have a mixture of percolate requests. (normal percolate requests & percolate existing doc),
// so we need to keep track for what percolate request we had a get request
final TIntArrayList getRequestSlots = new TIntArrayList();
final IntArrayList getRequestSlots = new IntArrayList();
List<GetRequest> existingDocsRequests = new ArrayList<GetRequest>();
for (int slot = 0; slot < request.requests().size(); slot++) {
PercolateRequest percolateRequest = request.requests().get(slot);
@ -139,7 +139,7 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
final Map<ShardId, TransportShardMultiPercolateAction.Request> requestsByShard;
final List<Object> percolateRequests;
final Map<ShardId, TIntArrayList> shardToSlots;
final Map<ShardId, IntArrayList> shardToSlots;
final AtomicInteger expectedOperations;
final AtomicArray<Object> reducedResponses;
final AtomicReferenceArray<AtomicInteger> expectedOperationsPerItem;
@ -155,7 +155,7 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
// Resolving concrete indices and routing and grouping the requests by shard
requestsByShard = new HashMap<ShardId, TransportShardMultiPercolateAction.Request>();
// Keep track what slots belong to what shard, in case a request to a shard fails on all copies
shardToSlots = new HashMap<ShardId, TIntArrayList>();
shardToSlots = new HashMap<ShardId, IntArrayList>();
int expectedResults = 0;
for (int slot = 0; slot < percolateRequests.size(); slot++) {
Object element = percolateRequests.get(slot);
@ -180,9 +180,9 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot);
requests.add(new TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest)));
TIntArrayList items = shardToSlots.get(shardId);
IntArrayList items = shardToSlots.get(shardId);
if (items == null) {
shardToSlots.put(shardId, items = new TIntArrayList());
shardToSlots.put(shardId, items = new IntArrayList());
}
items.add(slot);
}
@ -257,7 +257,7 @@ public class TransportMultiPercolateAction extends TransportAction<MultiPercolat
void onShardFailure(ShardId shardId, Throwable e) {
logger.debug("{} Shard multi percolate failure", e, shardId);
try {
TIntArrayList slots = shardToSlots.get(shardId);
IntArrayList slots = shardToSlots.get(shardId);
for (int i = 0; i < slots.size(); i++) {
int slot = slots.get(i);
AtomicReferenceArray shardResults = responsesByItemAndShard.get(slot);


@ -19,6 +19,7 @@
package org.elasticsearch.action.search.type;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchOperationThreading;
@ -28,7 +29,6 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.trove.ExtTIntArrayList;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceListener;
@ -66,13 +66,13 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
final AtomicArray<QuerySearchResult> queryResults;
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<ExtTIntArrayList> docIdsToLoad;
final AtomicArray<IntArrayList> docIdsToLoad;
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
super(request, listener);
queryResults = new AtomicArray<QuerySearchResult>(firstResults.length());
fetchResults = new AtomicArray<FetchSearchResult>(firstResults.length());
docIdsToLoad = new AtomicArray<ExtTIntArrayList>(firstResults.length());
docIdsToLoad = new AtomicArray<IntArrayList>(firstResults.length());
}
@Override
@ -192,7 +192,7 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
int localOperations = 0;
for (final AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = queryResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
@ -208,7 +208,7 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
for (final AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = queryResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
@ -220,7 +220,7 @@ public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA
});
} else {
boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
for (final AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
final QuerySearchResult queryResult = queryResults.get(entry.index);
final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {


@ -19,6 +19,7 @@
package org.elasticsearch.action.search.type;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchOperationThreading;
@ -28,7 +29,6 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.trove.ExtTIntArrayList;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceListener;
@ -62,12 +62,12 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
private class AsyncAction extends BaseAsyncAction<QuerySearchResult> {
final AtomicArray<FetchSearchResult> fetchResults;
final AtomicArray<ExtTIntArrayList> docIdsToLoad;
final AtomicArray<IntArrayList> docIdsToLoad;
private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
super(request, listener);
fetchResults = new AtomicArray<FetchSearchResult>(firstResults.length());
docIdsToLoad = new AtomicArray<ExtTIntArrayList>(firstResults.length());
docIdsToLoad = new AtomicArray<IntArrayList>(firstResults.length());
}
@Override
@ -93,7 +93,7 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
int localOperations = 0;
for (AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
@ -109,7 +109,7 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
for (AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
@ -121,7 +121,7 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi
});
} else {
boolean localAsync = request.operationThreading() == SearchOperationThreading.THREAD_PER_SHARD;
for (final AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
final QuerySearchResult queryResult = firstResults.get(entry.index);
final DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {


@ -19,6 +19,7 @@
package org.elasticsearch.action.search.type;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.*;
@ -29,7 +30,6 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.trove.ExtTIntArrayList;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceListener;
import org.elasticsearch.search.action.SearchServiceTransportAction;
@ -226,7 +226,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
private void executeFetchPhase() {
sortedShardList = searchPhaseController.sortDocs(queryResults);
AtomicArray<ExtTIntArrayList> docIdsToLoad = new AtomicArray<ExtTIntArrayList>(queryResults.length());
AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<IntArrayList>(queryResults.length());
searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);
if (docIdsToLoad.asList().isEmpty()) {
@ -235,8 +235,8 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
for (final AtomicArray.Entry<ExtTIntArrayList> entry : docIdsToLoad.asList()) {
ExtTIntArrayList docIds = entry.value;
for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
IntArrayList docIds = entry.value;
final QuerySearchResult querySearchResult = queryResults.get(entry.index);
FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, querySearchResult.id(), docIds);
DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());


@ -19,6 +19,7 @@
package org.elasticsearch.action.search.type;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
@ -35,7 +36,6 @@ import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.trove.ExtTIntArrayList;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
@ -356,7 +356,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
* Releases shard targets that are not used in the docsIdsToLoad.
*/
protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
AtomicArray<ExtTIntArrayList> docIdsToLoad) {
AtomicArray<IntArrayList> docIdsToLoad) {
if (docIdsToLoad == null) {
return;
}


@ -19,7 +19,7 @@
package org.elasticsearch.action.termvector;
import gnu.trove.list.array.TIntArrayList;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -33,7 +33,7 @@ public class MultiTermVectorsShardRequest extends SingleShardOperationRequest<Mu
private int shardId;
private String preference;
TIntArrayList locations;
IntArrayList locations;
List<TermVectorRequest> requests;
MultiTermVectorsShardRequest() {
@ -43,7 +43,7 @@ public class MultiTermVectorsShardRequest extends SingleShardOperationRequest<Mu
MultiTermVectorsShardRequest(String index, int shardId) {
super(index);
this.shardId = shardId;
locations = new TIntArrayList();
locations = new IntArrayList();
requests = new ArrayList<TermVectorRequest>();
}
@ -75,7 +75,7 @@ public class MultiTermVectorsShardRequest extends SingleShardOperationRequest<Mu
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
locations = new TIntArrayList(size);
locations = new IntArrayList(size);
requests = new ArrayList<TermVectorRequest>(size);
for (int i = 0; i < size; i++) {
locations.add(in.readVInt());


@ -19,7 +19,7 @@
package org.elasticsearch.action.termvector;
import gnu.trove.list.array.TIntArrayList;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -30,12 +30,12 @@ import java.util.List;
public class MultiTermVectorsShardResponse extends ActionResponse {
TIntArrayList locations;
IntArrayList locations;
List<TermVectorResponse> responses;
List<MultiTermVectorsResponse.Failure> failures;
MultiTermVectorsShardResponse() {
locations = new TIntArrayList();
locations = new IntArrayList();
responses = new ArrayList<TermVectorResponse>();
failures = new ArrayList<MultiTermVectorsResponse.Failure>();
}
@ -56,7 +56,7 @@ public class MultiTermVectorsShardResponse extends ActionResponse {
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
locations = new TIntArrayList(size);
locations = new IntArrayList(size);
responses = new ArrayList<TermVectorResponse>(size);
failures = new ArrayList<MultiTermVectorsResponse.Failure>(size);
for (int i = 0; i < size; i++) {


@ -19,14 +19,15 @@
package org.elasticsearch.action.termvector;
import gnu.trove.impl.Constants;
import gnu.trove.map.hash.TObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.apache.lucene.index.*;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.hppc.HppcMaps;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import java.io.IOException;
@ -112,7 +113,7 @@ import static org.apache.lucene.util.ArrayUtil.grow;
public final class TermVectorFields extends Fields {
final private TObjectLongHashMap<String> fieldMap;
final private ObjectLongOpenHashMap<String> fieldMap;
final private BytesReference termVectors;
final boolean hasTermStatistic;
final boolean hasFieldStatistic;
@ -124,7 +125,7 @@ public final class TermVectorFields extends Fields {
*/
public TermVectorFields(BytesReference headerRef, BytesReference termVectors) throws IOException {
BytesStreamInput header = new BytesStreamInput(headerRef);
fieldMap = new TObjectLongHashMap<String>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
fieldMap = new ObjectLongOpenHashMap<String>();
// here we read the header to fill the field offset map
String headerString = header.readString();
@ -144,20 +145,36 @@ public final class TermVectorFields extends Fields {
@Override
public Iterator<String> iterator() {
return fieldMap.keySet().iterator();
final Iterator<ObjectLongCursor<String>> iterator = fieldMap.iterator();
return new Iterator<String>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public String next() {
return iterator.next().key;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
@Override
public Terms terms(String field) throws IOException {
// first, find where in the termVectors bytes the actual term vector for
// this field is stored
Long offset = fieldMap.get(field);
if (offset.longValue() < 0) {
if (!fieldMap.containsKey(field)) {
return null; // we don't have it.
}
long offset = fieldMap.lget();
final BytesStreamInput perFieldTermVectorInput = new BytesStreamInput(this.termVectors);
perFieldTermVectorInput.reset();
perFieldTermVectorInput.skip(offset.longValue());
perFieldTermVectorInput.skip(offset);
// read how many terms....
final long numTerms = perFieldTermVectorInput.readVLong();
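
HPPC maps do not expose a java.util key set, which is why the iterator() override above adapts the map's cursor iterator. Direct iteration is cursor based; a minimal sketch, assuming HPPC 0.5.x (names illustrative):

import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;

public class CursorIterationSketch {
    public static void main(String[] args) {
        ObjectLongOpenHashMap<String> fieldMap = new ObjectLongOpenHashMap<String>();
        fieldMap.put("title", 0L);
        fieldMap.put("body", 128L);

        // Each cursor exposes the key and the primitive value without boxing.
        for (ObjectLongCursor<String> cursor : fieldMap) {
            System.out.println(cursor.key + " -> " + cursor.value);
        }
    }
}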


@ -19,8 +19,7 @@
package org.elasticsearch.cache.recycler;
import gnu.trove.map.hash.*;
import gnu.trove.set.hash.THashSet;
import com.carrotsearch.hppc.*;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
@ -29,25 +28,22 @@ import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.recycler.SoftThreadLocalRecycler;
import org.elasticsearch.common.recycler.ThreadLocalRecycler;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.trove.ExtTDoubleObjectHashMap;
import org.elasticsearch.common.trove.ExtTHashMap;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
@SuppressWarnings("unchecked")
public class CacheRecycler extends AbstractComponent {
public final Recycler<ExtTHashMap> hashMap;
public final Recycler<THashSet> hashSet;
public final Recycler<ExtTDoubleObjectHashMap> doubleObjectMap;
public final Recycler<ExtTLongObjectHashMap> longObjectMap;
public final Recycler<TLongLongHashMap> longLongMap;
public final Recycler<TIntIntHashMap> intIntMap;
public final Recycler<TFloatIntHashMap> floatIntMap;
public final Recycler<TDoubleIntHashMap> doubleIntMap;
public final Recycler<TLongIntHashMap> longIntMap;
public final Recycler<TObjectIntHashMap> objectIntMap;
public final Recycler<TIntObjectHashMap> intObjectMap;
public final Recycler<TObjectFloatHashMap> objectFloatMap;
public final Recycler<ObjectObjectOpenHashMap> hashMap;
public final Recycler<ObjectOpenHashSet> hashSet;
public final Recycler<DoubleObjectOpenHashMap> doubleObjectMap;
public final Recycler<LongObjectOpenHashMap> longObjectMap;
public final Recycler<LongLongOpenHashMap> longLongMap;
public final Recycler<IntIntOpenHashMap> intIntMap;
public final Recycler<FloatIntOpenHashMap> floatIntMap;
public final Recycler<DoubleIntOpenHashMap> doubleIntMap;
public final Recycler<LongIntOpenHashMap> longIntMap;
public final Recycler<ObjectIntOpenHashMap> objectIntMap;
public final Recycler<IntObjectOpenHashMap> intObjectMap;
public final Recycler<ObjectFloatOpenHashMap> objectFloatMap;
public void close() {
hashMap.close();
@ -71,185 +67,185 @@ public class CacheRecycler extends AbstractComponent {
int limit = settings.getAsInt("limit", 10);
int smartSize = settings.getAsInt("smart_size", 1024);
hashMap = build(type, limit, smartSize, new Recycler.C<ExtTHashMap>() {
hashMap = build(type, limit, smartSize, new Recycler.C<ObjectObjectOpenHashMap>() {
@Override
public ExtTHashMap newInstance(int sizing) {
return new ExtTHashMap(size(sizing));
public ObjectObjectOpenHashMap newInstance(int sizing) {
return new ObjectObjectOpenHashMap(size(sizing));
}
@Override
public void clear(ExtTHashMap value) {
public void clear(ObjectObjectOpenHashMap value) {
value.clear();
}
});
hashSet = build(type, limit, smartSize, new Recycler.C<THashSet>() {
hashSet = build(type, limit, smartSize, new Recycler.C<ObjectOpenHashSet>() {
@Override
public THashSet newInstance(int sizing) {
return new THashSet(size(sizing));
public ObjectOpenHashSet newInstance(int sizing) {
return new ObjectOpenHashSet(size(sizing), 0.5f);
}
@Override
public void clear(THashSet value) {
public void clear(ObjectOpenHashSet value) {
value.clear();
}
});
doubleObjectMap = build(type, limit, smartSize, new Recycler.C<ExtTDoubleObjectHashMap>() {
doubleObjectMap = build(type, limit, smartSize, new Recycler.C<DoubleObjectOpenHashMap>() {
@Override
public ExtTDoubleObjectHashMap newInstance(int sizing) {
return new ExtTDoubleObjectHashMap(size(sizing));
public DoubleObjectOpenHashMap newInstance(int sizing) {
return new DoubleObjectOpenHashMap(size(sizing));
}
@Override
public void clear(ExtTDoubleObjectHashMap value) {
public void clear(DoubleObjectOpenHashMap value) {
value.clear();
}
});
longObjectMap = build(type, limit, smartSize, new Recycler.C<ExtTLongObjectHashMap>() {
longObjectMap = build(type, limit, smartSize, new Recycler.C<LongObjectOpenHashMap>() {
@Override
public ExtTLongObjectHashMap newInstance(int sizing) {
return new ExtTLongObjectHashMap(size(sizing));
public LongObjectOpenHashMap newInstance(int sizing) {
return new LongObjectOpenHashMap(size(sizing));
}
@Override
public void clear(ExtTLongObjectHashMap value) {
public void clear(LongObjectOpenHashMap value) {
value.clear();
}
});
longLongMap = build(type, limit, smartSize, new Recycler.C<TLongLongHashMap>() {
longLongMap = build(type, limit, smartSize, new Recycler.C<LongLongOpenHashMap>() {
@Override
public TLongLongHashMap newInstance(int sizing) {
return new TLongLongHashMap(size(sizing));
public LongLongOpenHashMap newInstance(int sizing) {
return new LongLongOpenHashMap(size(sizing));
}
@Override
public void clear(TLongLongHashMap value) {
public void clear(LongLongOpenHashMap value) {
value.clear();
}
});
intIntMap = build(type, limit, smartSize, new Recycler.C<TIntIntHashMap>() {
intIntMap = build(type, limit, smartSize, new Recycler.C<IntIntOpenHashMap>() {
@Override
public TIntIntHashMap newInstance(int sizing) {
return new TIntIntHashMap(size(sizing));
public IntIntOpenHashMap newInstance(int sizing) {
return new IntIntOpenHashMap(size(sizing));
}
@Override
public void clear(TIntIntHashMap value) {
public void clear(IntIntOpenHashMap value) {
value.clear();
}
});
floatIntMap = build(type, limit, smartSize, new Recycler.C<TFloatIntHashMap>() {
floatIntMap = build(type, limit, smartSize, new Recycler.C<FloatIntOpenHashMap>() {
@Override
public TFloatIntHashMap newInstance(int sizing) {
return new TFloatIntHashMap(size(sizing));
public FloatIntOpenHashMap newInstance(int sizing) {
return new FloatIntOpenHashMap(size(sizing));
}
@Override
public void clear(TFloatIntHashMap value) {
public void clear(FloatIntOpenHashMap value) {
value.clear();
}
});
doubleIntMap = build(type, limit, smartSize, new Recycler.C<TDoubleIntHashMap>() {
doubleIntMap = build(type, limit, smartSize, new Recycler.C<DoubleIntOpenHashMap>() {
@Override
public TDoubleIntHashMap newInstance(int sizing) {
return new TDoubleIntHashMap(size(sizing));
public DoubleIntOpenHashMap newInstance(int sizing) {
return new DoubleIntOpenHashMap(size(sizing));
}
@Override
public void clear(TDoubleIntHashMap value) {
public void clear(DoubleIntOpenHashMap value) {
value.clear();
}
});
longIntMap = build(type, limit, smartSize, new Recycler.C<TLongIntHashMap>() {
longIntMap = build(type, limit, smartSize, new Recycler.C<LongIntOpenHashMap>() {
@Override
public TLongIntHashMap newInstance(int sizing) {
return new TLongIntHashMap(size(sizing));
public LongIntOpenHashMap newInstance(int sizing) {
return new LongIntOpenHashMap(size(sizing));
}
@Override
public void clear(TLongIntHashMap value) {
public void clear(LongIntOpenHashMap value) {
value.clear();
}
});
objectIntMap = build(type, limit, smartSize, new Recycler.C<TObjectIntHashMap>() {
objectIntMap = build(type, limit, smartSize, new Recycler.C<ObjectIntOpenHashMap>() {
@Override
public TObjectIntHashMap newInstance(int sizing) {
return new TObjectIntHashMap(size(sizing));
public ObjectIntOpenHashMap newInstance(int sizing) {
return new ObjectIntOpenHashMap(size(sizing));
}
@Override
public void clear(TObjectIntHashMap value) {
public void clear(ObjectIntOpenHashMap value) {
value.clear();
}
});
intObjectMap = build(type, limit, smartSize, new Recycler.C<TIntObjectHashMap>() {
intObjectMap = build(type, limit, smartSize, new Recycler.C<IntObjectOpenHashMap>() {
@Override
public TIntObjectHashMap newInstance(int sizing) {
return new TIntObjectHashMap(size(sizing));
public IntObjectOpenHashMap newInstance(int sizing) {
return new IntObjectOpenHashMap(size(sizing));
}
@Override
public void clear(TIntObjectHashMap value) {
public void clear(IntObjectOpenHashMap value) {
value.clear();
}
});
objectFloatMap = build(type, limit, smartSize, new Recycler.C<TObjectFloatHashMap>() {
objectFloatMap = build(type, limit, smartSize, new Recycler.C<ObjectFloatOpenHashMap>() {
@Override
public TObjectFloatHashMap newInstance(int sizing) {
return new TObjectFloatHashMap(size(sizing));
public ObjectFloatOpenHashMap newInstance(int sizing) {
return new ObjectFloatOpenHashMap(size(sizing));
}
@Override
public void clear(TObjectFloatHashMap value) {
public void clear(ObjectFloatOpenHashMap value) {
value.clear();
}
});
}
public <K, V> Recycler.V<ExtTHashMap<K, V>> hashMap(int sizing) {
public <K, V> Recycler.V<ObjectObjectOpenHashMap<K, V>> hashMap(int sizing) {
return (Recycler.V) hashMap.obtain(sizing);
}
public <T> Recycler.V<THashSet<T>> hashSet(int sizing) {
public <T> Recycler.V<ObjectOpenHashSet<T>> hashSet(int sizing) {
return (Recycler.V) hashSet.obtain(sizing);
}
public <T> Recycler.V<ExtTDoubleObjectHashMap<T>> doubleObjectMap(int sizing) {
public <T> Recycler.V<DoubleObjectOpenHashMap<T>> doubleObjectMap(int sizing) {
return (Recycler.V) doubleObjectMap.obtain(sizing);
}
public <T> Recycler.V<ExtTLongObjectHashMap<T>> longObjectMap(int sizing) {
public <T> Recycler.V<LongObjectOpenHashMap<T>> longObjectMap(int sizing) {
return (Recycler.V) longObjectMap.obtain(sizing);
}
public Recycler.V<TLongLongHashMap> longLongMap(int sizing) {
public Recycler.V<LongLongOpenHashMap> longLongMap(int sizing) {
return longLongMap.obtain(sizing);
}
public Recycler.V<TIntIntHashMap> intIntMap(int sizing) {
public Recycler.V<IntIntOpenHashMap> intIntMap(int sizing) {
return intIntMap.obtain(sizing);
}
public Recycler.V<TFloatIntHashMap> floatIntMap(int sizing) {
public Recycler.V<FloatIntOpenHashMap> floatIntMap(int sizing) {
return floatIntMap.obtain(sizing);
}
public Recycler.V<TDoubleIntHashMap> doubleIntMap(int sizing) {
public Recycler.V<DoubleIntOpenHashMap> doubleIntMap(int sizing) {
return doubleIntMap.obtain(sizing);
}
public Recycler.V<TLongIntHashMap> longIntMap(int sizing) {
public Recycler.V<LongIntOpenHashMap> longIntMap(int sizing) {
return longIntMap.obtain(sizing);
}
public <T> Recycler.V<TObjectIntHashMap<T>> objectIntMap(int sizing) {
public <T> Recycler.V<ObjectIntOpenHashMap<T>> objectIntMap(int sizing) {
return (Recycler.V) objectIntMap.obtain(sizing);
}
public <T> Recycler.V<TIntObjectHashMap<T>> intObjectMap(int sizing) {
public <T> Recycler.V<IntObjectOpenHashMap<T>> intObjectMap(int sizing) {
return (Recycler.V) intObjectMap.obtain(sizing);
}
public <T> Recycler.V<TObjectFloatHashMap<T>> objectFloatMap(int sizing) {
public <T> Recycler.V<ObjectFloatOpenHashMap<T>> objectFloatMap(int sizing) {
return (Recycler.V) objectFloatMap.obtain(sizing);
}
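
The recyclers above pool HPPC containers and reset them through the clear() callbacks; as with the Trove types they replace, clearing empties the container without discarding it, which is what makes pooling worthwhile (whether HPPC's clear() also keeps its internal buffers allocated is an assumption here, not something this diff shows). A trivial sketch of the reuse contract using plain HPPC calls, with the pooling itself elided:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class RecycleClearSketch {
    public static void main(String[] args) {
        // Stand-in for a container handed out by one of the recyclers above.
        ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
        counts.put("a", 1);
        counts.put("b", 2);

        // What the Recycler.C clear(...) callbacks do before returning the
        // instance to the pool: the map is emptied but remains usable.
        counts.clear();
        System.out.println(counts.size()); // prints 0
    }
}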


@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata;
import com.google.common.base.Predicate;
import com.google.common.collect.*;
import gnu.trove.set.hash.THashSet;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
@ -38,7 +37,6 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.trove.ExtTHashMap;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.index.Index;
import org.elasticsearch.indices.IndexMissingException;
@ -160,26 +158,24 @@ public class MetaData implements Iterable<IndexMetaData> {
this.allOpenIndices = allOpenIndices.toArray(new String[allOpenIndices.size()]);
// build aliases map
ExtTHashMap<String, Map<String, AliasMetaData>> aliases = new ExtTHashMap<String, Map<String, AliasMetaData>>(numAliases);
Map<String, Map<String, AliasMetaData>> tmpAliases = new HashMap<String, Map<String, AliasMetaData>>(numAliases);
for (IndexMetaData indexMetaData : indices.values()) {
String index = indexMetaData.index();
for (AliasMetaData aliasMd : indexMetaData.aliases().values()) {
Map<String, AliasMetaData> indexAliasMap = aliases.get(aliasMd.alias());
Map<String, AliasMetaData> indexAliasMap = tmpAliases.get(aliasMd.alias());
if (indexAliasMap == null) {
indexAliasMap = new ExtTHashMap<String, AliasMetaData>(indices.size());
aliases.put(aliasMd.alias(), indexAliasMap);
indexAliasMap = new HashMap<String, AliasMetaData>(indices.size());
tmpAliases.put(aliasMd.alias(), indexAliasMap);
}
indexAliasMap.put(index, aliasMd);
}
}
for (int i = 0; i < aliases.internalValues().length; i++) {
if (aliases.internalValues()[i] != null) {
aliases.internalValues()[i] = XMaps.makeReadOnly((Map) aliases.internalValues()[i]);
}
for (String alias : tmpAliases.keySet()) {
tmpAliases.put(alias, XMaps.makeReadOnly(tmpAliases.get(alias)));
}
this.aliases = XMaps.makeReadOnly(aliases);
this.aliases = XMaps.makeReadOnly(tmpAliases);
ExtTHashMap<String, StringArray> aliasAndIndexToIndexMap = new ExtTHashMap<String, StringArray>(numAliases + numIndices);
Map<String, StringArray> aliasAndIndexToIndexMap = new HashMap<String, StringArray>(numAliases + numIndices);
for (IndexMetaData indexMetaData : indices.values()) {
StringArray indicesLst = aliasAndIndexToIndexMap.get(indexMetaData.index());
if (indicesLst == null) {
@ -198,8 +194,8 @@ public class MetaData implements Iterable<IndexMetaData> {
}
}
for (StringArray stringArray : aliasAndIndexToIndexMap.values()) {
stringArray.trim();
for (StringArray value : aliasAndIndexToIndexMap.values()) {
value.trim();
}
this.aliasAndIndexToIndexMap = XMaps.makeReadOnly(aliasAndIndexToIndexMap);
@ -439,7 +435,7 @@ public class MetaData implements Iterable<IndexMetaData> {
Map<String, Set<String>> routings = null;
Set<String> paramRouting = null;
// List of indices that don't require any routing
Set<String> norouting = new THashSet<String>();
Set<String> norouting = new HashSet<String>();
if (routing != null) {
paramRouting = Strings.splitStringByCommaToSet(routing);
}
@ -456,7 +452,7 @@ public class MetaData implements Iterable<IndexMetaData> {
}
Set<String> r = routings.get(indexRouting.getKey());
if (r == null) {
r = new THashSet<String>();
r = new HashSet<String>();
routings.put(indexRouting.getKey(), r);
}
r.addAll(indexRouting.getValue().searchRoutingValues());
@ -471,7 +467,7 @@ public class MetaData implements Iterable<IndexMetaData> {
if (!norouting.contains(indexRouting.getKey())) {
norouting.add(indexRouting.getKey());
if (paramRouting != null) {
Set<String> r = new THashSet<String>(paramRouting);
Set<String> r = new HashSet<String>(paramRouting);
if (routings == null) {
routings = newHashMap();
}
@ -490,7 +486,7 @@ public class MetaData implements Iterable<IndexMetaData> {
if (!norouting.contains(aliasOrIndex)) {
norouting.add(aliasOrIndex);
if (paramRouting != null) {
Set<String> r = new THashSet<String>(paramRouting);
Set<String> r = new HashSet<String>(paramRouting);
if (routings == null) {
routings = newHashMap();
}
@ -523,7 +519,7 @@ public class MetaData implements Iterable<IndexMetaData> {
for (Map.Entry<String, AliasMetaData> indexRouting : indexToRoutingMap.entrySet()) {
if (!indexRouting.getValue().searchRoutingValues().isEmpty()) {
// Routing alias
Set<String> r = new THashSet<String>(indexRouting.getValue().searchRoutingValues());
Set<String> r = new HashSet<String>(indexRouting.getValue().searchRoutingValues());
if (paramRouting != null) {
r.retainAll(paramRouting);
}
@ -536,7 +532,7 @@ public class MetaData implements Iterable<IndexMetaData> {
} else {
// Non-routing alias
if (paramRouting != null) {
Set<String> r = new THashSet<String>(paramRouting);
Set<String> r = new HashSet<String>(paramRouting);
if (routings == null) {
routings = newHashMap();
}
@ -619,7 +615,7 @@ public class MetaData implements Iterable<IndexMetaData> {
return aliasesOrIndices;
}
Set<String> actualIndices = new THashSet<String>();
Set<String> actualIndices = new HashSet<String>();
for (String index : aliasesOrIndices) {
StringArray actualLst = aliasAndIndexToIndexMap.get(index);
if (actualLst == null) {
@ -680,7 +676,7 @@ public class MetaData implements Iterable<IndexMetaData> {
} else if (aliasOrIndex.charAt(0) == '-') {
// if its the first, fill it with all the indices...
if (i == 0) {
result = new THashSet<String>(Arrays.asList(wildcardOnlyOpen ? concreteAllOpenIndices() : concreteAllIndices()));
result = new HashSet<String>(Arrays.asList(wildcardOnlyOpen ? concreteAllOpenIndices() : concreteAllIndices()));
}
add = false;
aliasOrIndex = aliasOrIndex.substring(1);
@ -700,7 +696,7 @@ public class MetaData implements Iterable<IndexMetaData> {
}
if (result == null) {
// add all the previous ones...
result = new THashSet<String>();
result = new HashSet<String>();
result.addAll(Arrays.asList(aliasesOrIndices).subList(0, i));
}
String[] indices = wildcardOnlyOpen ? concreteAllOpenIndices() : concreteAllIndices();


@ -19,10 +19,10 @@
package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -55,7 +55,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
private Set<ShardId> clearPostAllocationFlag;
private final Map<String, TObjectIntHashMap<String>> nodesPerAttributeNames = new HashMap<String, TObjectIntHashMap<String>>();
private final Map<String, ObjectIntOpenHashMap<String>> nodesPerAttributeNames = new HashMap<String, ObjectIntOpenHashMap<String>>();
public RoutingNodes(ClusterState clusterState) {
this.metaData = clusterState.metaData();
@ -188,15 +188,15 @@ public class RoutingNodes implements Iterable<RoutingNode> {
return nodesToShards.get(nodeId);
}
public TObjectIntHashMap<String> nodesPerAttributesCounts(String attributeName) {
TObjectIntHashMap<String> nodesPerAttributesCounts = nodesPerAttributeNames.get(attributeName);
public ObjectIntOpenHashMap<String> nodesPerAttributesCounts(String attributeName) {
ObjectIntOpenHashMap<String> nodesPerAttributesCounts = nodesPerAttributeNames.get(attributeName);
if (nodesPerAttributesCounts != null) {
return nodesPerAttributesCounts;
}
nodesPerAttributesCounts = new TObjectIntHashMap<String>();
nodesPerAttributesCounts = new ObjectIntOpenHashMap<String>();
for (RoutingNode routingNode : this) {
String attrValue = routingNode.node().attributes().get(attributeName);
nodesPerAttributesCounts.adjustOrPutValue(attrValue, 1, 1);
nodesPerAttributesCounts.addTo(attrValue, 1);
}
nodesPerAttributeNames.put(attributeName, nodesPerAttributesCounts);
return nodesPerAttributesCounts;
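
Trove's adjustOrPutValue(key, adjustAmount, putAmount) maps onto HPPC's addTo(key, delta), which inserts the delta for an absent key and adds it to the existing value otherwise. A minimal counting sketch, assuming HPPC 0.5.x (names illustrative):

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class AttributeCountSketch {
    public static void main(String[] args) {
        ObjectIntOpenHashMap<String> nodesPerAttribute = new ObjectIntOpenHashMap<String>();
        for (String zone : new String[]{"zone1", "zone2", "zone1"}) {
            nodesPerAttribute.addTo(zone, 1); // absent key starts at 1, present key is incremented
        }
        System.out.println(nodesPerAttribute.get("zone1")); // 2
        System.out.println(nodesPerAttribute.get("zone3")); // 0, the HPPC default for absent keys
    }
}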


@ -19,7 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import gnu.trove.map.hash.TObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
@ -225,12 +225,12 @@ public class EvenShardsCountAllocator extends AbstractComponent implements Shard
private RoutingNode[] sortedNodesLeastToHigh(RoutingAllocation allocation) {
// create count per node id, taking into account relocations
final TObjectIntHashMap<String> nodeCounts = new TObjectIntHashMap<String>();
final ObjectIntOpenHashMap<String> nodeCounts = new ObjectIntOpenHashMap<String>();
for (RoutingNode node : allocation.routingNodes()) {
for (int i = 0; i < node.shards().size(); i++) {
ShardRouting shardRouting = node.shards().get(i);
String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
nodeCounts.adjustOrPutValue(nodeId, 1, 1);
nodeCounts.addTo(nodeId, 1);
}
}
RoutingNode[] nodes = allocation.routingNodes().nodesToShards().values().toArray(new RoutingNode[allocation.routingNodes().nodesToShards().values().size()]);


@ -19,8 +19,8 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.google.common.collect.Maps;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.MutableShardRouting;
import org.elasticsearch.cluster.routing.RoutingNode;
@ -176,10 +176,10 @@ public class AwarenessAllocationDecider extends AllocationDecider {
}
// build attr_value -> nodes map
TObjectIntHashMap<String> nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute);
ObjectIntOpenHashMap<String> nodesPerAttribute = allocation.routingNodes().nodesPerAttributesCounts(awarenessAttribute);
// build the count of shards per attribute value
TObjectIntHashMap<String> shardPerAttribute = new TObjectIntHashMap<String>();
ObjectIntOpenHashMap<String> shardPerAttribute = new ObjectIntOpenHashMap<String>();
for (RoutingNode routingNode : allocation.routingNodes()) {
for (int i = 0; i < routingNode.shards().size(); i++) {
MutableShardRouting nodeShardRouting = routingNode.shards().get(i);
@ -187,9 +187,9 @@ public class AwarenessAllocationDecider extends AllocationDecider {
// if the shard is relocating, then make sure we count it as part of the node it is relocating to
if (nodeShardRouting.relocating()) {
RoutingNode relocationNode = allocation.routingNodes().node(nodeShardRouting.relocatingNodeId());
shardPerAttribute.adjustOrPutValue(relocationNode.node().attributes().get(awarenessAttribute), 1, 1);
shardPerAttribute.addTo(relocationNode.node().attributes().get(awarenessAttribute), 1);
} else if (nodeShardRouting.started()) {
shardPerAttribute.adjustOrPutValue(routingNode.node().attributes().get(awarenessAttribute), 1, 1);
shardPerAttribute.addTo(routingNode.node().attributes().get(awarenessAttribute), 1);
}
}
}
@ -199,11 +199,11 @@ public class AwarenessAllocationDecider extends AllocationDecider {
String nodeId = shardRouting.relocating() ? shardRouting.relocatingNodeId() : shardRouting.currentNodeId();
if (!node.nodeId().equals(nodeId)) {
// we work on different nodes, move counts around
shardPerAttribute.adjustOrPutValue(allocation.routingNodes().node(nodeId).node().attributes().get(awarenessAttribute), -1, 0);
shardPerAttribute.adjustOrPutValue(node.node().attributes().get(awarenessAttribute), 1, 1);
shardPerAttribute.putOrAdd(allocation.routingNodes().node(nodeId).node().attributes().get(awarenessAttribute), 0, -1);
shardPerAttribute.addTo(node.node().attributes().get(awarenessAttribute), 1);
}
} else {
shardPerAttribute.adjustOrPutValue(node.node().attributes().get(awarenessAttribute), 1, 1);
shardPerAttribute.addTo(node.node().attributes().get(awarenessAttribute), 1);
}
}
@ -211,7 +211,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
String[] fullValues = forcedAwarenessAttributes.get(awarenessAttribute);
if (fullValues != null) {
for (String fullValue : fullValues) {
if (!shardPerAttribute.contains(fullValue)) {
if (!shardPerAttribute.containsKey(fullValue)) {
numberOfAttributes++;
}
}
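
Two different Trove calls are translated in this file: adjustOrPutValue(key, 1, 1) becomes addTo(key, 1), and adjustOrPutValue(key, -1, 0) becomes putOrAdd(key, 0, -1), because HPPC's putOrAdd takes the value to insert when the key is absent first and the delta to add when it is present second (the reverse of Trove's argument order). The presence check also changes from contains() to containsKey(). A small sketch making the semantics concrete, assuming HPPC 0.5.x:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class PutOrAddSketch {
    public static void main(String[] args) {
        ObjectIntOpenHashMap<String> shardPerAttribute = new ObjectIntOpenHashMap<String>();

        // addTo(key, 1): insert 1 when absent, otherwise increment by 1.
        shardPerAttribute.addTo("rack1", 1);
        shardPerAttribute.addTo("rack1", 1);

        // putOrAdd(key, 0, -1): insert 0 when absent, otherwise add -1.
        shardPerAttribute.putOrAdd("rack1", 0, -1);
        shardPerAttribute.putOrAdd("rack2", 0, -1);

        System.out.println(shardPerAttribute.get("rack1")); // 1
        System.out.println(shardPerAttribute.get("rack2")); // 0
    }
}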


@ -21,7 +21,7 @@ package org.elasticsearch.common;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import gnu.trove.set.hash.THashSet;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.ElasticSearchIllegalStateException;
@ -1021,7 +1021,8 @@ public class Strings {
count++;
}
}
final THashSet<String> result = new THashSet<String>(count);
// TODO (MvG): No push: hppc or jcf?
final Set<String> result = new HashSet<String>(count);
final int len = chars.length;
int start = 0; // starting index in chars of the current substring.
int pos = 0; // current index in chars.


@ -19,11 +19,6 @@
package org.elasticsearch.common.collect;
import com.google.common.collect.ForwardingMap;
import gnu.trove.impl.Constants;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.common.trove.ExtTHashMap;
import java.util.Collections;
import java.util.Map;
@ -36,59 +31,6 @@ import java.util.Map;
*/
public final class XMaps {
public static final int DEFAULT_CAPACITY = Constants.DEFAULT_CAPACITY;
/**
* Returns a new map with the given initial capacity
*/
public static <K, V> Map<K, V> newMap(int capacity) {
return new ExtTHashMap<K, V>(capacity, Constants.DEFAULT_LOAD_FACTOR);
}
/**
* Returns a new map with a default initial capacity of
* {@value #DEFAULT_CAPACITY}
*/
public static <K, V> Map<K, V> newMap() {
return newMap(DEFAULT_CAPACITY);
}
/**
* Returns a map like {@link #newMap()} that does not accept <code>null</code> keys
*/
public static <K, V> Map<K, V> newNoNullKeysMap() {
Map<K, V> delegate = newMap();
return ensureNoNullKeys(delegate);
}
/**
* Returns a map like {@link #newMap(in)} that does not accept <code>null</code> keys
*/
public static <K, V> Map<K, V> newNoNullKeysMap(int capacity) {
Map<K, V> delegate = newMap(capacity);
return ensureNoNullKeys(delegate);
}
/**
* Wraps the given map and prevent adding of <code>null</code> keys.
*/
public static <K, V> Map<K, V> ensureNoNullKeys(final Map<K, V> delegate) {
return new ForwardingMap<K, V>() {
@Override
public V put(K key, V value) {
if (key == null) {
throw new ElasticSearchIllegalArgumentException("Map key must not be null");
}
return super.put(key, value);
}
@Override
protected Map<K, V> delegate() {
return delegate;
}
};
}
/**
* Wraps the given map into a read only implementation.
*/


@ -0,0 +1,81 @@
package org.elasticsearch.common.hppc;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
/**
*/
public final class HppcMaps {
private HppcMaps() {
}
/**
* Returns a new map with the given initial capacity
*/
public static <K, V> ObjectObjectOpenHashMap<K, V> newMap(int capacity) {
return new ObjectObjectOpenHashMap<K, V>(capacity);
}
/**
* Returns a new map with a default initial capacity of
* {@value com.carrotsearch.hppc.HashContainerUtils#DEFAULT_CAPACITY}
*/
public static <K, V> ObjectObjectOpenHashMap<K, V> newMap() {
return newMap(16);
}
/**
* Returns a map like {@link #newMap()} that does not accept <code>null</code> keys
*/
public static <K, V> ObjectObjectOpenHashMap<K, V> newNoNullKeysMap() {
return ensureNoNullKeys(16);
}
/**
* Returns a map like {@link #newMap(int)} that does not accept <code>null</code> keys
*/
public static <K, V> ObjectObjectOpenHashMap<K, V> newNoNullKeysMap(int capacity) {
return ensureNoNullKeys(capacity);
}
/**
* Wraps the given map and prevent adding of <code>null</code> keys.
*/
public static <K, V> ObjectObjectOpenHashMap<K, V> ensureNoNullKeys(int capacity) {
return new ObjectObjectOpenHashMap<K, V>(capacity) {
@Override
public V put(K key, V value) {
if (key == null) {
throw new ElasticSearchIllegalArgumentException("Map key must not be null");
}
return super.put(key, value);
}
};
}
public final static class Object {
public final static class Integer {
public static <V> ObjectIntOpenHashMap<V> ensureNoNullKeys(int capacity, float loadFactor) {
return new ObjectIntOpenHashMap<V>(capacity, loadFactor) {
@Override
public int put(V key, int value) {
if (key == null) {
throw new ElasticSearchIllegalArgumentException("Map key must not be null");
}
return super.put(key, value);
}
};
}
}
}
}
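
This new helper preserves the no-null-keys behavior that a few call sites rely on, since HPPC object-keyed containers accept null keys. A small usage sketch of the two entry points defined above (variable names illustrative):

import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import org.elasticsearch.common.hppc.HppcMaps;

public class HppcMapsUsageSketch {
    public static void main(String[] args) {
        ObjectObjectOpenHashMap<String, String> map = HppcMaps.newNoNullKeysMap();
        map.put("key", "value");
        // map.put(null, "value"); // would throw ElasticSearchIllegalArgumentException

        ObjectIntOpenHashMap<String> counts = HppcMaps.Object.Integer.ensureNoNullKeys(256, 0.75f);
        counts.put("key", 1);
        // counts.put(null, 1);    // would throw as well
    }
}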


@ -19,7 +19,7 @@
package org.elasticsearch.common.io.stream;
import gnu.trove.map.hash.TIntObjectHashMap;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import org.elasticsearch.common.text.Text;
import java.io.IOException;
@ -29,8 +29,8 @@ import java.io.IOException;
*/
public class HandlesStreamInput extends AdapterStreamInput {
private final TIntObjectHashMap<String> handles = new TIntObjectHashMap<String>();
private final TIntObjectHashMap<Text> handlesText = new TIntObjectHashMap<Text>();
private final IntObjectOpenHashMap<String> handles = new IntObjectOpenHashMap<String>();
private final IntObjectOpenHashMap<Text> handlesText = new IntObjectOpenHashMap<Text>();
HandlesStreamInput() {
super();


@ -19,8 +19,7 @@
package org.elasticsearch.common.io.stream;
import gnu.trove.impl.Constants;
import gnu.trove.map.hash.TObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.elasticsearch.common.text.Text;
import java.io.IOException;
@ -30,8 +29,8 @@ import java.io.IOException;
*/
public class HandlesStreamOutput extends AdapterStreamOutput {
private final TObjectIntHashMap<String> handles = new TObjectIntHashMap<String>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
private final TObjectIntHashMap<Text> handlesText = new TObjectIntHashMap<Text>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
private final ObjectIntOpenHashMap<String> handles = new ObjectIntOpenHashMap<String>();
private final ObjectIntOpenHashMap<Text> handlesText = new ObjectIntOpenHashMap<Text>();
public HandlesStreamOutput(StreamOutput out) {
super(out);
@ -39,16 +38,15 @@ public class HandlesStreamOutput extends AdapterStreamOutput {
@Override
public void writeSharedString(String str) throws IOException {
int handle = handles.get(str);
if (handle == -1) {
handle = handles.size();
if (handles.containsKey(str)) {
out.writeByte((byte) 1);
out.writeVInt(handles.lget());
} else {
int handle = handles.size();
handles.put(str, handle);
out.writeByte((byte) 0);
out.writeVInt(handle);
out.writeString(str);
} else {
out.writeByte((byte) 1);
out.writeVInt(handle);
}
}
@ -59,16 +57,15 @@ public class HandlesStreamOutput extends AdapterStreamOutput {
@Override
public void writeSharedText(Text text) throws IOException {
int handle = handlesText.get(text);
if (handle == -1) {
handle = handlesText.size();
if (handlesText.containsKey(text)) {
out.writeByte((byte) 1);
out.writeVInt(handlesText.lget());
} else {
int handle = handlesText.size();
handlesText.put(text, handle);
out.writeByte((byte) 0);
out.writeVInt(handle);
out.writeText(text);
} else {
out.writeByte((byte) 1);
out.writeVInt(handle);
}
}
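
The handle table rewrite above follows the same shape as the other map lookups in this commit: Trove's get() with a -1 no-entry value becomes containsKey()/lget(), and a new handle is taken from the current map size on a miss. A condensed sketch of that interning idiom with the stream writing left out; the handleFor helper is illustrative, not part of the class shown:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class SharedStringHandleSketch {
    private final ObjectIntOpenHashMap<String> handles = new ObjectIntOpenHashMap<String>();

    // Returns the existing handle for str, or assigns and returns a new one.
    int handleFor(String str) {
        if (handles.containsKey(str)) {
            return handles.lget();       // already seen: reuse the handle
        }
        int handle = handles.size();     // first occurrence: next free handle
        handles.put(str, handle);
        return handle;
    }

    public static void main(String[] args) {
        SharedStringHandleSketch sketch = new SharedStringHandleSketch();
        System.out.println(sketch.handleFor("field")); // 0
        System.out.println(sketch.handleFor("field")); // 0 again
        System.out.println(sketch.handleFor("other")); // 1
    }
}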


@ -19,7 +19,7 @@
package org.elasticsearch.common.lucene.search;
import gnu.trove.set.hash.THashSet;
import com.carrotsearch.hppc.ObjectOpenHashSet;
import org.apache.lucene.index.*;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.Query;
@ -138,7 +138,7 @@ public class MultiPhrasePrefixQuery extends Query {
}
Term[] suffixTerms = termArrays.get(sizeMinus1);
int position = positions.get(sizeMinus1);
Set<Term> terms = new THashSet<Term>();
ObjectOpenHashSet<Term> terms = new ObjectOpenHashSet<Term>();
for (Term term : suffixTerms) {
getPrefixTerms(terms, term, reader);
if (terms.size() > maxExpansions) {
@ -148,11 +148,11 @@ public class MultiPhrasePrefixQuery extends Query {
if (terms.isEmpty()) {
return MatchNoDocsQuery.INSTANCE;
}
query.add(terms.toArray(new Term[terms.size()]), position);
query.add(terms.toArray(Term.class), position);
return query.rewrite(reader);
}
private void getPrefixTerms(Set<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
private void getPrefixTerms(ObjectOpenHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
// SlowCompositeReaderWrapper could be used... but this would merge all terms from each segment into one terms
// instance, which is very expensive. Therefore I think it is better to iterate over each leaf individually.
TermsEnum termsEnum = null;
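
ObjectOpenHashSet returns a typed array via toArray(Class), so the old terms.toArray(new Term[terms.size()]) call collapses to terms.toArray(Term.class). A small sketch of that call with made-up data, assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectOpenHashSet;

public class TypedToArrayExample {
    public static void main(String[] args) {
        ObjectOpenHashSet<String> terms = new ObjectOpenHashSet<String>();
        terms.add("foo");
        terms.add("bar");
        terms.add("foo"); // duplicates are ignored, size stays 2

        // toArray(Class) allocates the String[] internally; no pre-sized array is needed.
        String[] array = terms.toArray(String.class);
        for (String term : array) {
            System.out.println(term);
        }
    }
}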

View File

@ -19,7 +19,8 @@
package org.elasticsearch.common.transport;
import gnu.trove.list.array.TIntArrayList;
import com.carrotsearch.hppc.IntArrayList;
import java.util.StringTokenizer;
@ -35,7 +36,7 @@ public class PortsRange {
}
public int[] ports() throws NumberFormatException {
final TIntArrayList ports = new TIntArrayList();
final IntArrayList ports = new IntArrayList();
iterate(new PortCallback() {
@Override
public boolean onPortNumber(int portNumber) {
@ -43,7 +44,7 @@ public class PortsRange {
return false;
}
});
return ports.toArray(new int[ports.size()]);
return ports.toArray();
}
public boolean iterate(PortCallback callback) throws NumberFormatException {
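
IntArrayList.toArray() already returns an int[], so the Trove-style toArray(new int[size]) disappears. A minimal sketch (the port-range parsing itself is omitted), assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.IntArrayList;

public class IntArrayListExample {
    public static void main(String[] args) {
        IntArrayList ports = new IntArrayList();
        for (int port = 9300; port < 9305; port++) {
            ports.add(port);
        }
        // toArray() copies the backing buffer into a correctly sized int[].
        int[] asArray = ports.toArray();
        System.out.println(asArray.length); // 5
    }
}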

View File

@ -1,53 +0,0 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.trove;
import gnu.trove.map.TDoubleObjectMap;
import gnu.trove.map.hash.TDoubleObjectHashMap;
public class ExtTDoubleObjectHashMap<V> extends TDoubleObjectHashMap<V> {
public ExtTDoubleObjectHashMap() {
}
public ExtTDoubleObjectHashMap(int initialCapacity) {
super(initialCapacity);
}
public ExtTDoubleObjectHashMap(int initialCapacity, float loadFactor) {
super(initialCapacity, loadFactor);
}
public ExtTDoubleObjectHashMap(int initialCapacity, float loadFactor, double noEntryKey) {
super(initialCapacity, loadFactor, noEntryKey);
}
public ExtTDoubleObjectHashMap(TDoubleObjectMap<V> vtDoubleObjectMap) {
super(vtDoubleObjectMap);
}
/**
* Internal method to get the actual values associated. Some values might have "null" or no entry
* values.
*/
public Object[] internalValues() {
return this._values;
}
}

View File

@ -1,54 +0,0 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.trove;
import gnu.trove.map.hash.THashMap;
import java.util.Map;
public class ExtTHashMap<K, V> extends THashMap<K, V> {
public ExtTHashMap() {
}
public ExtTHashMap(int initialCapacity) {
super(initialCapacity);
}
public ExtTHashMap(int initialCapacity, float loadFactor) {
super(initialCapacity, loadFactor);
}
public ExtTHashMap(Map<K, V> kvMap) {
super(kvMap);
}
public ExtTHashMap(THashMap<K, V> kvtHashMap) {
super(kvtHashMap);
}
/**
* Internal method to get the actual values associated. Some values might have "null" or no entry
* values.
*/
public Object[] internalValues() {
return this._values;
}
}

View File

@ -1,43 +0,0 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.trove;
import gnu.trove.list.array.TIntArrayList;
/**
*
*/
public class ExtTIntArrayList extends TIntArrayList {
public ExtTIntArrayList() {
}
public ExtTIntArrayList(int capacity) {
super(capacity);
}
public ExtTIntArrayList(int[] values) {
super(values);
}
public int[] unsafeArray() {
return _data;
}
}

View File

@ -1,53 +0,0 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.trove;
import gnu.trove.map.TLongObjectMap;
import gnu.trove.map.hash.TLongObjectHashMap;
public class ExtTLongObjectHashMap<V> extends TLongObjectHashMap<V> {
public ExtTLongObjectHashMap() {
}
public ExtTLongObjectHashMap(int initialCapacity) {
super(initialCapacity);
}
public ExtTLongObjectHashMap(int initialCapacity, float loadFactor) {
super(initialCapacity, loadFactor);
}
public ExtTLongObjectHashMap(int initialCapacity, float loadFactor, long noEntryKey) {
super(initialCapacity, loadFactor, noEntryKey);
}
public ExtTLongObjectHashMap(TLongObjectMap<V> vtLongObjectMap) {
super(vtLongObjectMap);
}
/**
* Internal method to get the actual values associated. Some values might have "null" or no entry
* values.
*/
public Object[] internalValues() {
return this._values;
}
}

View File

@ -1,55 +0,0 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.trove;
import gnu.trove.map.hash.TObjectIntHashMap;
/**
*
*/
public class ExtTObjectIntHasMap<T> extends TObjectIntHashMap<T> {
public ExtTObjectIntHasMap() {
}
public ExtTObjectIntHasMap(int initialCapacity) {
super(initialCapacity);
}
public ExtTObjectIntHasMap(int initialCapacity, float loadFactor) {
super(initialCapacity, loadFactor);
}
public ExtTObjectIntHasMap(int initialCapacity, float loadFactor, int noEntryValue) {
super(initialCapacity, loadFactor, noEntryValue);
}
/**
* Returns an already existing key, or <tt>null</tt> if it does not exist.
*/
public T key(T key) {
int index = index(key);
return index < 0 ? null : (T) _set[index];
}
public int _valuesSize() {
return _values.length;
}
}

View File

@ -1,39 +0,0 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.trove;
import gnu.trove.strategy.HashingStrategy;
/**
* A string based hash code with identity equality.
*/
public class StringIdentityHashingStrategy implements HashingStrategy<String> {
static final long serialVersionUID = -5188534454583764905L;
public int computeHashCode(String object) {
return object.hashCode();
}
@SuppressWarnings({"StringEquality"})
public boolean equals(String o1, String o2) {
return o1 == o2;
}
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.gateway.local;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import com.google.common.collect.Sets;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -136,7 +136,7 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
}
MetaData.Builder metaDataBuilder = MetaData.builder();
TObjectIntHashMap<String> indices = new TObjectIntHashMap<String>();
ObjectFloatOpenHashMap<String> indices = new ObjectFloatOpenHashMap<String>();
MetaData electedGlobalState = null;
int found = 0;
for (TransportNodesListGatewayMetaState.NodeLocalGatewayMetaState nodeState : nodesState) {
@ -150,7 +150,7 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
electedGlobalState = nodeState.metaData();
}
for (IndexMetaData indexMetaData : nodeState.metaData().indices().values()) {
indices.adjustOrPutValue(indexMetaData.index(), 1, 1);
indices.addTo(indexMetaData.index(), 1);
}
}
if (found < requiredAllocation) {
@ -159,29 +159,34 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
}
// update the global state, and clean the indices, we elect them in the next phase
metaDataBuilder.metaData(electedGlobalState).removeAllIndices();
for (String index : indices.keySet()) {
IndexMetaData electedIndexMetaData = null;
int indexMetaDataCount = 0;
for (TransportNodesListGatewayMetaState.NodeLocalGatewayMetaState nodeState : nodesState) {
if (nodeState.metaData() == null) {
continue;
final boolean[] states = indices.allocated;
final Object[] keys = indices.keys;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
String index = (String) keys[i];
IndexMetaData electedIndexMetaData = null;
int indexMetaDataCount = 0;
for (TransportNodesListGatewayMetaState.NodeLocalGatewayMetaState nodeState : nodesState) {
if (nodeState.metaData() == null) {
continue;
}
IndexMetaData indexMetaData = nodeState.metaData().index(index);
if (indexMetaData == null) {
continue;
}
if (electedIndexMetaData == null) {
electedIndexMetaData = indexMetaData;
} else if (indexMetaData.version() > electedIndexMetaData.version()) {
electedIndexMetaData = indexMetaData;
}
indexMetaDataCount++;
}
IndexMetaData indexMetaData = nodeState.metaData().index(index);
if (indexMetaData == null) {
continue;
if (electedIndexMetaData != null) {
if (indexMetaDataCount < requiredAllocation) {
logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetaDataCount, requiredAllocation);
}
metaDataBuilder.put(electedIndexMetaData, false);
}
if (electedIndexMetaData == null) {
electedIndexMetaData = indexMetaData;
} else if (indexMetaData.version() > electedIndexMetaData.version()) {
electedIndexMetaData = indexMetaData;
}
indexMetaDataCount++;
}
if (electedIndexMetaData != null) {
if (indexMetaDataCount < requiredAllocation) {
logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetaDataCount, requiredAllocation);
}
metaDataBuilder.put(electedIndexMetaData, false);
}
}
ClusterState.Builder builder = ClusterState.builder();
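
Trove's keySet()/adjustOrPutValue() pair becomes addTo() plus a walk over the map's backing arrays (allocated flags and keys). A condensed sketch of that counting-and-iterating pattern, with hypothetical index names and assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class BackingArrayIteration {
    public static void main(String[] args) {
        ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
        for (String index : new String[]{"logs", "users", "logs", "logs"}) {
            // addTo() inserts the key with the delta if absent, otherwise adds to the
            // stored value (the replacement for Trove's adjustOrPutValue(key, 1, 1)).
            counts.addTo(index, 1);
        }

        // Direct iteration over the hash table's backing arrays: a slot holds a
        // live entry only where allocated[i] is true.
        final boolean[] states = counts.allocated;
        final Object[] keys = counts.keys;
        final int[] values = counts.values;
        for (int i = 0; i < states.length; i++) {
            if (states[i]) {
                System.out.println((String) keys[i] + " -> " + values[i]);
            }
        }
    }
}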

View File

@ -19,10 +19,10 @@
package org.elasticsearch.gateway.local;
import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import gnu.trove.iterator.TObjectLongIterator;
import gnu.trove.map.hash.TObjectLongHashMap;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -66,7 +66,7 @@ public class LocalGatewayAllocator extends AbstractComponent implements GatewayA
private final ConcurrentMap<ShardId, Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData>> cachedStores = ConcurrentCollections.newConcurrentMap();
private final ConcurrentMap<ShardId, TObjectLongHashMap<DiscoveryNode>> cachedShardsState = ConcurrentCollections.newConcurrentMap();
private final ConcurrentMap<ShardId, ObjectLongOpenHashMap<DiscoveryNode>> cachedShardsState = ConcurrentCollections.newConcurrentMap();
private final TimeValue listTimeout;
@ -121,15 +121,21 @@ public class LocalGatewayAllocator extends AbstractComponent implements GatewayA
continue;
}
TObjectLongHashMap<DiscoveryNode> nodesState = buildShardStates(nodes, shard);
ObjectLongOpenHashMap<DiscoveryNode> nodesState = buildShardStates(nodes, shard);
int numberOfAllocationsFound = 0;
long highestVersion = -1;
Set<DiscoveryNode> nodesWithHighestVersion = Sets.newHashSet();
for (TObjectLongIterator<DiscoveryNode> it = nodesState.iterator(); it.hasNext(); ) {
it.advance();
DiscoveryNode node = it.key();
long version = it.value();
final boolean[] states = nodesState.allocated;
final Object[] keys = nodesState.keys;
final long[] values = nodesState.values;
for (int i = 0; i < states.length; i++) {
if (!states[i]) {
continue;
}
DiscoveryNode node = (DiscoveryNode) keys[i];
long version = values[i];
// since we don't check in NO allocation, we need to double check here
if (allocation.shouldIgnoreShardForNode(shard.shardId(), node.id())) {
continue;
@ -352,18 +358,18 @@ public class LocalGatewayAllocator extends AbstractComponent implements GatewayA
return changed;
}
private TObjectLongHashMap<DiscoveryNode> buildShardStates(DiscoveryNodes nodes, MutableShardRouting shard) {
TObjectLongHashMap<DiscoveryNode> shardStates = cachedShardsState.get(shard.shardId());
private ObjectLongOpenHashMap<DiscoveryNode> buildShardStates(DiscoveryNodes nodes, MutableShardRouting shard) {
ObjectLongOpenHashMap<DiscoveryNode> shardStates = cachedShardsState.get(shard.shardId());
Set<String> nodeIds;
if (shardStates == null) {
shardStates = new TObjectLongHashMap<DiscoveryNode>();
shardStates = new ObjectLongOpenHashMap<DiscoveryNode>();
cachedShardsState.put(shard.shardId(), shardStates);
nodeIds = nodes.dataNodes().keySet();
} else {
// clean nodes that have failed
for (TObjectLongIterator<DiscoveryNode> it = shardStates.iterator(); it.hasNext(); ) {
it.advance();
if (!nodes.nodeExists(it.key().id())) {
for (Iterator<ObjectLongCursor<DiscoveryNode>> it = shardStates.iterator(); it.hasNext(); ) {
DiscoveryNode node = it.next().key;
if (!nodes.nodeExists(node.id())) {
it.remove();
}
}
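
The other iteration style used above goes through cursors (ObjectLongCursor) rather than the backing arrays. A small sketch that prunes stale entries, with invented node ids; stale keys are collected first and removed after the loop, which keeps the sketch independent of whether the map iterator supports remove(). Assumes an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;

import java.util.ArrayList;
import java.util.List;

public class CursorIterationExample {
    public static void main(String[] args) {
        ObjectLongOpenHashMap<String> shardVersions = new ObjectLongOpenHashMap<String>();
        shardVersions.put("node-1", 3L);
        shardVersions.put("node-2", 7L);
        shardVersions.put("node-3", 1L);

        // Each cursor exposes one entry's key and value.
        List<String> stale = new ArrayList<String>();
        for (ObjectLongCursor<String> cursor : shardVersions) {
            if (cursor.value < 2L) {
                stale.add(cursor.key);
            }
        }
        // Remove after iterating.
        for (String key : stale) {
            shardVersions.remove(key);
        }
        System.out.println(shardVersions.size()); // 2
    }
}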

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis;
import gnu.trove.map.hash.TIntObjectHashMap;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
@ -30,10 +30,10 @@ import java.io.Reader;
*/
public class NumericDoubleAnalyzer extends NumericAnalyzer<NumericDoubleTokenizer> {
private final static TIntObjectHashMap<NamedAnalyzer> builtIn;
private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
static {
builtIn = new TIntObjectHashMap<NamedAnalyzer>();
builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_double/max", AnalyzerScope.GLOBAL, new NumericDoubleAnalyzer(Integer.MAX_VALUE)));
for (int i = 0; i <= 64; i += 4) {
builtIn.put(i, new NamedAnalyzer("_double/" + i, AnalyzerScope.GLOBAL, new NumericDoubleAnalyzer(i)));

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis;
import gnu.trove.map.hash.TIntObjectHashMap;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
@ -30,10 +30,10 @@ import java.io.Reader;
*/
public class NumericFloatAnalyzer extends NumericAnalyzer<NumericFloatTokenizer> {
private final static TIntObjectHashMap<NamedAnalyzer> builtIn;
private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
static {
builtIn = new TIntObjectHashMap<NamedAnalyzer>();
builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_float/max", AnalyzerScope.GLOBAL, new NumericFloatAnalyzer(Integer.MAX_VALUE)));
for (int i = 0; i <= 64; i += 4) {
builtIn.put(i, new NamedAnalyzer("_float/" + i, AnalyzerScope.GLOBAL, new NumericFloatAnalyzer(i)));

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis;
import gnu.trove.map.hash.TIntObjectHashMap;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
@ -30,10 +30,10 @@ import java.io.Reader;
*/
public class NumericIntegerAnalyzer extends NumericAnalyzer<NumericIntegerTokenizer> {
private final static TIntObjectHashMap<NamedAnalyzer> builtIn;
private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
static {
builtIn = new TIntObjectHashMap<NamedAnalyzer>();
builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_int/max", AnalyzerScope.GLOBAL, new NumericIntegerAnalyzer(Integer.MAX_VALUE)));
for (int i = 0; i <= 64; i += 4) {
builtIn.put(i, new NamedAnalyzer("_int/" + i, AnalyzerScope.GLOBAL, new NumericIntegerAnalyzer(i)));

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis;
import gnu.trove.map.hash.TIntObjectHashMap;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
@ -30,10 +30,10 @@ import java.io.Reader;
*/
public class NumericLongAnalyzer extends NumericAnalyzer<NumericLongTokenizer> {
private final static TIntObjectHashMap<NamedAnalyzer> builtIn;
private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
static {
builtIn = new TIntObjectHashMap<NamedAnalyzer>();
builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_long/max", AnalyzerScope.GLOBAL, new NumericLongAnalyzer(Integer.MAX_VALUE)));
for (int i = 0; i <= 64; i += 4) {
builtIn.put(i, new NamedAnalyzer("_long/" + i, AnalyzerScope.GLOBAL, new NumericLongAnalyzer(i)));

View File

@ -19,10 +19,9 @@
package org.elasticsearch.index.cache.id.simple;
import gnu.trove.impl.Constants;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.apache.lucene.index.*;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.HashedBytesArray;
@ -30,7 +29,6 @@ import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.UTF8SortedAsUnicodeComparator;
import org.elasticsearch.common.trove.ExtTObjectIntHasMap;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
@ -304,7 +302,7 @@ public class SimpleIdCache extends AbstractIndexComponent implements IdCache, Se
}
static class TypeBuilder {
final ExtTObjectIntHasMap<HashedBytesArray> idToDoc = new ExtTObjectIntHasMap<HashedBytesArray>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
final ObjectIntOpenHashMap<HashedBytesArray> idToDoc = new ObjectIntOpenHashMap<HashedBytesArray>();
final HashedBytesArray[] docToId;
final ArrayList<HashedBytesArray> parentIdsValues = new ArrayList<HashedBytesArray>();
final int[] parentIdsOrdinals;
@ -321,7 +319,11 @@ public class SimpleIdCache extends AbstractIndexComponent implements IdCache, Se
* Returns an already stored instance if one exists; otherwise returns the given id.
*/
public HashedBytesArray canReuse(HashedBytesArray id) {
return idToDoc.key(id);
if (idToDoc.containsKey(id)) {
return idToDoc.lkey();
} else {
return id;
}
}
}
}
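
containsKey() followed by lkey() hands back the key instance already stored in the map, which is what the id cache uses to deduplicate equal byte arrays. A tiny interning sketch using plain strings, assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class KeyReuseExample {
    private final ObjectIntOpenHashMap<String> idToDoc = new ObjectIntOpenHashMap<String>();

    /** Returns the instance already stored under an equal key, or the argument itself. */
    public String canReuse(String id) {
        if (idToDoc.containsKey(id)) {
            // lkey() returns the stored key object for the last containsKey() probe.
            return idToDoc.lkey();
        }
        return id;
    }

    public static void main(String[] args) {
        KeyReuseExample cache = new KeyReuseExample();
        String first = new String("uid-1");
        cache.idToDoc.put(first, 42);
        String second = new String("uid-1"); // equal but distinct instance
        System.out.println(cache.canReuse(second) == first); // true: stored instance is reused
    }
}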

View File

@ -19,10 +19,9 @@
package org.elasticsearch.index.cache.id.simple;
import gnu.trove.impl.hash.TObjectHash;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.trove.ExtTObjectIntHasMap;
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
/**
@ -32,7 +31,7 @@ public class SimpleIdReaderTypeCache implements IdReaderTypeCache {
private final String type;
private final ExtTObjectIntHasMap<HashedBytesArray> idToDoc;
private final ObjectIntOpenHashMap<HashedBytesArray> idToDoc;
private final HashedBytesArray[] docIdToId;
@ -42,12 +41,11 @@ public class SimpleIdReaderTypeCache implements IdReaderTypeCache {
private long sizeInBytes = -1;
public SimpleIdReaderTypeCache(String type, ExtTObjectIntHasMap<HashedBytesArray> idToDoc, HashedBytesArray[] docIdToId,
public SimpleIdReaderTypeCache(String type, ObjectIntOpenHashMap<HashedBytesArray> idToDoc, HashedBytesArray[] docIdToId,
HashedBytesArray[] parentIdsValues, int[] parentIdsOrdinals) {
this.type = type;
this.idToDoc = idToDoc;
this.docIdToId = docIdToId;
this.idToDoc.trimToSize();
this.parentIdsValues = parentIdsValues;
this.parentIdsOrdinals = parentIdsOrdinals;
}
@ -61,7 +59,11 @@ public class SimpleIdReaderTypeCache implements IdReaderTypeCache {
}
public int docById(HashedBytesArray uid) {
return idToDoc.get(uid);
if (idToDoc.containsKey(uid)) {
return idToDoc.lget();
} else {
return -1;
}
}
public HashedBytesArray idByDoc(int docId) {
@ -79,20 +81,29 @@ public class SimpleIdReaderTypeCache implements IdReaderTypeCache {
* Returns an already stored instance if one exists; otherwise returns the given id.
*/
public HashedBytesArray canReuse(HashedBytesArray id) {
return idToDoc.key(id);
if (idToDoc.containsKey(id)) {
return idToDoc.lkey();
} else {
return id;
}
}
long computeSizeInBytes() {
long sizeInBytes = 0;
// Ignore type field
// sizeInBytes += ((type.length() * RamUsage.NUM_BYTES_CHAR) + (3 * RamUsage.NUM_BYTES_INT)) + RamUsage.NUM_BYTES_OBJECT_HEADER;
sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (idToDoc._valuesSize() * RamUsageEstimator.NUM_BYTES_INT);
for (Object o : idToDoc._set) {
if (o == TObjectHash.FREE || o == TObjectHash.REMOVED) {
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
} else {
HashedBytesArray bytesArray = (HashedBytesArray) o;
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (idToDoc.values.length * RamUsageEstimator.NUM_BYTES_INT);
sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (idToDoc.allocated.length);
final boolean[] states = idToDoc.allocated;
final Object[] keys = idToDoc.keys;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
HashedBytesArray bytesArray = (HashedBytesArray) keys[i];
if (bytesArray != null) {
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
} else {
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
}
}
}

View File

@ -19,8 +19,7 @@
package org.elasticsearch.index.fielddata;
import gnu.trove.iterator.TObjectLongIterator;
import gnu.trove.map.hash.TObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -39,13 +38,13 @@ public class FieldDataStats implements Streamable, ToXContent {
long memorySize;
long evictions;
@Nullable
TObjectLongHashMap<String> fields;
ObjectLongOpenHashMap<String> fields;
public FieldDataStats() {
}
public FieldDataStats(long memorySize, long evictions, @Nullable TObjectLongHashMap<String> fields) {
public FieldDataStats(long memorySize, long evictions, @Nullable ObjectLongOpenHashMap<String> fields) {
this.memorySize = memorySize;
this.evictions = evictions;
this.fields = fields;
@ -55,10 +54,14 @@ public class FieldDataStats implements Streamable, ToXContent {
this.memorySize += stats.memorySize;
this.evictions += stats.evictions;
if (stats.fields != null) {
if (fields == null) fields = new TObjectLongHashMap<String>();
for (TObjectLongIterator<String> it = stats.fields.iterator(); it.hasNext(); ) {
it.advance();
fields.adjustOrPutValue(it.key(), it.value(), it.value());
if (fields == null) fields = new ObjectLongOpenHashMap<String>();
final boolean[] states = stats.fields.allocated;
final Object[] keys = stats.fields.keys;
final long[] values = stats.fields.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
fields.addTo((String) keys[i], values[i]);
}
}
}
}
@ -76,7 +79,7 @@ public class FieldDataStats implements Streamable, ToXContent {
}
@Nullable
public TObjectLongHashMap<String> getFields() {
public ObjectLongOpenHashMap<String> getFields() {
return fields;
}
@ -92,7 +95,7 @@ public class FieldDataStats implements Streamable, ToXContent {
evictions = in.readVLong();
if (in.readBoolean()) {
int size = in.readVInt();
fields = new TObjectLongHashMap<String>(size);
fields = new ObjectLongOpenHashMap<String>(size);
for (int i = 0; i < size; i++) {
fields.put(in.readString(), in.readVLong());
}
@ -108,10 +111,14 @@ public class FieldDataStats implements Streamable, ToXContent {
} else {
out.writeBoolean(true);
out.writeVInt(fields.size());
for (TObjectLongIterator<String> it = fields.iterator(); it.hasNext(); ) {
it.advance();
out.writeString(it.key());
out.writeVLong(it.value());
final boolean[] states = fields.allocated;
final Object[] keys = fields.keys;
final long[] values = fields.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
out.writeString((String) keys[i]);
out.writeVLong(values[i]);
}
}
}
}
@ -123,11 +130,15 @@ public class FieldDataStats implements Streamable, ToXContent {
builder.field(Fields.EVICTIONS, getEvictions());
if (fields != null) {
builder.startObject(Fields.FIELDS);
for (TObjectLongIterator<String> it = fields.iterator(); it.hasNext(); ) {
it.advance();
builder.startObject(it.key(), XContentBuilder.FieldCaseConversion.NONE);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, it.value());
builder.endObject();
final boolean[] states = fields.allocated;
final Object[] keys = fields.keys;
final long[] values = fields.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
builder.startObject((String) keys[i], XContentBuilder.FieldCaseConversion.NONE);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, values[i]);
builder.endObject();
}
}
builder.endObject();
}
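
Merging one stats map into another follows the same backing-array walk plus addTo(). A stripped-down sketch of such a merge, with illustrative field names and assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectLongOpenHashMap;

public class StatsMergeExample {
    /** Adds every entry of 'other' into 'target', summing values for shared keys. */
    static void merge(ObjectLongOpenHashMap<String> target, ObjectLongOpenHashMap<String> other) {
        final boolean[] states = other.allocated;
        final Object[] keys = other.keys;
        final long[] values = other.values;
        for (int i = 0; i < states.length; i++) {
            if (states[i]) {
                target.addTo((String) keys[i], values[i]);
            }
        }
    }

    public static void main(String[] args) {
        ObjectLongOpenHashMap<String> a = new ObjectLongOpenHashMap<String>();
        a.put("title", 100L);
        ObjectLongOpenHashMap<String> b = new ObjectLongOpenHashMap<String>();
        b.put("title", 50L);
        b.put("body", 10L);
        merge(a, b);
        System.out.println(a.get("title")); // 150
        System.out.println(a.get("body"));  // 10
    }
}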

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.fielddata;
import gnu.trove.map.hash.TObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
@ -49,9 +49,9 @@ public class ShardFieldData extends AbstractIndexShardComponent implements Index
}
public FieldDataStats stats(String... fields) {
TObjectLongHashMap<String> fieldTotals = null;
ObjectLongOpenHashMap<String> fieldTotals = null;
if (fields != null && fields.length > 0) {
fieldTotals = new TObjectLongHashMap<String>();
fieldTotals = new ObjectLongOpenHashMap<String>();
for (Map.Entry<String, CounterMetric> entry : perFieldTotals.entrySet()) {
for (String field : fields) {
if (Regex.simpleMatch(field, entry.getKey())) {

View File

@ -19,8 +19,8 @@
package org.elasticsearch.index.query;
import com.carrotsearch.hppc.FloatArrayList;
import com.google.common.collect.Maps;
import gnu.trove.list.array.TFloatArrayList;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
@ -50,7 +50,7 @@ public class CustomFiltersScoreQueryBuilder extends BaseQueryBuilder implements
private ArrayList<FilterBuilder> filters = new ArrayList<FilterBuilder>();
private ArrayList<String> scripts = new ArrayList<String>();
private TFloatArrayList boosts = new TFloatArrayList();
private FloatArrayList boosts = new FloatArrayList();
public CustomFiltersScoreQueryBuilder(QueryBuilder queryBuilder) {
this.queryBuilder = queryBuilder;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.query;
import gnu.trove.list.array.TFloatArrayList;
import com.carrotsearch.hppc.FloatArrayList;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.Strings;
@ -66,7 +66,7 @@ public class CustomFiltersScoreQueryParser implements QueryParser {
ArrayList<Filter> filters = new ArrayList<Filter>();
boolean filtersFound = false;
ArrayList<String> scripts = new ArrayList<String>();
TFloatArrayList boosts = new TFloatArrayList();
FloatArrayList boosts = new FloatArrayList();
float maxBoost = Float.MAX_VALUE;
String currentFieldName = null;

View File

@ -19,9 +19,8 @@
package org.elasticsearch.index.query;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import com.google.common.collect.Lists;
import gnu.trove.impl.Constants;
import gnu.trove.map.hash.TObjectFloatHashMap;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
@ -37,7 +36,7 @@ public class MultiMatchQueryBuilder extends BaseQueryBuilder implements Boostabl
private final Object text;
private final List<String> fields;
private TObjectFloatHashMap<String> fieldsBoosts;
private ObjectFloatOpenHashMap<String> fieldsBoosts;
private MatchQueryBuilder.Type type;
@ -96,7 +95,7 @@ public class MultiMatchQueryBuilder extends BaseQueryBuilder implements Boostabl
public MultiMatchQueryBuilder field(String field, float boost) {
fields.add(field);
if (fieldsBoosts == null) {
fieldsBoosts = new TObjectFloatHashMap<String>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
fieldsBoosts = new ObjectFloatOpenHashMap<String>();
}
fieldsBoosts.put(field, boost);
return this;
@ -230,12 +229,8 @@ public class MultiMatchQueryBuilder extends BaseQueryBuilder implements Boostabl
builder.field("query", text);
builder.startArray("fields");
for (String field : fields) {
float boost = -1;
if (fieldsBoosts != null) {
boost = fieldsBoosts.get(field);
}
if (boost != -1) {
field += "^" + boost;
if (fieldsBoosts != null && fieldsBoosts.containsKey(field)) {
field += "^" + fieldsBoosts.lget();
}
builder.value(field);
}

View File

@ -19,8 +19,7 @@
package org.elasticsearch.index.query;
import gnu.trove.impl.Constants;
import gnu.trove.map.hash.TObjectFloatHashMap;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
@ -78,7 +77,7 @@ public class QueryStringQueryBuilder extends BaseQueryBuilder implements Boostab
private List<String> fields;
private TObjectFloatHashMap<String> fieldsBoosts;
private ObjectFloatOpenHashMap<String> fieldsBoosts;
private Boolean useDisMax;
@ -125,7 +124,7 @@ public class QueryStringQueryBuilder extends BaseQueryBuilder implements Boostab
}
fields.add(field);
if (fieldsBoosts == null) {
fieldsBoosts = new TObjectFloatHashMap<String>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, -1);
fieldsBoosts = new ObjectFloatOpenHashMap<String>();
}
fieldsBoosts.put(field, boost);
return this;
@ -323,12 +322,8 @@ public class QueryStringQueryBuilder extends BaseQueryBuilder implements Boostab
if (fields != null) {
builder.startArray("fields");
for (String field : fields) {
float boost = -1;
if (fieldsBoosts != null) {
boost = fieldsBoosts.get(field);
}
if (boost != -1) {
field += "^" + boost;
if (fieldsBoosts != null && fieldsBoosts.containsKey(field)) {
field += "^" + fieldsBoosts.get(field);
}
builder.value(field);
}
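
Note the two variants in this commit: MultiMatchQueryBuilder reads the boost with lget() right after containsKey(), while the block above re-reads it with get(field). Both are correct; lget() simply skips the second hash lookup. A side-by-side sketch with a hypothetical boosts map, assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectFloatOpenHashMap;

public class BoostLookupExample {
    public static void main(String[] args) {
        ObjectFloatOpenHashMap<String> boosts = new ObjectFloatOpenHashMap<String>();
        boosts.put("title", 2.0f);

        String field = "title";

        // Variant 1: containsKey() then get(), two hash lookups.
        if (boosts.containsKey(field)) {
            System.out.println(field + "^" + boosts.get(field));
        }

        // Variant 2: containsKey() then lget(), reusing the slot found by containsKey().
        if (boosts.containsKey(field)) {
            System.out.println(field + "^" + boosts.lget());
        }
    }
}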

View File

@ -19,9 +19,8 @@
package org.elasticsearch.index.query;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import com.google.common.collect.Lists;
import gnu.trove.impl.Constants;
import gnu.trove.map.hash.TObjectFloatHashMap;
import org.apache.lucene.queryparser.classic.MapperQueryParser;
import org.apache.lucene.queryparser.classic.QueryParserSettings;
import org.apache.lucene.search.BooleanQuery;
@ -104,7 +103,7 @@ public class QueryStringQueryParser implements QueryParser {
qpSettings.fields().add(field);
if (fBoost != -1) {
if (qpSettings.boosts() == null) {
qpSettings.boosts(new TObjectFloatHashMap<String>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, 1.0f));
qpSettings.boosts(new ObjectFloatOpenHashMap<String>());
}
qpSettings.boosts().put(field, fBoost);
}
@ -113,7 +112,7 @@ public class QueryStringQueryParser implements QueryParser {
qpSettings.fields().add(fField);
if (fBoost != -1) {
if (qpSettings.boosts() == null) {
qpSettings.boosts(new TObjectFloatHashMap<String>(Constants.DEFAULT_CAPACITY, Constants.DEFAULT_LOAD_FACTOR, 1.0f));
qpSettings.boosts(new ObjectFloatOpenHashMap<String>());
}
qpSettings.boosts().put(fField, fBoost);
}

View File

@ -19,8 +19,8 @@
package org.elasticsearch.index.search.child;
import gnu.trove.map.hash.TObjectFloatHashMap;
import gnu.trove.map.hash.TObjectIntHashMap;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@ -64,8 +64,8 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
private final int shortCircuitParentDocSet;
private Query rewrittenChildQuery;
private Recycler.V<TObjectFloatHashMap<HashedBytesArray>> uidToScore;
private Recycler.V<TObjectIntHashMap<HashedBytesArray>> uidToCount;
private Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
private Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount;
public ChildrenQuery(SearchContext searchContext, String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int shortCircuitParentDocSet) {
this.searchContext = searchContext;
@ -177,10 +177,10 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
Filter parentFilter;
if (size == 1) {
BytesRef id = uidToScore.v().keySet().iterator().next().toBytesRef();
BytesRef id = uidToScore.v().keys().iterator().next().value.toBytesRef();
parentFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
} else if (size <= shortCircuitParentDocSet) {
parentFilter = new ParentIdsFilter(parentType, uidToScore.v().keySet());
parentFilter = new ParentIdsFilter(parentType, uidToScore.v().keys, uidToScore.v().allocated);
} else {
parentFilter = this.parentFilter;
}
@ -239,7 +239,7 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
class ParentScorer extends Scorer {
final TObjectFloatHashMap<HashedBytesArray> uidToScore;
final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
final IdReaderTypeCache idTypeCache;
final DocIdSetIterator parentsIterator;
@ -247,7 +247,7 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
int currentDocId = -1;
float currentScore;
ParentScorer(Weight weight, IdReaderTypeCache idTypeCache, TObjectFloatHashMap<HashedBytesArray> uidToScore, DocIdSetIterator parentsIterator) {
ParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator parentsIterator) {
super(weight);
this.idTypeCache = idTypeCache;
this.parentsIterator = parentsIterator;
@ -323,9 +323,9 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
final class AvgParentScorer extends ParentScorer {
HashedBytesArray currentUid;
final TObjectIntHashMap<HashedBytesArray> uidToCount;
final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
AvgParentScorer(Weight weight, IdReaderTypeCache idTypeCache, TObjectFloatHashMap<HashedBytesArray> uidToScore, TObjectIntHashMap<HashedBytesArray> uidToCount, DocIdSetIterator parentsIterator) {
AvgParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount, DocIdSetIterator parentsIterator) {
super(weight, idTypeCache, uidToScore, parentsIterator);
this.uidToCount = uidToCount;
}
@ -371,11 +371,11 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
static class ChildUidCollector extends ParentIdCollector {
final TObjectFloatHashMap<HashedBytesArray> uidToScore;
final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
final ScoreType scoreType;
Scorer scorer;
ChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, TObjectFloatHashMap<HashedBytesArray> uidToScore) {
ChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore) {
super(childType, searchContext);
this.uidToScore = uidToScore;
this.scoreType = scoreType;
@ -388,27 +388,27 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
@Override
protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
float previousScore = uidToScore.get(parentUid);
float currentScore = scorer.score();
if (previousScore == 0) {
uidToScore.put(parentUid, currentScore);
} else {
switch (scoreType) {
case SUM:
uidToScore.adjustValue(parentUid, currentScore);
break;
case MAX:
switch (scoreType) {
case SUM:
uidToScore.addTo(parentUid, currentScore);
break;
case MAX:
if (uidToScore.containsKey(parentUid)) {
float previousScore = uidToScore.lget();
if (currentScore > previousScore) {
uidToScore.put(parentUid, currentScore);
uidToScore.lset(currentScore);
}
break;
case AVG:
assert false : "AVG has its own collector";
} else {
uidToScore.put(parentUid, currentScore);
}
break;
case AVG:
assert false : "AVG has its own collector";
default:
assert false : "Are we missing a score type here? -- " + scoreType;
break;
}
default:
assert false : "Are we missing a score type here? -- " + scoreType;
break;
}
}
@ -416,9 +416,9 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
final static class AvgChildUidCollector extends ChildUidCollector {
final TObjectIntHashMap<HashedBytesArray> uidToCount;
final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
AvgChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, TObjectFloatHashMap<HashedBytesArray> uidToScore, TObjectIntHashMap<HashedBytesArray> uidToCount) {
AvgChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount) {
super(scoreType, searchContext, childType, uidToScore);
this.uidToCount = uidToCount;
assert scoreType == ScoreType.AVG;
@ -426,15 +426,9 @@ public class ChildrenQuery extends Query implements SearchContext.Rewrite {
@Override
protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
float previousScore = uidToScore.get(parentUid);
float currentScore = scorer.score();
if (previousScore == 0) {
uidToScore.put(parentUid, currentScore);
uidToCount.put(parentUid, 1);
} else {
uidToScore.adjustValue(parentUid, currentScore);
uidToCount.increment(parentUid);
}
uidToCount.addTo(parentUid, 1);
uidToScore.addTo(parentUid, currentScore);
}
}
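
The SUM case maps directly to addTo(), while MAX uses containsKey() plus lset() to overwrite the located slot in place. A compact accumulator sketch showing both, with invented uids and assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectFloatOpenHashMap;

public class ScoreAccumulatorExample {
    static void sum(ObjectFloatOpenHashMap<String> scores, String uid, float score) {
        // Inserts the score if the uid is new, otherwise adds to the stored value.
        scores.addTo(uid, score);
    }

    static void max(ObjectFloatOpenHashMap<String> scores, String uid, float score) {
        if (scores.containsKey(uid)) {
            if (score > scores.lget()) {
                scores.lset(score); // overwrite the slot located by containsKey()
            }
        } else {
            scores.put(uid, score);
        }
    }

    public static void main(String[] args) {
        ObjectFloatOpenHashMap<String> sums = new ObjectFloatOpenHashMap<String>();
        sum(sums, "parent-1", 1.5f);
        sum(sums, "parent-1", 2.0f);
        System.out.println(sums.get("parent-1")); // 3.5

        ObjectFloatOpenHashMap<String> maxes = new ObjectFloatOpenHashMap<String>();
        max(maxes, "parent-1", 1.5f);
        max(maxes, "parent-1", 1.0f);
        System.out.println(maxes.get("parent-1")); // 1.5
    }
}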

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.search.child;
import gnu.trove.set.hash.THashSet;
import com.carrotsearch.hppc.ObjectOpenHashSet;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@ -56,7 +56,7 @@ public class HasChildFilter extends Filter implements SearchContext.Rewrite {
Filter shortCircuitFilter;
int remaining;
Recycler.V<THashSet<HashedBytesArray>> collectedUids;
Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids;
public HasChildFilter(Query childQuery, String parentType, String childType, Filter parentFilter, SearchContext searchContext, int shortCircuitParentDocSet) {
this.parentFilter = parentFilter;
@ -135,10 +135,10 @@ public class HasChildFilter extends Filter implements SearchContext.Rewrite {
if (remaining == 0) {
shortCircuitFilter = Queries.MATCH_NO_FILTER;
} else if (remaining == 1) {
BytesRef id = collectedUids.v().iterator().next().toBytesRef();
BytesRef id = collectedUids.v().iterator().next().value.toBytesRef();
shortCircuitFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
} else if (remaining <= shortCircuitParentDocSet) {
shortCircuitFilter = new ParentIdsFilter(parentType, collectedUids.v());
shortCircuitFilter = new ParentIdsFilter(parentType, collectedUids.v().keys, collectedUids.v().allocated);
}
}
@ -158,10 +158,10 @@ public class HasChildFilter extends Filter implements SearchContext.Rewrite {
final class ParentDocSet extends MatchDocIdSet {
final IndexReader reader;
final THashSet<HashedBytesArray> parents;
final ObjectOpenHashSet<HashedBytesArray> parents;
final IdReaderTypeCache typeCache;
ParentDocSet(IndexReader reader, Bits acceptDocs, THashSet<HashedBytesArray> parents, IdReaderTypeCache typeCache) {
ParentDocSet(IndexReader reader, Bits acceptDocs, ObjectOpenHashSet<HashedBytesArray> parents, IdReaderTypeCache typeCache) {
super(reader.maxDoc(), acceptDocs);
this.reader = reader;
this.parents = parents;
@ -185,9 +185,9 @@ public class HasChildFilter extends Filter implements SearchContext.Rewrite {
final static class UidCollector extends ParentIdCollector {
final THashSet<HashedBytesArray> collectedUids;
final ObjectOpenHashSet<HashedBytesArray> collectedUids;
UidCollector(String parentType, SearchContext context, THashSet<HashedBytesArray> collectedUids) {
UidCollector(String parentType, SearchContext context, ObjectOpenHashSet<HashedBytesArray> collectedUids) {
super(parentType, context);
this.collectedUids = collectedUids;
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.search.child;
import gnu.trove.set.hash.THashSet;
import com.carrotsearch.hppc.ObjectOpenHashSet;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
@ -47,7 +47,7 @@ public class HasParentFilter extends Filter implements SearchContext.Rewrite {
final SearchContext context;
final Filter childrenFilter;
Recycler.V<THashSet<HashedBytesArray>> parents;
Recycler.V<ObjectOpenHashSet<HashedBytesArray>> parents;
public HasParentFilter(Query parentQuery, String parentType, SearchContext context, Filter childrenFilter) {
this.parentQuery = parentQuery;
@ -120,10 +120,10 @@ public class HasParentFilter extends Filter implements SearchContext.Rewrite {
final static class ChildrenDocSet extends MatchDocIdSet {
final IndexReader reader;
final THashSet<HashedBytesArray> parents;
final ObjectOpenHashSet<HashedBytesArray> parents;
final IdReaderTypeCache idReaderTypeCache;
ChildrenDocSet(IndexReader reader, Bits acceptDocs, THashSet<HashedBytesArray> parents, IdReaderTypeCache idReaderTypeCache) {
ChildrenDocSet(IndexReader reader, Bits acceptDocs, ObjectOpenHashSet<HashedBytesArray> parents, IdReaderTypeCache idReaderTypeCache) {
super(reader.maxDoc(), acceptDocs);
this.reader = reader;
this.parents = parents;
@ -139,13 +139,13 @@ public class HasParentFilter extends Filter implements SearchContext.Rewrite {
final static class ParentUidsCollector extends NoopCollector {
final THashSet<HashedBytesArray> collectedUids;
final ObjectOpenHashSet<HashedBytesArray> collectedUids;
final SearchContext context;
final String parentType;
IdReaderTypeCache typeCache;
ParentUidsCollector(THashSet<HashedBytesArray> collectedUids, SearchContext context, String parentType) {
ParentUidsCollector(ObjectOpenHashSet<HashedBytesArray> collectedUids, SearchContext context, String parentType) {
this.collectedUids = collectedUids;
this.context = context;
this.parentType = parentType;

View File

@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import java.io.IOException;
import java.util.Set;
/**
* Advantages of using this filter over Lucene's TermsFilter in the parent/child context:
@ -46,11 +45,13 @@ import java.util.Set;
final class ParentIdsFilter extends Filter {
private final BytesRef parentTypeBr;
private final Set<HashedBytesArray> collectedUids;
private final Object[] keys;
private final boolean[] allocated;
public ParentIdsFilter(String parentType, Set<HashedBytesArray> collectedUids) {
public ParentIdsFilter(String parentType, Object[] keys, boolean[] allocated) {
this.parentTypeBr = new BytesRef(parentType);
this.collectedUids = collectedUids;
this.keys = keys;
this.allocated = allocated;
}
@Override
@ -66,8 +67,12 @@ final class ParentIdsFilter extends Filter {
DocsEnum docsEnum = null;
FixedBitSet result = null;
for (HashedBytesArray parentId : collectedUids) {
idSpare.bytes = parentId.toBytes();
for (int i = 0; i < allocated.length; i++) {
if (!allocated[i]) {
continue;
}
idSpare.bytes = ((HashedBytesArray) keys[i]).toBytes();
idSpare.length = idSpare.bytes.length;
Uid.createUidAsBytes(parentTypeBr, idSpare, uidSpare);
if (termsEnum.seekExact(uidSpare, false)) {
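
Rather than exposing a java.util.Set, the filter now receives the set's backing keys and allocated arrays and skips unused slots itself. A reduced sketch of a consumer written against those two arrays, with placeholder ids and assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.ObjectOpenHashSet;

public class BackingArrayConsumerExample {
    /** Counts and prints the live entries held in an ObjectOpenHashSet's backing arrays. */
    static int countLive(Object[] keys, boolean[] allocated) {
        int live = 0;
        for (int i = 0; i < allocated.length; i++) {
            if (allocated[i]) {
                // Only allocated slots hold real entries.
                String id = (String) keys[i];
                System.out.println("live uid: " + id);
                live++;
            }
        }
        return live;
    }

    public static void main(String[] args) {
        ObjectOpenHashSet<String> uids = new ObjectOpenHashSet<String>();
        uids.add("parent-1");
        uids.add("parent-2");
        // Hand the backing arrays to the consumer, as the filter constructor above does.
        System.out.println(countLive(uids.keys, uids.allocated)); // 2
    }
}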

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.search.child;
import gnu.trove.map.hash.TObjectFloatHashMap;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@ -27,7 +27,6 @@ import org.apache.lucene.search.*;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
import org.elasticsearch.ElasticSearchIllegalStateException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.lucene.docset.DocIdSets;
import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
@ -54,7 +53,7 @@ public class ParentQuery extends Query implements SearchContext.Rewrite {
private final Filter childrenFilter;
private Query rewrittenParentQuery;
private Recycler.V<TObjectFloatHashMap<HashedBytesArray>> uidToScore;
private Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
public ParentQuery(SearchContext searchContext, Query parentQuery, String parentType, Filter childrenFilter) {
this.searchContext = searchContext;
@ -152,14 +151,14 @@ public class ParentQuery extends Query implements SearchContext.Rewrite {
static class ParentUidCollector extends NoopCollector {
final TObjectFloatHashMap<HashedBytesArray> uidToScore;
final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
final SearchContext searchContext;
final String parentType;
Scorer scorer;
IdReaderTypeCache typeCache;
ParentUidCollector(TObjectFloatHashMap<HashedBytesArray> uidToScore, SearchContext searchContext, String parentType) {
ParentUidCollector(ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, SearchContext searchContext, String parentType) {
this.uidToScore = uidToScore;
this.searchContext = searchContext;
this.parentType = parentType;
@ -232,14 +231,14 @@ public class ParentQuery extends Query implements SearchContext.Rewrite {
static class ChildScorer extends Scorer {
final TObjectFloatHashMap<HashedBytesArray> uidToScore;
final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
final DocIdSetIterator childrenIterator;
final IdReaderTypeCache typeCache;
int currentChildDoc = -1;
float currentScore;
ChildScorer(Weight weight, TObjectFloatHashMap<HashedBytesArray> uidToScore, DocIdSetIterator childrenIterator, IdReaderTypeCache typeCache) {
ChildScorer(Weight weight, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator childrenIterator, IdReaderTypeCache typeCache) {
super(weight);
this.uidToScore = uidToScore;
this.childrenIterator = childrenIterator;
@ -271,7 +270,7 @@ public class ParentQuery extends Query implements SearchContext.Rewrite {
return currentChildDoc;
}
BytesReference uid = typeCache.parentIdByDoc(currentChildDoc);
HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc);
if (uid == null) {
continue;
}
@ -288,7 +287,7 @@ public class ParentQuery extends Query implements SearchContext.Rewrite {
if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
return currentChildDoc;
}
BytesReference uid = typeCache.idByDoc(currentChildDoc);
HashedBytesArray uid = typeCache.idByDoc(currentChildDoc);
if (uid == null) {
return nextDoc();
}

View File

@ -19,7 +19,8 @@
package org.elasticsearch.index.search.child;
import gnu.trove.map.hash.TIntObjectHashMap;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.util.Bits;
@ -29,13 +30,11 @@ import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.lucene.search.EmptyScorer;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTHashMap;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Map;
import java.util.Set;
/**
@ -64,7 +63,7 @@ public class TopChildrenQuery extends Query implements SearchContext.Rewrite {
// This field will hold the rewritten form of originalChildQuery, so that we can reuse it
private Query rewrittenChildQuery;
private Recycler.V<ExtTHashMap<Object, ParentDoc[]>> parentDocs;
private Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs;
// Note, the query is expected to already be filtered to only child type docs
public TopChildrenQuery(Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, CacheRecycler cacheRecycler) {
@ -146,7 +145,7 @@ public class TopChildrenQuery extends Query implements SearchContext.Rewrite {
int resolveParentDocuments(TopDocs topDocs, SearchContext context) {
int parentHitsResolved = 0;
Recycler.V<ExtTHashMap<Object, Recycler.V<TIntObjectHashMap<ParentDoc>>>> parentDocsPerReader = cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
Recycler.V<ObjectObjectOpenHashMap<Object, Recycler.V<IntObjectOpenHashMap<ParentDoc>>>> parentDocsPerReader = cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
int readerIndex = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
AtomicReaderContext subContext = context.searcher().getIndexReader().leaves().get(readerIndex);
@ -166,7 +165,7 @@ public class TopChildrenQuery extends Query implements SearchContext.Rewrite {
if (parentDocId != -1 && (liveDocs == null || liveDocs.get(parentDocId))) {
// we found a match, add it and break
Recycler.V<TIntObjectHashMap<ParentDoc>> readerParentDocs = parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
Recycler.V<IntObjectOpenHashMap<ParentDoc>> readerParentDocs = parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
if (readerParentDocs == null) {
readerParentDocs = cacheRecycler.intObjectMap(indexReader.maxDoc());
parentDocsPerReader.v().put(indexReader.getCoreCacheKey(), readerParentDocs);
@ -191,12 +190,18 @@ public class TopChildrenQuery extends Query implements SearchContext.Rewrite {
}
}
}
boolean[] states = parentDocsPerReader.v().allocated;
Object[] keys = parentDocsPerReader.v().keys;
Object[] values = parentDocsPerReader.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
Recycler.V<IntObjectOpenHashMap<ParentDoc>> value = (Recycler.V<IntObjectOpenHashMap<ParentDoc>>) values[i];
ParentDoc[] parentDocs = value.v().values().toArray(ParentDoc.class);
Arrays.sort(parentDocs, PARENT_DOC_COMP);
for (Map.Entry<Object, Recycler.V<TIntObjectHashMap<ParentDoc>>> entry : parentDocsPerReader.v().entrySet()) {
ParentDoc[] values = entry.getValue().v().values(new ParentDoc[entry.getValue().v().size()]);
Arrays.sort(values, PARENT_DOC_COMP);
parentDocs.v().put(entry.getKey(), values);
entry.getValue().release();
this.parentDocs.v().put(keys[i], parentDocs);
value.release();
}
}
parentDocsPerReader.release();
return parentHitsResolved;
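
The per-reader maps now yield their values through values().toArray(Class) instead of Trove's pre-sized values(T[]) call. A small sketch that collects and sorts values from an IntObjectOpenHashMap, with made-up data and assuming an hppc 0.5.x-era API:

import com.carrotsearch.hppc.IntObjectOpenHashMap;

import java.util.Arrays;

public class ValuesToArrayExample {
    public static void main(String[] args) {
        IntObjectOpenHashMap<String> docs = new IntObjectOpenHashMap<String>();
        docs.put(12, "doc-c");
        docs.put(3, "doc-a");
        docs.put(7, "doc-b");

        // values() exposes the value container; toArray(Class) returns a typed array.
        String[] values = docs.values().toArray(String.class);
        Arrays.sort(values);
        System.out.println(Arrays.toString(values)); // [doc-a, doc-b, doc-c]
    }
}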

View File

@ -19,12 +19,12 @@
package org.elasticsearch.indices.cache.filter;
import com.carrotsearch.hppc.ObjectOpenHashSet;
import com.google.common.base.Objects;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import gnu.trove.set.hash.THashSet;
import org.apache.lucene.search.DocIdSet;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.component.AbstractComponent;
@ -176,7 +176,7 @@ public class IndicesFilterCache extends AbstractComponent implements RemovalList
threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
@Override
public void run() {
Recycler.V<THashSet<Object>> keys = cacheRecycler.hashSet(-1);
Recycler.V<ObjectOpenHashSet<Object>> keys = cacheRecycler.hashSet(-1);
try {
for (Iterator<Object> it = readersKeysToClean.iterator(); it.hasNext(); ) {
keys.v().add(it.next());

View File

@ -19,8 +19,8 @@
package org.elasticsearch.indices.cluster;
import com.carrotsearch.hppc.IntOpenHashSet;
import com.google.common.collect.Lists;
import gnu.trove.set.hash.TIntHashSet;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -277,7 +277,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (routingNode == null) {
return;
}
TIntHashSet newShardIds = new TIntHashSet();
IntOpenHashSet newShardIds = new IntOpenHashSet();
for (IndexService indexService : indicesService) {
String index = indexService.index().name();
IndexMetaData indexMetaData = event.state().metaData().index(index);

View File

@ -18,8 +18,8 @@
package org.elasticsearch.percolator;
import com.carrotsearch.hppc.ByteObjectOpenHashMap;
import com.google.common.collect.ImmutableMap;
import gnu.trove.map.hash.TByteObjectHashMap;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexableField;
@ -92,7 +92,7 @@ public class PercolatorService extends AbstractComponent {
private final CloseableThreadLocal<MemoryIndex> cache;
private final IndicesService indicesService;
private final TByteObjectHashMap<PercolatorType> percolatorTypes;
private final ByteObjectOpenHashMap<PercolatorType> percolatorTypes;
private final ClusterService clusterService;
@ -112,8 +112,8 @@ public class PercolatorService extends AbstractComponent {
return new ExtendedMemoryIndex(true, maxReuseBytes);
}
};
percolatorTypes = new TByteObjectHashMap<PercolatorType>(6);
percolatorTypes = new ByteObjectOpenHashMap<PercolatorType>(6);
percolatorTypes.put(countPercolator.id(), countPercolator);
percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
percolatorTypes.put(matchPercolator.id(), matchPercolator);
@ -450,8 +450,7 @@ public class PercolatorService extends AbstractComponent {
// Use a custom impl of AbstractBigArray for Object[]?
List<PercolateResponse.Match> finalMatches = new ArrayList<PercolateResponse.Match>(requestedSize == 0 ? numMatches : requestedSize);
outer:
for (PercolateShardResponse response : shardResults) {
outer: for (PercolateShardResponse response : shardResults) {
Text index = new StringText(response.getIndex());
for (int i = 0; i < response.matches().length; i++) {
float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i];

View File

@ -19,8 +19,8 @@
package org.elasticsearch.percolator;
import com.carrotsearch.hppc.FloatArrayList;
import com.google.common.collect.ImmutableMap;
import gnu.trove.list.array.TFloatArrayList;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
@ -218,7 +218,7 @@ abstract class QueryCollector extends Collector {
final List<BytesRef> matches = new ArrayList<BytesRef>();
final List<Map<String, HighlightField>> hls = new ArrayList<Map<String, HighlightField>>();
// TODO: Use thread local in order to cache the scores lists?
final TFloatArrayList scores = new TFloatArrayList();
final FloatArrayList scores = new FloatArrayList();
final boolean limit;
final int size;
long counter = 0;
@ -278,7 +278,7 @@ abstract class QueryCollector extends Collector {
return matches;
}
TFloatArrayList scores() {
FloatArrayList scores() {
return scores;
}

View File

@ -19,11 +19,10 @@
package org.elasticsearch.search.builder;
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import gnu.trove.iterator.TObjectFloatIterator;
import gnu.trove.map.hash.TObjectFloatHashMap;
import org.elasticsearch.ElasticSearchGenerationException;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.Nullable;
@ -112,7 +111,7 @@ public class SearchSourceBuilder implements ToXContent {
private RescoreBuilder rescoreBuilder;
private TObjectFloatHashMap<String> indexBoost = null;
private ObjectFloatOpenHashMap<String> indexBoost = null;
private String[] stats;
@ -590,7 +589,7 @@ public class SearchSourceBuilder implements ToXContent {
*/
public SearchSourceBuilder indexBoost(String index, float indexBoost) {
if (this.indexBoost == null) {
this.indexBoost = new TObjectFloatHashMap<String>();
this.indexBoost = new ObjectFloatOpenHashMap<String>();
}
this.indexBoost.put(index, indexBoost);
return this;
@ -761,9 +760,13 @@ public class SearchSourceBuilder implements ToXContent {
if (indexBoost != null) {
builder.startObject("indices_boost");
for (TObjectFloatIterator<String> it = indexBoost.iterator(); it.hasNext(); ) {
it.advance();
builder.field(it.key(), it.value());
final boolean[] states = indexBoost.allocated;
final Object[] keys = indexBoost.keys;
final float[] values = indexBoost.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
builder.field((String) keys[i], values[i]);
}
}
builder.endObject();
}
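
Note: this allocated/keys/values scan is the iteration idiom used throughout the migration in place of Trove's TObjectFloatIterator. For reference, hppc also supports cursor-based iteration over the same map; a minimal equivalent sketch (assuming the cursor class com.carrotsearch.hppc.cursors.ObjectFloatCursor), which the direct array scan above presumably avoids in order to keep hot paths free of per-element cursor objects:

    // Sketch only: cursor iteration over the same ObjectFloatOpenHashMap<String>.
    for (ObjectFloatCursor<String> cursor : indexBoost) {
        builder.field(cursor.key, cursor.value);
    }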

View File

@ -19,16 +19,17 @@
package org.elasticsearch.search.controller;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import com.google.common.collect.Lists;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.collect.XMaps;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.hppc.HppcMaps;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.trove.ExtTIntArrayList;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
@ -79,8 +80,8 @@ public class SearchPhaseController extends AbstractComponent {
}
public AggregatedDfs aggregateDfs(AtomicArray<DfsSearchResult> results) {
Map<Term, TermStatistics> termStatistics = XMaps.newNoNullKeysMap();
Map<String, CollectionStatistics> fieldStatistics = XMaps.newNoNullKeysMap();
ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics = HppcMaps.newNoNullKeysMap();
ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
long aggMaxDoc = 0;
for (AtomicArray.Entry<DfsSearchResult> lEntry : results.asList()) {
final Term[] terms = lEntry.value.terms();
@ -101,19 +102,26 @@ public class SearchPhaseController extends AbstractComponent {
}
}
for (Map.Entry<String, CollectionStatistics> entry : lEntry.value.fieldStatistics().entrySet()) {
assert entry.getKey() != null;
CollectionStatistics existing = fieldStatistics.get(entry.getKey());
if (existing != null) {
CollectionStatistics merged = new CollectionStatistics(
entry.getKey(), existing.maxDoc() + entry.getValue().maxDoc(),
optionalSum(existing.docCount(), entry.getValue().docCount()),
optionalSum(existing.sumTotalTermFreq(), entry.getValue().sumTotalTermFreq()),
optionalSum(existing.sumDocFreq(), entry.getValue().sumDocFreq())
);
fieldStatistics.put(entry.getKey(), merged);
} else {
fieldStatistics.put(entry.getKey(), entry.getValue());
final boolean[] states = lEntry.value.fieldStatistics().allocated;
final Object[] keys = lEntry.value.fieldStatistics().keys;
final Object[] values = lEntry.value.fieldStatistics().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
String key = (String) keys[i];
CollectionStatistics value = (CollectionStatistics) values[i];
assert key != null;
CollectionStatistics existing = fieldStatistics.get(key);
if (existing != null) {
CollectionStatistics merged = new CollectionStatistics(
key, existing.maxDoc() + value.maxDoc(),
optionalSum(existing.docCount(), value.docCount()),
optionalSum(existing.sumTotalTermFreq(), value.sumTotalTermFreq()),
optionalSum(existing.sumDocFreq(), value.sumDocFreq())
);
fieldStatistics.put(key, merged);
} else {
fieldStatistics.put(key, value);
}
}
}
aggMaxDoc += lEntry.value.maxDoc();
@ -285,11 +293,11 @@ public class SearchPhaseController extends AbstractComponent {
/**
* Builds an array, with potential null elements, with docs to load.
*/
public void fillDocIdsToLoad(AtomicArray<ExtTIntArrayList> docsIdsToLoad, ScoreDoc[] shardDocs) {
public void fillDocIdsToLoad(AtomicArray<IntArrayList> docsIdsToLoad, ScoreDoc[] shardDocs) {
for (ScoreDoc shardDoc : shardDocs) {
ExtTIntArrayList list = docsIdsToLoad.get(shardDoc.shardIndex);
IntArrayList list = docsIdsToLoad.get(shardDoc.shardIndex);
if (list == null) {
list = new ExtTIntArrayList(); // can't be shared!, uses unsafe on it later on
list = new IntArrayList(); // can't be shared!, uses unsafe on it later on
docsIdsToLoad.set(shardDoc.shardIndex, list);
}
list.add(shardDoc.doc);
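
Note: the "can't be shared" comment is about hppc's IntArrayList exposing its backing array. A minimal sketch of how a later phase would read the list directly (shardIndex and the fetch step are illustrative, not from this diff):

    // Sketch only: buffer is the list's live backing array, so sharing one
    // IntArrayList across shards would let writers and readers interfere.
    IntArrayList docIds = docsIdsToLoad.get(shardIndex);
    int[] buffer = docIds.buffer;          // direct access, no copy
    for (int i = 0; i < docIds.size(); i++) {
        int docId = buffer[i];
        // ... load the stored fields for docId ...
    }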

View File

@ -20,37 +20,37 @@
package org.elasticsearch.search.dfs;
import java.io.IOException;
import java.util.Map;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.TermStatistics;
import org.elasticsearch.common.collect.XMaps;
import org.elasticsearch.common.hppc.HppcMaps;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import java.io.IOException;
public class AggregatedDfs implements Streamable {
private Map<Term, TermStatistics> termStatistics;
private Map<String, CollectionStatistics> fieldStatistics;
private ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics;
private ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics;
private long maxDoc;
private AggregatedDfs() {
}
public AggregatedDfs(Map<Term, TermStatistics> termStatistics, Map<String, CollectionStatistics> fieldStatistics, long maxDoc) {
public AggregatedDfs(ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics, ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics, long maxDoc) {
this.termStatistics = termStatistics;
this.fieldStatistics = fieldStatistics;
this.maxDoc = maxDoc;
}
public Map<Term, TermStatistics> termStatistics() {
public ObjectObjectOpenHashMap<Term, TermStatistics> termStatistics() {
return termStatistics;
}
public Map<String, CollectionStatistics> fieldStatistics() {
public ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics() {
return fieldStatistics;
}
@ -67,7 +67,7 @@ public class AggregatedDfs implements Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
int size = in.readVInt();
termStatistics = XMaps.newMap(size);
termStatistics = HppcMaps.newMap(size);
for (int i = 0; i < size; i++) {
Term term = new Term(in.readString(), in.readBytesRef());
TermStatistics stats = new TermStatistics(in.readBytesRef(),
@ -82,14 +82,19 @@ public class AggregatedDfs implements Streamable {
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeVInt(termStatistics.size());
for (Map.Entry<Term, TermStatistics> termTermStatisticsEntry : termStatistics.entrySet()) {
Term term = termTermStatisticsEntry.getKey();
out.writeString(term.field());
out.writeBytesRef(term.bytes());
TermStatistics stats = termTermStatisticsEntry.getValue();
out.writeBytesRef(stats.term());
out.writeVLong(stats.docFreq());
out.writeVLong(DfsSearchResult.addOne(stats.totalTermFreq()));
final boolean[] states = termStatistics.allocated;
final Object[] keys = termStatistics.keys;
final Object[] values = termStatistics.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
Term term = (Term) keys[i];
out.writeString(term.field());
out.writeBytesRef(term.bytes());
TermStatistics stats = (TermStatistics) values[i];
out.writeBytesRef(stats.term());
out.writeVLong(stats.docFreq());
out.writeVLong(DfsSearchResult.addOne(stats.totalTermFreq()));
}
}
DfsSearchResult.writeFieldStats(out, fieldStatistics);
out.writeVLong(maxDoc);

View File

@ -19,18 +19,23 @@
package org.elasticsearch.search.dfs;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import com.carrotsearch.hppc.ObjectOpenHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.ImmutableMap;
import gnu.trove.set.hash.THashSet;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.TermStatistics;
import org.elasticsearch.common.collect.XMaps;
import org.elasticsearch.common.hppc.HppcMaps;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchPhase;
import org.elasticsearch.search.internal.SearchContext;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
/**
@ -38,10 +43,10 @@ import java.util.Map;
*/
public class DfsPhase implements SearchPhase {
private static ThreadLocal<THashSet<Term>> cachedTermsSet = new ThreadLocal<THashSet<Term>>() {
private static ThreadLocal<ObjectOpenHashSet<Term>> cachedTermsSet = new ThreadLocal<ObjectOpenHashSet<Term>>() {
@Override
protected THashSet<Term> initialValue() {
return new THashSet<Term>();
protected ObjectOpenHashSet<Term> initialValue() {
return new ObjectOpenHashSet<Term>();
}
};
@ -55,22 +60,21 @@ public class DfsPhase implements SearchPhase {
}
public void execute(SearchContext context) {
THashSet<Term> termsSet = null;
final ObjectOpenHashSet<Term> termsSet = cachedTermsSet.get();
try {
if (!context.queryRewritten()) {
context.updateRewriteQuery(context.searcher().rewrite(context.query()));
}
termsSet = cachedTermsSet.get();
if (!termsSet.isEmpty()) {
termsSet.clear();
}
context.query().extractTerms(termsSet);
context.query().extractTerms(new DelegateSet(termsSet));
if (context.rescore() != null) {
context.rescore().rescorer().extractTerms(context, context.rescore(), termsSet);
context.rescore().rescorer().extractTerms(context, context.rescore(), new DelegateSet(termsSet));
}
Term[] terms = termsSet.toArray(new Term[termsSet.size()]);
Term[] terms = termsSet.toArray(Term.class);
TermStatistics[] termStatistics = new TermStatistics[terms.length];
IndexReaderContext indexReaderContext = context.searcher().getTopReaderContext();
for (int i = 0; i < terms.length; i++) {
@ -79,7 +83,7 @@ public class DfsPhase implements SearchPhase {
termStatistics[i] = context.searcher().termStatistics(terms[i], termContext);
}
Map<String, CollectionStatistics> fieldStatistics = XMaps.newNoNullKeysMap();
ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
for (Term term : terms) {
assert term.field() != null : "field is null";
if (!fieldStatistics.containsKey(term.field())) {
@ -94,9 +98,58 @@ public class DfsPhase implements SearchPhase {
} catch (Exception e) {
throw new DfsPhaseExecutionException(context, "Exception during dfs phase", e);
} finally {
if (termsSet != null) {
termsSet.clear(); // don't hold on to terms
}
termsSet.clear(); // don't hold on to terms
}
}
// We need to bridge to JCF world, b/c of Query#extractTerms
private static class DelegateSet extends AbstractSet<Term> {
private final ObjectOpenHashSet<Term> delegate;
private DelegateSet(ObjectOpenHashSet<Term> delegate) {
this.delegate = delegate;
}
@Override
public boolean add(Term term) {
return delegate.add(term);
}
@Override
public boolean addAll(Collection<? extends Term> terms) {
boolean result = false;
for (Term term : terms) {
result |= delegate.add(term);
}
return result;
}
@Override
public Iterator<Term> iterator() {
final Iterator<ObjectCursor<Term>> iterator = delegate.iterator();
return new Iterator<Term>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public Term next() {
return iterator.next().value;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
@Override
public int size() {
return delegate.size();
}
}
}
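
Note: the DelegateSet bridge exists because hppc sets do not implement java.util.Set and iterate over cursors rather than elements. A minimal sketch of the native hppc iteration the adapter unwraps (illustrative values):

    // Sketch only: an hppc set yields ObjectCursor objects; the element is cursor.value.
    ObjectOpenHashSet<Term> terms = new ObjectOpenHashSet<Term>();
    terms.add(new Term("field", "text"));
    for (ObjectCursor<Term> cursor : terms) {
        Term term = cursor.value;
        // ... collect statistics for term ...
    }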

View File

@ -19,20 +19,20 @@
package org.elasticsearch.search.dfs;
import java.io.IOException;
import java.util.Map;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.TermStatistics;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.collect.XMaps;
import org.elasticsearch.common.hppc.HppcMaps;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.transport.TransportResponse;
import java.io.IOException;
/**
*
*/
@ -45,7 +45,7 @@ public class DfsSearchResult extends TransportResponse implements SearchPhaseRes
private long id;
private Term[] terms;
private TermStatistics[] termStatistics;
private Map<String, CollectionStatistics> fieldStatistics = XMaps.newNoNullKeysMap();
private ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics = HppcMaps.newNoNullKeysMap();
private int maxDoc;
public DfsSearchResult() {
@ -85,7 +85,7 @@ public class DfsSearchResult extends TransportResponse implements SearchPhaseRes
return this;
}
public DfsSearchResult fieldStatistics(Map<String, CollectionStatistics> fieldStatistics) {
public DfsSearchResult fieldStatistics(ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics) {
this.fieldStatistics = fieldStatistics;
return this;
}
@ -98,7 +98,7 @@ public class DfsSearchResult extends TransportResponse implements SearchPhaseRes
return termStatistics;
}
public Map<String, CollectionStatistics> fieldStatistics() {
public ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics() {
return fieldStatistics;
}
@ -143,15 +143,21 @@ public class DfsSearchResult extends TransportResponse implements SearchPhaseRes
out.writeVInt(maxDoc);
}
public static void writeFieldStats(StreamOutput out, Map<String, CollectionStatistics> fieldStatistics) throws IOException {
public static void writeFieldStats(StreamOutput out, ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics) throws IOException {
out.writeVInt(fieldStatistics.size());
for (Map.Entry<String, CollectionStatistics> entry : fieldStatistics.entrySet()) {
out.writeString(entry.getKey());
assert entry.getValue().maxDoc() >= 0;
out.writeVLong(entry.getValue().maxDoc());
out.writeVLong(addOne(entry.getValue().docCount()));
out.writeVLong(addOne(entry.getValue().sumTotalTermFreq()));
out.writeVLong(addOne(entry.getValue().sumDocFreq()));
final boolean[] states = fieldStatistics.allocated;
Object[] keys = fieldStatistics.keys;
Object[] values = fieldStatistics.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
out.writeString((String) keys[i]);
CollectionStatistics statistics = (CollectionStatistics) values[i];
assert statistics.maxDoc() >= 0;
out.writeVLong(statistics.maxDoc());
out.writeVLong(addOne(statistics.docCount()));
out.writeVLong(addOne(statistics.sumTotalTermFreq()));
out.writeVLong(addOne(statistics.sumDocFreq()));
}
}
}
@ -168,14 +174,14 @@ public class DfsSearchResult extends TransportResponse implements SearchPhaseRes
out.writeVLong(addOne(termStatistic.totalTermFreq()));
}
public static Map<String, CollectionStatistics> readFieldStats(StreamInput in) throws IOException {
public static ObjectObjectOpenHashMap<String, CollectionStatistics> readFieldStats(StreamInput in) throws IOException {
return readFieldStats(in, null);
}
public static Map<String, CollectionStatistics> readFieldStats(StreamInput in, Map<String, CollectionStatistics> fieldStatistics) throws IOException {
public static ObjectObjectOpenHashMap<String, CollectionStatistics> readFieldStats(StreamInput in, ObjectObjectOpenHashMap<String, CollectionStatistics> fieldStatistics) throws IOException {
final int numFieldStatistics = in.readVInt();
if (fieldStatistics == null) {
fieldStatistics = XMaps.newNoNullKeysMap(numFieldStatistics);
fieldStatistics = HppcMaps.newNoNullKeysMap(numFieldStatistics);
}
for (int i = 0; i < numFieldStatistics; i++) {
final String field = in.readString();

View File

@ -19,8 +19,7 @@
package org.elasticsearch.search.facet.datehistogram;
import gnu.trove.iterator.TLongLongIterator;
import gnu.trove.map.hash.TLongLongHashMap;
import com.carrotsearch.hppc.LongLongOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.joda.TimeZoneRounding;
@ -43,7 +42,7 @@ public class CountDateHistogramFacetExecutor extends FacetExecutor {
private final IndexNumericFieldData indexFieldData;
final DateHistogramFacet.ComparatorType comparatorType;
final Recycler.V<TLongLongHashMap> counts;
final Recycler.V<LongLongOpenHashMap> counts;
public CountDateHistogramFacetExecutor(IndexNumericFieldData indexFieldData, TimeZoneRounding tzRounding, DateHistogramFacet.ComparatorType comparatorType, CacheRecycler cacheRecycler) {
this.comparatorType = comparatorType;
@ -60,14 +59,19 @@ public class CountDateHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
InternalCountDateHistogramFacet.CountEntry[] entries = new InternalCountDateHistogramFacet.CountEntry[counts.v().size()];
int i = 0;
for (TLongLongIterator it = counts.v().iterator(); it.hasNext(); ) {
it.advance();
entries[i++] = new InternalCountDateHistogramFacet.CountEntry(it.key(), it.value());
InternalCountDateHistogramFacet.CountEntry[] countEntries = new InternalCountDateHistogramFacet.CountEntry[counts.v().size()];
final boolean[] states = counts.v().allocated;
final long[] keys = counts.v().keys;
final long[] values = counts.v().values;
int entryIndex = 0;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
countEntries[entryIndex++] = new InternalCountDateHistogramFacet.CountEntry(keys[i], values[i]);
}
}
counts.release();
return new InternalCountDateHistogramFacet(facetName, comparatorType, entries);
return new InternalCountDateHistogramFacet(facetName, comparatorType, countEntries);
}
class Collector extends FacetExecutor.Collector {
@ -96,20 +100,20 @@ public class CountDateHistogramFacetExecutor extends FacetExecutor {
public static class DateHistogramProc extends LongFacetAggregatorBase {
private final TLongLongHashMap counts;
private final LongLongOpenHashMap counts;
private final TimeZoneRounding tzRounding;
public DateHistogramProc(TLongLongHashMap counts, TimeZoneRounding tzRounding) {
public DateHistogramProc(LongLongOpenHashMap counts, TimeZoneRounding tzRounding) {
this.counts = counts;
this.tzRounding = tzRounding;
}
@Override
public void onValue(int docId, long value) {
counts.adjustOrPutValue(tzRounding.calc(value), 1, 1);
counts.addTo(tzRounding.calc(value), 1);
}
public TLongLongHashMap counts() {
public LongLongOpenHashMap counts() {
return counts;
}
}
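
Note: the counting calls map one-to-one between the two libraries; a small illustrative sketch of the equivalence (addTo returns the value now stored for the key, which is how HashedAggregator later in this commit detects first-time insertions):

    // Trove:  counts.adjustOrPutValue(key, 1, 1);  // add 1, or insert 1 if absent
    // hppc:   counts.addTo(key, 1);                // same effect, returns the new count
    LongLongOpenHashMap counts = new LongLongOpenHashMap();
    long first = counts.addTo(42L, 1);   // key was absent, stored value is now 1
    long second = counts.addTo(42L, 1);  // key present, stored value is now 2
    assert first == 1 && second == 2;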

View File

@ -19,8 +19,7 @@
package org.elasticsearch.search.facet.datehistogram;
import gnu.trove.iterator.TLongLongIterator;
import gnu.trove.map.hash.TLongLongHashMap;
import com.carrotsearch.hppc.LongLongOpenHashMap;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.HashedBytesArray;
@ -139,25 +138,29 @@ public class InternalCountDateHistogramFacet extends InternalDateHistogramFacet
return facets.get(0);
}
Recycler.V<TLongLongHashMap> counts = context.cacheRecycler().longLongMap(-1);
Recycler.V<LongLongOpenHashMap> counts = context.cacheRecycler().longLongMap(-1);
for (Facet facet : facets) {
InternalCountDateHistogramFacet histoFacet = (InternalCountDateHistogramFacet) facet;
for (CountEntry entry : histoFacet.entries) {
counts.v().adjustOrPutValue(entry.getTime(), entry.getCount(), entry.getCount());
counts.v().addTo(entry.getTime(), entry.getCount());
}
}
CountEntry[] entries = new CountEntry[counts.v().size()];
int i = 0;
for (TLongLongIterator it = counts.v().iterator(); it.hasNext(); ) {
it.advance();
entries[i++] = new CountEntry(it.key(), it.value());
CountEntry[] countEntries = new CountEntry[counts.v().size()];
final boolean[] states = counts.v().allocated;
final long[] keys = counts.v().keys;
final long[] values = counts.v().values;
int entriesIndex = 0;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
countEntries[entriesIndex++] = new CountEntry(keys[i], values[i]);
}
}
counts.release();
Arrays.sort(entries, comparatorType.comparator());
Arrays.sort(countEntries, comparatorType.comparator());
return new InternalCountDateHistogramFacet(getName(), comparatorType, entries);
return new InternalCountDateHistogramFacet(getName(), comparatorType, countEntries);
}
static final class Fields {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.facet.datehistogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
@ -26,7 +27,6 @@ import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.search.facet.Facet;
@ -154,7 +154,7 @@ public class InternalFullDateHistogramFacet extends InternalDateHistogramFacet {
return internalFacet;
}
Recycler.V<ExtTLongObjectHashMap<FullEntry>> map = context.cacheRecycler().longObjectMap(-1);
Recycler.V<LongObjectOpenHashMap<FullEntry>> map = context.cacheRecycler().longObjectMap(-1);
for (Facet facet : facets) {
InternalFullDateHistogramFacet histoFacet = (InternalFullDateHistogramFacet) facet;
@ -177,7 +177,8 @@ public class InternalFullDateHistogramFacet extends InternalDateHistogramFacet {
}
// sort
Object[] values = map.v().internalValues();
// TODO: hppc - not happy with toArray
Object[] values = map.v().values().toArray();
Arrays.sort(values, (Comparator) comparatorType.comparator());
List<FullEntry> ordered = new ArrayList<FullEntry>(map.v().size());
for (int i = 0; i < map.v().size(); i++) {

View File

@ -19,11 +19,11 @@
package org.elasticsearch.search.facet.datehistogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.joda.TimeZoneRounding;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.index.fielddata.DoubleValues;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.LongValues;
@ -45,7 +45,7 @@ public class ValueDateHistogramFacetExecutor extends FacetExecutor {
private final DateHistogramFacet.ComparatorType comparatorType;
final TimeZoneRounding tzRounding;
final Recycler.V<ExtTLongObjectHashMap<InternalFullDateHistogramFacet.FullEntry>> entries;
final Recycler.V<LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry>> entries;
public ValueDateHistogramFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, TimeZoneRounding tzRounding, DateHistogramFacet.ComparatorType comparatorType, CacheRecycler cacheRecycler) {
this.comparatorType = comparatorType;
@ -63,7 +63,16 @@ public class ValueDateHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
ArrayList<InternalFullDateHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullDateHistogramFacet.FullEntry>(entries.v().valueCollection());
ArrayList<InternalFullDateHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullDateHistogramFacet.FullEntry>(entries.v().size());
final boolean[] states = entries.v().allocated;
final Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
InternalFullDateHistogramFacet.FullEntry value = (InternalFullDateHistogramFacet.FullEntry) values[i];
entries1.add(value);
}
}
entries.release();
return new InternalFullDateHistogramFacet(facetName, comparatorType, entries1);
}
@ -95,14 +104,14 @@ public class ValueDateHistogramFacetExecutor extends FacetExecutor {
public static class DateHistogramProc extends LongFacetAggregatorBase {
final ExtTLongObjectHashMap<InternalFullDateHistogramFacet.FullEntry> entries;
final LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries;
private final TimeZoneRounding tzRounding;
DoubleValues valueValues;
final ValueAggregator valueAggregator = new ValueAggregator();
public DateHistogramProc(TimeZoneRounding tzRounding, ExtTLongObjectHashMap<InternalFullDateHistogramFacet.FullEntry> entries) {
public DateHistogramProc(TimeZoneRounding tzRounding, LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries) {
this.tzRounding = tzRounding;
this.entries = entries;
}

View File

@ -19,12 +19,12 @@
package org.elasticsearch.search.facet.datehistogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.joda.TimeZoneRounding;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.LongValues;
import org.elasticsearch.script.SearchScript;
@ -46,7 +46,7 @@ public class ValueScriptDateHistogramFacetExecutor extends FacetExecutor {
final SearchScript valueScript;
final TimeZoneRounding tzRounding;
final Recycler.V<ExtTLongObjectHashMap<InternalFullDateHistogramFacet.FullEntry>> entries;
final Recycler.V<LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry>> entries;
public ValueScriptDateHistogramFacetExecutor(IndexNumericFieldData keyIndexFieldData, SearchScript valueScript, TimeZoneRounding tzRounding, DateHistogramFacet.ComparatorType comparatorType, CacheRecycler cacheRecycler) {
this.comparatorType = comparatorType;
@ -64,7 +64,16 @@ public class ValueScriptDateHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
ArrayList<InternalFullDateHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullDateHistogramFacet.FullEntry>(entries.v().valueCollection());
ArrayList<InternalFullDateHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullDateHistogramFacet.FullEntry>(entries.v().size());
final boolean[] states = entries.v().allocated;
final Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
InternalFullDateHistogramFacet.FullEntry value = (InternalFullDateHistogramFacet.FullEntry) values[i];
entries1.add(value);
}
}
entries.release();
return new InternalFullDateHistogramFacet(facetName, comparatorType, entries1);
}
@ -104,9 +113,9 @@ public class ValueScriptDateHistogramFacetExecutor extends FacetExecutor {
private final TimeZoneRounding tzRounding;
protected final SearchScript valueScript;
final ExtTLongObjectHashMap<InternalFullDateHistogramFacet.FullEntry> entries;
final LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries;
public DateHistogramProc(TimeZoneRounding tzRounding, SearchScript valueScript, final ExtTLongObjectHashMap<InternalFullDateHistogramFacet.FullEntry> entries) {
public DateHistogramProc(TimeZoneRounding tzRounding, SearchScript valueScript, final LongObjectOpenHashMap<InternalFullDateHistogramFacet.FullEntry> entries) {
this.tzRounding = tzRounding;
this.valueScript = valueScript;
this.entries = entries;

View File

@ -19,8 +19,7 @@
package org.elasticsearch.search.facet.histogram;
import gnu.trove.iterator.TLongLongIterator;
import gnu.trove.map.hash.TLongLongHashMap;
import com.carrotsearch.hppc.LongLongOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.index.fielddata.DoubleValues;
@ -42,7 +41,7 @@ public class CountHistogramFacetExecutor extends FacetExecutor {
private final HistogramFacet.ComparatorType comparatorType;
final long interval;
final Recycler.V<TLongLongHashMap> counts;
final Recycler.V<LongLongOpenHashMap> counts;
public CountHistogramFacetExecutor(IndexNumericFieldData indexFieldData, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
this.comparatorType = comparatorType;
@ -60,10 +59,14 @@ public class CountHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
InternalCountHistogramFacet.CountEntry[] entries = new InternalCountHistogramFacet.CountEntry[counts.v().size()];
int i = 0;
for (TLongLongIterator it = counts.v().iterator(); it.hasNext(); ) {
it.advance();
entries[i++] = new InternalCountHistogramFacet.CountEntry(it.key(), it.value());
final boolean[] states = counts.v().allocated;
final long[] keys = counts.v().keys;
final long[] values = counts.v().values;
int entryIndex = 0;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
entries[entryIndex++] = new InternalCountHistogramFacet.CountEntry(keys[i], values[i]);
}
}
counts.release();
return new InternalCountHistogramFacet(facetName, comparatorType, entries);
@ -100,9 +103,9 @@ public class CountHistogramFacetExecutor extends FacetExecutor {
public final static class HistogramProc extends DoubleFacetAggregatorBase {
private final long interval;
private final TLongLongHashMap counts;
private final LongLongOpenHashMap counts;
public HistogramProc(long interval, TLongLongHashMap counts) {
public HistogramProc(long interval, LongLongOpenHashMap counts) {
this.interval = interval;
this.counts = counts;
}
@ -110,10 +113,10 @@ public class CountHistogramFacetExecutor extends FacetExecutor {
@Override
public void onValue(int docId, double value) {
long bucket = bucket(value, interval);
counts.adjustOrPutValue(bucket, 1, 1);
counts.addTo(bucket, 1);
}
public TLongLongHashMap counts() {
public LongLongOpenHashMap counts() {
return counts;
}
}

View File

@ -19,9 +19,9 @@
package org.elasticsearch.search.facet.histogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.index.fielddata.DoubleValues;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
@ -43,7 +43,7 @@ public class FullHistogramFacetExecutor extends FacetExecutor {
private final HistogramFacet.ComparatorType comparatorType;
final long interval;
final Recycler.V<ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry>> entries;
final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
public FullHistogramFacetExecutor(IndexNumericFieldData indexFieldData, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
this.comparatorType = comparatorType;
@ -60,9 +60,16 @@ public class FullHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().valueCollection());
List<InternalFullHistogramFacet.FullEntry> fullEntries = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
boolean[] states = entries.v().allocated;
Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
fullEntries.add((InternalFullHistogramFacet.FullEntry) values[i]);
}
}
entries.release();
return new InternalFullHistogramFacet(facetName, comparatorType, entries1);
return new InternalFullHistogramFacet(facetName, comparatorType, fullEntries);
}
public static long bucket(double value, long interval) {
@ -96,9 +103,9 @@ public class FullHistogramFacetExecutor extends FacetExecutor {
public final static class HistogramProc extends DoubleFacetAggregatorBase {
final long interval;
final ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries;
final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
public HistogramProc(long interval, ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries) {
public HistogramProc(long interval, LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
this.interval = interval;
this.entries = entries;
}

View File

@ -19,8 +19,7 @@
package org.elasticsearch.search.facet.histogram;
import gnu.trove.iterator.TLongLongIterator;
import gnu.trove.map.hash.TLongLongHashMap;
import com.carrotsearch.hppc.LongLongOpenHashMap;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.HashedBytesArray;
@ -140,18 +139,22 @@ public class InternalCountHistogramFacet extends InternalHistogramFacet {
return facets.get(0);
}
Recycler.V<TLongLongHashMap> counts = context.cacheRecycler().longLongMap(-1);
Recycler.V<LongLongOpenHashMap> counts = context.cacheRecycler().longLongMap(-1);
for (Facet facet : facets) {
InternalCountHistogramFacet histoFacet = (InternalCountHistogramFacet) facet;
for (Entry entry : histoFacet.entries) {
counts.v().adjustOrPutValue(entry.getKey(), entry.getCount(), entry.getCount());
counts.v().addTo(entry.getKey(), entry.getCount());
}
}
final boolean[] states = counts.v().allocated;
final long[] keys = counts.v().keys;
final long[] values = counts.v().values;
CountEntry[] entries = new CountEntry[counts.v().size()];
int i = 0;
for (TLongLongIterator it = counts.v().iterator(); it.hasNext(); ) {
it.advance();
entries[i++] = new CountEntry(it.key(), it.value());
int entryIndex = 0;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
entries[entryIndex++] = new CountEntry(keys[i], values[i]);
}
}
counts.release();

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.facet.histogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
@ -26,7 +27,6 @@ import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.search.facet.Facet;
@ -151,7 +151,7 @@ public class InternalFullHistogramFacet extends InternalHistogramFacet {
return internalFacet;
}
Recycler.V<ExtTLongObjectHashMap<FullEntry>> map = context.cacheRecycler().longObjectMap(-1);
Recycler.V<LongObjectOpenHashMap<FullEntry>> map = context.cacheRecycler().longObjectMap(-1);
for (Facet facet : facets) {
InternalFullHistogramFacet histoFacet = (InternalFullHistogramFacet) facet;
@ -174,7 +174,8 @@ public class InternalFullHistogramFacet extends InternalHistogramFacet {
}
// sort
Object[] values = map.v().internalValues();
// TODO: hppc - toArray?
Object[] values = map.v().values().toArray();
Arrays.sort(values, (Comparator) comparatorType.comparator());
List<FullEntry> ordered = new ArrayList<FullEntry>(map.v().size());
for (int i = 0; i < map.v().size(); i++) {

View File

@ -19,10 +19,10 @@
package org.elasticsearch.search.facet.histogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.facet.FacetExecutor;
import org.elasticsearch.search.facet.InternalFacet;
@ -43,7 +43,7 @@ public class ScriptHistogramFacetExecutor extends FacetExecutor {
final long interval;
private final HistogramFacet.ComparatorType comparatorType;
final Recycler.V<ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry>> entries;
final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
public ScriptHistogramFacetExecutor(String scriptLang, String keyScript, String valueScript, Map<String, Object> params, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
this.keyScript = context.scriptService().search(context.lookup(), scriptLang, keyScript, params);
@ -61,7 +61,16 @@ public class ScriptHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().valueCollection());
List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
final boolean[] states = entries.v().allocated;
final Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
InternalFullHistogramFacet.FullEntry value = (InternalFullHistogramFacet.FullEntry) values[i];
entries1.add(value);
}
}
entries.release();
return new InternalFullHistogramFacet(facetName, comparatorType, entries1);
}
@ -72,9 +81,9 @@ public class ScriptHistogramFacetExecutor extends FacetExecutor {
class Collector extends FacetExecutor.Collector {
final ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries;
final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
Collector(ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries) {
Collector(LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
this.entries = entries;
}

View File

@ -19,9 +19,9 @@
package org.elasticsearch.search.facet.histogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.index.fielddata.DoubleValues;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.search.facet.DoubleFacetAggregatorBase;
@ -43,7 +43,7 @@ public class ValueHistogramFacetExecutor extends FacetExecutor {
private final HistogramFacet.ComparatorType comparatorType;
private final long interval;
final Recycler.V<ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry>> entries;
final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
public ValueHistogramFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
this.comparatorType = comparatorType;
@ -60,7 +60,16 @@ public class ValueHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().valueCollection());
List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
final boolean[] states = entries.v().allocated;
final Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
InternalFullHistogramFacet.FullEntry value = (InternalFullHistogramFacet.FullEntry) values[i];
entries1.add(value);
}
}
entries.release();
return new InternalFullHistogramFacet(facetName, comparatorType, entries1);
}
@ -93,13 +102,13 @@ public class ValueHistogramFacetExecutor extends FacetExecutor {
public final static class HistogramProc extends DoubleFacetAggregatorBase {
final long interval;
final ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries;
final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
DoubleValues valueValues;
final ValueAggregator valueAggregator = new ValueAggregator();
public HistogramProc(long interval, ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries) {
public HistogramProc(long interval, LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
this.interval = interval;
this.entries = entries;
}

View File

@ -19,10 +19,10 @@
package org.elasticsearch.search.facet.histogram;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.index.fielddata.DoubleValues;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.script.SearchScript;
@ -47,7 +47,7 @@ public class ValueScriptHistogramFacetExecutor extends FacetExecutor {
final SearchScript valueScript;
final long interval;
final Recycler.V<ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry>> entries;
final Recycler.V<LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry>> entries;
public ValueScriptHistogramFacetExecutor(IndexNumericFieldData indexFieldData, String scriptLang, String valueScript, Map<String, Object> params, long interval, HistogramFacet.ComparatorType comparatorType, SearchContext context) {
this.comparatorType = comparatorType;
@ -65,7 +65,16 @@ public class ValueScriptHistogramFacetExecutor extends FacetExecutor {
@Override
public InternalFacet buildFacet(String facetName) {
List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().valueCollection());
List<InternalFullHistogramFacet.FullEntry> entries1 = new ArrayList<InternalFullHistogramFacet.FullEntry>(entries.v().size());
final boolean[] states = entries.v().allocated;
final Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
InternalFullHistogramFacet.FullEntry value = (InternalFullHistogramFacet.FullEntry) values[i];
entries1.add(value);
}
}
entries.release();
return new InternalFullHistogramFacet(facetName, comparatorType, entries1);
}
@ -110,9 +119,9 @@ public class ValueScriptHistogramFacetExecutor extends FacetExecutor {
private final SearchScript valueScript;
final ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries;
final LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries;
public HistogramProc(long interval, SearchScript valueScript, ExtTLongObjectHashMap<InternalFullHistogramFacet.FullEntry> entries) {
public HistogramProc(long interval, SearchScript valueScript, LongObjectOpenHashMap<InternalFullHistogramFacet.FullEntry> entries) {
this.interval = interval;
this.valueScript = valueScript;
this.entries = entries;

View File

@ -19,9 +19,8 @@
package org.elasticsearch.search.facet.terms.doubles;
import com.carrotsearch.hppc.DoubleIntOpenHashMap;
import com.google.common.collect.ImmutableList;
import gnu.trove.iterator.TDoubleIntIterator;
import gnu.trove.map.hash.TDoubleIntHashMap;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.HashedBytesArray;
@ -173,7 +172,7 @@ public class InternalDoubleTermsFacet extends InternalTermsFacet {
InternalDoubleTermsFacet first = null;
Recycler.V<TDoubleIntHashMap> aggregated = context.cacheRecycler().doubleIntMap(-1);
Recycler.V<DoubleIntOpenHashMap> aggregated = context.cacheRecycler().doubleIntMap(-1);
long missing = 0;
long total = 0;
for (Facet facet : facets) {
@ -185,15 +184,20 @@ public class InternalDoubleTermsFacet extends InternalTermsFacet {
missing += termsFacet.getMissingCount();
total += termsFacet.getTotalCount();
for (Entry entry : termsFacet.getEntries()) {
aggregated.v().adjustOrPutValue(((DoubleEntry) entry).term, entry.getCount(), entry.getCount());
aggregated.v().addTo(((DoubleEntry) entry).term, entry.getCount());
}
}
BoundedTreeSet<DoubleEntry> ordered = new BoundedTreeSet<DoubleEntry>(first.comparatorType.comparator(), first.requiredSize);
for (TDoubleIntIterator it = aggregated.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.add(new DoubleEntry(it.key(), it.value()));
final boolean[] states = aggregated.v().allocated;
final double[] keys = aggregated.v().keys;
final int[] values = aggregated.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
ordered.add(new DoubleEntry(keys[i], values[i]));
}
}
first.entries = ordered;
first.missing = missing;
first.total = total;

View File

@ -19,11 +19,10 @@
package org.elasticsearch.search.facet.terms.doubles;
import com.carrotsearch.hppc.DoubleIntOpenHashMap;
import com.carrotsearch.hppc.DoubleOpenHashSet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import gnu.trove.iterator.TDoubleIntIterator;
import gnu.trove.map.hash.TDoubleIntHashMap;
import gnu.trove.set.hash.TDoubleHashSet;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.BytesRef;
@ -57,7 +56,7 @@ public class TermsDoubleFacetExecutor extends FacetExecutor {
private final SearchScript script;
private final ImmutableSet<BytesRef> excluded;
final Recycler.V<TDoubleIntHashMap> facets;
final Recycler.V<DoubleIntOpenHashMap> facets;
long missing;
long total;
@ -121,11 +120,15 @@ public class TermsDoubleFacetExecutor extends FacetExecutor {
facets.release();
return new InternalDoubleTermsFacet(facetName, comparatorType, size, ImmutableList.<InternalDoubleTermsFacet.DoubleEntry>of(), missing, total);
} else {
final boolean[] states = facets.v().allocated;
final double[] keys = facets.v().keys;
final int[] values = facets.v().values;
if (size < EntryPriorityQueue.LIMIT) {
EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
for (TDoubleIntIterator it = facets.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.insertWithOverflow(new InternalDoubleTermsFacet.DoubleEntry(it.key(), it.value()));
for (int i = 0; i < states.length; i++) {
if (states[i]) {
ordered.insertWithOverflow(new InternalDoubleTermsFacet.DoubleEntry(keys[i], values[i]));
}
}
InternalDoubleTermsFacet.DoubleEntry[] list = new InternalDoubleTermsFacet.DoubleEntry[ordered.size()];
for (int i = ordered.size() - 1; i >= 0; i--) {
@ -135,9 +138,10 @@ public class TermsDoubleFacetExecutor extends FacetExecutor {
return new InternalDoubleTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
} else {
BoundedTreeSet<InternalDoubleTermsFacet.DoubleEntry> ordered = new BoundedTreeSet<InternalDoubleTermsFacet.DoubleEntry>(comparatorType.comparator(), shardSize);
for (TDoubleIntIterator it = facets.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.add(new InternalDoubleTermsFacet.DoubleEntry(it.key(), it.value()));
for (int i = 0; i < states.length; i++) {
if (states[i]) {
ordered.add(new InternalDoubleTermsFacet.DoubleEntry(keys[i], values[i]));
}
}
facets.release();
return new InternalDoubleTermsFacet(facetName, comparatorType, size, ordered, missing, total);
@ -189,15 +193,15 @@ public class TermsDoubleFacetExecutor extends FacetExecutor {
private final SearchScript script;
private final TDoubleHashSet excluded;
private final DoubleOpenHashSet excluded;
public AggregatorValueProc(TDoubleIntHashMap facets, Set<BytesRef> excluded, SearchScript script) {
public AggregatorValueProc(DoubleIntOpenHashMap facets, Set<BytesRef> excluded, SearchScript script) {
super(facets);
this.script = script;
if (excluded == null || excluded.isEmpty()) {
this.excluded = null;
} else {
this.excluded = new TDoubleHashSet(excluded.size());
this.excluded = new DoubleOpenHashSet(excluded.size());
for (BytesRef s : excluded) {
this.excluded.add(Double.parseDouble(s.utf8ToString()));
}
@ -230,18 +234,18 @@ public class TermsDoubleFacetExecutor extends FacetExecutor {
public static class StaticAggregatorValueProc extends DoubleFacetAggregatorBase {
private final TDoubleIntHashMap facets;
private final DoubleIntOpenHashMap facets;
public StaticAggregatorValueProc(TDoubleIntHashMap facets) {
public StaticAggregatorValueProc(DoubleIntOpenHashMap facets) {
this.facets = facets;
}
@Override
public void onValue(int docId, double value) {
facets.adjustOrPutValue(value, 1, 1);
facets.addTo(value, 1);
}
public final TDoubleIntHashMap facets() {
public final DoubleIntOpenHashMap facets() {
return facets;
}
}

View File

@ -19,9 +19,8 @@
package org.elasticsearch.search.facet.terms.longs;
import com.carrotsearch.hppc.LongIntOpenHashMap;
import com.google.common.collect.ImmutableList;
import gnu.trove.iterator.TLongIntIterator;
import gnu.trove.map.hash.TLongIntHashMap;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.HashedBytesArray;
@ -174,7 +173,7 @@ public class InternalLongTermsFacet extends InternalTermsFacet {
InternalLongTermsFacet first = null;
Recycler.V<TLongIntHashMap> aggregated = context.cacheRecycler().longIntMap(-1);
Recycler.V<LongIntOpenHashMap> aggregated = context.cacheRecycler().longIntMap(-1);
long missing = 0;
long total = 0;
for (Facet facet : facets) {
@ -186,14 +185,19 @@ public class InternalLongTermsFacet extends InternalTermsFacet {
missing += termsFacet.getMissingCount();
total += termsFacet.getTotalCount();
for (Entry entry : termsFacet.getEntries()) {
aggregated.v().adjustOrPutValue(((LongEntry) entry).term, entry.getCount(), entry.getCount());
aggregated.v().addTo(((LongEntry) entry).term, entry.getCount());
}
}
BoundedTreeSet<LongEntry> ordered = new BoundedTreeSet<LongEntry>(first.comparatorType.comparator(), first.requiredSize);
for (TLongIntIterator it = aggregated.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.add(new LongEntry(it.key(), it.value()));
final boolean[] states = aggregated.v().allocated;
final long[] keys = aggregated.v().keys;
final int[] values = aggregated.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
ordered.add(new LongEntry(keys[i], values[i]));
}
}
first.entries = ordered;
first.missing = missing;

View File

@ -19,11 +19,10 @@
package org.elasticsearch.search.facet.terms.longs;
import com.carrotsearch.hppc.LongIntOpenHashMap;
import com.carrotsearch.hppc.LongOpenHashSet;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import gnu.trove.iterator.TLongIntIterator;
import gnu.trove.map.hash.TLongIntHashMap;
import gnu.trove.set.hash.TLongHashSet;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.BytesRef;
@ -57,7 +56,7 @@ public class TermsLongFacetExecutor extends FacetExecutor {
private final SearchScript script;
private final ImmutableSet<BytesRef> excluded;
final Recycler.V<TLongIntHashMap> facets;
final Recycler.V<LongIntOpenHashMap> facets;
long missing;
long total;
@ -120,11 +119,16 @@ public class TermsLongFacetExecutor extends FacetExecutor {
facets.release();
return new InternalLongTermsFacet(facetName, comparatorType, size, ImmutableList.<InternalLongTermsFacet.LongEntry>of(), missing, total);
} else {
LongIntOpenHashMap facetEntries = facets.v();
final boolean[] states = facets.v().allocated;
final long[] keys = facets.v().keys;
final int[] values = facets.v().values;
if (size < EntryPriorityQueue.LIMIT) {
EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
for (TLongIntIterator it = facets.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.insertWithOverflow(new InternalLongTermsFacet.LongEntry(it.key(), it.value()));
for (int i = 0; i < states.length; i++) {
if (states[i]) {
ordered.insertWithOverflow(new InternalLongTermsFacet.LongEntry(keys[i], values[i]));
}
}
InternalLongTermsFacet.LongEntry[] list = new InternalLongTermsFacet.LongEntry[ordered.size()];
for (int i = ordered.size() - 1; i >= 0; i--) {
@ -134,9 +138,10 @@ public class TermsLongFacetExecutor extends FacetExecutor {
return new InternalLongTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
} else {
BoundedTreeSet<InternalLongTermsFacet.LongEntry> ordered = new BoundedTreeSet<InternalLongTermsFacet.LongEntry>(comparatorType.comparator(), shardSize);
for (TLongIntIterator it = facets.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.add(new InternalLongTermsFacet.LongEntry(it.key(), it.value()));
for (int i = 0; i < states.length; i++) {
if (states[i]) {
ordered.add(new InternalLongTermsFacet.LongEntry(keys[i], values[i]));
}
}
facets.release();
return new InternalLongTermsFacet(facetName, comparatorType, size, ordered, missing, total);
@ -188,15 +193,15 @@ public class TermsLongFacetExecutor extends FacetExecutor {
private final SearchScript script;
private final TLongHashSet excluded;
private final LongOpenHashSet excluded;
public AggregatorValueProc(TLongIntHashMap facets, Set<BytesRef> excluded, SearchScript script) {
public AggregatorValueProc(LongIntOpenHashMap facets, Set<BytesRef> excluded, SearchScript script) {
super(facets);
this.script = script;
if (excluded == null || excluded.isEmpty()) {
this.excluded = null;
} else {
this.excluded = new TLongHashSet(excluded.size());
this.excluded = new LongOpenHashSet(excluded.size());
for (BytesRef s : excluded) {
this.excluded.add(Long.parseLong(s.utf8ToString()));
}
@ -229,18 +234,18 @@ public class TermsLongFacetExecutor extends FacetExecutor {
public static class StaticAggregatorValueProc extends LongFacetAggregatorBase {
private final TLongIntHashMap facets;
private final LongIntOpenHashMap facets;
public StaticAggregatorValueProc(TLongIntHashMap facets) {
public StaticAggregatorValueProc(LongIntOpenHashMap facets) {
this.facets = facets;
}
@Override
public void onValue(int docId, long value) {
facets.adjustOrPutValue(value, 1, 1);
facets.addTo(value, 1);
}
public final TLongIntHashMap facets() {
public final LongIntOpenHashMap facets() {
return facets;
}
}

View File

@ -18,8 +18,8 @@
*/
package org.elasticsearch.search.facet.terms.strings;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.google.common.collect.ImmutableList;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
@ -235,12 +235,12 @@ public class HashedAggregator {
// implementation
// for
// assertions
private final TObjectIntHashMap<HashedBytesRef> valuesAndCount = new TObjectIntHashMap<HashedBytesRef>();
private final ObjectIntOpenHashMap<HashedBytesRef> valuesAndCount = new ObjectIntOpenHashMap<HashedBytesRef>();
private HashedBytesRef spare = new HashedBytesRef();
@Override
public boolean add(BytesRef value, int hashCode, BytesValues values) {
int adjustedValue = valuesAndCount.adjustOrPutValue(spare.reset(value, hashCode), 1, 1);
int adjustedValue = valuesAndCount.addTo(spare.reset(value, hashCode), 1);
assert adjustedValue >= 1;
if (adjustedValue == 1) { // only if we added the spare we create a
// new instance
@ -268,7 +268,7 @@ public class HashedAggregator {
@Override
public boolean addNoCount(BytesRef value, int hashCode, BytesValues values) {
if (!valuesAndCount.containsKey(spare.reset(value, hashCode))) {
valuesAndCount.adjustOrPutValue(spare.reset(value, hashCode), 0, 0);
valuesAndCount.addTo(spare.reset(value, hashCode), 0);
spare.bytes = values.makeSafe(spare.bytes);
spare = new HashedBytesRef();
return true;
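
Unlike Trove's adjustOrPutValue, which takes separate put/adjust amounts, hppc's addTo takes a single delta and returns the value stored after the update, which is what keeps the `adjustedValue == 1` check above detecting a first-time insertion. A hedged sketch of that behaviour, assuming hppc 0.5.x semantics:

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class AddToSketch {
    public static void main(String[] args) {
        ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();

        // First call inserts the key and returns the stored value, 1.
        int first = counts.addTo("term", 1);
        // Second call bumps the existing value and returns 2.
        int second = counts.addTo("term", 1);
        // addTo(key, 0) inserts the key with value 0 if absent, leaving existing values untouched.
        counts.addTo("other", 0);

        System.out.println(first + " " + second + " " + counts.get("other")); // 1 2 0
    }
}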

View File

@ -19,9 +19,8 @@
package org.elasticsearch.search.facet.terms.strings;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.google.common.collect.ImmutableList;
import gnu.trove.iterator.TObjectIntIterator;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
@ -179,7 +178,7 @@ public class InternalStringTermsFacet extends InternalTermsFacet {
InternalStringTermsFacet first = null;
Recycler.V<TObjectIntHashMap<Text>> aggregated = context.cacheRecycler().objectIntMap(-1);
Recycler.V<ObjectIntOpenHashMap<Text>> aggregated = context.cacheRecycler().objectIntMap(-1);
long missing = 0;
long total = 0;
for (Facet facet : facets) {
@ -199,14 +198,21 @@ public class InternalStringTermsFacet extends InternalTermsFacet {
}
for (Entry entry : termsFacet.getEntries()) {
aggregated.v().adjustOrPutValue(entry.getTerm(), entry.getCount(), entry.getCount());
aggregated.v().addTo(entry.getTerm(), entry.getCount());
}
}
BoundedTreeSet<TermEntry> ordered = new BoundedTreeSet<TermEntry>(first.comparatorType.comparator(), first.requiredSize);
for (TObjectIntIterator<Text> it = aggregated.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.add(new TermEntry(it.key(), it.value()));
ObjectIntOpenHashMap<Text> aggregatedEntries = aggregated.v();
final boolean[] states = aggregatedEntries.allocated;
Object[] keys = aggregatedEntries.keys;
int[] values = aggregatedEntries.values;
for (int i = 0; i < aggregatedEntries.allocated.length; i++) {
if (states[i]) {
Text key = (Text) keys[i];
ordered.add(new TermEntry(key, values[i]));
}
}
first.entries = ordered;
first.missing = missing;
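
For object-keyed hppc maps the backing `keys` array is read out as Object[], so entries need an explicit element cast (to Text in the hunk above). A short sketch of the merge-and-iterate pattern, with a plain String key standing in for Text purely for brevity (an assumption, not the class used by the commit):

import com.carrotsearch.hppc.ObjectIntOpenHashMap;

public class ObjectKeyIterationSketch {
    public static void main(String[] args) {
        ObjectIntOpenHashMap<String> aggregated = new ObjectIntOpenHashMap<String>();
        aggregated.addTo("foo", 3);
        aggregated.addTo("bar", 2);
        aggregated.addTo("foo", 1);

        final boolean[] states = aggregated.allocated;
        final Object[] keys = aggregated.keys;   // erased Object[] view, hence the cast below
        final int[] values = aggregated.values;
        for (int i = 0; i < states.length; i++) {
            if (states[i]) {
                String key = (String) keys[i];
                System.out.println(key + " -> " + values[i]);
            }
        }
    }
}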

View File

@ -19,10 +19,9 @@
package org.elasticsearch.search.facet.terms.strings;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import gnu.trove.iterator.TObjectIntIterator;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.BytesRef;
@ -54,7 +53,7 @@ public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
private final ImmutableSet<BytesRef> excluded;
private final int numberOfShards;
final Recycler.V<TObjectIntHashMap<BytesRef>> facets;
final Recycler.V<ObjectIntOpenHashMap<BytesRef>> facets;
long missing;
long total;
@ -84,11 +83,16 @@ public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
facets.release();
return new InternalStringTermsFacet(facetName, comparatorType, size, ImmutableList.<InternalStringTermsFacet.TermEntry>of(), missing, total);
} else {
final boolean[] states = facets.v().allocated;
final Object[] keys = facets.v().keys;
final int[] values = facets.v().values;
if (shardSize < EntryPriorityQueue.LIMIT) {
EntryPriorityQueue ordered = new EntryPriorityQueue(shardSize, comparatorType.comparator());
for (TObjectIntIterator<BytesRef> it = facets.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.insertWithOverflow(new InternalStringTermsFacet.TermEntry(it.key(), it.value()));
for (int i = 0; i < states.length; i++) {
if (states[i]) {
BytesRef key = (BytesRef) keys[i];
ordered.insertWithOverflow(new InternalStringTermsFacet.TermEntry(key, values[i]));
}
}
InternalStringTermsFacet.TermEntry[] list = new InternalStringTermsFacet.TermEntry[ordered.size()];
for (int i = ordered.size() - 1; i >= 0; i--) {
@ -98,9 +102,11 @@ public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
return new InternalStringTermsFacet(facetName, comparatorType, size, Arrays.asList(list), missing, total);
} else {
BoundedTreeSet<InternalStringTermsFacet.TermEntry> ordered = new BoundedTreeSet<InternalStringTermsFacet.TermEntry>(comparatorType.comparator(), shardSize);
for (TObjectIntIterator<BytesRef> it = facets.v().iterator(); it.hasNext(); ) {
it.advance();
ordered.add(new InternalStringTermsFacet.TermEntry(it.key(), it.value()));
for (int i = 0; i < states.length; i++) {
if (states[i]) {
BytesRef key = (BytesRef) keys[i];
ordered.add(new InternalStringTermsFacet.TermEntry(key, values[i]));
}
}
facets.release();
return new InternalStringTermsFacet(facetName, comparatorType, size, ordered, missing, total);
@ -113,12 +119,12 @@ public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
private final Matcher matcher;
private final ImmutableSet<BytesRef> excluded;
private final SearchScript script;
private final TObjectIntHashMap<BytesRef> facets;
private final ObjectIntOpenHashMap<BytesRef> facets;
long missing;
long total;
Collector(Matcher matcher, ImmutableSet<BytesRef> excluded, SearchScript script, TObjectIntHashMap<BytesRef> facets) {
Collector(Matcher matcher, ImmutableSet<BytesRef> excluded, SearchScript script, ObjectIntOpenHashMap<BytesRef> facets) {
this.matcher = matcher;
this.excluded = excluded;
this.script = script;
@ -150,7 +156,7 @@ public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
if (match(value)) {
found = true;
// LUCENE 4 UPGRADE: should be possible to convert directly to BR
facets.adjustOrPutValue(new BytesRef(value), 1, 1);
facets.addTo(new BytesRef(value), 1);
total++;
}
}
@ -164,7 +170,7 @@ public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
if (match(value)) {
found = true;
// LUCENE 4 UPGRADE: should be possible to convert directly to BR
facets.adjustOrPutValue(new BytesRef(value), 1, 1);
facets.addTo(new BytesRef(value), 1);
total++;
}
}
@ -175,7 +181,7 @@ public class ScriptTermsStringFieldFacetExecutor extends FacetExecutor {
String value = o.toString();
if (match(value)) {
// LUCENE 4 UPGRADE: should be possible to convert directly to BR
facets.adjustOrPutValue(new BytesRef(value), 1, 1);
facets.addTo(new BytesRef(value), 1);
total++;
} else {
missing++;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.facet.termsstats.doubles;
import com.carrotsearch.hppc.DoubleObjectOpenHashMap;
import com.google.common.collect.ImmutableList;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
@ -29,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.trove.ExtTDoubleObjectHashMap;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.search.facet.Facet;
@ -184,7 +184,7 @@ public class InternalTermsStatsDoubleFacet extends InternalTermsStatsFacet {
return facets.get(0);
}
int missing = 0;
Recycler.V<ExtTDoubleObjectHashMap<DoubleEntry>> map = context.cacheRecycler().doubleObjectMap(-1);
Recycler.V<DoubleObjectOpenHashMap<DoubleEntry>> map = context.cacheRecycler().doubleObjectMap(-1);
for (Facet facet : facets) {
InternalTermsStatsDoubleFacet tsFacet = (InternalTermsStatsDoubleFacet) facet;
missing += tsFacet.missing;
@ -209,12 +209,12 @@ public class InternalTermsStatsDoubleFacet extends InternalTermsStatsFacet {
// sort
if (requiredSize == 0) { // all terms
DoubleEntry[] entries1 = map.v().values(new DoubleEntry[map.v().size()]);
DoubleEntry[] entries1 = map.v().values().toArray(DoubleEntry.class);
Arrays.sort(entries1, comparatorType.comparator());
map.release();
return new InternalTermsStatsDoubleFacet(getName(), comparatorType, requiredSize, Arrays.asList(entries1), missing);
} else {
Object[] values = map.v().internalValues();
Object[] values = map.v().values;
Arrays.sort(values, (Comparator) comparatorType.comparator());
List<DoubleEntry> ordered = new ArrayList<DoubleEntry>(map.v().size());
for (int i = 0; i < requiredSize; i++) {
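
Trove's `values(T[] dest)` is replaced above by hppc's value-container view: `values()` returns a container whose `toArray(Class)` builds a correctly typed array of only the live entries. A small sketch under the assumption of the hppc 0.5.x container API:

import com.carrotsearch.hppc.DoubleObjectOpenHashMap;

import java.util.Arrays;

public class ValuesToArraySketch {
    public static void main(String[] args) {
        DoubleObjectOpenHashMap<String> map = new DoubleObjectOpenHashMap<String>();
        map.put(1.5, "a");
        map.put(2.5, "b");

        // values() is a view over the map; toArray(Class) copies only the allocated entries.
        String[] values = map.values().toArray(String.class);
        System.out.println(Arrays.toString(values));
    }
}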

View File

@ -19,12 +19,12 @@
package org.elasticsearch.search.facet.termsstats.doubles;
import com.carrotsearch.hppc.DoubleObjectOpenHashMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTDoubleObjectHashMap;
import org.elasticsearch.index.fielddata.DoubleValues;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.script.SearchScript;
@ -35,6 +35,7 @@ import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
@ -50,7 +51,7 @@ public class TermsStatsDoubleFacetExecutor extends FacetExecutor {
private final int size;
private final int shardSize;
final Recycler.V<ExtTDoubleObjectHashMap<InternalTermsStatsDoubleFacet.DoubleEntry>> entries;
final Recycler.V<DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry>> entries;
long missing;
public TermsStatsDoubleFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, SearchScript script,
@ -78,9 +79,18 @@ public class TermsStatsDoubleFacetExecutor extends FacetExecutor {
}
if (size == 0) { // all terms
// all terms, just return the collection, we will sort it on the way back
return new InternalTermsStatsDoubleFacet(facetName, comparatorType, 0 /* indicates all terms*/, entries.v().valueCollection(), missing);
List<InternalTermsStatsDoubleFacet.DoubleEntry> doubleEntries = new ArrayList<InternalTermsStatsDoubleFacet.DoubleEntry>(entries.v().size());
boolean[] states = entries.v().allocated;
Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
doubleEntries.add((InternalTermsStatsDoubleFacet.DoubleEntry) values[i]);
}
}
entries.release();
return new InternalTermsStatsDoubleFacet(facetName, comparatorType, 0 /* indicates all terms*/, doubleEntries, missing);
}
Object[] values = entries.v().internalValues();
Object[] values = entries.v().values;
Arrays.sort(values, (Comparator) comparatorType.comparator());
int limit = shardSize;
@ -140,12 +150,12 @@ public class TermsStatsDoubleFacetExecutor extends FacetExecutor {
public static class Aggregator extends DoubleFacetAggregatorBase {
final ExtTDoubleObjectHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries;
final DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries;
int missing;
DoubleValues valueFieldData;
final ValueAggregator valueAggregator = new ValueAggregator();
public Aggregator(ExtTDoubleObjectHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries) {
public Aggregator(DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries) {
this.entries = entries;
}
@ -184,7 +194,7 @@ public class TermsStatsDoubleFacetExecutor extends FacetExecutor {
private final SearchScript script;
public ScriptAggregator(ExtTDoubleObjectHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries, SearchScript script) {
public ScriptAggregator(DoubleObjectOpenHashMap<InternalTermsStatsDoubleFacet.DoubleEntry> entries, SearchScript script) {
super(entries);
this.script = script;
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.facet.termsstats.longs;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import com.google.common.collect.ImmutableList;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
@ -29,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.search.facet.Facet;
@ -184,7 +184,7 @@ public class InternalTermsStatsLongFacet extends InternalTermsStatsFacet {
return facets.get(0);
}
int missing = 0;
Recycler.V<ExtTLongObjectHashMap<LongEntry>> map = context.cacheRecycler().longObjectMap(-1);
Recycler.V<LongObjectOpenHashMap<LongEntry>> map = context.cacheRecycler().longObjectMap(-1);
for (Facet facet : facets) {
InternalTermsStatsLongFacet tsFacet = (InternalTermsStatsLongFacet) facet;
missing += tsFacet.missing;
@ -209,12 +209,12 @@ public class InternalTermsStatsLongFacet extends InternalTermsStatsFacet {
// sort
if (requiredSize == 0) { // all terms
LongEntry[] entries1 = map.v().values(new LongEntry[map.v().size()]);
LongEntry[] entries1 = map.v().values().toArray(LongEntry.class);
Arrays.sort(entries1, comparatorType.comparator());
map.release();
return new InternalTermsStatsLongFacet(getName(), comparatorType, requiredSize, Arrays.asList(entries1), missing);
} else {
Object[] values = map.v().internalValues();
Object[] values = map.v().values;
Arrays.sort(values, (Comparator) comparatorType.comparator());
List<LongEntry> ordered = new ArrayList<LongEntry>(map.v().size());
for (int i = 0; i < requiredSize; i++) {

View File

@ -19,12 +19,12 @@
package org.elasticsearch.search.facet.termsstats.longs;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTLongObjectHashMap;
import org.elasticsearch.index.fielddata.DoubleValues;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.LongValues;
@ -37,6 +37,7 @@ import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
@ -51,7 +52,7 @@ public class TermsStatsLongFacetExecutor extends FacetExecutor {
private final int size;
private final int shardSize;
final Recycler.V<ExtTLongObjectHashMap<InternalTermsStatsLongFacet.LongEntry>> entries;
final Recycler.V<LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry>> entries;
long missing;
public TermsStatsLongFacetExecutor(IndexNumericFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, SearchScript script,
@ -79,11 +80,21 @@ public class TermsStatsLongFacetExecutor extends FacetExecutor {
}
if (size == 0) { // all terms
// all terms, just return the collection, we will sort it on the way back
return new InternalTermsStatsLongFacet(facetName, comparatorType, 0 /* indicates all terms*/, entries.v().valueCollection(), missing);
List<InternalTermsStatsLongFacet.LongEntry> longEntries = new ArrayList<InternalTermsStatsLongFacet.LongEntry>(entries.v().size());
boolean[] states = entries.v().allocated;
Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
longEntries.add((InternalTermsStatsLongFacet.LongEntry) values[i]);
}
}
entries.release();
return new InternalTermsStatsLongFacet(facetName, comparatorType, 0 /* indicates all terms*/, longEntries, missing);
}
// we need to fetch facets of "size * numberOfShards" because of problems in how they are distributed across shards
Object[] values = entries.v().internalValues();
Object[] values = entries.v().values;
Arrays.sort(values, (Comparator) comparatorType.comparator());
int limit = shardSize;
@ -142,11 +153,11 @@ public class TermsStatsLongFacetExecutor extends FacetExecutor {
public static class Aggregator extends LongFacetAggregatorBase {
final ExtTLongObjectHashMap<InternalTermsStatsLongFacet.LongEntry> entries;
final LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry> entries;
DoubleValues valueValues;
final ValueAggregator valueAggregator = new ValueAggregator();
public Aggregator(ExtTLongObjectHashMap<InternalTermsStatsLongFacet.LongEntry> entries) {
public Aggregator(LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry> entries) {
this.entries = entries;
}
@ -185,7 +196,7 @@ public class TermsStatsLongFacetExecutor extends FacetExecutor {
private final SearchScript script;
public ScriptAggregator(ExtTLongObjectHashMap<InternalTermsStatsLongFacet.LongEntry> entries, SearchScript script) {
public ScriptAggregator(LongObjectOpenHashMap<InternalTermsStatsLongFacet.LongEntry> entries, SearchScript script) {
super(entries);
this.script = script;
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.facet.termsstats.strings;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import com.google.common.collect.ImmutableList;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
@ -31,7 +32,6 @@ import org.elasticsearch.common.lucene.HashedBytesRef;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.text.BytesText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.trove.ExtTHashMap;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.search.facet.Facet;
@ -189,7 +189,7 @@ public class InternalTermsStatsStringFacet extends InternalTermsStatsFacet {
return tsFacet;
}
int missing = 0;
Recycler.V<ExtTHashMap<Text, StringEntry>> map = context.cacheRecycler().hashMap(-1);
Recycler.V<ObjectObjectOpenHashMap<Text, StringEntry>> map = context.cacheRecycler().hashMap(-1);
for (Facet facet : facets) {
InternalTermsStatsStringFacet tsFacet = (InternalTermsStatsStringFacet) facet;
missing += tsFacet.missing;
@ -214,12 +214,12 @@ public class InternalTermsStatsStringFacet extends InternalTermsStatsFacet {
// sort
if (requiredSize == 0) { // all terms
StringEntry[] entries1 = map.v().values().toArray(new StringEntry[map.v().size()]);
StringEntry[] entries1 = map.v().values().toArray(StringEntry.class);
Arrays.sort(entries1, comparatorType.comparator());
map.release();
return new InternalTermsStatsStringFacet(getName(), comparatorType, requiredSize, Arrays.asList(entries1), missing);
} else {
Object[] values = map.v().internalValues();
Object[] values = map.v().values;
Arrays.sort(values, (Comparator) comparatorType.comparator());
List<StringEntry> ordered = new ArrayList<StringEntry>(Math.min(map.v().size(), requiredSize));
for (int i = 0; i < requiredSize; i++) {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search.facet.termsstats.strings;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.lucene.index.AtomicReaderContext;
@ -26,7 +27,6 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.HashedBytesRef;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.trove.ExtTHashMap;
import org.elasticsearch.index.fielddata.BytesValues;
import org.elasticsearch.index.fielddata.DoubleValues;
import org.elasticsearch.index.fielddata.IndexFieldData;
@ -40,6 +40,7 @@ import org.elasticsearch.search.facet.termsstats.TermsStatsFacet;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
@ -53,7 +54,7 @@ public class TermsStatsStringFacetExecutor extends FacetExecutor {
private final int size;
private final int shardSize;
final Recycler.V<ExtTHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry>> entries;
final Recycler.V<ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry>> entries;
long missing;
public TermsStatsStringFacetExecutor(IndexFieldData keyIndexFieldData, IndexNumericFieldData valueIndexFieldData, SearchScript valueScript,
@ -81,9 +82,17 @@ public class TermsStatsStringFacetExecutor extends FacetExecutor {
}
if (size == 0) { // all terms
// all terms, just return the collection, we will sort it on the way back
return new InternalTermsStatsStringFacet(facetName, comparatorType, 0/* indicates all terms*/, entries.v().values(), missing);
List<InternalTermsStatsStringFacet.StringEntry> stringEntries = new ArrayList<InternalTermsStatsStringFacet.StringEntry>();
final boolean[] states = entries.v().allocated;
final Object[] values = entries.v().values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
stringEntries.add((InternalTermsStatsStringFacet.StringEntry) values[i]);
}
}
return new InternalTermsStatsStringFacet(facetName, comparatorType, 0 /* indicates all terms*/, stringEntries, missing);
}
Object[] values = entries.v().internalValues();
Object[] values = entries.v().values;
Arrays.sort(values, (Comparator) comparatorType.comparator());
List<InternalTermsStatsStringFacet.StringEntry> ordered = Lists.newArrayList();
@ -144,7 +153,7 @@ public class TermsStatsStringFacetExecutor extends FacetExecutor {
public static class Aggregator extends HashedAggregator {
final ExtTHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries;
final ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries;
final HashedBytesRef spare = new HashedBytesRef();
int missing = 0;
@ -152,7 +161,7 @@ public class TermsStatsStringFacetExecutor extends FacetExecutor {
ValueAggregator valueAggregator = new ValueAggregator();
public Aggregator(ExtTHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries) {
public Aggregator(ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries) {
this.entries = entries;
}
@ -191,7 +200,7 @@ public class TermsStatsStringFacetExecutor extends FacetExecutor {
public static class ScriptAggregator extends Aggregator {
private final SearchScript script;
public ScriptAggregator(ExtTHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries, SearchScript script) {
public ScriptAggregator(ObjectObjectOpenHashMap<HashedBytesRef, InternalTermsStatsStringFacet.StringEntry> entries, SearchScript script) {
super(entries);
this.script = script;
}

View File

@ -19,9 +19,9 @@
package org.elasticsearch.search.fetch;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.trove.ExtTIntArrayList;
import org.elasticsearch.transport.TransportRequest;
import java.io.IOException;
@ -40,10 +40,10 @@ public class FetchSearchRequest extends TransportRequest {
public FetchSearchRequest() {
}
public FetchSearchRequest(TransportRequest request, long id, ExtTIntArrayList list) {
public FetchSearchRequest(TransportRequest request, long id, IntArrayList list) {
super(request);
this.id = id;
this.docIds = list.unsafeArray();
this.docIds = list.buffer;
this.size = list.size();
}
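
The old unsafeArray() call maps to hppc's public `buffer` field. Like its predecessor it exposes the raw backing array, which may be longer than `size()`, so the two must be carried together, as the constructor above does. A minimal sketch, assuming the hppc 0.5.x IntArrayList:

import com.carrotsearch.hppc.IntArrayList;

public class IntArrayListBufferSketch {
    public static void main(String[] args) {
        IntArrayList docIds = new IntArrayList();
        docIds.add(3);
        docIds.add(8);
        docIds.add(21);

        int[] buffer = docIds.buffer; // raw backing array, capacity may exceed size()
        int size = docIds.size();
        for (int i = 0; i < size; i++) { // only the first size() slots are valid
            System.out.println(buffer[i]);
        }
    }
}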

View File

@ -19,8 +19,8 @@
package org.elasticsearch.search.internal;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import com.google.common.collect.Iterators;
import gnu.trove.map.hash.TIntObjectHashMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -51,7 +51,7 @@ public class InternalSearchHits implements SearchHits {
}
private IdentityHashMap<SearchShardTarget, Integer> shardHandleLookup = new IdentityHashMap<SearchShardTarget, Integer>();
private TIntObjectHashMap<SearchShardTarget> handleShardLookup = new TIntObjectHashMap<SearchShardTarget>();
private IntObjectOpenHashMap<SearchShardTarget> handleShardLookup = new IntObjectOpenHashMap<SearchShardTarget>();
private ShardTargetType streamShardTarget = ShardTargetType.STREAM;
public StreamContext reset() {
@ -65,7 +65,7 @@ public class InternalSearchHits implements SearchHits {
return shardHandleLookup;
}
public TIntObjectHashMap<SearchShardTarget> handleShardLookup() {
public IntObjectOpenHashMap<SearchShardTarget> handleShardLookup() {
return handleShardLookup;
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.search.suggest.completion;
import gnu.trove.map.hash.TObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.codecs.*;
import org.apache.lucene.index.FieldInfo;
@ -259,9 +259,9 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider
@Override
public CompletionStats stats(String... fields) {
long sizeInBytes = 0;
TObjectLongHashMap<String> completionFields = null;
if (fields != null && fields.length > 0) {
completionFields = new TObjectLongHashMap<String>(fields.length);
ObjectLongOpenHashMap<String> completionFields = null;
if (fields != null && fields.length > 0) {
completionFields = new ObjectLongOpenHashMap<String>(fields.length);
}
for (Map.Entry<String, AnalyzingSuggestHolder> entry : lookupMap.entrySet()) {
@ -273,7 +273,7 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider
// support for getting fields by regex as in fielddata
if (Regex.simpleMatch(field, entry.getKey())) {
long fstSize = entry.getValue().fst.sizeInBytes();
completionFields.adjustOrPutValue(field, fstSize, fstSize);
completionFields.addTo(field, fstSize);
}
}
}

View File

@ -18,8 +18,7 @@
*/
package org.elasticsearch.search.suggest.completion;
import gnu.trove.iterator.TObjectLongIterator;
import gnu.trove.map.hash.TObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongOpenHashMap;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -39,12 +38,12 @@ public class CompletionStats implements Streamable, ToXContent {
private long sizeInBytes;
@Nullable
private TObjectLongHashMap<String> fields;
private ObjectLongOpenHashMap<String> fields;
public CompletionStats() {
}
public CompletionStats(long size, @Nullable TObjectLongHashMap<String> fields) {
public CompletionStats(long size, @Nullable ObjectLongOpenHashMap<String> fields) {
this.sizeInBytes = size;
this.fields = fields;
}
@ -57,7 +56,7 @@ public class CompletionStats implements Streamable, ToXContent {
return new ByteSizeValue(sizeInBytes);
}
public TObjectLongHashMap<String> getFields() {
public ObjectLongOpenHashMap<String> getFields() {
return fields;
}
@ -66,7 +65,7 @@ public class CompletionStats implements Streamable, ToXContent {
sizeInBytes = in.readVLong();
if (in.readBoolean()) {
int size = in.readVInt();
fields = new TObjectLongHashMap<String>(size);
fields = new ObjectLongOpenHashMap<String>(size);
for (int i = 0; i < size; i++) {
fields.put(in.readString(), in.readVLong());
}
@ -81,10 +80,14 @@ public class CompletionStats implements Streamable, ToXContent {
} else {
out.writeBoolean(true);
out.writeVInt(fields.size());
for (TObjectLongIterator<String> it = fields.iterator(); it.hasNext(); ) {
it.advance();
out.writeString(it.key());
out.writeVLong(it.value());
final boolean[] states = fields.allocated;
final Object[] keys = fields.keys;
final long[] values = fields.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
out.writeString((String) keys[i]);
out.writeVLong(values[i]);
}
}
}
}
@ -95,11 +98,15 @@ public class CompletionStats implements Streamable, ToXContent {
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, sizeInBytes);
if (fields != null) {
builder.startObject(Fields.FIELDS);
for (TObjectLongIterator<String> it = fields.iterator(); it.hasNext(); ) {
it.advance();
builder.startObject(it.key(), XContentBuilder.FieldCaseConversion.NONE);
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, it.value());
builder.endObject();
final boolean[] states = fields.allocated;
final Object[] keys = fields.keys;
final long[] values = fields.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
builder.startObject((String) keys[i], XContentBuilder.FieldCaseConversion.NONE);
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, values[i]);
builder.endObject();
}
}
builder.endObject();
}
@ -128,10 +135,15 @@ public class CompletionStats implements Streamable, ToXContent {
sizeInBytes += completion.getSizeInBytes();
if (completion.fields != null) {
if (fields == null) fields = new TObjectLongHashMap<String>();
for (TObjectLongIterator<String> it = completion.fields.iterator(); it.hasNext(); ) {
it.advance();
fields.adjustOrPutValue(it.key(), it.value(), it.value());
if (fields == null) fields = new ObjectLongOpenHashMap<String>();
final boolean[] states = completion.fields.allocated;
final Object[] keys = completion.fields.keys;
final long[] values = completion.fields.values;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
fields.addTo((String) keys[i], values[i]);
}
}
}
}

View File

@ -17,18 +17,15 @@
* under the License.
*/
package org.elasticsearch.benchmark.trove;
package org.elasticsearch.benchmark.hppc;
import com.carrotsearch.hppc.IntIntOpenHashMap;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import gnu.trove.map.custom_hash.TObjectIntCustomHashMap;
import gnu.trove.map.hash.THashMap;
import gnu.trove.map.hash.TIntIntHashMap;
import gnu.trove.map.hash.TIntObjectHashMap;
import gnu.trove.map.hash.TObjectIntHashMap;
import gnu.trove.strategy.IdentityHashingStrategy;
import jsr166y.ThreadLocalRandom;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.trove.StringIdentityHashingStrategy;
import org.elasticsearch.common.unit.SizeValue;
import java.util.HashMap;
@ -53,15 +50,15 @@ public class StringMapAdjustOrPutBenchmark {
StopWatch stopWatch;
stopWatch = new StopWatch().start();
TObjectIntHashMap<String> map = new TObjectIntHashMap<String>();
ObjectIntOpenHashMap<String> map = new ObjectIntOpenHashMap<String>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
map.clear();
} else {
map = new TObjectIntHashMap<String>();
map = new ObjectIntOpenHashMap<String>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
map.adjustOrPutValue(values[(int) (i % NUMBER_OF_KEYS)], 1, 1);
map.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
}
}
map.clear();
@ -71,15 +68,16 @@ public class StringMapAdjustOrPutBenchmark {
System.out.println("TObjectIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
stopWatch = new StopWatch().start();
TObjectIntCustomHashMap<String> iMap = new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
// TObjectIntCustomHashMap<String> iMap = new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
ObjectIntOpenHashMap<String> iMap = new ObjectIntOpenHashMap<String>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
iMap.clear();
} else {
iMap = new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
iMap = new ObjectIntOpenHashMap<String>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
iMap.adjustOrPutValue(values[(int) (i % NUMBER_OF_KEYS)], 1, 1);
iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
}
}
stopWatch.stop();
@ -88,15 +86,15 @@ public class StringMapAdjustOrPutBenchmark {
iMap = null;
stopWatch = new StopWatch().start();
iMap = new TObjectIntCustomHashMap<String>(new IdentityHashingStrategy<String>());
iMap = new ObjectIntOpenHashMap<String>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
iMap.clear();
} else {
iMap = new TObjectIntCustomHashMap<String>(new IdentityHashingStrategy<String>());
iMap = new ObjectIntOpenHashMap<String>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
iMap.adjustOrPutValue(values[(int) (i % NUMBER_OF_KEYS)], 1, 1);
iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
}
}
stopWatch.stop();
@ -106,12 +104,12 @@ public class StringMapAdjustOrPutBenchmark {
// now test with THashMap
stopWatch = new StopWatch().start();
THashMap<String, StringEntry> tMap = new THashMap<String, StringEntry>();
ObjectObjectOpenHashMap<String, StringEntry> tMap = new ObjectObjectOpenHashMap<String, StringEntry>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
tMap.clear();
} else {
tMap = new THashMap<String, StringEntry>();
tMap = new ObjectObjectOpenHashMap<String, StringEntry>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
String key = values[(int) (i % NUMBER_OF_KEYS)];
@ -189,16 +187,16 @@ public class StringMapAdjustOrPutBenchmark {
}
stopWatch = new StopWatch().start();
TIntIntHashMap intMap = new TIntIntHashMap();
IntIntOpenHashMap intMap = new IntIntOpenHashMap();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
intMap.clear();
} else {
intMap = new TIntIntHashMap();
intMap = new IntIntOpenHashMap();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
int key = iValues[(int) (i % NUMBER_OF_KEYS)];
intMap.adjustOrPutValue(key, 1, 1);
intMap.addTo(key, 1);
}
}
stopWatch.stop();
@ -209,12 +207,12 @@ public class StringMapAdjustOrPutBenchmark {
// now test with THashMap
stopWatch = new StopWatch().start();
TIntObjectHashMap<IntEntry> tIntMap = new TIntObjectHashMap<IntEntry>();
IntObjectOpenHashMap<IntEntry> tIntMap = new IntObjectOpenHashMap<IntEntry>();
for (long iter = 0; iter < ITERATIONS; iter++) {
if (REUSE) {
tIntMap.clear();
} else {
tIntMap = new TIntObjectHashMap<IntEntry>();
tIntMap = new IntObjectOpenHashMap<IntEntry>();
}
for (long i = 0; i < PUT_OPERATIONS; i++) {
int key = iValues[(int) (i % NUMBER_OF_KEYS)];

View File

@ -42,6 +42,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.FilterBuilders.hasChildFilter;
import static org.elasticsearch.index.query.FilterBuilders.hasParentFilter;
import static org.elasticsearch.index.query.FilterBuilders.rangeFilter;
import static org.elasticsearch.index.query.QueryBuilders.*;
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
@ -230,7 +231,7 @@ public class ChildSearchBenchmark {
}
System.out.println("--> has_child filter with exponential parent results Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
/*// run parent child constant query
// run parent child constant query
for (int j = 0; j < QUERY_WARMUP; j++) {
SearchResponse searchResponse = client.prepareSearch(indexName)
.setQuery(
@ -326,7 +327,7 @@ public class ChildSearchBenchmark {
// }
totalQueryTime += searchResponse.getTookInMillis();
}
System.out.println("--> top_children, with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");*/
System.out.println("--> top_children, with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();

View File

@ -19,7 +19,7 @@
package org.elasticsearch.cluster.allocation;
import gnu.trove.map.hash.TObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.cluster.ClusterState;
@ -69,7 +69,7 @@ public class AwarenessAllocationTests extends AbstractIntegrationTest {
String node3 = cluster().startNode(ImmutableSettings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_2").build());
long start = System.currentTimeMillis();
TObjectIntHashMap<String> counts;
ObjectIntOpenHashMap<String> counts;
// On slow machines the initial relocation might be delayed
do {
Thread.sleep(100);
@ -81,11 +81,11 @@ public class AwarenessAllocationTests extends AbstractIntegrationTest {
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
//System.out.println(clusterState.routingTable().prettyPrint());
// verify that we have 10 shards on node3
counts = new TObjectIntHashMap<String>();
counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.adjustOrPutValue(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1, 1);
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
@ -112,12 +112,12 @@ public class AwarenessAllocationTests extends AbstractIntegrationTest {
ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
TObjectIntHashMap<String> counts = new TObjectIntHashMap<String>();
ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.adjustOrPutValue(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1, 1);
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
@ -145,12 +145,12 @@ public class AwarenessAllocationTests extends AbstractIntegrationTest {
ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForRelocatingShards(0).execute().actionGet();
assertThat(health.isTimedOut(), equalTo(false));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
TObjectIntHashMap<String> counts = new TObjectIntHashMap<String>();
ObjectIntOpenHashMap<String> counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.adjustOrPutValue(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1, 1);
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}
@ -167,12 +167,12 @@ public class AwarenessAllocationTests extends AbstractIntegrationTest {
assertThat(health.isTimedOut(), equalTo(false));
clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
counts = new TObjectIntHashMap<String>();
counts = new ObjectIntOpenHashMap<String>();
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
counts.adjustOrPutValue(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1, 1);
counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
}
}
}

View File

@ -19,11 +19,8 @@
package org.elasticsearch.index.fielddata;
import gnu.trove.iterator.TLongIterator;
import gnu.trove.set.TDoubleSet;
import gnu.trove.set.TLongSet;
import gnu.trove.set.hash.TDoubleHashSet;
import gnu.trove.set.hash.TLongHashSet;
import com.carrotsearch.hppc.DoubleOpenHashSet;
import com.carrotsearch.hppc.LongOpenHashSet;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongField;
@ -299,17 +296,22 @@ public class LongFieldDataTests extends AbstractNumericFieldDataTests {
public abstract long nextValue(Random r);
}
private void test(List<TLongSet> values) throws Exception {
private void test(List<LongOpenHashSet> values) throws Exception {
StringField id = new StringField("_id", "", Field.Store.NO);
for (int i = 0; i < values.size(); ++i) {
Document doc = new Document();
id.setStringValue("" + i);
doc.add(id);
final TLongSet v = values.get(i);
for (TLongIterator it = v.iterator(); it.hasNext(); ) {
LongField value = new LongField("value", it.next(), Field.Store.NO);
doc.add(value);
final LongOpenHashSet v = values.get(i);
final boolean[] states = v.allocated;
final long[] keys = v.keys;
for (int j = 0; j < states.length; j++) {
if (states[j]) {
LongField value = new LongField("value", keys[j], Field.Store.NO);
doc.add(value);
}
}
writer.addDocument(doc);
}
@ -319,10 +321,10 @@ public class LongFieldDataTests extends AbstractNumericFieldDataTests {
final AtomicNumericFieldData atomicFieldData = indexFieldData.load(refreshReader());
final LongValues data = atomicFieldData.getLongValues();
final DoubleValues doubleData = atomicFieldData.getDoubleValues();
final TLongSet set = new TLongHashSet();
final TDoubleSet doubleSet = new TDoubleHashSet();
final LongOpenHashSet set = new LongOpenHashSet();
final DoubleOpenHashSet doubleSet = new DoubleOpenHashSet();
for (int i = 0; i < values.size(); ++i) {
final TLongSet v = values.get(i);
final LongOpenHashSet v = values.get(i);
assertThat(data.hasValue(i), equalTo(!v.isEmpty()));
assertThat(doubleData.hasValue(i), equalTo(!v.isEmpty()));
@ -338,9 +340,13 @@ public class LongFieldDataTests extends AbstractNumericFieldDataTests {
}
assertThat(set, equalTo(v));
final TDoubleSet doubleV = new TDoubleHashSet();
for (TLongIterator it = v.iterator(); it.hasNext(); ) {
doubleV.add((double) it.next());
final DoubleOpenHashSet doubleV = new DoubleOpenHashSet();
final boolean[] states = v.allocated;
final long[] keys = v.keys;
for (int j = 0; j < states.length; j++) {
if (states[j]) {
doubleV.add((double) keys[j]);
}
}
doubleSet.clear();
for (DoubleValues.Iter iter = doubleData.getIter(i); iter.hasNext(); ) {
@ -353,10 +359,10 @@ public class LongFieldDataTests extends AbstractNumericFieldDataTests {
private void test(Data data) throws Exception {
Random r = getRandom();
final int numDocs = 1000 + r.nextInt(19000);
final List<TLongSet> values = new ArrayList<TLongSet>(numDocs);
final List<LongOpenHashSet> values = new ArrayList<LongOpenHashSet>(numDocs);
for (int i = 0; i < numDocs; ++i) {
final int numValues = data.numValues(r);
final TLongSet vals = new TLongHashSet(numValues);
final LongOpenHashSet vals = new LongOpenHashSet(numValues);
for (int j = 0; j < numValues; ++j) {
vals.add(data.nextValue(r));
}
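
hppc's primitive sets use the same open-addressing layout as its maps, so the test above walks `allocated` and `keys` instead of a TLongIterator. A short standalone sketch of that pattern, assuming hppc 0.5.x:

import com.carrotsearch.hppc.DoubleOpenHashSet;
import com.carrotsearch.hppc.LongOpenHashSet;

public class LongSetIterationSketch {
    public static void main(String[] args) {
        LongOpenHashSet longs = new LongOpenHashSet();
        longs.add(1L);
        longs.add(2L);
        longs.add(3L);

        // Mirror every long into a DoubleOpenHashSet by scanning the backing arrays.
        DoubleOpenHashSet doubles = new DoubleOpenHashSet();
        final boolean[] states = longs.allocated;
        final long[] keys = longs.keys;
        for (int i = 0; i < states.length; i++) {
            if (states[i]) {
                doubles.add((double) keys[i]);
            }
        }
        System.out.println(longs.size() + " " + doubles.size());
    }
}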

View File

@ -19,9 +19,8 @@
package org.elasticsearch.recovery;
import gnu.trove.procedure.TIntProcedure;
import gnu.trove.set.TIntSet;
import gnu.trove.set.hash.TIntHashSet;
import com.carrotsearch.hppc.IntOpenHashSet;
import com.carrotsearch.hppc.procedures.IntProcedure;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
@ -231,19 +230,20 @@ public class RelocationTests extends AbstractIntegrationTest {
for (int hit = 0; hit < indexCounter.get(); hit++) {
hitIds[hit] = hit + 1;
}
TIntSet set = new TIntHashSet(hitIds);
IntOpenHashSet set = IntOpenHashSet.from(hitIds);
for (SearchHit hit : hits.hits()) {
int id = Integer.parseInt(hit.id());
if (!set.remove(id)) {
logger.error("Extra id [{}]", id);
}
}
set.forEach(new TIntProcedure() {
set.forEach(new IntProcedure() {
@Override
public boolean execute(int value) {
public void apply(int value) {
logger.error("Missing id [{}]", value);
return true;
}
});
}
assertThat(hits.totalHits(), equalTo(indexCounter.get()));
@ -390,18 +390,18 @@ public class RelocationTests extends AbstractIntegrationTest {
for (int hit = 0; hit < indexCounter.get(); hit++) {
hitIds[hit] = hit + 1;
}
TIntSet set = new TIntHashSet(hitIds);
IntOpenHashSet set = IntOpenHashSet.from(hitIds);
for (SearchHit hit : hits.hits()) {
int id = Integer.parseInt(hit.id());
if (!set.remove(id)) {
logger.error("Extra id [{}]", id);
}
}
set.forEach(new TIntProcedure() {
set.forEach(new IntProcedure() {
@Override
public boolean execute(int value) {
public void apply(int value) {
logger.error("Missing id [{}]", value);
return true;
}
});
}
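
The last hunk captures the two behavioural differences on the set side: `IntOpenHashSet.from(int...)` replaces the array-taking TIntHashSet constructor, and `forEach` takes an IntProcedure whose `apply` returns void, so there is no Trove-style `return true` to continue iteration; hppc's forEach simply visits every element. A standalone sketch, assuming hppc 0.5.x:

import com.carrotsearch.hppc.IntOpenHashSet;
import com.carrotsearch.hppc.procedures.IntProcedure;

public class IntSetProcedureSketch {
    public static void main(String[] args) {
        IntOpenHashSet expected = IntOpenHashSet.from(1, 2, 3, 4);

        // remove(int) reports whether the value was actually present.
        expected.remove(2);
        expected.remove(4);

        // forEach visits every remaining element; apply returns void.
        expected.forEach(new IntProcedure() {
            @Override
            public void apply(int value) {
                System.out.println("Missing id [" + value + "]");
            }
        });
    }
}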