Internal: remove dependency on hppc:esoteric.

The esoteric classifier contains, in particular, maps and sets that use bytes or doubles
as keys. In the byte case we can simply use ints instead, and in the double case we can
use the doubles' long bits (via Double.doubleToLongBits).
Adrien Grand 2015-05-13 14:13:10 +02:00
parent 491b00c4ec
commit 5c9c4776cd
4 changed files with 11 additions and 20 deletions
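
For reference, a minimal sketch (not part of this commit) of the two substitutions described in the commit message. It assumes only the regular hppc 0.7.x artifact on the classpath; the class and variable names are illustrative:

import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.LongHashSet;

// Illustrative sketch: replacing the esoteric byte- and double-keyed collections
// with the int- and long-keyed ones from the regular hppc artifact.
public class EsotericReplacementSketch {
    public static void main(String[] args) {
        // Byte keys: a byte widens to int implicitly, so IntObjectHashMap can
        // stand in for ByteObjectHashMap without changing callers.
        byte typeId = 1;
        IntObjectHashMap<String> byId = new IntObjectHashMap<>();
        byId.put(typeId, "count");                       // byte key widened to int
        assert "count".equals(byId.get(typeId));

        // Double keys: store the IEEE-754 long bits, so LongHashSet can stand in
        // for DoubleHashSet. The original double is recoverable from its bits.
        LongHashSet doubles = new LongHashSet();
        double value = 42.5;
        doubles.add(Double.doubleToLongBits(value));
        assert doubles.contains(Double.doubleToLongBits(42.5));
        assert Double.longBitsToDouble(Double.doubleToLongBits(value)) == value;
    }
}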


@@ -229,13 +229,6 @@
             <version>0.7.1</version>
         </dependency>
-        <dependency> <!-- ES uses byte* hashes -->
-            <groupId>com.carrotsearch</groupId>
-            <artifactId>hppc</artifactId>
-            <version>0.7.1</version>
-            <classifier>esoteric</classifier>
-        </dependency>
         <dependency>
             <groupId>joda-time</groupId>
             <artifactId>joda-time</artifactId>


@@ -19,9 +19,7 @@
 package org.elasticsearch.index.mapper.core;
-import com.carrotsearch.hppc.DoubleHashSet;
 import com.carrotsearch.hppc.LongArrayList;
-import com.carrotsearch.hppc.LongHashSet;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.NumericTokenStream;


@@ -18,7 +18,7 @@
  */
 package org.elasticsearch.percolator;
-import com.carrotsearch.hppc.ByteObjectHashMap;
+import com.carrotsearch.hppc.IntObjectHashMap;
 import com.google.common.collect.Lists;
 import org.apache.lucene.index.LeafReaderContext;
@@ -109,7 +109,7 @@ public class PercolatorService extends AbstractComponent {
     public final static String TYPE_NAME = ".percolator";
     private final IndicesService indicesService;
-    private final ByteObjectHashMap<PercolatorType> percolatorTypes;
+    private final IntObjectHashMap<PercolatorType> percolatorTypes;
     private final PageCacheRecycler pageCacheRecycler;
     private final BigArrays bigArrays;
     private final ClusterService clusterService;
@@ -153,7 +153,7 @@ public class PercolatorService extends AbstractComponent {
         single = new SingleDocumentPercolatorIndex(cache);
         multi = new MultiDocumentPercolatorIndex(cache);
-        percolatorTypes = new ByteObjectHashMap<>(6);
+        percolatorTypes = new IntObjectHashMap<>(6);
         percolatorTypes.put(countPercolator.id(), countPercolator);
         percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
         percolatorTypes.put(matchPercolator.id(), matchPercolator);
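
A hedged note on the PercolatorService change above: PercolatorType.id() is assumed to return a byte (as the old ByteObjectHashMap implies), and a byte widens to the same int on both put and get, so lookups keep working with IntObjectHashMap. Simplified stand-in types, not the real interface:

import com.carrotsearch.hppc.IntObjectHashMap;

// Simplified stand-ins; the real PercolatorType lives inside PercolatorService.
class ByteKeyWideningSketch {
    interface PercolatorType {
        byte id(); // assumed byte-valued id, as suggested by the old ByteObjectHashMap
    }

    static void register(IntObjectHashMap<PercolatorType> types, PercolatorType type) {
        types.put(type.id(), type); // the byte id widens to int on insertion
    }

    static PercolatorType lookup(IntObjectHashMap<PercolatorType> types, byte id) {
        return types.get(id); // the same widening happens here, so the key matches
    }
}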


@@ -19,9 +19,7 @@
 package org.elasticsearch.index.fielddata;
-import com.carrotsearch.hppc.DoubleHashSet;
 import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.cursors.DoubleCursor;
 import com.carrotsearch.hppc.cursors.LongCursor;
 import org.apache.lucene.document.Document;
@@ -37,7 +35,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
-import static org.hamcrest.Matchers.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThan;
 /**
  * Tests for all integer types (byte, short, int, long).
@@ -348,7 +348,7 @@ public class LongFieldDataTests extends AbstractNumericFieldDataTests {
         final SortedNumericDocValues data = atomicFieldData.getLongValues();
         final SortedNumericDoubleValues doubleData = atomicFieldData.getDoubleValues();
         final LongHashSet set = new LongHashSet();
-        final DoubleHashSet doubleSet = new DoubleHashSet();
+        final LongHashSet doubleSet = new LongHashSet();
         for (int i = 0; i < values.size(); ++i) {
             final LongHashSet v = values.get(i);
@@ -365,17 +365,17 @@ public class LongFieldDataTests extends AbstractNumericFieldDataTests {
             }
             assertThat(set, equalTo(v));
-            final DoubleHashSet doubleV = new DoubleHashSet();
+            final LongHashSet doubleV = new LongHashSet();
             for (LongCursor c : v) {
-                doubleV.add(c.value);
+                doubleV.add(Double.doubleToLongBits(c.value));
             }
             doubleSet.clear();
             doubleData.setDocument(i);
             numValues = doubleData.count();
             double prev = 0;
             for (int j = 0; j < numValues; j++) {
-                double current;
-                doubleSet.add(current = doubleData.valueAt(j));
+                double current = doubleData.valueAt(j);
+                doubleSet.add(Double.doubleToLongBits(current));
                 if (j > 0) {
                     assertThat(prev, lessThan(current));
                 }
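
For the test change above, a small sketch of why encoding doubles as their long bits is enough for the membership checks: the expected and the actual values both go through the same Double.doubleToLongBits conversion, while the ordering assertion still compares the raw double values. The names below are illustrative, not from the codebase:

import com.carrotsearch.hppc.LongHashSet;

// Illustrative only: set membership for doubles via their IEEE-754 bit patterns.
public class DoubleBitsMembershipSketch {
    public static void main(String[] args) {
        LongHashSet expected = new LongHashSet();
        long[] sourceLongs = {1L, 7L, 42L};            // the test's values originate as longs
        for (long l : sourceLongs) {
            expected.add(Double.doubleToLongBits(l));  // long widens to double, then to bits
        }

        double readBack = 7L;                          // value read back through the double view
        assert expected.contains(Double.doubleToLongBits(readBack));

        // Caveats of doubleToLongBits: NaN is canonicalized and 0.0 / -0.0 differ in bits;
        // neither case can arise from values that were originally longs.
    }
}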