Migrated parent/child (p/c) queries from the id cache to field data. Changed p/c queries to use paged data structures (BytesRefHash, BigFloatArray, BigIntArray) instead of hppc maps and sets.
Also removed the id cache. Closes #4930
This commit is contained in:
parent 5429019920
commit 0e780b7e99
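As a rough, non-authoritative illustration of the change (not part of the commit itself), the sketch below accumulates a per-parent score with the paged FloatArray/BigArrays API introduced in this commit, in place of an hppc map keyed by parent id; the class, variable names, and initial capacity are hypothetical.

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.FloatArray;

// Hypothetical sketch: the collector name and sizes are made up, but the
// FloatArray/BigArrays calls match the API added in this commit.
class ParentScoreSketch {
    private FloatArray scores = BigArrays.newFloatArray(1024); // starts as a single page, pages up as it grows

    void collect(long parentOrd, float score) {
        scores = BigArrays.grow(scores, parentOrd + 1); // over-allocates in FLOAT_PAGE_SIZE chunks, content preserved
        scores.increment(parentOrd, score);             // returns the updated value
    }

    float scoreFor(long parentOrd) {
        return parentOrd < scores.size() ? scores.get(parentOrd) : 0f;
    }
}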
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.service.IndexService;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.filter.terms.IndicesTermsFilterCache;
@@ -149,7 +150,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio
}
if (request.idCache()) {
clearedAtLeastOne = true;
service.cache().idCache().clear();
service.fieldData().clearField(ParentFieldMapper.NAME);
}
if (!clearedAtLeastOne) {
if (request.fields() != null && request.fields().length > 0) {
@@ -46,6 +46,7 @@ public class PageCacheRecycler extends AbstractComponent {
private final Recycler<byte[]> bytePage;
private final Recycler<int[]> intPage;
private final Recycler<long[]> longPage;
private final Recycler<float[]> floatPage;
private final Recycler<double[]> doublePage;
private final Recycler<Object[]> objectPage;
@@ -53,6 +54,7 @@ public class PageCacheRecycler extends AbstractComponent {
bytePage.close();
intPage.close();
longPage.close();
floatPage.close();
doublePage.close();
objectPage.close();
}
@@ -102,6 +104,7 @@ public class PageCacheRecycler extends AbstractComponent {
final double bytesWeight = componentSettings.getAsDouble(WEIGHT + ".bytes", 1d);
final double intsWeight = componentSettings.getAsDouble(WEIGHT + ".ints", 1d);
final double longsWeight = componentSettings.getAsDouble(WEIGHT + ".longs", 1d);
final double floatsWeight = componentSettings.getAsDouble(WEIGHT + ".floats", 1d);
final double doublesWeight = componentSettings.getAsDouble(WEIGHT + ".doubles", 1d);
// object pages are less useful to us so we give them a lower weight by default
final double objectsWeight = componentSettings.getAsDouble(WEIGHT + ".objects", 0.1d);
@@ -138,6 +141,16 @@ public class PageCacheRecycler extends AbstractComponent {
// nothing to do
}
});
floatPage = build(type, maxCount(limit, BigArrays.FLOAT_PAGE_SIZE, floatsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<float[]>() {
@Override
public float[] newInstance(int sizing) {
return new float[BigArrays.FLOAT_PAGE_SIZE];
}
@Override
public void recycle(float[] value) {
// nothing to do
}
});
doublePage = build(type, maxCount(limit, BigArrays.DOUBLE_PAGE_SIZE, doublesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new AbstractRecyclerC<double[]>() {
@Override
public double[] newInstance(int sizing) {
@@ -184,6 +197,14 @@ public class PageCacheRecycler extends AbstractComponent {
return v;
}
public Recycler.V<float[]> floatPage(boolean clear) {
final Recycler.V<float[]> v = floatPage.obtain();
if (v.isRecycled() && clear) {
Arrays.fill(v.v(), 0f);
}
return v;
}
public Recycler.V<double[]> doublePage(boolean clear) {
final Recycler.V<double[]> v = doublePage.obtain();
if (v.isRecycled() && clear) {
@@ -124,6 +124,15 @@ abstract class AbstractBigArray extends AbstractArray {
}
}
protected final float[] newFloatPage(int page) {
if (recycler != null) {
final Recycler.V<float[]> v = recycler.floatPage(clearOnResize);
return registerNewPage(v, page, BigArrays.FLOAT_PAGE_SIZE);
} else {
return new float[BigArrays.FLOAT_PAGE_SIZE];
}
}
protected final double[] newDoublePage(int page) {
if (recycler != null) {
final Recycler.V<double[]> v = recycler.doublePage(clearOnResize);
@@ -35,6 +35,7 @@ public enum BigArrays {
public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_BYTE;
public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_INT;
public static final int FLOAT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_FLOAT;
public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_LONG;
public static final int DOUBLE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_DOUBLE;
public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
@@ -236,6 +237,49 @@ public enum BigArrays {
}
private static class FloatArrayWrapper extends AbstractArray implements FloatArray {
private final float[] array;
FloatArrayWrapper(float[] array, PageCacheRecycler recycler, boolean clearOnResize) {
super(recycler, clearOnResize);
this.array = array;
}
@Override
public long size() {
return array.length;
}
@Override
public float get(long index) {
assert indexIsInt(index);
return array[(int) index];
}
@Override
public float set(long index, float value) {
assert indexIsInt(index);
float ret = array[(int) index];
array[(int) index] = value;
return ret;
}
@Override
public float increment(long index, float inc) {
assert indexIsInt(index);
return array[(int) index] += inc;
}
@Override
public void fill(long fromIndex, long toIndex, float value) {
assert indexIsInt(fromIndex);
assert indexIsInt(toIndex);
Arrays.fill(array, (int) fromIndex, (int) toIndex, value);
}
}
private static class ObjectArrayWrapper<T> extends AbstractArray implements ObjectArray<T> {
private final Object[] array;
@@ -419,6 +463,44 @@ public enum BigArrays {
return resize(array, newSize);
}
/** Allocate a new {@link FloatArray} of the given capacity. */
public static FloatArray newFloatArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
if (size <= FLOAT_PAGE_SIZE) {
return new FloatArrayWrapper(new float[(int) size], recycler, clearOnResize);
} else {
return new BigFloatArray(size, recycler, clearOnResize);
}
}
/** Allocate a new {@link FloatArray} of the given capacity. */
public static FloatArray newFloatArray(long size) {
return newFloatArray(size, null, true);
}
/** Resize the array to the exact provided size. */
public static FloatArray resize(FloatArray array, long size) {
if (array instanceof BigFloatArray) {
((BigFloatArray) array).resize(size);
return array;
} else {
AbstractArray arr = (AbstractArray) array;
final FloatArray newArray = newFloatArray(size, arr.recycler, arr.clearOnResize);
for (long i = 0, end = Math.min(size, array.size()); i < end; ++i) {
newArray.set(i, array.get(i));
}
return newArray;
}
}
/** Grow an array to a size that is larger than <code>minSize</code>, preserving content, and potentially reusing part of the provided array. */
public static FloatArray grow(FloatArray array, long minSize) {
if (minSize <= array.size()) {
return array;
}
final long newSize = overSize(minSize, FLOAT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_FLOAT);
return resize(array, newSize);
}
/** Allocate a new {@link ObjectArray} of the given capacity. */
public static <T> ObjectArray<T> newObjectArray(long size, PageCacheRecycler recycler) {
if (size <= OBJECT_PAGE_SIZE) {
@@ -0,0 +1,109 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.util;
import com.google.common.base.Preconditions;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import java.util.Arrays;
import static org.elasticsearch.common.util.BigArrays.FLOAT_PAGE_SIZE;
/**
 * Float array abstraction able to support more than 2B values. This implementation slices data into fixed-sized blocks of
 * configurable length.
 */
final class BigFloatArray extends AbstractBigArray implements FloatArray {
private float[][] pages;
/** Constructor. */
public BigFloatArray(long size, PageCacheRecycler recycler, boolean clearOnResize) {
super(FLOAT_PAGE_SIZE, recycler, clearOnResize);
this.size = size;
pages = new float[numPages(size)][];
for (int i = 0; i < pages.length; ++i) {
pages[i] = newFloatPage(i);
}
}
@Override
public float set(long index, float value) {
final int pageIndex = pageIndex(index);
final int indexInPage = indexInPage(index);
final float[] page = pages[pageIndex];
final float ret = page[indexInPage];
page[indexInPage] = value;
return ret;
}
@Override
public float increment(long index, float inc) {
final int pageIndex = pageIndex(index);
final int indexInPage = indexInPage(index);
return pages[pageIndex][indexInPage] += inc;
}
public float get(long index) {
final int pageIndex = pageIndex(index);
final int indexInPage = indexInPage(index);
return pages[pageIndex][indexInPage];
}
@Override
protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_FLOAT;
}
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
public void resize(long newSize) {
final int numPages = numPages(newSize);
if (numPages > pages.length) {
pages = Arrays.copyOf(pages, ArrayUtil.oversize(numPages, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
}
for (int i = numPages - 1; i >= 0 && pages[i] == null; --i) {
pages[i] = newFloatPage(i);
}
for (int i = numPages; i < pages.length && pages[i] != null; ++i) {
pages[i] = null;
releasePage(i);
}
this.size = newSize;
}
@Override
public void fill(long fromIndex, long toIndex, float value) {
Preconditions.checkArgument(fromIndex <= toIndex);
final int fromPage = pageIndex(fromIndex);
final int toPage = pageIndex(toIndex - 1);
if (fromPage == toPage) {
Arrays.fill(pages[fromPage], indexInPage(fromIndex), indexInPage(toIndex - 1) + 1, value);
} else {
Arrays.fill(pages[fromPage], indexInPage(fromIndex), pages[fromPage].length, value);
for (int i = fromPage + 1; i < toPage; ++i) {
Arrays.fill(pages[i], value);
}
Arrays.fill(pages[toPage], 0, indexInPage(toIndex - 1) + 1, value);
}
}
}
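For orientation (not part of the diff): with PAGE_SIZE_IN_BYTES = 1 << 14 and 4-byte floats, FLOAT_PAGE_SIZE comes out to 4096 values per page, and a logical long index is split into a page number and an offset within that page. The snippet below is a hedged sketch of that mapping; the real pageIndex()/indexInPage() helpers live in AbstractBigArray (not shown in this diff) and may use shifts and masks rather than division.

// Hedged sketch of the paging arithmetic behind BigFloatArray; the constants match
// BigArrays above, the divide/modulo form of the helpers is an assumption.
final class FloatPagingSketch {
    static final int PAGE_SIZE_IN_BYTES = 1 << 14;              // 16384 bytes, as in BigArrays
    static final int FLOAT_PAGE_SIZE = PAGE_SIZE_IN_BYTES / 4;  // 4096 floats per page

    static int pageIndex(long index) {
        return (int) (index / FLOAT_PAGE_SIZE);                 // which float[] page holds the value
    }

    static int indexInPage(long index) {
        return (int) (index % FLOAT_PAGE_SIZE);                 // offset inside that page
    }

    public static void main(String[] args) {
        // index 10000 lands in page 2 at offset 1808, since 10000 = 2 * 4096 + 1808
        System.out.println(pageIndex(10_000L) + " / " + indexInPage(10_000L));
    }
}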
@@ -16,35 +16,32 @@
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.bytes.HashedBytesArray;
package org.elasticsearch.common.util;
/**
 *
 * Abstraction of an array of double values.
 */
public interface IdReaderTypeCache {
public interface FloatArray extends BigArray {
/**
 * @param docId The Lucene docId of the child document to return the parent _uid for.
 * @return The parent _uid for the specified docId (which is a child document)
 * Get an element given its index.
 */
HashedBytesArray parentIdByDoc(int docId);
public abstract float get(long index);
/**
 * @param uid The uid of the document to return the lucene docId for
 * @return The lucene docId for the specified uid
 * Set a value at the given index and return the previous value.
 */
int docById(HashedBytesArray uid);
public abstract float set(long index, float value);
/**
 * @param docId The lucene docId of the document to return _uid for
 * @return The _uid of the specified docId
 * Increment value at the given index by <code>inc</code> and return the value.
 */
HashedBytesArray idByDoc(int docId);
public abstract float increment(long index, float inc);
/**
 * @return The size in bytes for this particular instance
 * Fill slots between <code>fromIndex</code> inclusive to <code>toIndex</code> exclusive with <code>value</code>.
 */
long sizeInBytes();
public abstract void fill(long fromIndex, long toIndex, float value);
}
@@ -32,7 +32,6 @@ import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.docset.DocSetCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.cache.query.parser.QueryParserCache;
import org.elasticsearch.index.settings.IndexSettings;
@@ -43,18 +42,15 @@ public class IndexCache extends AbstractIndexComponent implements CloseableCompo
private final FilterCache filterCache;
private final QueryParserCache queryParserCache;
private final IdCache idCache;
private final DocSetCache docSetCache;
private ClusterService clusterService;
@Inject
public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryParserCache queryParserCache, IdCache idCache,
DocSetCache docSetCache) {
public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryParserCache queryParserCache, DocSetCache docSetCache) {
super(index, indexSettings);
this.filterCache = filterCache;
this.queryParserCache = queryParserCache;
this.idCache = idCache;
this.docSetCache = docSetCache;
}
@@ -74,10 +70,6 @@ public class IndexCache extends AbstractIndexComponent implements CloseableCompo
return this.docSetCache;
}
public IdCache idCache() {
return this.idCache;
}
public QueryParserCache queryParserCache() {
return this.queryParserCache;
}
@@ -85,7 +77,6 @@ public class IndexCache extends AbstractIndexComponent implements CloseableCompo
@Override
public void close() throws ElasticsearchException {
filterCache.close();
idCache.close();
queryParserCache.close();
docSetCache.clear("close");
if (clusterService != null) {
@@ -95,13 +86,11 @@ public class IndexCache extends AbstractIndexComponent implements CloseableCompo
public void clear(IndexReader reader) {
filterCache.clear(reader);
idCache.clear(reader);
docSetCache.clear(reader);
}
public void clear(String reason) {
filterCache.clear(reason);
idCache.clear();
queryParserCache.clear();
docSetCache.clear(reason);
}
@@ -23,7 +23,6 @@ import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.docset.DocSetCacheModule;
import org.elasticsearch.index.cache.filter.FilterCacheModule;
import org.elasticsearch.index.cache.id.IdCacheModule;
import org.elasticsearch.index.cache.query.parser.QueryParserCacheModule;
/**
@@ -40,7 +39,6 @@ public class IndexCacheModule extends AbstractModule {
@Override
protected void configure() {
new FilterCacheModule(settings).configure(binder());
new IdCacheModule(settings).configure(binder());
new QueryParserCacheModule(settings).configure(binder());
new DocSetCacheModule(settings).configure(binder());
@@ -1,46 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.elasticsearch.common.component.CloseableComponent;
import org.elasticsearch.index.IndexComponent;
import org.elasticsearch.index.service.IndexService;
import java.io.IOException;
import java.util.List;
/**
 * This id cache contains only the ids of parent documents, loaded via the uid or parent field.
 * This name IdCache is misleading, parentIdCache would be a better name.
 */
public interface IdCache extends IndexComponent, CloseableComponent {
// we need to "inject" the index service to not create cyclic dep
void setIndexService(IndexService indexService);
void clear();
void clear(Object coreCacheKey);
void refresh(List<AtomicReaderContext> readers) throws IOException;
IdReaderCache reader(AtomicReader reader);
}
@@ -1,47 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Scopes;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
/**
 *
 */
public class IdCacheModule extends AbstractModule {
public static final class IdCacheSettings {
public static final String ID_CACHE_TYPE = "index.cache.id.type";
}
private final Settings settings;
public IdCacheModule(Settings settings) {
this.settings = settings;
}
@Override
protected void configure() {
bind(IdCache.class)
.to(settings.getAsClass(IdCacheSettings.ID_CACHE_TYPE, SimpleIdCache.class, "org.elasticsearch.index.cache.id.", "IdCache"))
.in(Scopes.SINGLETON);
}
}
@@ -30,7 +30,10 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import java.io.IOException;
/**
 * @deprecated Id cache has been removed in favor for {@link org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData}
 * this class now reports field data memory usage for _parent field.
 */
@Deprecated
public class IdCacheStats implements Streamable, ToXContent {
long memorySize;
@@ -1,35 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.bytes.HashedBytesArray;
/**
 *
 */
public interface IdReaderCache {
IdReaderTypeCache type(String type);
HashedBytesArray parentIdByDoc(String type, int docId);
int docById(String type, HashedBytesArray id);
long sizeInBytes();
}
@@ -1,51 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
/**
 */
public class ShardIdCache extends AbstractIndexShardComponent {
final CounterMetric totalMetric = new CounterMetric();
@Inject
public ShardIdCache(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}
public IdCacheStats stats() {
return new IdCacheStats(totalMetric.count());
}
public void onCached(long sizeInBytes) {
totalMetric.inc(sizeInBytes);
}
public void onRemoval(long sizeInBytes) {
totalMetric.dec(sizeInBytes);
}
}
@@ -1,32 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.inject.AbstractModule;
/**
 */
public class ShardIdCacheModule extends AbstractModule {
@Override
protected void configure() {
bind(ShardIdCache.class).asEagerSingleton();
}
}
@@ -1,353 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id.simple;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.apache.lucene.index.*;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.UTF8SortedAsUnicodeComparator;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.cache.id.IdReaderCache;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentTypeListener;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.service.IndexService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.index.shard.service.IndexShard;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
/**
 *
 */
public class SimpleIdCache extends AbstractIndexComponent implements IdCache, SegmentReader.CoreClosedListener, DocumentTypeListener {
private final boolean reuse;
private final ConcurrentMap<Object, SimpleIdReaderCache> idReaders;
private final NavigableSet<HashedBytesArray> parentTypes;
IndexService indexService;
@Inject
public SimpleIdCache(Index index, @IndexSettings Settings indexSettings) {
super(index, indexSettings);
reuse = componentSettings.getAsBoolean("reuse", false);
idReaders = ConcurrentCollections.newConcurrentMap();
parentTypes = new TreeSet<HashedBytesArray>(UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder);
}
@Override
public void setIndexService(IndexService indexService) {
this.indexService = indexService;
indexService.mapperService().addTypeListener(this);
}
@Override
public void close() throws ElasticsearchException {
indexService.mapperService().removeTypeListener(this);
clear();
}
@Override
public void clear() {
// Make a copy of the live id readers...
Map<Object, SimpleIdReaderCache> copy = new HashMap<Object, SimpleIdReaderCache>(idReaders);
for (Map.Entry<Object, SimpleIdReaderCache> entry : copy.entrySet()) {
SimpleIdReaderCache removed = idReaders.remove(entry.getKey());
// ... and only if the id reader still exists in live readers we decrement stats,
// this will prevent double onRemoval calls
if (removed != null) {
onRemoval(removed);
}
}
}
@Override
public void onClose(Object coreCacheKey) {
clear(coreCacheKey);
}
@Override
public void clear(Object coreCacheKey) {
SimpleIdReaderCache removed = idReaders.remove(coreCacheKey);
if (removed != null) onRemoval(removed);
}
@Override
public IdReaderCache reader(AtomicReader reader) {
return idReaders.get(reader.getCoreCacheKey());
}
@SuppressWarnings({"StringEquality"})
@Override
public void refresh(List<AtomicReaderContext> atomicReaderContexts) throws IOException {
// do a quick check for the common case, that all are there
if (refreshNeeded(atomicReaderContexts)) {
synchronized (idReaders) {
if (!refreshNeeded(atomicReaderContexts)) {
return;
}
// do the refresh
Map<Object, Map<String, TypeBuilder>> builders = new HashMap<Object, Map<String, TypeBuilder>>();
Map<Object, AtomicReader> cacheToReader = new HashMap<Object, AtomicReader>();
// first, go over and load all the id->doc map for all types
for (AtomicReaderContext context : atomicReaderContexts) {
AtomicReader reader = context.reader();
if (!refreshNeeded(context)) {
// no need, continue
continue;
}
if (reader instanceof SegmentReader) {
((SegmentReader) reader).addCoreClosedListener(this);
}
Map<String, TypeBuilder> readerBuilder = new HashMap<String, TypeBuilder>();
builders.put(reader.getCoreCacheKey(), readerBuilder);
cacheToReader.put(reader.getCoreCacheKey(), context.reader());
Terms terms = reader.terms(UidFieldMapper.NAME);
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
DocsEnum docsEnum = null;
uid: for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
HashedBytesArray[] typeAndId = Uid.splitUidIntoTypeAndId(term);
// We don't want to load uid of child documents, this allows us to not load uids of child types.
if (!parentTypes.contains(typeAndId[0])) {
do {
HashedBytesArray nextParent = parentTypes.ceiling(typeAndId[0]);
if (nextParent == null) {
break uid;
}
TermsEnum.SeekStatus status = termsEnum.seekCeil(nextParent.toBytesRef());
if (status == TermsEnum.SeekStatus.END) {
break uid;
} else if (status == TermsEnum.SeekStatus.NOT_FOUND) {
term = termsEnum.term();
typeAndId = Uid.splitUidIntoTypeAndId(term);
} else if (status == TermsEnum.SeekStatus.FOUND) {
assert false : "Seek status should never be FOUND, because we seek only the type part";
term = termsEnum.term();
typeAndId = Uid.splitUidIntoTypeAndId(term);
}
} while (!parentTypes.contains(typeAndId[0]));
}
String type = typeAndId[0].toUtf8();
TypeBuilder typeBuilder = readerBuilder.get(type);
if (typeBuilder == null) {
typeBuilder = new TypeBuilder(reader);
readerBuilder.put(type, typeBuilder);
}
HashedBytesArray idAsBytes = checkIfCanReuse(builders, typeAndId[1]);
docsEnum = termsEnum.docs(null, docsEnum, 0);
for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
typeBuilder.idToDoc.put(idAsBytes, docId);
typeBuilder.docToId[docId] = idAsBytes;
}
}
}
}
// now, go and load the docId->parentId map
for (AtomicReaderContext context : atomicReaderContexts) {
AtomicReader reader = context.reader();
if (!refreshNeeded(context)) {
// no need, continue
continue;
}
Map<String, TypeBuilder> readerBuilder = builders.get(reader.getCoreCacheKey());
Terms terms = reader.terms(ParentFieldMapper.NAME);
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
DocsEnum docsEnum = null;
for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
HashedBytesArray[] typeAndId = Uid.splitUidIntoTypeAndId(term);
TypeBuilder typeBuilder = readerBuilder.get(typeAndId[0].toUtf8());
if (typeBuilder == null) {
typeBuilder = new TypeBuilder(reader);
readerBuilder.put(typeAndId[0].toUtf8(), typeBuilder);
}
HashedBytesArray idAsBytes = checkIfCanReuse(builders, typeAndId[1]);
boolean added = false; // optimize for when all the docs are deleted for this id
docsEnum = termsEnum.docs(null, docsEnum, 0);
for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
if (!added) {
typeBuilder.parentIdsValues.add(idAsBytes);
added = true;
}
typeBuilder.parentIdsOrdinals[docId] = typeBuilder.t;
}
if (added) {
typeBuilder.t++;
}
}
}
}
// now, build it back
for (Map.Entry<Object, Map<String, TypeBuilder>> entry : builders.entrySet()) {
Object readerKey = entry.getKey();
MapBuilder<String, SimpleIdReaderTypeCache> types = MapBuilder.newMapBuilder();
for (Map.Entry<String, TypeBuilder> typeBuilderEntry : entry.getValue().entrySet()) {
types.put(typeBuilderEntry.getKey(), new SimpleIdReaderTypeCache(typeBuilderEntry.getKey(),
typeBuilderEntry.getValue().idToDoc,
typeBuilderEntry.getValue().docToId,
typeBuilderEntry.getValue().parentIdsValues.toArray(new HashedBytesArray[typeBuilderEntry.getValue().parentIdsValues.size()]),
typeBuilderEntry.getValue().parentIdsOrdinals));
}
AtomicReader indexReader = cacheToReader.get(readerKey);
SimpleIdReaderCache readerCache = new SimpleIdReaderCache(types.immutableMap(), ShardUtils.extractShardId(indexReader));
idReaders.put(readerKey, readerCache);
onCached(readerCache);
}
}
}
}
void onCached(SimpleIdReaderCache readerCache) {
if (readerCache.shardId != null) {
IndexShard shard = indexService.shard(readerCache.shardId.id());
if (shard != null) {
shard.idCache().onCached(readerCache.sizeInBytes());
}
}
}
void onRemoval(SimpleIdReaderCache readerCache) {
if (readerCache.shardId != null) {
IndexShard shard = indexService.shard(readerCache.shardId.id());
if (shard != null) {
shard.idCache().onRemoval(readerCache.sizeInBytes());
}
}
}
private HashedBytesArray checkIfCanReuse(Map<Object, Map<String, TypeBuilder>> builders, HashedBytesArray idAsBytes) {
HashedBytesArray finalIdAsBytes;
// go over and see if we can reuse this id
if (reuse) {
for (SimpleIdReaderCache idReaderCache : idReaders.values()) {
finalIdAsBytes = idReaderCache.canReuse(idAsBytes);
if (finalIdAsBytes != null) {
return finalIdAsBytes;
}
}
}
// even if we don't enable reuse, at least check on the current "live" builders that we are handling
for (Map<String, TypeBuilder> map : builders.values()) {
for (TypeBuilder typeBuilder : map.values()) {
finalIdAsBytes = typeBuilder.canReuse(idAsBytes);
if (finalIdAsBytes != null) {
return finalIdAsBytes;
}
}
}
return idAsBytes;
}
private boolean refreshNeeded(List<AtomicReaderContext> atomicReaderContexts) {
for (AtomicReaderContext atomicReaderContext : atomicReaderContexts) {
if (refreshNeeded(atomicReaderContext)) {
return true;
}
}
return false;
}
private boolean refreshNeeded(AtomicReaderContext atomicReaderContext) {
return !idReaders.containsKey(atomicReaderContext.reader().getCoreCacheKey());
}
@Override
public void beforeCreate(DocumentMapper mapper) {
synchronized (idReaders) {
ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();
if (parentFieldMapper.active()) {
// A _parent field can never be added to an existing mapping, so a _parent field either exists on
// a new created or doesn't exists. This is why we can update the known parent types via DocumentTypeListener
if (parentTypes.add(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), new BytesRef())))) {
clear();
}
}
}
}
@Override
public void afterRemove(DocumentMapper mapper) {
synchronized (idReaders) {
ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();
if (parentFieldMapper.active()) {
parentTypes.remove(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), new BytesRef())));
}
}
}
static class TypeBuilder {
final ObjectIntOpenHashMap<HashedBytesArray> idToDoc = new ObjectIntOpenHashMap<HashedBytesArray>();
final HashedBytesArray[] docToId;
final ArrayList<HashedBytesArray> parentIdsValues = new ArrayList<HashedBytesArray>();
final int[] parentIdsOrdinals;
int t = 1; // current term number (0 indicated null value)
TypeBuilder(IndexReader reader) {
parentIdsOrdinals = new int[reader.maxDoc()];
// the first one indicates null value
parentIdsValues.add(null);
docToId = new HashedBytesArray[reader.maxDoc()];
}
/**
 * Returns an already stored instance if exists, if not, returns null;
 */
public HashedBytesArray canReuse(HashedBytesArray id) {
if (idToDoc.containsKey(id)) {
// we can use #lkey() since this is called from a synchronized block
return idToDoc.lkey();
} else {
return id;
}
}
}
}
@@ -1,86 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id.simple;
import com.google.common.collect.ImmutableMap;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.index.cache.id.IdReaderCache;
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
import org.elasticsearch.index.shard.ShardId;
/**
 *
 */
public class SimpleIdReaderCache implements IdReaderCache {
private final ImmutableMap<String, SimpleIdReaderTypeCache> types;
@Nullable
public final ShardId shardId;
public SimpleIdReaderCache(ImmutableMap<String, SimpleIdReaderTypeCache> types, @Nullable ShardId shardId) {
this.types = types;
this.shardId = shardId;
}
@Override
public IdReaderTypeCache type(String type) {
return types.get(type);
}
@Override
public HashedBytesArray parentIdByDoc(String type, int docId) {
SimpleIdReaderTypeCache typeCache = types.get(type);
if (typeCache != null) {
return typeCache.parentIdByDoc(docId);
}
return null;
}
@Override
public int docById(String type, HashedBytesArray id) {
SimpleIdReaderTypeCache typeCache = types.get(type);
if (typeCache != null) {
return typeCache.docById(id);
}
return -1;
}
public long sizeInBytes() {
long sizeInBytes = 0;
for (SimpleIdReaderTypeCache readerTypeCache : types.values()) {
sizeInBytes += readerTypeCache.sizeInBytes();
}
return sizeInBytes;
}
/**
 * Returns an already stored instance if exists, if not, returns null;
 */
public HashedBytesArray canReuse(HashedBytesArray id) {
for (SimpleIdReaderTypeCache typeCache : types.values()) {
HashedBytesArray wrap = typeCache.canReuse(id);
if (wrap != null) {
return wrap;
}
}
return null;
}
}
@@ -1,120 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.cache.id.simple;
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
/**
 *
 */
public class SimpleIdReaderTypeCache implements IdReaderTypeCache {
private final String type;
private final ObjectIntOpenHashMap<HashedBytesArray> idToDoc;
private final HashedBytesArray[] docIdToId;
private final HashedBytesArray[] parentIdsValues;
private final int[] parentIdsOrdinals;
private long sizeInBytes = -1;
public SimpleIdReaderTypeCache(String type, ObjectIntOpenHashMap<HashedBytesArray> idToDoc, HashedBytesArray[] docIdToId,
HashedBytesArray[] parentIdsValues, int[] parentIdsOrdinals) {
this.type = type;
this.idToDoc = idToDoc;
this.docIdToId = docIdToId;
this.parentIdsValues = parentIdsValues;
this.parentIdsOrdinals = parentIdsOrdinals;
}
public String type() {
return this.type;
}
public HashedBytesArray parentIdByDoc(int docId) {
return parentIdsValues[parentIdsOrdinals[docId]];
}
public int docById(HashedBytesArray uid) {
return idToDoc.getOrDefault(uid, -1);
}
public HashedBytesArray idByDoc(int docId) {
return docIdToId[docId];
}
public long sizeInBytes() {
if (sizeInBytes == -1) {
sizeInBytes = computeSizeInBytes();
}
return sizeInBytes;
}
/**
 * Returns an already stored instance if exists, if not, returns null;
 */
public HashedBytesArray canReuse(HashedBytesArray id) {
if (idToDoc.containsKey(id)) {
// we can use #lkey() since this is called from a synchronized block
return idToDoc.lkey();
} else {
return id;
}
}
long computeSizeInBytes() {
long sizeInBytes = 0;
// Ignore type field
// sizeInBytes += ((type.length() * RamUsage.NUM_BYTES_CHAR) + (3 * RamUsage.NUM_BYTES_INT)) + RamUsage.NUM_BYTES_OBJECT_HEADER;
sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (idToDoc.values.length * RamUsageEstimator.NUM_BYTES_INT);
sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (idToDoc.allocated.length);
final boolean[] states = idToDoc.allocated;
final Object[] keys = idToDoc.keys;
for (int i = 0; i < states.length; i++) {
if (states[i]) {
HashedBytesArray bytesArray = (HashedBytesArray) keys[i];
if (bytesArray != null) {
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
} else {
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
}
}
}
// The docIdToId array contains references to idToDoc for this segment or other segments, so we can use OBJECT_REF
sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + (RamUsageEstimator.NUM_BYTES_OBJECT_REF * docIdToId.length);
for (HashedBytesArray bytesArray : parentIdsValues) {
if (bytesArray == null) {
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
} else {
sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
}
}
sizeInBytes += RamUsageEstimator.sizeOf(parentIdsOrdinals);
return sizeInBytes;
}
}
@@ -31,6 +31,7 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexComponent;
import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
@@ -172,7 +173,7 @@ public interface IndexFieldData<FD extends AtomicFieldData> extends IndexCompone
interface Builder {
IndexFieldData build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
CircuitBreakerService breakerService);
CircuitBreakerService breakerService, MapperService mapperService);
}
public interface WithOrdinals<FD extends AtomicFieldData.WithOrdinals> extends IndexFieldData<FD> {
@@ -31,8 +31,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.fielddata.plain.*;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.service.IndexService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
@@ -67,6 +69,7 @@ public class IndexFieldDataService extends AbstractIndexComponent {
.put("int", new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.INT))
.put("long", new PackedArrayIndexFieldData.Builder().setNumericType(IndexNumericFieldData.NumericType.LONG))
.put("geo_point", new GeoPointDoubleArrayIndexFieldData.Builder())
.put(ParentFieldMapper.NAME, new ParentChildIndexFieldData.Builder())
.immutableMap();
docValuesBuildersByType = MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
@@ -235,7 +238,7 @@ public class IndexFieldDataService extends AbstractIndexComponent {
fieldDataCaches.put(fieldNames.indexName(), cache);
}
fieldData = builder.build(index, indexSettings, mapper, cache, circuitBreakerService);
fieldData = builder.build(index, indexSettings, mapper, cache, circuitBreakerService, indexService.mapperService());
loadedFieldData.put(fieldNames.indexName(), fieldData);
}
}
@@ -26,9 +26,8 @@ import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.fielddata.plain.PackedArrayAtomicFieldData;
import org.elasticsearch.index.fielddata.plain.PagedBytesAtomicFieldData;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
@@ -66,7 +65,14 @@ public class ShardFieldData extends AbstractIndexShardComponent implements Index
}
}
}
return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals);
// Because we report _parent field used memory separately via id cache, we need to subtract it from the
// field data total memory used. This code should be removed for >= 2.0
long memorySize = totalMetric.count();
if (perFieldTotals.containsKey(ParentFieldMapper.NAME)) {
memorySize -= perFieldTotals.get(ParentFieldMapper.NAME).count();
}
return new FieldDataStats(memorySize, evictionsMetric.count(), fieldTotals);
}
@Override
@@ -27,6 +27,7 @@ import org.elasticsearch.index.fielddata.*;
import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMapper.Names;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
@@ -39,7 +40,7 @@ public final class DisabledIndexFieldData extends AbstractIndexFieldData<AtomicF
public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<AtomicFieldData<?>> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) {
// Ignore Circuit Breaker
return new DisabledIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache);
}
@@ -29,6 +29,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMapper.Names;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.IdFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
@@ -79,7 +80,7 @@ public abstract class DocValuesIndexFieldData {
@Override
public IndexFieldData<?> build(Index index, Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
CircuitBreakerService breakerService) {
CircuitBreakerService breakerService, MapperService mapperService) {
// Ignore Circuit Breaker
final FieldMapper.Names fieldNames = mapper.names();
final Settings fdSettings = mapper.fieldDataType().getSettings();
@@ -34,6 +34,7 @@ import org.elasticsearch.index.fielddata.ordinals.Ordinals;
import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
@@ -47,7 +48,7 @@ public class DoubleArrayIndexFieldData extends AbstractIndexFieldData<DoubleArra
@Override
public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
CircuitBreakerService breakerService) {
CircuitBreakerService breakerService, MapperService mapperService) {
return new DoubleArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
}
}
@ -33,6 +33,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache;
|
|||
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
||||
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
||||
|
@ -46,7 +47,7 @@ public class FSTBytesIndexFieldData extends AbstractBytesIndexFieldData<FSTBytes
|
|||
|
||||
@Override
|
||||
public IndexFieldData<FSTBytesAtomicFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
|
||||
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
|
||||
IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) {
|
||||
return new FSTBytesIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
|
||||
}
|
||||
}
|
||||
@ -33,6 +33,7 @@ import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
|||
import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
|
||||
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
||||
|
@ -46,7 +47,7 @@ public class FloatArrayIndexFieldData extends AbstractIndexFieldData<FloatArrayA
|
|||
|
||||
@Override
|
||||
public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
|
||||
CircuitBreakerService breakerService) {
|
||||
CircuitBreakerService breakerService, MapperService mapperService) {
|
||||
return new FloatArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
|
||||
}
|
||||
}
|
||||
@ -29,6 +29,7 @@ import org.elasticsearch.index.fielddata.*;
|
|||
import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.FieldMapper.Names;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -67,7 +68,7 @@ public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData impl
|
|||
|
||||
@Override
|
||||
public IndexFieldData<?> build(Index index, Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
|
||||
CircuitBreakerService breakerService) {
|
||||
CircuitBreakerService breakerService, MapperService mapperService) {
|
||||
// Ignore breaker
|
||||
final FieldMapper.Names fieldNames = mapper.names();
|
||||
return new GeoPointBinaryDVIndexFieldData(index, fieldNames);
|
||||
@ -35,6 +35,7 @@ import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
|||
import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
|
||||
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
|
||||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
@ -51,7 +52,7 @@ public class GeoPointCompressedIndexFieldData extends AbstractGeoPointIndexField
|
|||
|
||||
@Override
|
||||
public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
|
||||
CircuitBreakerService breakerService) {
|
||||
CircuitBreakerService breakerService, MapperService mapperService) {
|
||||
FieldDataType type = mapper.fieldDataType();
|
||||
final String precisionAsString = type.getSettings().get(PRECISION_KEY);
|
||||
final Distance precision;
|
||||
@ -31,6 +31,7 @@ import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
|||
import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
|
||||
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
||||
|
@ -44,7 +45,7 @@ public class GeoPointDoubleArrayIndexFieldData extends AbstractGeoPointIndexFiel
|
|||
|
||||
@Override
|
||||
public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper, IndexFieldDataCache cache,
|
||||
CircuitBreakerService breakerService) {
|
||||
CircuitBreakerService breakerService, MapperService mapperService) {
|
||||
return new GeoPointDoubleArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
|
||||
}
|
||||
}
|
||||
@ -42,6 +42,7 @@ import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
|||
import org.elasticsearch.index.fielddata.ordinals.Ordinals.Docs;
|
||||
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
||||
|
@ -64,7 +65,7 @@ public class PackedArrayIndexFieldData extends AbstractIndexFieldData<AtomicNume
|
|||
|
||||
@Override
|
||||
public IndexFieldData<AtomicNumericFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
|
||||
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
|
||||
IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) {
|
||||
return new PackedArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, numericType, breakerService);
|
||||
}
|
||||
}
|
||||
@ -31,6 +31,7 @@ import org.elasticsearch.index.fielddata.*;
|
|||
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
||||
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
||||
|
@ -46,7 +47,7 @@ public class PagedBytesIndexFieldData extends AbstractBytesIndexFieldData<PagedB
|
|||
|
||||
@Override
|
||||
public IndexFieldData<PagedBytesAtomicFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
|
||||
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
|
||||
IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) {
|
||||
return new PagedBytesIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, breakerService);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,148 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.fielddata.plain;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.index.fielddata.AtomicFieldData;
|
||||
import org.elasticsearch.index.fielddata.BytesValues;
|
||||
import org.elasticsearch.index.fielddata.ScriptDocValues;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class ParentChildAtomicFieldData implements AtomicFieldData {
|
||||
|
||||
private final ImmutableOpenMap<String, PagedBytesAtomicFieldData> typeToIds;
|
||||
private final long numberUniqueValues;
|
||||
private final long memorySizeInBytes;
|
||||
private final int numDocs;
|
||||
|
||||
public ParentChildAtomicFieldData(ImmutableOpenMap<String, PagedBytesAtomicFieldData> typeToIds) {
|
||||
this.typeToIds = typeToIds;
|
||||
long numValues = 0;
|
||||
for (ObjectCursor<PagedBytesAtomicFieldData> cursor : typeToIds.values()) {
|
||||
numValues += cursor.value.getNumberUniqueValues();
|
||||
}
|
||||
this.numberUniqueValues = numValues;
|
||||
long size = 0;
|
||||
for (ObjectCursor<PagedBytesAtomicFieldData> cursor : typeToIds.values()) {
|
||||
size += cursor.value.getMemorySizeInBytes();
|
||||
}
|
||||
this.memorySizeInBytes = size;
|
||||
this.numDocs = typeToIds.isEmpty() ? 0 : typeToIds.values().toArray(PagedBytesAtomicFieldData.class)[0].getNumDocs();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isMultiValued() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isValuesOrdered() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getNumDocs() {
|
||||
return numDocs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getNumberUniqueValues() {
|
||||
return numberUniqueValues;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getMemorySizeInBytes() {
|
||||
return memorySizeInBytes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesValues getBytesValues(boolean needsHashes) {
|
||||
final BytesValues[] bytesValues = new BytesValues[typeToIds.size()];
|
||||
int index = 0;
|
||||
for (ObjectCursor<PagedBytesAtomicFieldData> cursor : typeToIds.values()) {
|
||||
bytesValues[index++] = cursor.value.getBytesValues(needsHashes);
|
||||
}
|
||||
return new BytesValues(true) {
|
||||
|
||||
private final BytesRef[] terms = new BytesRef[2];
|
||||
private int index;
|
||||
|
||||
@Override
|
||||
public int setDocument(int docId) {
|
||||
index = 0;
|
||||
int counter = 0;
|
||||
for (final BytesValues values : bytesValues) {
|
||||
int numValues = values.setDocument(docId);
|
||||
assert numValues <= 1 : "Per doc/type combination only a single value is allowed";
|
||||
if (numValues == 1) {
|
||||
values.nextValue();
|
||||
terms[counter++] = values.copyShared();
|
||||
}
|
||||
}
|
||||
assert counter <= 2 : "A single doc can potentially be both parent and child, so the maximum allowed values is 2";
|
||||
if (counter > 1) {
|
||||
int cmp = terms[0].compareTo(terms[1]);
|
||||
if (cmp > 0) {
|
||||
BytesRef temp = terms[0];
|
||||
terms[0] = terms[1];
|
||||
terms[1] = temp;
|
||||
} else if (cmp == 0) {
|
||||
// If the id is the same between types then only emit it once. For example: a doc has parent#1 in the _uid field and grand_parent#1 in the _parent field.
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return counter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef nextValue() {
|
||||
BytesRef current = terms[index++];
|
||||
scratch.bytes = current.bytes;
|
||||
scratch.offset = current.offset;
|
||||
scratch.length = current.length;
|
||||
return scratch;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
public BytesValues.WithOrdinals getBytesValues(String type) {
|
||||
WithOrdinals atomicFieldData = typeToIds.get(type);
|
||||
if (atomicFieldData != null) {
|
||||
return atomicFieldData.getBytesValues(true);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ScriptDocValues getScriptValues() {
|
||||
return new ScriptDocValues.Strings(getBytesValues(false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
for (ObjectCursor<PagedBytesAtomicFieldData> cursor : typeToIds.values()) {
|
||||
cursor.value.close();
|
||||
}
|
||||
}
|
||||
}
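For illustration only (not part of the commit): the per-document merge performed by setDocument/nextValue above keeps at most one id per type, at most two in total, ordered and de-duplicated. A minimal JDK-only sketch of that merge under those assumptions; the class and method names below are hypothetical stand-ins, not Elasticsearch or Lucene APIs.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical stand-in for the per-doc merge: each per-type view contributes at most one id
// per document, values stay ordered, and a duplicate id is emitted only once.
final class PerTypeIdMergeSketch {
    static List<String> idsForDoc(List<String> idPerType) {
        List<String> out = new ArrayList<String>();
        for (String id : idPerType) {
            if (id != null) {            // at most one value per doc/type combination
                out.add(id);
            }
        }
        Collections.sort(out);           // values must come back in order
        if (out.size() == 2 && out.get(0).equals(out.get(1))) {
            out.remove(1);               // same id seen as both parent and child: emit it once
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(idsForDoc(java.util.Arrays.asList("1", "1")));   // [1]
        System.out.println(idsForDoc(java.util.Arrays.asList(null, "2")));  // [2]
    }
}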
@@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.fielddata.plain;
|
||||
|
||||
import org.apache.lucene.index.FilteredTermsEnum;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.NavigableSet;
|
||||
|
||||
/**
|
||||
* Only emits terms that exist in the parentTypes set.
|
||||
*
|
||||
* @elasticsearch.internal
|
||||
*/
|
||||
final class ParentChildFilteredTermsEnum extends FilteredTermsEnum {
|
||||
|
||||
private final NavigableSet<BytesRef> parentTypes;
|
||||
|
||||
private BytesRef seekTerm;
|
||||
private String type;
|
||||
private BytesRef id;
|
||||
|
||||
ParentChildFilteredTermsEnum(TermsEnum tenum, NavigableSet<BytesRef> parentTypes) {
|
||||
super(tenum, true);
|
||||
this.parentTypes = parentTypes;
|
||||
this.seekTerm = parentTypes.isEmpty() ? null : parentTypes.first();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BytesRef nextSeekTerm(BytesRef currentTerm) throws IOException {
|
||||
BytesRef temp = seekTerm;
|
||||
seekTerm = null;
|
||||
return temp;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AcceptStatus accept(BytesRef term) throws IOException {
|
||||
if (parentTypes.isEmpty()) {
|
||||
return AcceptStatus.END;
|
||||
}
|
||||
|
||||
BytesRef[] typeAndId = Uid.splitUidIntoTypeAndId(term);
|
||||
if (parentTypes.contains(typeAndId[0])) {
|
||||
type = typeAndId[0].utf8ToString();
|
||||
id = typeAndId[1];
|
||||
return AcceptStatus.YES;
|
||||
} else {
|
||||
BytesRef nextType = parentTypes.ceiling(typeAndId[0]);
|
||||
if (nextType == null) {
|
||||
return AcceptStatus.END;
|
||||
}
|
||||
seekTerm = nextType;
|
||||
return AcceptStatus.NO_AND_SEEK;
|
||||
}
|
||||
}
|
||||
|
||||
public String type() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public BytesRef id() {
|
||||
return id;
|
||||
}
|
||||
}
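For illustration only (not part of the commit): the accept/seek logic above walks _uid terms of the form type#id, keeps terms whose type is a known parent type, and seeks past whole non-parent type ranges otherwise. A minimal JDK-only sketch under those assumptions, with hypothetical names standing in for the Lucene enum machinery.

import java.util.NavigableSet;
import java.util.TreeSet;

// Hypothetical stand-in for the accept()/nextSeekTerm() logic over uid terms of the form "type#id".
final class ParentTypeFilterSketch {
    enum Status { YES, NO_AND_SEEK, END }

    static Status accept(String uid, NavigableSet<String> parentTypes, String[] nextSeekTerm) {
        if (parentTypes.isEmpty()) {
            return Status.END;
        }
        String type = uid.substring(0, uid.indexOf('#'));
        if (parentTypes.contains(type)) {
            return Status.YES;                        // keep this term; the id is the part after '#'
        }
        String nextType = parentTypes.ceiling(type);  // jump over the whole non-parent type range
        if (nextType == null) {
            return Status.END;
        }
        nextSeekTerm[0] = nextType;
        return Status.NO_AND_SEEK;
    }

    public static void main(String[] args) {
        NavigableSet<String> parents = new TreeSet<String>(java.util.Arrays.asList("blog"));
        String[] seek = new String[1];
        System.out.println(accept("blog#1", parents, seek));     // YES
        System.out.println(accept("comment#7", parents, seek));  // END: no parent type sorts after "comment"
    }
}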
@@ -0,0 +1,197 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.fielddata.plain;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.apache.lucene.index.AtomicReader;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.index.DocsEnum;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.PagedBytes;
|
||||
import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.fielddata.AbstractIndexFieldData;
|
||||
import org.elasticsearch.index.fielddata.FieldDataType;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldData;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
|
||||
import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
|
||||
import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
|
||||
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
||||
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.DocumentTypeListener;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
import org.elasticsearch.index.settings.IndexSettings;
|
||||
import org.elasticsearch.indices.fielddata.breaker.CircuitBreakerService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.NavigableSet;
|
||||
import java.util.TreeSet;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class ParentChildIndexFieldData extends AbstractIndexFieldData<ParentChildAtomicFieldData> implements DocumentTypeListener {
|
||||
|
||||
private final NavigableSet<BytesRef> parentTypes;
|
||||
private final CircuitBreakerService breakerService;
|
||||
|
||||
// If a child type (a type with a _parent field) is added or removed, we want to make sure such modifications don't happen
// while loading.
|
||||
private final Object lock = new Object();
|
||||
|
||||
public ParentChildIndexFieldData(Index index, @IndexSettings Settings indexSettings, FieldMapper.Names fieldNames, FieldDataType fieldDataType, IndexFieldDataCache cache, MapperService mapperService, CircuitBreakerService breakerService) {
|
||||
super(index, indexSettings, fieldNames, fieldDataType, cache);
|
||||
parentTypes = new TreeSet<BytesRef>(BytesRef.getUTF8SortedAsUnicodeComparator());
|
||||
this.breakerService = breakerService;
|
||||
for (DocumentMapper documentMapper : mapperService) {
|
||||
beforeCreate(documentMapper);
|
||||
}
|
||||
mapperService.addTypeListener(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean valuesOrdered() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, SortMode sortMode) {
|
||||
return new BytesRefFieldComparatorSource(this, missingValue, sortMode);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ParentChildAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
|
||||
AtomicReader reader = context.reader();
|
||||
// TODO: Implement a custom estimator for p/c field data
|
||||
NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
|
||||
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
|
||||
"acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO
|
||||
);
|
||||
|
||||
synchronized (lock) {
|
||||
boolean success = false;
|
||||
ParentChildAtomicFieldData data = null;
|
||||
ParentChildFilteredTermsEnum termsEnum = new ParentChildFilteredTermsEnum(
|
||||
new ParentChildIntersectTermsEnum(reader, UidFieldMapper.NAME, ParentFieldMapper.NAME),
|
||||
parentTypes
|
||||
);
|
||||
ObjectObjectOpenHashMap<String, TypeBuilder> typeBuilders = ObjectObjectOpenHashMap.newInstance();
|
||||
try {
|
||||
DocsEnum docsEnum = null;
|
||||
for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
|
||||
String type = termsEnum.type();
|
||||
TypeBuilder typeBuilder = typeBuilders.get(type);
|
||||
if (typeBuilder == null) {
|
||||
typeBuilders.put(type, typeBuilder = new TypeBuilder(acceptableTransientOverheadRatio, reader));
|
||||
}
|
||||
|
||||
BytesRef id = termsEnum.id();
|
||||
final long termOrd = typeBuilder.builder.nextOrdinal();
|
||||
assert termOrd == typeBuilder.termOrdToBytesOffset.size();
|
||||
typeBuilder.termOrdToBytesOffset.add(typeBuilder.bytes.copyUsingLengthPrefix(id));
|
||||
docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
|
||||
for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
|
||||
typeBuilder.builder.addDoc(docId);
|
||||
}
|
||||
}
|
||||
|
||||
ImmutableOpenMap.Builder<String, PagedBytesAtomicFieldData> typeToAtomicFieldData = ImmutableOpenMap.builder(typeBuilders.size());
|
||||
for (ObjectObjectCursor<String, TypeBuilder> cursor : typeBuilders) {
|
||||
final long sizePointer = cursor.value.bytes.getPointer();
|
||||
PagedBytes.Reader bytesReader = cursor.value.bytes.freeze(true);
|
||||
final Ordinals ordinals = cursor.value.builder.build(fieldDataType.getSettings());
|
||||
|
||||
typeToAtomicFieldData.put(
|
||||
cursor.key,
|
||||
new PagedBytesAtomicFieldData(bytesReader, sizePointer, cursor.value.termOrdToBytesOffset, ordinals)
|
||||
);
|
||||
}
|
||||
data = new ParentChildAtomicFieldData(typeToAtomicFieldData.build());
|
||||
success = true;
|
||||
return data;
|
||||
} finally {
|
||||
for (ObjectObjectCursor<String, TypeBuilder> cursor : typeBuilders) {
|
||||
cursor.value.builder.close();
|
||||
if (success) {
|
||||
estimator.afterLoad(null, data.getMemorySizeInBytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beforeCreate(DocumentMapper mapper) {
|
||||
synchronized (lock) {
|
||||
ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();
|
||||
if (parentFieldMapper.active()) {
|
||||
// A _parent field can never be added to an existing mapping, so a _parent field either exists on
// a newly created type or doesn't exist at all. This is why we can keep the known parent types up to date via the DocumentTypeListener.
|
||||
if (parentTypes.add(new BytesRef(parentFieldMapper.type()))) {
|
||||
clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterRemove(DocumentMapper mapper) {
|
||||
synchronized (lock) {
|
||||
ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();
|
||||
if (parentFieldMapper.active()) {
|
||||
parentTypes.remove(new BytesRef(parentFieldMapper.type()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class TypeBuilder {
|
||||
|
||||
final PagedBytes bytes;
|
||||
final MonotonicAppendingLongBuffer termOrdToBytesOffset;
|
||||
final OrdinalsBuilder builder;
|
||||
|
||||
TypeBuilder(float acceptableTransientOverheadRatio, AtomicReader reader) throws IOException {
|
||||
bytes = new PagedBytes(15);
|
||||
termOrdToBytesOffset = new MonotonicAppendingLongBuffer();
|
||||
termOrdToBytesOffset.add(0); // first ord is reserved for missing values
|
||||
// 0 is reserved for "unset"
|
||||
bytes.copyUsingLengthPrefix(new BytesRef());
|
||||
builder = new OrdinalsBuilder(-1, reader.maxDoc(), acceptableTransientOverheadRatio);
|
||||
}
|
||||
}
|
||||
|
||||
public static class Builder implements IndexFieldData.Builder {
|
||||
|
||||
@Override
|
||||
public IndexFieldData<?> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
|
||||
IndexFieldDataCache cache, CircuitBreakerService breakerService,
|
||||
MapperService mapperService) {
|
||||
return new ParentChildIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, mapperService, breakerService);
|
||||
}
|
||||
}
|
||||
|
||||
}
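For illustration only (not part of the commit): the DocumentTypeListener callbacks above maintain the set of parent types under a lock and invalidate already loaded data when a new parent type appears. A minimal JDK-only sketch of that bookkeeping, with hypothetical names rather than the actual Elasticsearch types.

import java.util.NavigableSet;
import java.util.TreeSet;

// Hypothetical stand-in for the listener bookkeeping: parent types are tracked under a lock and
// previously loaded data is marked stale when a new parent type shows up.
final class ParentTypesSketch {
    private final NavigableSet<String> parentTypes = new TreeSet<String>();
    private final Object lock = new Object();
    private volatile boolean stale = false;

    void beforeCreate(String parentTypeOrNull) {
        synchronized (lock) {
            if (parentTypeOrNull != null && parentTypes.add(parentTypeOrNull)) {
                stale = true;  // mimics clear(): loaded field data no longer covers every parent type
            }
        }
    }

    void afterRemove(String parentTypeOrNull) {
        synchronized (lock) {
            if (parentTypeOrNull != null) {
                parentTypes.remove(parentTypeOrNull);
            }
        }
    }

    boolean needsReload() {
        return stale;
    }
}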
@@ -0,0 +1,327 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.fielddata.plain;
|
||||
|
||||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Intersects the terms and unions the doc ids for terms enum of multiple fields.
|
||||
*
|
||||
* @elasticsearch.internal
|
||||
*/
|
||||
final class ParentChildIntersectTermsEnum extends TermsEnum {
|
||||
|
||||
private final Comparator<BytesRef> comparator;
|
||||
private final List<TermsEnumState> states;
|
||||
private final IntArrayList stateSlots;
|
||||
|
||||
private BytesRef current;
|
||||
|
||||
ParentChildIntersectTermsEnum(AtomicReader atomicReader, String... fields) throws IOException {
|
||||
List<TermsEnum> fieldEnums = new ArrayList<TermsEnum>();
|
||||
for (String field : fields) {
|
||||
Terms terms = atomicReader.terms(field);
|
||||
if (terms != null) {
|
||||
fieldEnums.add(terms.iterator(null));
|
||||
}
|
||||
}
|
||||
this.comparator = fieldEnums.get(0).getComparator();
|
||||
states = new ArrayList<TermsEnumState>(fieldEnums.size());
|
||||
for (TermsEnum tEnum : fieldEnums) {
|
||||
states.add(new TermsEnumState(tEnum));
|
||||
}
|
||||
stateSlots = new IntArrayList(states.size());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Comparator<BytesRef> getComparator() {
|
||||
return comparator;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef term() throws IOException {
|
||||
return current;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
|
||||
int size = stateSlots.size();
|
||||
assert size > 0;
|
||||
if (size == 1) {
|
||||
// Can't use 'reuse' since we don't know which previous TermsEnum it belonged to.
|
||||
return states.get(stateSlots.get(0)).termsEnum.docs(liveDocs, null, flags);
|
||||
} else {
|
||||
List<DocsEnum> docsEnums = new ArrayList<DocsEnum>(stateSlots.size());
|
||||
for (int i = 0; i < stateSlots.size(); i++) {
|
||||
docsEnums.add(states.get(stateSlots.get(i)).termsEnum.docs(liveDocs, null, flags));
|
||||
}
|
||||
return new CompoundDocsEnum(docsEnums);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef next() throws IOException {
|
||||
if (states.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (current == null) {
|
||||
// unpositioned
|
||||
for (TermsEnumState state : states) {
|
||||
state.initialize();
|
||||
}
|
||||
} else {
|
||||
int removed = 0;
|
||||
for (int i = 0; i < stateSlots.size(); i++) {
|
||||
int stateSlot = stateSlots.get(i);
|
||||
if (states.get(stateSlot - removed).next() == null) {
|
||||
states.remove(stateSlot - removed);
|
||||
removed++;
|
||||
}
|
||||
}
|
||||
|
||||
if (states.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
stateSlots.clear();
|
||||
}
|
||||
|
||||
BytesRef lowestTerm = states.get(0).term;
|
||||
stateSlots.add(0);
|
||||
for (int i = 1; i < states.size(); i++) {
|
||||
TermsEnumState state = states.get(i);
|
||||
int cmp = lowestTerm.compareTo(state.term);
|
||||
if (cmp > 0) {
|
||||
lowestTerm = state.term;
|
||||
stateSlots.clear();
|
||||
stateSlots.add(i);
|
||||
} else if (cmp == 0) {
|
||||
stateSlots.add(i);
|
||||
}
|
||||
}
|
||||
|
||||
return current = lowestTerm;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SeekStatus seekCeil(BytesRef text) throws IOException {
|
||||
if (states.isEmpty()) {
|
||||
return SeekStatus.END;
|
||||
}
|
||||
|
||||
boolean found = false;
|
||||
if (current == null) {
|
||||
// unpositioned
|
||||
Iterator<TermsEnumState> iterator = states.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
SeekStatus seekStatus = iterator.next().seekCeil(text);
|
||||
if (seekStatus == SeekStatus.END) {
|
||||
iterator.remove();
|
||||
} else if (seekStatus == SeekStatus.FOUND) {
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
int removed = 0;
|
||||
for (int i = 0; i < stateSlots.size(); i++) {
|
||||
int stateSlot = stateSlots.get(i);
|
||||
SeekStatus seekStatus = states.get(stateSlot - removed).seekCeil(text);
|
||||
if (seekStatus == SeekStatus.END) {
|
||||
states.remove(stateSlot - removed);
|
||||
removed++;
|
||||
} else if (seekStatus == SeekStatus.FOUND) {
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (states.isEmpty()) {
|
||||
return SeekStatus.END;
|
||||
}
|
||||
stateSlots.clear();
|
||||
|
||||
if (found) {
|
||||
for (int i = 0; i < states.size(); i++) {
|
||||
if (states.get(i).term.equals(text)) {
|
||||
stateSlots.add(i);
|
||||
}
|
||||
}
|
||||
current = text;
|
||||
return SeekStatus.FOUND;
|
||||
} else {
|
||||
BytesRef lowestTerm = states.get(0).term;
|
||||
stateSlots.add(0);
|
||||
for (int i = 1; i < states.size(); i++) {
|
||||
TermsEnumState state = states.get(i);
|
||||
int cmp = lowestTerm.compareTo(state.term);
|
||||
if (cmp > 0) {
|
||||
lowestTerm = state.term;
|
||||
stateSlots.clear();
|
||||
stateSlots.add(i);
|
||||
} else if (cmp == 0) {
|
||||
stateSlots.add(i);
|
||||
}
|
||||
}
|
||||
current = lowestTerm;
|
||||
return SeekStatus.NOT_FOUND;
|
||||
}
|
||||
}
|
||||
|
||||
class TermsEnumState {
|
||||
|
||||
final TermsEnum termsEnum;
|
||||
BytesRef term;
|
||||
SeekStatus lastSeekStatus;
|
||||
|
||||
TermsEnumState(TermsEnum termsEnum) {
|
||||
this.termsEnum = termsEnum;
|
||||
}
|
||||
|
||||
void initialize() throws IOException {
|
||||
term = termsEnum.next();
|
||||
}
|
||||
|
||||
BytesRef next() throws IOException {
|
||||
return term = termsEnum.next();
|
||||
}
|
||||
|
||||
SeekStatus seekCeil(BytesRef text) throws IOException {
|
||||
lastSeekStatus = termsEnum.seekCeil(text);
|
||||
if (lastSeekStatus != SeekStatus.END) {
|
||||
term = termsEnum.term();
|
||||
}
|
||||
return lastSeekStatus;
|
||||
}
|
||||
}
|
||||
|
||||
class CompoundDocsEnum extends DocsEnum {
|
||||
|
||||
final List<State> states;
|
||||
int current = -1;
|
||||
|
||||
CompoundDocsEnum(List<DocsEnum> docsEnums) {
|
||||
this.states = new ArrayList<State>(docsEnums.size());
|
||||
for (DocsEnum docsEnum : docsEnums) {
|
||||
states.add(new State(docsEnum));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int freq() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docID() {
|
||||
return current;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nextDoc() throws IOException {
|
||||
if (states.isEmpty()) {
|
||||
return current = NO_MORE_DOCS;
|
||||
}
|
||||
|
||||
if (current == -1) {
|
||||
for (State state : states) {
|
||||
state.initialize();
|
||||
}
|
||||
}
|
||||
|
||||
int lowestIndex = 0;
|
||||
int lowestDocId = states.get(0).current;
|
||||
for (int i = 1; i < states.size(); i++) {
|
||||
State state = states.get(i);
|
||||
if (lowestDocId > state.current) {
|
||||
lowestDocId = state.current;
|
||||
lowestIndex = i;
|
||||
}
|
||||
}
|
||||
|
||||
if (states.get(lowestIndex).next() == DocsEnum.NO_MORE_DOCS) {
|
||||
states.remove(lowestIndex);
|
||||
}
|
||||
|
||||
return current = lowestDocId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int advance(int target) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long cost() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
class State {
|
||||
|
||||
final DocsEnum docsEnum;
|
||||
int current = -1;
|
||||
|
||||
State(DocsEnum docsEnum) {
|
||||
this.docsEnum = docsEnum;
|
||||
}
|
||||
|
||||
void initialize() throws IOException {
|
||||
current = docsEnum.nextDoc();
|
||||
}
|
||||
|
||||
int next() throws IOException {
|
||||
return current = docsEnum.nextDoc();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ord() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void seekExact(long ord) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long totalTermFreq() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
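For illustration only (not part of the commit): at each step the intersect enum advances its underlying enums and emits the lowest current term, remembering which enums share that term so their doc ids can be unioned. A minimal JDK-only sketch of the lowest-term selection, with hypothetical names.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for the "emit the lowest current term" step across several positioned enums;
// enums that share the lowest term are remembered so their doc ids can be unioned afterwards.
final class LowestTermSketch {
    static String lowest(List<String> currentTerms, List<Integer> slots) {
        slots.clear();
        String lowest = currentTerms.get(0);
        slots.add(0);
        for (int i = 1; i < currentTerms.size(); i++) {
            int cmp = lowest.compareTo(currentTerms.get(i));
            if (cmp > 0) {               // found a smaller term: restart the slot list
                lowest = currentTerms.get(i);
                slots.clear();
                slots.add(i);
            } else if (cmp == 0) {       // same term in several enums: union their docs later
                slots.add(i);
            }
        }
        return lowest;
    }

    public static void main(String[] args) {
        List<Integer> slots = new ArrayList<Integer>();
        System.out.println(lowest(java.util.Arrays.asList("blog#2", "blog#1"), slots));  // blog#1, slots = [1]
    }
}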
@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper;
|
|||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.UnicodeUtil;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.common.lucene.BytesRefs;
|
||||
|
||||
import java.util.Collection;
|
||||
|
@ -83,10 +82,6 @@ public final class Uid {
|
|||
return createUidAsBytes(type, id);
|
||||
}
|
||||
|
||||
public static String typePrefix(String type) {
|
||||
return type + DELIMITER;
|
||||
}
|
||||
|
||||
public static BytesRef typePrefixAsBytes(BytesRef type) {
|
||||
BytesRef bytesRef = new BytesRef(type.length + 1);
|
||||
bytesRef.append(type);
|
||||
|
@ -94,24 +89,6 @@ public final class Uid {
|
|||
return bytesRef;
|
||||
}
|
||||
|
||||
public static String idFromUid(String uid) {
|
||||
int delimiterIndex = uid.indexOf(DELIMITER); // type is not allowed to have # in it..., ids can
|
||||
return uid.substring(delimiterIndex + 1);
|
||||
}
|
||||
|
||||
public static HashedBytesArray idFromUid(BytesRef uid) {
|
||||
return splitUidIntoTypeAndId(uid)[1];
|
||||
}
|
||||
|
||||
public static HashedBytesArray typeFromUid(BytesRef uid) {
|
||||
return splitUidIntoTypeAndId(uid)[0];
|
||||
}
|
||||
|
||||
public static String typeFromUid(String uid) {
|
||||
int delimiterIndex = uid.indexOf(DELIMITER); // type is not allowed to have # in it..., ids can
|
||||
return uid.substring(0, delimiterIndex);
|
||||
}
|
||||
|
||||
public static Uid createUid(String uid) {
|
||||
int delimiterIndex = uid.indexOf(DELIMITER); // type is not allowed to have # in it..., ids can
|
||||
return new Uid(uid.substring(0, delimiterIndex), uid.substring(delimiterIndex + 1));
|
||||
|
@ -179,8 +156,7 @@ public final class Uid {
|
|||
return false;
|
||||
}
|
||||
|
||||
// LUCENE 4 UPGRADE: HashedBytesArray or BytesRef as return type?
|
||||
public static HashedBytesArray[] splitUidIntoTypeAndId(BytesRef uid) {
|
||||
public static BytesRef[] splitUidIntoTypeAndId(BytesRef uid) {
|
||||
int loc = -1;
|
||||
final int limit = uid.offset + uid.length;
|
||||
for (int i = uid.offset; i < limit; i++) {
|
||||
|
@ -194,12 +170,11 @@ public final class Uid {
|
|||
return null;
|
||||
}
|
||||
|
||||
byte[] type = new byte[loc - uid.offset];
|
||||
System.arraycopy(uid.bytes, uid.offset, type, 0, type.length);
|
||||
|
||||
byte[] id = new byte[uid.length - type.length - 1];
|
||||
System.arraycopy(uid.bytes, loc + 1, id, 0, id.length);
|
||||
return new HashedBytesArray[]{new HashedBytesArray(type), new HashedBytesArray(id)};
|
||||
int idStart = loc + 1;
|
||||
return new BytesRef[] {
|
||||
new BytesRef(uid.bytes, uid.offset, loc - uid.offset),
|
||||
new BytesRef(uid.bytes, idStart, limit - idStart)
|
||||
};
|
||||
}
|
||||
|
||||
}
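For illustration only (not part of the commit): splitUidIntoTypeAndId now returns plain BytesRef slices around the first '#' (types may not contain '#', ids may). A minimal JDK-only sketch of the same split on strings, with hypothetical names.

// Hypothetical stand-in for splitUidIntoTypeAndId: split a "type#id" string on the first '#'
// (types may not contain '#', ids may) and return the two halves.
final class UidSplitSketch {
    static String[] split(String uid) {
        int loc = uid.indexOf('#');
        if (loc < 0) {
            return null;  // not a well-formed uid
        }
        return new String[] { uid.substring(0, loc), uid.substring(loc + 1) };
    }

    public static void main(String[] args) {
        String[] parts = split("blog#2013#1");
        System.out.println(parts[0] + " / " + parts[1]);  // blog / 2013#1
    }
}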
|
||||
@ -82,6 +82,7 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
|
|||
public Builder() {
|
||||
super(Defaults.NAME);
|
||||
this.indexName = name;
|
||||
builder = this;
|
||||
}
|
||||
|
||||
public Builder type(String type) {
|
||||
|
@ -146,7 +147,7 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
|
|||
|
||||
@Override
|
||||
public FieldDataType defaultFieldDataType() {
|
||||
return new FieldDataType("string");
|
||||
return new FieldDataType("_parent");
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -25,7 +25,9 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
|
||||
import org.elasticsearch.common.lucene.search.XFilteredQuery;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery;
|
||||
import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
|
||||
import org.elasticsearch.index.search.child.DeleteByQueryWrappingFilter;
|
||||
|
@ -121,10 +123,11 @@ public class HasChildFilterParser implements FilterParser {
|
|||
if (childDocMapper == null) {
|
||||
throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]");
|
||||
}
|
||||
if (!childDocMapper.parentFieldMapper().active()) {
|
||||
ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
|
||||
if (!parentFieldMapper.active()) {
|
||||
throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping");
|
||||
}
|
||||
String parentType = childDocMapper.parentFieldMapper().type();
|
||||
String parentType = parentFieldMapper.type();
|
||||
|
||||
// wrap the query with type query
|
||||
query = new XFilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
|
||||
|
@ -140,7 +143,8 @@ public class HasChildFilterParser implements FilterParser {
|
|||
}
|
||||
|
||||
Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null);
|
||||
Query childrenConstantScoreQuery = new ChildrenConstantScoreQuery(query, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
|
||||
ParentChildIndexFieldData parentChildIndexFieldData = parseContext.fieldData().getForField(parentFieldMapper);
|
||||
Query childrenConstantScoreQuery = new ChildrenConstantScoreQuery(parentChildIndexFieldData, query, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
|
||||
|
||||
if (filterName != null) {
|
||||
parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(childrenConstantScoreQuery));
|
||||
@ -26,7 +26,9 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
|
||||
import org.elasticsearch.common.lucene.search.XFilteredQuery;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.search.child.*;
|
||||
import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
@ -124,9 +126,14 @@ public class HasChildQueryParser implements QueryParser {
|
|||
if (!childDocMapper.parentFieldMapper().active()) {
|
||||
throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] does not have parent mapping");
|
||||
}
|
||||
String parentType = childDocMapper.parentFieldMapper().type();
|
||||
DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
|
||||
|
||||
ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
|
||||
if (!parentFieldMapper.active()) {
|
||||
throw new QueryParsingException(parseContext.index(), "[has_child] _parent field not configured");
|
||||
}
|
||||
|
||||
String parentType = parentFieldMapper.type();
|
||||
DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
|
||||
if (parentDocMapper == null) {
|
||||
throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]");
|
||||
}
|
||||
|
@ -142,10 +149,11 @@ public class HasChildQueryParser implements QueryParser {
|
|||
boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
|
||||
Query query;
|
||||
Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null);
|
||||
ParentChildIndexFieldData parentChildIndexFieldData = parseContext.fieldData().getForField(parentFieldMapper);
|
||||
if (!deleteByQuery && scoreType != null) {
|
||||
query = new ChildrenQuery(parentType, childType, parentFilter, innerQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
|
||||
query = new ChildrenQuery(parentChildIndexFieldData, parentType, childType, parentFilter, innerQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
|
||||
} else {
|
||||
query = new ChildrenConstantScoreQuery(innerQuery, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
|
||||
query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, innerQuery, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
|
||||
if (deleteByQuery) {
|
||||
query = new XConstantScoreQuery(new DeleteByQueryWrappingFilter(query));
|
||||
}
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
|
|||
import org.elasticsearch.common.lucene.search.XFilteredQuery;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
|
||||
|
@ -132,16 +133,21 @@ public class HasParentFilterParser implements FilterParser {
|
|||
|
||||
Set<String> parentTypes = new HashSet<String>(5);
|
||||
parentTypes.add(parentType);
|
||||
ParentChildIndexFieldData parentChildIndexFieldData = null;
|
||||
for (DocumentMapper documentMapper : parseContext.mapperService()) {
|
||||
ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
|
||||
if (parentFieldMapper.active()) {
|
||||
DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());
|
||||
parentChildIndexFieldData = parseContext.fieldData().getForField(parentFieldMapper);
|
||||
if (parentTypeDocumentMapper == null) {
|
||||
// Only add this if this parentFieldMapper (also a parent) isn't a child of another parent.
|
||||
parentTypes.add(parentFieldMapper.type());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (parentChildIndexFieldData == null) {
|
||||
throw new QueryParsingException(parseContext.index(), "[has_parent] no _parent field configured");
|
||||
}
|
||||
|
||||
Filter parentFilter;
|
||||
if (parentTypes.size() == 1) {
|
||||
|
@ -157,7 +163,7 @@ public class HasParentFilterParser implements FilterParser {
|
|||
parentFilter = parentsFilter;
|
||||
}
|
||||
Filter childrenFilter = parseContext.cacheFilter(new NotFilter(parentFilter), null);
|
||||
Query parentConstantScoreQuery = new ParentConstantScoreQuery(query, parentType, childrenFilter);
|
||||
Query parentConstantScoreQuery = new ParentConstantScoreQuery(parentChildIndexFieldData, query, parentType, childrenFilter);
|
||||
|
||||
if (filterName != null) {
|
||||
parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(parentConstantScoreQuery));
|
||||
@ -28,6 +28,7 @@ import org.elasticsearch.common.lucene.search.XBooleanFilter;
|
|||
import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
|
||||
import org.elasticsearch.common.lucene.search.XFilteredQuery;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
|
||||
|
@ -130,11 +131,13 @@ public class HasParentQueryParser implements QueryParser {
|
|||
// wrap the query with type query
|
||||
innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(parentDocMapper.typeFilter(), null));
|
||||
|
||||
ParentChildIndexFieldData parentChildIndexFieldData = null;
|
||||
Set<String> parentTypes = new HashSet<String>(5);
|
||||
parentTypes.add(parentType);
|
||||
for (DocumentMapper documentMapper : parseContext.mapperService()) {
|
||||
ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
|
||||
if (parentFieldMapper.active()) {
|
||||
parentChildIndexFieldData = parseContext.fieldData().getForField(parentFieldMapper);
|
||||
DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());
|
||||
if (parentTypeDocumentMapper == null) {
|
||||
// Only add this if this parentFieldMapper (also a parent) isn't a child of another parent.
|
||||
|
@ -142,6 +145,9 @@ public class HasParentQueryParser implements QueryParser {
|
|||
}
|
||||
}
|
||||
}
|
||||
if (parentChildIndexFieldData == null) {
|
||||
throw new QueryParsingException(parseContext.index(), "[has_parent] no _parent field configured");
|
||||
}
|
||||
|
||||
Filter parentFilter;
|
||||
if (parentTypes.size() == 1) {
|
||||
|
@ -161,9 +167,9 @@ public class HasParentQueryParser implements QueryParser {
|
|||
boolean deleteByQuery = "delete_by_query".equals(SearchContext.current().source());
|
||||
Query query;
|
||||
if (!deleteByQuery && score) {
|
||||
query = new ParentQuery(innerQuery, parentType, childrenFilter);
|
||||
query = new ParentQuery(parentChildIndexFieldData, innerQuery, parentType, childrenFilter);
|
||||
} else {
|
||||
query = new ParentConstantScoreQuery(innerQuery, parentType, childrenFilter);
|
||||
query = new ParentConstantScoreQuery(parentChildIndexFieldData, innerQuery, parentType, childrenFilter);
|
||||
if (deleteByQuery) {
|
||||
query = new XConstantScoreQuery(new DeleteByQueryWrappingFilter(query));
|
||||
}
|
||||
@ -18,15 +18,19 @@
|
|||
*/
|
||||
package org.elasticsearch.index.query;
|
||||
|
||||
import org.apache.lucene.search.Filter;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.lucene.search.XFilteredQuery;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
|
||||
import org.elasticsearch.index.search.child.ScoreType;
|
||||
import org.elasticsearch.index.search.child.TopChildrenQuery;
|
||||
import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -120,15 +124,22 @@ public class TopChildrenQueryParser implements QueryParser {
|
|||
if (childDocMapper == null) {
|
||||
throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]");
|
||||
}
|
||||
if (!childDocMapper.parentFieldMapper().active()) {
|
||||
ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
|
||||
if (!parentFieldMapper.active()) {
|
||||
throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping");
|
||||
}
|
||||
String parentType = childDocMapper.parentFieldMapper().type();
|
||||
|
||||
Filter nonNestedDocsFilter = null;
|
||||
if (childDocMapper.hasNestedObjects()) {
|
||||
nonNestedDocsFilter = parseContext.cacheFilter(NonNestedDocsFilter.INSTANCE, null);
|
||||
}
|
||||
|
||||
innerQuery.setBoost(boost);
|
||||
// wrap the query with type query
|
||||
innerQuery = new XFilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null));
|
||||
TopChildrenQuery query = new TopChildrenQuery(innerQuery, childType, parentType, scoreType, factor, incrementalFactor, parseContext.cacheRecycler());
|
||||
ParentChildIndexFieldData parentChildIndexFieldData = parseContext.fieldData().getForField(parentFieldMapper);
|
||||
TopChildrenQuery query = new TopChildrenQuery(parentChildIndexFieldData, innerQuery, childType, parentType, scoreType, factor, incrementalFactor, parseContext.cacheRecycler(), nonNestedDocsFilter);
|
||||
if (queryName != null) {
|
||||
parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
|
||||
}
|
||||
@ -16,9 +16,9 @@
|
|||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.search.child;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectOpenHashSet;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.Term;
|
||||
|
@ -26,17 +26,20 @@ import org.apache.lucene.queries.TermFilter;
|
|||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.FixedBitSet;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.lucene.docset.DocIdSets;
|
||||
import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
|
||||
import org.elasticsearch.common.lucene.search.NoopCollector;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.recycler.Recycler;
|
||||
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
|
||||
import org.elasticsearch.index.fielddata.BytesValues;
|
||||
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
import org.elasticsearch.search.aggregations.bucket.BytesRefHash;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -47,6 +50,7 @@ import java.util.Set;
|
|||
*/
|
||||
public class ChildrenConstantScoreQuery extends Query {
|
||||
|
||||
private final ParentChildIndexFieldData parentChildIndexFieldData;
|
||||
private final Query originalChildQuery;
|
||||
private final String parentType;
|
||||
private final String childType;
|
||||
|
@ -57,7 +61,8 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
private Query rewrittenChildQuery;
|
||||
private IndexReader rewriteIndexReader;
|
||||
|
||||
public ChildrenConstantScoreQuery(Query childQuery, String parentType, String childType, Filter parentFilter, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
|
||||
public ChildrenConstantScoreQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query childQuery, String parentType, String childType, Filter parentFilter, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
|
||||
this.parentChildIndexFieldData = parentChildIndexFieldData;
|
||||
this.parentFilter = parentFilter;
|
||||
this.parentType = parentType;
|
||||
this.childType = childType;
|
||||
|
@ -84,9 +89,8 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
@Override
|
||||
public Weight createWeight(IndexSearcher searcher) throws IOException {
|
||||
SearchContext searchContext = SearchContext.current();
|
||||
searchContext.idCache().refresh(searcher.getTopReaderContext().leaves());
|
||||
Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids = searchContext.cacheRecycler().hashSet(-1);
|
||||
UidCollector collector = new UidCollector(parentType, searchContext, collectedUids.v());
|
||||
BytesRefHash parentIds = new BytesRefHash(512, searchContext.pageCacheRecycler());
|
||||
ParentIdCollector collector = new ParentIdCollector(parentType, parentChildIndexFieldData, parentIds);
|
||||
final Query childQuery;
|
||||
if (rewrittenChildQuery == null) {
|
||||
childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
|
||||
|
@ -98,20 +102,20 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
indexSearcher.setSimilarity(searcher.getSimilarity());
|
||||
indexSearcher.search(childQuery, collector);
|
||||
|
||||
int remaining = collectedUids.v().size();
|
||||
long remaining = parentIds.size();
|
||||
if (remaining == 0) {
|
||||
return Queries.newMatchNoDocsQuery().createWeight(searcher);
|
||||
}
|
||||
|
||||
Filter shortCircuitFilter = null;
|
||||
if (remaining == 1) {
|
||||
BytesRef id = collectedUids.v().iterator().next().value.toBytesRef();
|
||||
BytesRef id = parentIds.get(0, new BytesRef());
|
||||
shortCircuitFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
|
||||
} else if (remaining <= shortCircuitParentDocSet) {
|
||||
shortCircuitFilter = new ParentIdsFilter(parentType, collectedUids.v().keys, collectedUids.v().allocated, nonNestedDocsFilter);
|
||||
shortCircuitFilter = new ParentIdsFilter(parentType, nonNestedDocsFilter, parentIds);
|
||||
}
|
||||
|
||||
ParentWeight parentWeight = new ParentWeight(parentFilter, shortCircuitFilter, searchContext, collectedUids);
|
||||
ParentWeight parentWeight = new ParentWeight(parentFilter, shortCircuitFilter, parentIds);
|
||||
searchContext.addReleasable(parentWeight);
|
||||
return parentWeight;
|
||||
}
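For illustration only (not part of the commit): createWeight above first collects the distinct parent ids of all matching child docs into a BytesRefHash, then matches parent docs against that set, counting down so work can stop once every collected parent has been seen. A minimal JDK-only sketch of that collect-then-match flow, with a plain Set standing in for the paged BytesRefHash.

import java.util.LinkedHashSet;
import java.util.Set;

// Hypothetical stand-in for the collect-then-match flow: one pass over matching child docs collects
// the distinct parent ids, a second pass keeps only parent docs whose id was collected, counting down
// so iteration can stop early once every collected parent has been found.
final class CollectThenMatchSketch {
    public static void main(String[] args) {
        Set<String> parentIds = new LinkedHashSet<String>();  // stands in for the paged BytesRefHash
        for (String parentIdOfMatchingChild : new String[] { "p1", "p3", "p1" }) {
            parentIds.add(parentIdOfMatchingChild);           // de-duplicates while collecting
        }
        long remaining = parentIds.size();
        for (String parentDocId : new String[] { "p1", "p2", "p3" }) {
            if (remaining > 0 && parentIds.contains(parentDocId)) {
                System.out.println("match parent doc " + parentDocId);
                remaining--;
            }
        }
    }
}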
|
||||
|
@ -120,19 +124,17 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
|
||||
private final Filter parentFilter;
|
||||
private final Filter shortCircuitFilter;
|
||||
private final SearchContext searchContext;
|
||||
private final Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids;
|
||||
private final BytesRefHash parentIds;
|
||||
|
||||
private int remaining;
|
||||
private long remaining;
|
||||
private float queryNorm;
|
||||
private float queryWeight;
|
||||
|
||||
public ParentWeight(Filter parentFilter, Filter shortCircuitFilter, SearchContext searchContext, Recycler.V<ObjectOpenHashSet<HashedBytesArray>> collectedUids) {
|
||||
public ParentWeight(Filter parentFilter, Filter shortCircuitFilter, BytesRefHash parentIds) {
|
||||
this.parentFilter = new ApplyAcceptedDocsFilter(parentFilter);
|
||||
this.shortCircuitFilter = shortCircuitFilter;
|
||||
this.searchContext = searchContext;
|
||||
this.collectedUids = collectedUids;
|
||||
this.remaining = collectedUids.v().size();
|
||||
this.parentIds = parentIds;
|
||||
this.remaining = parentIds.size();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -176,14 +178,14 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
|
||||
DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, acceptDocs);
|
||||
if (!DocIdSets.isEmpty(parentDocIdSet)) {
|
||||
IdReaderTypeCache idReaderTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
|
||||
BytesValues bytesValues = parentChildIndexFieldData.load(context).getBytesValues(parentType);
|
||||
// We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining"
|
||||
// count down (short circuit) logic will then work as expected.
|
||||
parentDocIdSet = BitsFilteredDocIdSet.wrap(parentDocIdSet, context.reader().getLiveDocs());
|
||||
if (idReaderTypeCache != null) {
|
||||
if (bytesValues != null) {
|
||||
DocIdSetIterator innerIterator = parentDocIdSet.iterator();
|
||||
if (innerIterator != null) {
|
||||
ParentDocIdIterator parentDocIdIterator = new ParentDocIdIterator(innerIterator, collectedUids.v(), idReaderTypeCache);
|
||||
ParentDocIdIterator parentDocIdIterator = new ParentDocIdIterator(innerIterator, parentIds, bytesValues);
|
||||
return ConstantScorer.create(parentDocIdIterator, this, queryWeight);
|
||||
}
|
||||
}
|
||||
|
@ -193,19 +195,19 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
|
||||
@Override
|
||||
public boolean release() throws ElasticsearchException {
|
||||
Releasables.release(collectedUids);
|
||||
Releasables.release(parentIds);
|
||||
return true;
|
||||
}
|
||||
|
||||
private final class ParentDocIdIterator extends FilteredDocIdSetIterator {
|
||||
|
||||
private final ObjectOpenHashSet<HashedBytesArray> parents;
|
||||
private final IdReaderTypeCache typeCache;
|
||||
private final BytesRefHash parentIds;
|
||||
private final BytesValues values;
|
||||
|
||||
private ParentDocIdIterator(DocIdSetIterator innerIterator, ObjectOpenHashSet<HashedBytesArray> parents, IdReaderTypeCache typeCache) {
|
||||
private ParentDocIdIterator(DocIdSetIterator innerIterator, BytesRefHash parentIds, BytesValues values) {
|
||||
super(innerIterator);
|
||||
this.parents = parents;
|
||||
this.typeCache = typeCache;
|
||||
this.parentIds = parentIds;
|
||||
this.values = values;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -219,7 +221,10 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
return false;
|
||||
}
|
||||
|
||||
boolean match = parents.contains(typeCache.idByDoc(doc));
|
||||
values.setDocument(doc);
|
||||
BytesRef parentId = values.nextValue();
|
||||
int hash = values.currentValueHash();
|
||||
boolean match = parentIds.find(parentId, hash) >= 0;
|
||||
if (match) {
|
||||
remaining--;
|
||||
}
|
||||
|
@ -228,20 +233,52 @@ public class ChildrenConstantScoreQuery extends Query {
|
|||
}
|
||||
}
|
||||
|
||||
private final static class UidCollector extends ParentIdCollector {
|
||||
private final static class ParentIdCollector extends NoopCollector {
|
||||
|
||||
private final ObjectOpenHashSet<HashedBytesArray> collectedUids;
|
||||
private final BytesRefHash parentIds;
|
||||
private final String parentType;
|
||||
private final ParentChildIndexFieldData indexFieldData;
|
||||
|
||||
UidCollector(String parentType, SearchContext context, ObjectOpenHashSet<HashedBytesArray> collectedUids) {
|
||||
super(parentType, context);
|
||||
this.collectedUids = collectedUids;
|
||||
protected BytesValues.WithOrdinals values;
|
||||
private Ordinals.Docs ordinals;
|
||||
|
||||
// This remembers what ordinals have already been seen in the current segment
|
||||
// and avoids fetching the actual id from field data and checking whether it already exists in parentIds
|
||||
private FixedBitSet seenOrdinals;
|
||||
|
||||
protected ParentIdCollector(String parentType, ParentChildIndexFieldData indexFieldData, BytesRefHash parentIds) {
|
||||
this.parentType = parentType;
|
||||
this.indexFieldData = indexFieldData;
|
||||
this.parentIds = parentIds;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect(int doc, HashedBytesArray parentIdByDoc) {
|
||||
collectedUids.add(parentIdByDoc);
|
||||
public void collect(int doc) throws IOException {
|
||||
if (values != null) {
|
||||
int ord = (int) ordinals.getOrd(doc);
|
||||
if (!seenOrdinals.get(ord)) {
|
||||
final BytesRef bytes = values.getValueByOrd(ord);
|
||||
final int hash = values.currentValueHash();
|
||||
parentIds.add(bytes, hash);
|
||||
seenOrdinals.set(ord);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) throws IOException {
|
||||
values = indexFieldData.load(context).getBytesValues(parentType);
|
||||
if (values != null) {
|
||||
ordinals = values.ordinals();
|
||||
final int maxOrd = (int) ordinals.getMaxOrd();
|
||||
if (seenOrdinals == null || seenOrdinals.length() < maxOrd) {
|
||||
seenOrdinals = new FixedBitSet(maxOrd);
|
||||
} else {
|
||||
seenOrdinals.clear(0, maxOrd);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
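The collector above resolves each parent ordinal at most once per segment: the FixedBitSet remembers which ordinals were already looked up, so the id bytes are fetched from field data and added to the BytesRefHash only for the first child doc that carries a given parent. A small self-contained sketch of the same pattern using plain Java collections in place of the Elasticsearch paging structures (all names here are made up for illustration):

import java.util.BitSet;
import java.util.HashSet;
import java.util.Set;
import java.util.function.IntFunction;

// Simplified model: dedup the expensive per-ordinal lookup with a per-segment bitset.
class SeenOrdinalsCollectorSketch {
    private final Set<String> parentIds = new HashSet<>(); // stands in for the BytesRefHash
    private BitSet seenOrdinals;

    void nextSegment(int maxOrd) {
        // mirrors setNextReader(): allocate or reset the per-segment bitset
        if (seenOrdinals == null || seenOrdinals.size() < maxOrd) {
            seenOrdinals = new BitSet(maxOrd);
        } else {
            seenOrdinals.clear();
        }
    }

    void collect(int ord, IntFunction<String> resolveIdFromFieldData) {
        if (!seenOrdinals.get(ord)) {
            parentIds.add(resolveIdFromFieldData.apply(ord)); // fetched at most once per ordinal
            seenOrdinals.set(ord);
        }
    }
}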
|
||||
|
||||
@Override
|
||||
|
|
|
@ -18,8 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.index.search.child;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
|
||||
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.Term;
|
||||
|
@ -29,17 +27,24 @@ import org.apache.lucene.util.Bits;
|
|||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.ToStringUtils;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.cache.recycler.PageCacheRecycler;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.lucene.docset.DocIdSets;
|
||||
import org.elasticsearch.common.lucene.search.AndFilter;
|
||||
import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
|
||||
import org.elasticsearch.common.lucene.search.NoopCollector;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.recycler.Recycler;
|
||||
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.FloatArray;
|
||||
import org.elasticsearch.common.util.IntArray;
|
||||
import org.elasticsearch.common.util.LongArray;
|
||||
import org.elasticsearch.index.fielddata.BytesValues;
|
||||
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
import org.elasticsearch.search.aggregations.bucket.BytesRefHash;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -49,7 +54,7 @@ import java.util.Set;
|
|||
|
||||
/**
|
||||
* A query implementation that executes the wrapped child query and connects all the matching child docs to the related
|
||||
* parent documents using the {@link IdReaderTypeCache}.
|
||||
* parent documents using {@link ParentChildIndexFieldData}.
|
||||
* <p/>
|
||||
* This query is executed in two rounds. The first round resolves all the matching child documents and groups these
|
||||
* documents by parent uid value. Also the child scores are aggregated per parent uid value. During the second round
|
||||
|
@ -58,6 +63,7 @@ import java.util.Set;
|
|||
*/
|
||||
public class ChildrenQuery extends Query {
|
||||
|
||||
private final ParentChildIndexFieldData parentChildIndexFieldData;
|
||||
private final String parentType;
|
||||
private final String childType;
|
||||
private final Filter parentFilter;
|
||||
|
@ -69,7 +75,8 @@ public class ChildrenQuery extends Query {
|
|||
private Query rewrittenChildQuery;
|
||||
private IndexReader rewriteIndexReader;
|
||||
|
||||
public ChildrenQuery(String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
|
||||
public ChildrenQuery(ParentChildIndexFieldData parentChildIndexFieldData, String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int shortCircuitParentDocSet, Filter nonNestedDocsFilter) {
|
||||
this.parentChildIndexFieldData = parentChildIndexFieldData;
|
||||
this.parentType = parentType;
|
||||
this.childType = childType;
|
||||
this.parentFilter = parentFilter;
|
||||
|
@ -135,20 +142,7 @@ public class ChildrenQuery extends Query {
|
|||
@Override
|
||||
public Weight createWeight(IndexSearcher searcher) throws IOException {
|
||||
SearchContext searchContext = SearchContext.current();
|
||||
searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
|
||||
|
||||
Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore = searchContext.cacheRecycler().objectFloatMap(-1);
|
||||
Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount = null;
|
||||
|
||||
final Collector collector;
|
||||
switch (scoreType) {
|
||||
case AVG:
|
||||
uidToCount = searchContext.cacheRecycler().objectIntMap(-1);
|
||||
collector = new AvgChildUidCollector(scoreType, searchContext, parentType, uidToScore.v(), uidToCount.v());
|
||||
break;
|
||||
default:
|
||||
collector = new ChildUidCollector(scoreType, searchContext, parentType, uidToScore.v());
|
||||
}
|
||||
final Query childQuery;
|
||||
if (rewrittenChildQuery == null) {
|
||||
childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery);
|
||||
|
@ -158,20 +152,48 @@ public class ChildrenQuery extends Query {
|
|||
}
|
||||
IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
|
||||
indexSearcher.setSimilarity(searcher.getSimilarity());
|
||||
indexSearcher.search(childQuery, collector);
|
||||
|
||||
int size = uidToScore.v().size();
|
||||
final BytesRefHash parentIds;
|
||||
final FloatArray scores;
|
||||
final IntArray occurrences;
|
||||
switch (scoreType) {
|
||||
case MAX:
|
||||
MaxCollector maxCollector = new MaxCollector(parentChildIndexFieldData, parentType, searchContext);
|
||||
indexSearcher.search(childQuery, maxCollector);
|
||||
parentIds = maxCollector.parentIds;
|
||||
scores = maxCollector.scores;
|
||||
occurrences = null;
|
||||
Releasables.release(maxCollector.parentIdsIndex);
|
||||
break;
|
||||
case SUM:
|
||||
SumCollector sumCollector = new SumCollector(parentChildIndexFieldData, parentType, searchContext);
|
||||
indexSearcher.search(childQuery, sumCollector);
|
||||
parentIds = sumCollector.parentIds;
|
||||
scores = sumCollector.scores;
|
||||
occurrences = null;
|
||||
Releasables.release(sumCollector.parentIdsIndex);
|
||||
break;
|
||||
case AVG:
|
||||
AvgCollector avgCollector = new AvgCollector(parentChildIndexFieldData, parentType, searchContext);
|
||||
indexSearcher.search(childQuery, avgCollector);
|
||||
parentIds = avgCollector.parentIds;
|
||||
scores = avgCollector.scores;
|
||||
occurrences = avgCollector.occurrences;
|
||||
Releasables.release(avgCollector.parentIdsIndex);
|
||||
break;
|
||||
default:
|
||||
throw new RuntimeException("Are we missing a score type here? -- " + scoreType);
|
||||
}
|
||||
|
||||
int size = (int) parentIds.size();
|
||||
if (size == 0) {
|
||||
uidToScore.release();
|
||||
if (uidToCount != null) {
|
||||
uidToCount.release();
|
||||
}
|
||||
Releasables.release(parentIds, scores, occurrences);
|
||||
return Queries.newMatchNoDocsQuery().createWeight(searcher);
|
||||
}
|
||||
|
||||
final Filter parentFilter;
|
||||
if (size == 1) {
|
||||
BytesRef id = uidToScore.v().keys().iterator().next().value.toBytesRef();
|
||||
BytesRef id = parentIds.get(0, new BytesRef());
|
||||
if (nonNestedDocsFilter != null) {
|
||||
List<Filter> filters = Arrays.asList(
|
||||
new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))),
|
||||
|
@ -182,11 +204,11 @@ public class ChildrenQuery extends Query {
|
|||
parentFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
|
||||
}
|
||||
} else if (size <= shortCircuitParentDocSet) {
|
||||
parentFilter = new ParentIdsFilter(parentType, uidToScore.v().keys, uidToScore.v().allocated, nonNestedDocsFilter);
|
||||
parentFilter = new ParentIdsFilter(parentType, nonNestedDocsFilter, parentIds);
|
||||
} else {
|
||||
parentFilter = new ApplyAcceptedDocsFilter(this.parentFilter);
|
||||
}
|
||||
ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentFilter, searchContext, size, uidToScore, uidToCount);
|
||||
ParentWeight parentWeight = new ParentWeight(rewrittenChildQuery.createWeight(searcher), parentFilter, size, parentIds, scores, occurrences);
|
||||
searchContext.addReleasable(parentWeight);
|
||||
return parentWeight;
|
||||
}
|
||||
|
@ -195,19 +217,19 @@ public class ChildrenQuery extends Query {
|
|||
|
||||
private final Weight childWeight;
|
||||
private final Filter parentFilter;
|
||||
private final SearchContext searchContext;
|
||||
private final Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
|
||||
private final Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount;
|
||||
private final BytesRefHash parentIds;
|
||||
private final FloatArray scores;
|
||||
private final IntArray occurrences;
|
||||
|
||||
private int remaining;
|
||||
|
||||
private ParentWeight(Weight childWeight, Filter parentFilter, SearchContext searchContext, int remaining, Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore, Recycler.V<ObjectIntOpenHashMap<HashedBytesArray>> uidToCount) {
|
||||
private ParentWeight(Weight childWeight, Filter parentFilter, int remaining, BytesRefHash parentIds, FloatArray scores, IntArray occurrences) {
|
||||
this.childWeight = childWeight;
|
||||
this.parentFilter = parentFilter;
|
||||
this.searchContext = searchContext;
|
||||
this.remaining = remaining;
|
||||
this.uidToScore = uidToScore;
|
||||
this.uidToCount= uidToCount;
|
||||
this.parentIds = parentIds;
|
||||
this.scores = scores;
|
||||
this.occurrences = occurrences;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -238,38 +260,45 @@ public class ChildrenQuery extends Query {
|
|||
return null;
|
||||
}
|
||||
|
||||
IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
|
||||
BytesValues bytesValues = parentChildIndexFieldData.load(context).getBytesValues(parentType);
|
||||
if (bytesValues == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// We can't be sure that liveDocs have been applied, so we apply them here. The "remaining"
|
||||
// count down (short circuit) logic will then work as expected.
|
||||
DocIdSetIterator parentsIterator = BitsFilteredDocIdSet.wrap(parentsSet, context.reader().getLiveDocs()).iterator();
|
||||
switch (scoreType) {
|
||||
case AVG:
|
||||
return new AvgParentScorer(this, idTypeCache, uidToScore.v(), uidToCount.v(), parentsIterator);
|
||||
return new AvgParentScorer(this, bytesValues, parentIds, scores, occurrences, parentsIterator);
|
||||
default:
|
||||
return new ParentScorer(this, idTypeCache, uidToScore.v(), parentsIterator);
|
||||
return new ParentScorer(this, bytesValues, parentIds, scores, parentsIterator);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean release() throws ElasticsearchException {
|
||||
Releasables.release(uidToScore, uidToCount);
|
||||
Releasables.release(parentIds, scores, occurrences);
|
||||
return true;
|
||||
}
|
||||
|
||||
private class ParentScorer extends Scorer {
|
||||
|
||||
final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
|
||||
final IdReaderTypeCache idTypeCache;
|
||||
final BytesRefHash parentIds;
|
||||
final FloatArray scores;
|
||||
|
||||
final BytesValues bytesValues;
|
||||
final DocIdSetIterator parentsIterator;
|
||||
|
||||
int currentDocId = -1;
|
||||
float currentScore;
|
||||
|
||||
ParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator parentsIterator) {
|
||||
ParentScorer(Weight weight, BytesValues bytesValues, BytesRefHash parentIds, FloatArray scores, DocIdSetIterator parentsIterator) {
|
||||
super(weight);
|
||||
this.idTypeCache = idTypeCache;
|
||||
this.bytesValues = bytesValues;
|
||||
this.parentsIterator = parentsIterator;
|
||||
this.uidToScore = uidToScore;
|
||||
this.parentIds = parentIds;
|
||||
this.scores = scores;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -301,10 +330,10 @@ public class ChildrenQuery extends Query {
|
|||
return currentDocId;
|
||||
}
|
||||
|
||||
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
|
||||
if (uidToScore.containsKey(uid)) {
|
||||
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
|
||||
currentScore = uidToScore.lget();
|
||||
bytesValues.setDocument(currentDocId);
|
||||
long index = parentIds.find(bytesValues.nextValue(), bytesValues.currentValueHash());
|
||||
if (index != -1) {
|
||||
currentScore = scores.get(index);
|
||||
remaining--;
|
||||
return currentDocId;
|
||||
}
|
||||
|
@ -322,10 +351,10 @@ public class ChildrenQuery extends Query {
|
|||
return currentDocId;
|
||||
}
|
||||
|
||||
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
|
||||
if (uidToScore.containsKey(uid)) {
|
||||
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
|
||||
currentScore = uidToScore.lget();
|
||||
bytesValues.setDocument(currentDocId);
|
||||
long index = parentIds.find(bytesValues.nextValue(), bytesValues.currentValueHash());
|
||||
if (index != -1) {
|
||||
currentScore = scores.get(index);
|
||||
remaining--;
|
||||
return currentDocId;
|
||||
} else {
|
||||
|
@ -341,11 +370,11 @@ public class ChildrenQuery extends Query {
|
|||
|
||||
private final class AvgParentScorer extends ParentScorer {
|
||||
|
||||
final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
|
||||
final IntArray occurrences;
|
||||
|
||||
AvgParentScorer(Weight weight, IdReaderTypeCache idTypeCache, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount, DocIdSetIterator parentsIterator) {
|
||||
super(weight, idTypeCache, uidToScore, parentsIterator);
|
||||
this.uidToCount = uidToCount;
|
||||
AvgParentScorer(Weight weight, BytesValues values, BytesRefHash parentIds, FloatArray scores, IntArray occurrences, DocIdSetIterator parentsIterator) {
|
||||
super(weight, values, parentIds, scores, parentsIterator);
|
||||
this.occurrences = occurrences;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -360,11 +389,11 @@ public class ChildrenQuery extends Query {
|
|||
return currentDocId;
|
||||
}
|
||||
|
||||
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
|
||||
if (uidToScore.containsKey(uid)) {
|
||||
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
|
||||
currentScore = uidToScore.lget();
|
||||
currentScore /= uidToCount.get(uid);
|
||||
bytesValues.setDocument(currentDocId);
|
||||
long index = parentIds.find(bytesValues.nextValue(), bytesValues.currentValueHash());
|
||||
if (index != -1) {
|
||||
currentScore = scores.get(index);
|
||||
currentScore /= occurrences.get(index);
|
||||
remaining--;
|
||||
return currentDocId;
|
||||
}
|
||||
|
@ -382,11 +411,11 @@ public class ChildrenQuery extends Query {
|
|||
return currentDocId;
|
||||
}
|
||||
|
||||
HashedBytesArray uid = idTypeCache.idByDoc(currentDocId);
|
||||
if (uidToScore.containsKey(uid)) {
|
||||
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
|
||||
currentScore = uidToScore.lget();
|
||||
currentScore /= uidToCount.get(uid);
|
||||
bytesValues.setDocument(currentDocId);
|
||||
long index = parentIds.find(bytesValues.nextValue(), bytesValues.currentValueHash());
|
||||
if (index != -1) {
|
||||
currentScore = scores.get(index);
|
||||
currentScore /= occurrences.get(index);
|
||||
remaining--;
|
||||
return currentDocId;
|
||||
} else {
|
||||
|
@ -397,16 +426,72 @@ public class ChildrenQuery extends Query {
|
|||
|
||||
}
|
||||
|
||||
private static class ChildUidCollector extends ParentIdCollector {
|
||||
private abstract static class ParentIdAndScoreCollector extends NoopCollector {
|
||||
|
||||
protected final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
|
||||
private final ScoreType scoreType;
|
||||
final BytesRefHash parentIds;
|
||||
protected final String parentType;
|
||||
private final ParentChildIndexFieldData indexFieldData;
|
||||
private final PageCacheRecycler pageCacheRecycler;
|
||||
|
||||
protected FloatArray scores;
|
||||
|
||||
protected BytesValues.WithOrdinals values;
|
||||
protected Ordinals.Docs ordinals;
|
||||
protected Scorer scorer;
|
||||
|
||||
ChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore) {
|
||||
super(childType, searchContext);
|
||||
this.uidToScore = uidToScore;
|
||||
this.scoreType = scoreType;
|
||||
// This remembers what ordinals have already been seen in the current segment
|
||||
// and avoids fetching the actual id from field data and checking whether it already exists in parentIds
|
||||
protected LongArray parentIdsIndex;
|
||||
|
||||
private ParentIdAndScoreCollector(ParentChildIndexFieldData indexFieldData, String parentType, SearchContext searchContext) {
|
||||
this.parentType = parentType;
|
||||
this.indexFieldData = indexFieldData;
|
||||
this.pageCacheRecycler = searchContext.pageCacheRecycler();
|
||||
this.parentIds = new BytesRefHash(512, pageCacheRecycler);
|
||||
this.scores = BigArrays.newFloatArray(512, pageCacheRecycler, false);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void collect(int doc) throws IOException {
|
||||
if (values != null) {
|
||||
long ord = ordinals.getOrd(doc);
|
||||
long parentIdx = parentIdsIndex.get(ord);
|
||||
if (parentIdx < 0) {
|
||||
final BytesRef bytes = values.getValueByOrd(ord);
|
||||
final int hash = values.currentValueHash();
|
||||
parentIdx = parentIds.add(bytes, hash);
|
||||
if (parentIdx < 0) {
|
||||
parentIdx = -parentIdx - 1;
|
||||
doScore(parentIdx);
|
||||
} else {
|
||||
scores = BigArrays.grow(scores, parentIdx + 1);
|
||||
scores.set(parentIdx, scorer.score());
|
||||
}
|
||||
parentIdsIndex.set(ord, parentIdx);
|
||||
} else {
|
||||
doScore(parentIdx);
|
||||
}
|
||||
}
|
||||
}
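The collect() method above leans on the add() behaviour of the BytesRefHash as used in this diff: a key seen for the first time gets a fresh slot index back, while a duplicate returns a negative value that encodes the existing slot, which the code decodes with parentIdx = -parentIdx - 1 before folding the new child score in via doScore(). A tiny HashMap-based model of that contract, with made-up names, just to make the sign convention concrete:

import java.util.HashMap;
import java.util.Map;

// Simplified model of the add-or-find contract: a new key gets the next free slot,
// a duplicate returns -1 - existingSlot so the caller can distinguish the two cases.
class AddOrFindSketch {
    private final Map<String, Integer> slots = new HashMap<>();

    long add(String key) {
        Integer existing = slots.get(key);
        if (existing != null) {
            return -1L - existing;   // caller decodes with: slot = -result - 1
        }
        int slot = slots.size();
        slots.put(key, slot);
        return slot;                 // first occurrence: caller grows its parallel arrays to slot + 1
    }
}

In the collector above, the non-negative branch initializes the score slot from scorer.score(), and the negative branch aggregates into the existing slot.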
|
||||
|
||||
protected void doScore(long index) throws IOException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) throws IOException {
|
||||
values = indexFieldData.load(context).getBytesValues(parentType);
|
||||
if (values != null) {
|
||||
ordinals = values.ordinals();
|
||||
final long maxOrd = ordinals.getMaxOrd();
|
||||
if (parentIdsIndex == null) {
|
||||
parentIdsIndex = BigArrays.newLongArray(BigArrays.overSize(maxOrd), pageCacheRecycler, false);
|
||||
} else if (parentIdsIndex.size() < maxOrd) {
|
||||
parentIdsIndex = BigArrays.grow(parentIdsIndex, maxOrd);
|
||||
}
|
||||
parentIdsIndex.fill(0, maxOrd, -1L);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -414,50 +499,71 @@ public class ChildrenQuery extends Query {
|
|||
this.scorer = scorer;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private final static class SumCollector extends ParentIdAndScoreCollector {
|
||||
|
||||
private SumCollector(ParentChildIndexFieldData indexFieldData, String parentType, SearchContext searchContext) {
|
||||
super(indexFieldData, parentType, searchContext);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
|
||||
protected void doScore(long index) throws IOException {
|
||||
scores.increment(index, scorer.score());
|
||||
}
|
||||
}
|
||||
|
||||
private final static class MaxCollector extends ParentIdAndScoreCollector {
|
||||
|
||||
private MaxCollector(ParentChildIndexFieldData indexFieldData, String childType, SearchContext searchContext) {
|
||||
super(indexFieldData, childType, searchContext);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doScore(long index) throws IOException {
|
||||
float currentScore = scorer.score();
|
||||
switch (scoreType) {
|
||||
case SUM:
|
||||
uidToScore.addTo(parentUid, currentScore);
|
||||
break;
|
||||
case MAX:
|
||||
if (uidToScore.containsKey(parentUid)) {
|
||||
float previousScore = uidToScore.lget();
|
||||
if (currentScore > previousScore) {
|
||||
uidToScore.lset(currentScore);
|
||||
}
|
||||
if (currentScore > scores.get(index)) {
|
||||
scores.set(index, currentScore);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private final static class AvgCollector extends ParentIdAndScoreCollector {
|
||||
|
||||
private IntArray occurrences;
|
||||
|
||||
AvgCollector(ParentChildIndexFieldData indexFieldData, String childType, SearchContext searchContext) {
|
||||
super(indexFieldData, childType, searchContext);
|
||||
this.occurrences = BigArrays.newIntArray(512, searchContext.pageCacheRecycler(), false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect(int doc) throws IOException {
|
||||
if (values != null) {
|
||||
int ord = (int) ordinals.getOrd(doc);
|
||||
long parentIdx = parentIdsIndex.get(ord);
|
||||
if (parentIdx < 0) {
|
||||
final BytesRef bytes = values.getValueByOrd(ord);
|
||||
final int hash = values.currentValueHash();
|
||||
parentIdx = parentIds.add(bytes, hash);
|
||||
if (parentIdx < 0) {
|
||||
parentIdx = -parentIdx - 1;
|
||||
scores.increment(parentIdx, scorer.score());
|
||||
occurrences.increment(parentIdx, 1);
|
||||
} else {
|
||||
uidToScore.put(parentUid, currentScore);
|
||||
scores = BigArrays.grow(scores, parentIdx + 1);
|
||||
scores.set(parentIdx, scorer.score());
|
||||
occurrences = BigArrays.grow(occurrences, parentIdx + 1);
|
||||
occurrences.set(parentIdx, 1);
|
||||
}
|
||||
break;
|
||||
case AVG:
|
||||
assert false : "AVG has its own collector";
|
||||
default:
|
||||
assert false : "Are we missing a score type here? -- " + scoreType;
|
||||
break;
|
||||
parentIdsIndex.set(ord, parentIdx);
|
||||
} else {
|
||||
scores.increment(parentIdx, scorer.score());
|
||||
occurrences.increment(parentIdx, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
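For ScoreType.AVG the collector above maintains two parallel arrays keyed by parent slot, a running score sum and an occurrence count, and AvgParentScorer divides the two when a parent doc is actually emitted. A short stand-alone model of that bookkeeping with plain arrays (the class and method names are invented for the sketch):

import java.util.Arrays;

// Simplified model of the AVG bookkeeping: sum and count per parent slot, divide on read.
class AvgScoreSketch {
    private float[] scoreSums = new float[16];
    private int[] counts = new int[16];

    void onChildHit(int parentSlot, float childScore) {
        ensureCapacity(parentSlot + 1);
        scoreSums[parentSlot] += childScore; // same increment the SUM case performs
        counts[parentSlot] += 1;             // the extra array only the AVG case needs
    }

    float parentScore(int parentSlot) {
        return scoreSums[parentSlot] / counts[parentSlot]; // done lazily, like AvgParentScorer
    }

    private void ensureCapacity(int size) {
        if (size > scoreSums.length) {
            int newSize = Math.max(size, scoreSums.length * 2);
            scoreSums = Arrays.copyOf(scoreSums, newSize);
            counts = Arrays.copyOf(counts, newSize);
        }
    }
}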
|
||||
|
||||
private final static class AvgChildUidCollector extends ChildUidCollector {
|
||||
|
||||
private final ObjectIntOpenHashMap<HashedBytesArray> uidToCount;
|
||||
|
||||
AvgChildUidCollector(ScoreType scoreType, SearchContext searchContext, String childType, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, ObjectIntOpenHashMap<HashedBytesArray> uidToCount) {
|
||||
super(scoreType, searchContext, childType, uidToScore);
|
||||
this.uidToCount = uidToCount;
|
||||
assert scoreType == ScoreType.AVG;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void collect(int doc, HashedBytesArray parentUid) throws IOException {
|
||||
float currentScore = scorer.score();
|
||||
uidToCount.addTo(parentUid, 1);
|
||||
uidToScore.addTo(parentUid, currentScore);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -18,22 +18,23 @@
|
|||
*/
|
||||
package org.elasticsearch.index.search.child;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectOpenHashSet;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.FixedBitSet;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.lucene.docset.DocIdSets;
|
||||
import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
|
||||
import org.elasticsearch.common.lucene.search.NoopCollector;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.recycler.Recycler;
|
||||
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
|
||||
import org.elasticsearch.index.fielddata.BytesValues;
|
||||
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.search.aggregations.bucket.BytesRefHash;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -44,6 +45,7 @@ import java.util.Set;
|
|||
*/
|
||||
public class ParentConstantScoreQuery extends Query {
|
||||
|
||||
private final ParentChildIndexFieldData parentChildIndexFieldData;
|
||||
private final Query originalParentQuery;
|
||||
private final String parentType;
|
||||
private final Filter childrenFilter;
|
||||
|
@ -51,7 +53,8 @@ public class ParentConstantScoreQuery extends Query {
|
|||
private Query rewrittenParentQuery;
|
||||
private IndexReader rewriteIndexReader;
|
||||
|
||||
public ParentConstantScoreQuery(Query parentQuery, String parentType, Filter childrenFilter) {
|
||||
public ParentConstantScoreQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) {
|
||||
this.parentChildIndexFieldData = parentChildIndexFieldData;
|
||||
this.originalParentQuery = parentQuery;
|
||||
this.parentType = parentType;
|
||||
this.childrenFilter = childrenFilter;
|
||||
|
@ -75,9 +78,8 @@ public class ParentConstantScoreQuery extends Query {
|
|||
@Override
|
||||
public Weight createWeight(IndexSearcher searcher) throws IOException {
|
||||
SearchContext searchContext = SearchContext.current();
|
||||
searchContext.idCache().refresh(searcher.getTopReaderContext().leaves());
|
||||
Recycler.V<ObjectOpenHashSet<HashedBytesArray>> parents = searchContext.cacheRecycler().hashSet(-1);
|
||||
ParentUidsCollector collector = new ParentUidsCollector(parents.v(), searchContext, parentType);
|
||||
BytesRefHash parentIds = new BytesRefHash(512, searchContext.pageCacheRecycler());
|
||||
ParentIdsCollector collector = new ParentIdsCollector(parentType, parentChildIndexFieldData, parentIds);
|
||||
|
||||
final Query parentQuery;
|
||||
if (rewrittenParentQuery != null) {
|
||||
|
@ -90,11 +92,11 @@ public class ParentConstantScoreQuery extends Query {
|
|||
indexSearcher.setSimilarity(searcher.getSimilarity());
|
||||
indexSearcher.search(parentQuery, collector);
|
||||
|
||||
if (parents.v().isEmpty()) {
|
||||
if (parentIds.size() == 0) {
|
||||
return Queries.newMatchNoDocsQuery().createWeight(searcher);
|
||||
}
|
||||
|
||||
ChildrenWeight childrenWeight = new ChildrenWeight(childrenFilter, searchContext, parents);
|
||||
ChildrenWeight childrenWeight = new ChildrenWeight(childrenFilter, parentIds);
|
||||
searchContext.addReleasable(childrenWeight);
|
||||
return childrenWeight;
|
||||
}
|
||||
|
@ -102,16 +104,17 @@ public class ParentConstantScoreQuery extends Query {
|
|||
private final class ChildrenWeight extends Weight implements Releasable {
|
||||
|
||||
private final Filter childrenFilter;
|
||||
private final SearchContext searchContext;
|
||||
private final Recycler.V<ObjectOpenHashSet<HashedBytesArray>> parents;
|
||||
private final BytesRefHash parentIds;
|
||||
|
||||
private float queryNorm;
|
||||
private float queryWeight;
|
||||
|
||||
private ChildrenWeight(Filter childrenFilter, SearchContext searchContext, Recycler.V<ObjectOpenHashSet<HashedBytesArray>> parents) {
|
||||
private FixedBitSet seenOrdinalsCache;
|
||||
private FixedBitSet seenMatchedOrdinalsCache;
|
||||
|
||||
private ChildrenWeight(Filter childrenFilter, BytesRefHash parentIds) {
|
||||
this.childrenFilter = new ApplyAcceptedDocsFilter(childrenFilter);
|
||||
this.searchContext = searchContext;
|
||||
this.parents = parents;
|
||||
this.parentIds = parentIds;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -143,11 +146,22 @@ public class ParentConstantScoreQuery extends Query {
|
|||
return null;
|
||||
}
|
||||
|
||||
IdReaderTypeCache idReaderTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
|
||||
if (idReaderTypeCache != null) {
|
||||
BytesValues.WithOrdinals bytesValues = parentChildIndexFieldData.load(context).getBytesValues(parentType);
|
||||
if (bytesValues != null) {
|
||||
DocIdSetIterator innerIterator = childrenDocIdSet.iterator();
|
||||
if (innerIterator != null) {
|
||||
ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(innerIterator, parents.v(), idReaderTypeCache);
|
||||
Ordinals.Docs ordinals = bytesValues.ordinals();
|
||||
int maxOrd = (int) ordinals.getMaxOrd();
|
||||
if (seenOrdinalsCache == null || seenOrdinalsCache.length() < maxOrd) {
|
||||
seenOrdinalsCache = new FixedBitSet(maxOrd);
|
||||
seenMatchedOrdinalsCache = new FixedBitSet(maxOrd);
|
||||
} else {
|
||||
seenOrdinalsCache.clear(0, maxOrd);
|
||||
seenMatchedOrdinalsCache.clear(0, maxOrd);
|
||||
}
|
||||
ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator(
|
||||
innerIterator, parentIds, bytesValues, ordinals, seenOrdinalsCache, seenMatchedOrdinalsCache
|
||||
);
|
||||
return ConstantScorer.create(childrenDocIdIterator, this, queryWeight);
|
||||
}
|
||||
}
|
||||
|
@ -156,53 +170,78 @@ public class ParentConstantScoreQuery extends Query {
|
|||
|
||||
@Override
|
||||
public boolean release() throws ElasticsearchException {
|
||||
Releasables.release(parents);
|
||||
Releasables.release(parentIds);
|
||||
return true;
|
||||
}
|
||||
|
||||
private final class ChildrenDocIdIterator extends FilteredDocIdSetIterator {
|
||||
|
||||
private final ObjectOpenHashSet<HashedBytesArray> parents;
|
||||
private final IdReaderTypeCache idReaderTypeCache;
|
||||
private final BytesRefHash parentIds;
|
||||
private final BytesValues.WithOrdinals bytesValues;
|
||||
private final Ordinals.Docs ordinals;
|
||||
|
||||
ChildrenDocIdIterator(DocIdSetIterator innerIterator, ObjectOpenHashSet<HashedBytesArray> parents, IdReaderTypeCache idReaderTypeCache) {
|
||||
// This remembers what ordinals have already been emitted in the current segment
|
||||
// and avoids fetching the actual id from field data and checking whether it already exists in parentIds
|
||||
private final FixedBitSet seenOrdinals;
|
||||
private final FixedBitSet seenMatchedOrdinals;
|
||||
|
||||
ChildrenDocIdIterator(DocIdSetIterator innerIterator, BytesRefHash parentIds, BytesValues.WithOrdinals bytesValues, Ordinals.Docs ordinals, FixedBitSet seenOrdinals, FixedBitSet seenMatchedOrdinals) {
|
||||
super(innerIterator);
|
||||
this.parents = parents;
|
||||
this.idReaderTypeCache = idReaderTypeCache;
|
||||
this.parentIds = parentIds;
|
||||
this.bytesValues = bytesValues;
|
||||
this.ordinals = ordinals;
|
||||
this.seenOrdinals = seenOrdinals;
|
||||
this.seenMatchedOrdinals = seenMatchedOrdinals;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean match(int doc) {
|
||||
return parents.contains(idReaderTypeCache.parentIdByDoc(doc));
|
||||
int ord = (int) ordinals.getOrd(doc);
|
||||
if (ord == Ordinals.MISSING_ORDINAL) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!seenOrdinals.get(ord)) {
|
||||
seenOrdinals.set(ord);
|
||||
if (parentIds.find(bytesValues.getValueByOrd(ord), bytesValues.currentValueHash()) >= 0) {
|
||||
seenMatchedOrdinals.set(ord);
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
return seenMatchedOrdinals.get(ord);
|
||||
}
|
||||
}
|
||||
|
||||
}
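The iterator above memoizes its per-ordinal decision with two bitsets: seenOrdinals records that an ordinal has been decided at all, and seenMatchedOrdinals records what the decision was, so the hash lookup of the parent id runs once per distinct ordinal per segment. A compact stand-alone sketch of that pattern with java.util.BitSet (names are illustrative):

import java.util.BitSet;
import java.util.function.IntPredicate;

// Simplified model: cache a boolean per ordinal with two bitsets,
// one for "already decided", one for the decision itself.
class OrdinalMatchCacheSketch {
    private final BitSet decided;
    private final BitSet matched;

    OrdinalMatchCacheSketch(int maxOrd) {
        decided = new BitSet(maxOrd);
        matched = new BitSet(maxOrd);
    }

    boolean match(int ord, IntPredicate expensiveLookup) {
        if (decided.get(ord)) {
            return matched.get(ord);             // later docs with the same ordinal reuse the answer
        }
        decided.set(ord);
        boolean hit = expensiveLookup.test(ord); // e.g. the BytesRefHash lookup of the parent id
        if (hit) {
            matched.set(ord);
        }
        return hit;
    }
}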
|
||||
}
|
||||
|
||||
private final static class ParentUidsCollector extends NoopCollector {
|
||||
private final static class ParentIdsCollector extends NoopCollector {
|
||||
|
||||
private final ObjectOpenHashSet<HashedBytesArray> collectedUids;
|
||||
private final SearchContext context;
|
||||
private final BytesRefHash parentIds;
|
||||
private final ParentChildIndexFieldData indexFieldData;
|
||||
private final String parentType;
|
||||
|
||||
private IdReaderTypeCache typeCache;
|
||||
private BytesValues values;
|
||||
|
||||
ParentUidsCollector(ObjectOpenHashSet<HashedBytesArray> collectedUids, SearchContext context, String parentType) {
|
||||
this.collectedUids = collectedUids;
|
||||
this.context = context;
|
||||
ParentIdsCollector(String parentType, ParentChildIndexFieldData indexFieldData, BytesRefHash parentIds) {
|
||||
this.parentIds = parentIds;
|
||||
this.indexFieldData = indexFieldData;
|
||||
this.parentType = parentType;
|
||||
}
|
||||
|
||||
public void collect(int doc) throws IOException {
|
||||
// It can happen that a particular segment has no documents for a specific type. This check prevents an NPE
|
||||
if (typeCache != null) {
|
||||
collectedUids.add(typeCache.idByDoc(doc));
|
||||
if (values != null) {
|
||||
values.setDocument(doc);
|
||||
parentIds.add(values.nextValue(), values.currentValueHash());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext readerContext) throws IOException {
|
||||
typeCache = context.idCache().reader(readerContext.reader()).type(parentType);
|
||||
values = indexFieldData.load(readerContext).getBytesValues(parentType);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,59 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.search.child;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.common.lucene.search.NoopCollector;
|
||||
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
/**
|
||||
* A simple collector that only collects if the doc's parent ID is not
|
||||
* <code>null</code>
|
||||
*/
|
||||
abstract class ParentIdCollector extends NoopCollector {
|
||||
protected final String type;
|
||||
protected final SearchContext context;
|
||||
private IdReaderTypeCache typeCache;
|
||||
|
||||
protected ParentIdCollector(String parentType, SearchContext context) {
|
||||
this.type = parentType;
|
||||
this.context = context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void collect(int doc) throws IOException {
|
||||
if (typeCache != null) {
|
||||
HashedBytesArray parentIdByDoc = typeCache.parentIdByDoc(doc);
|
||||
if (parentIdByDoc != null) {
|
||||
collect(doc, parentIdByDoc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected abstract void collect(int doc, HashedBytesArray parentId) throws IOException;
|
||||
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext readerContext) throws IOException {
|
||||
typeCache = context.idCache().reader(readerContext.reader()).type(type);
|
||||
}
|
||||
}
|
|
@ -28,32 +28,29 @@ import org.apache.lucene.search.Filter;
|
|||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.FixedBitSet;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
import org.elasticsearch.search.aggregations.bucket.BytesRefHash;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Advantages of using this filter over Lucene's TermsFilter in the parent/child context:
|
||||
* 1) Don't need to copy all values over to a list from the id cache and then
|
||||
* copy all the id values over to one continuous byte array. Should save a lot of object creations and GCs.
|
||||
* copy all the id values over to one continuous byte array. Should save a lot of object creations and GCs.
|
||||
* 2) We filter docs by one field only.
|
||||
* 3) We can directly reference values that originate from the id cache.
|
||||
*/
|
||||
final class ParentIdsFilter extends Filter {
|
||||
|
||||
private final BytesRef parentTypeBr;
|
||||
private final Object[] keys;
|
||||
private final boolean[] allocated;
|
||||
|
||||
private final Filter nonNestedDocsFilter;
|
||||
private final BytesRefHash parentIds;
|
||||
|
||||
public ParentIdsFilter(String parentType, Object[] keys, boolean[] allocated, Filter nonNestedDocsFilter) {
|
||||
ParentIdsFilter(String parentType, Filter nonNestedDocsFilter, BytesRefHash parentIds) {
|
||||
this.nonNestedDocsFilter = nonNestedDocsFilter;
|
||||
this.parentTypeBr = new BytesRef(parentType);
|
||||
this.keys = keys;
|
||||
this.allocated = allocated;
|
||||
this.parentIds = parentIds;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -78,13 +75,9 @@ final class ParentIdsFilter extends Filter {
|
|||
|
||||
DocsEnum docsEnum = null;
|
||||
FixedBitSet result = null;
|
||||
for (int i = 0; i < allocated.length; i++) {
|
||||
if (!allocated[i]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
idSpare.bytes = ((HashedBytesArray) keys[i]).toBytes();
|
||||
idSpare.length = idSpare.bytes.length;
|
||||
long size = parentIds.size();
|
||||
for (int i = 0; i < size; i++) {
|
||||
parentIds.get(i, idSpare);
|
||||
Uid.createUidAsBytes(parentTypeBr, idSpare, uidSpare);
|
||||
if (termsEnum.seekExact(uidSpare)) {
|
||||
int docId;
|
||||
|
|
|
@ -18,23 +18,27 @@
|
|||
*/
|
||||
package org.elasticsearch.index.search.child;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectFloatOpenHashMap;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.FixedBitSet;
|
||||
import org.apache.lucene.util.ToStringUtils;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.lucene.docset.DocIdSets;
|
||||
import org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter;
|
||||
import org.elasticsearch.common.lucene.search.NoopCollector;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.recycler.Recycler;
|
||||
import org.elasticsearch.index.cache.id.IdReaderTypeCache;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.FloatArray;
|
||||
import org.elasticsearch.common.util.LongArray;
|
||||
import org.elasticsearch.index.fielddata.BytesValues;
|
||||
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
|
||||
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
|
||||
import org.elasticsearch.search.aggregations.bucket.BytesRefHash;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -43,10 +47,11 @@ import java.util.Set;
|
|||
/**
|
||||
* A query implementation that executes the wrapped parent query and
|
||||
* connects the matching parent docs to the related child documents
|
||||
* using the {@link IdReaderTypeCache}.
|
||||
* using the {@link ParentChildIndexFieldData}.
|
||||
*/
|
||||
public class ParentQuery extends Query {
|
||||
|
||||
private final ParentChildIndexFieldData parentChildIndexFieldData;
|
||||
private final Query originalParentQuery;
|
||||
private final String parentType;
|
||||
private final Filter childrenFilter;
|
||||
|
@ -54,7 +59,8 @@ public class ParentQuery extends Query {
|
|||
private Query rewrittenParentQuery;
|
||||
private IndexReader rewriteIndexReader;
|
||||
|
||||
public ParentQuery(Query parentQuery, String parentType, Filter childrenFilter) {
|
||||
public ParentQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) {
|
||||
this.parentChildIndexFieldData = parentChildIndexFieldData;
|
||||
this.originalParentQuery = parentQuery;
|
||||
this.parentType = parentType;
|
||||
this.childrenFilter = childrenFilter;
|
||||
|
@ -117,9 +123,7 @@ public class ParentQuery extends Query {
|
|||
@Override
|
||||
public Weight createWeight(IndexSearcher searcher) throws IOException {
|
||||
SearchContext searchContext = SearchContext.current();
|
||||
searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());
|
||||
Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore = searchContext.cacheRecycler().objectFloatMap(-1);
|
||||
ParentUidCollector collector = new ParentUidCollector(uidToScore.v(), searchContext, parentType);
|
||||
ParentIdAndScoreCollector collector = new ParentIdAndScoreCollector(searchContext, parentChildIndexFieldData, parentType);
|
||||
|
||||
final Query parentQuery;
|
||||
if (rewrittenParentQuery == null) {
|
||||
|
@ -131,40 +135,47 @@ public class ParentQuery extends Query {
|
|||
IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
|
||||
indexSearcher.setSimilarity(searcher.getSimilarity());
|
||||
indexSearcher.search(parentQuery, collector);
|
||||
FloatArray scores = collector.scores;
|
||||
BytesRefHash parentIds = collector.parentIds;
|
||||
|
||||
if (uidToScore.v().isEmpty()) {
|
||||
uidToScore.release();
|
||||
if (parentIds.size() == 0) {
|
||||
Releasables.release(parentIds, scores);
|
||||
return Queries.newMatchNoDocsQuery().createWeight(searcher);
|
||||
}
|
||||
|
||||
ChildWeight childWeight = new ChildWeight(parentQuery.createWeight(searcher), childrenFilter, searchContext, uidToScore);
|
||||
ChildWeight childWeight = new ChildWeight(searchContext, parentQuery.createWeight(searcher), childrenFilter, parentIds, scores);
|
||||
searchContext.addReleasable(childWeight);
|
||||
return childWeight;
|
||||
}
|
||||
|
||||
private static class ParentUidCollector extends NoopCollector {
|
||||
private static class ParentIdAndScoreCollector extends NoopCollector {
|
||||
|
||||
private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
|
||||
private final SearchContext searchContext;
|
||||
private final BytesRefHash parentIds;
|
||||
private FloatArray scores;
|
||||
private final ParentChildIndexFieldData indexFieldData;
|
||||
private final String parentType;
|
||||
|
||||
private Scorer scorer;
|
||||
private IdReaderTypeCache typeCache;
|
||||
private BytesValues values;
|
||||
|
||||
ParentUidCollector(ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, SearchContext searchContext, String parentType) {
|
||||
this.uidToScore = uidToScore;
|
||||
this.searchContext = searchContext;
|
||||
ParentIdAndScoreCollector(SearchContext searchContext, ParentChildIndexFieldData indexFieldData, String parentType) {
|
||||
this.parentIds = new BytesRefHash(512, searchContext.pageCacheRecycler());
|
||||
this.scores = BigArrays.newFloatArray(512, searchContext.pageCacheRecycler(), false);
|
||||
this.indexFieldData = indexFieldData;
|
||||
this.parentType = parentType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void collect(int doc) throws IOException {
|
||||
if (typeCache == null) {
|
||||
return;
|
||||
// It can happen that a particular segment has no documents for a specific type. This check prevents an NPE
|
||||
if (values != null) {
|
||||
values.setDocument(doc);
|
||||
long index = parentIds.add(values.nextValue(), values.currentValueHash());
|
||||
if (index >= 0) {
|
||||
scores = BigArrays.grow(scores, index + 1);
|
||||
scores.set(index, scorer.score());
|
||||
}
|
||||
}
|
||||
|
||||
HashedBytesArray parentUid = typeCache.idByDoc(doc);
|
||||
uidToScore.put(parentUid, scorer.score());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -174,22 +185,27 @@ public class ParentQuery extends Query {
|
|||
|
||||
@Override
|
||||
public void setNextReader(AtomicReaderContext context) throws IOException {
|
||||
typeCache = searchContext.idCache().reader(context.reader()).type(parentType);
|
||||
values = indexFieldData.load(context).getBytesValues(parentType);
|
||||
}
|
||||
}
|
||||
|
||||
private class ChildWeight extends Weight implements Releasable {
|
||||
|
||||
private final SearchContext searchContext;
|
||||
private final Weight parentWeight;
|
||||
private final Filter childrenFilter;
|
||||
private final SearchContext searchContext;
|
||||
private final Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore;
|
||||
private final BytesRefHash parentIds;
|
||||
private final FloatArray scores;
|
||||
|
||||
private ChildWeight(Weight parentWeight, Filter childrenFilter, SearchContext searchContext, Recycler.V<ObjectFloatOpenHashMap<HashedBytesArray>> uidToScore) {
|
||||
private FixedBitSet seenOrdinalsCache;
|
||||
private LongArray parentIdsIndexCache;
|
||||
|
||||
private ChildWeight(SearchContext searchContext, Weight parentWeight, Filter childrenFilter, BytesRefHash parentIds, FloatArray scores) {
|
||||
this.searchContext = searchContext;
|
||||
this.parentWeight = parentWeight;
|
||||
this.childrenFilter = new ApplyAcceptedDocsFilter(childrenFilter);
|
||||
this.searchContext = searchContext;
|
||||
this.uidToScore = uidToScore;
|
||||
this.parentIds = parentIds;
|
||||
this.scores = scores;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -219,35 +235,60 @@ public class ParentQuery extends Query {
|
|||
if (DocIdSets.isEmpty(childrenDocSet)) {
|
||||
return null;
|
||||
}
|
||||
IdReaderTypeCache idTypeCache = searchContext.idCache().reader(context.reader()).type(parentType);
|
||||
if (idTypeCache == null) {
|
||||
BytesValues.WithOrdinals bytesValues = parentChildIndexFieldData.load(context).getBytesValues(parentType);
|
||||
if (bytesValues == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return new ChildScorer(this, uidToScore.v(), childrenDocSet.iterator(), idTypeCache);
|
||||
Ordinals.Docs ordinals = bytesValues.ordinals();
|
||||
final int maxOrd = (int) ordinals.getMaxOrd();
|
||||
if (parentIdsIndexCache == null) {
|
||||
parentIdsIndexCache = BigArrays.newLongArray(BigArrays.overSize(maxOrd), searchContext.pageCacheRecycler(), false);
|
||||
} else if (parentIdsIndexCache.size() < maxOrd) {
|
||||
parentIdsIndexCache = BigArrays.grow(parentIdsIndexCache, maxOrd);
|
||||
}
|
||||
parentIdsIndexCache.fill(0, maxOrd, -1L);
|
||||
if (seenOrdinalsCache == null || seenOrdinalsCache.length() < maxOrd) {
|
||||
seenOrdinalsCache = new FixedBitSet(maxOrd);
|
||||
} else {
|
||||
seenOrdinalsCache.clear(0, maxOrd);
|
||||
}
|
||||
return new ChildScorer(this, parentIds, scores, childrenDocSet.iterator(), bytesValues, ordinals, seenOrdinalsCache, parentIdsIndexCache);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean release() throws ElasticsearchException {
|
||||
Releasables.release(uidToScore);
|
||||
Releasables.release(parentIds, scores);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
private static class ChildScorer extends Scorer {
|
||||
|
||||
private final ObjectFloatOpenHashMap<HashedBytesArray> uidToScore;
|
||||
private final BytesRefHash parentIds;
|
||||
private final FloatArray scores;
|
||||
private final DocIdSetIterator childrenIterator;
|
||||
private final IdReaderTypeCache typeCache;
|
||||
private final BytesValues.WithOrdinals bytesValues;
|
||||
private final Ordinals.Docs ordinals;
|
||||
|
||||
// This remembers what ordinals have already been seen in the current segment
|
||||
// and avoids fetching the actual id from field data and checking whether it already exists in parentIds
|
||||
private final FixedBitSet seenOrdinals;
|
||||
private final LongArray parentIdsIndex;
|
||||
|
||||
private int currentChildDoc = -1;
|
||||
private float currentScore;
|
||||
|
||||
ChildScorer(Weight weight, ObjectFloatOpenHashMap<HashedBytesArray> uidToScore, DocIdSetIterator childrenIterator, IdReaderTypeCache typeCache) {
|
||||
ChildScorer(Weight weight, BytesRefHash parentIds, FloatArray scores, DocIdSetIterator childrenIterator,
|
||||
BytesValues.WithOrdinals bytesValues, Ordinals.Docs ordinals, FixedBitSet seenOrdinals, LongArray parentIdsIndex) {
|
||||
super(weight);
|
||||
this.uidToScore = uidToScore;
|
||||
this.parentIds = parentIds;
|
||||
this.scores = scores;
|
||||
this.childrenIterator = childrenIterator;
|
||||
this.typeCache = typeCache;
|
||||
this.bytesValues = bytesValues;
|
||||
this.ordinals = ordinals;
|
||||
this.seenOrdinals = seenOrdinals;
|
||||
this.parentIdsIndex = parentIdsIndex;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -275,14 +316,25 @@ public class ParentQuery extends Query {
|
|||
return currentChildDoc;
|
||||
}
|
||||
|
||||
HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc);
|
||||
if (uid == null) {
|
||||
int ord = (int) ordinals.getOrd(currentChildDoc);
|
||||
if (ord == Ordinals.MISSING_ORDINAL) {
|
||||
continue;
|
||||
}
|
||||
if (uidToScore.containsKey(uid)) {
|
||||
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
|
||||
currentScore = uidToScore.lget();
|
||||
return currentChildDoc;
|
||||
|
||||
if (!seenOrdinals.get(ord)) {
|
||||
seenOrdinals.set(ord);
|
||||
long parentIdx = parentIds.find(bytesValues.getValueByOrd(ord), bytesValues.currentValueHash());
|
||||
if (parentIdx != -1) {
|
||||
currentScore = scores.get(parentIdx);
|
||||
parentIdsIndex.set(ord, parentIdx);
|
||||
return currentChildDoc;
|
||||
}
|
||||
} else {
|
||||
long parentIdx = parentIdsIndex.get(ord);
|
||||
if (parentIdx != -1) {
|
||||
currentScore = scores.get(parentIdx);
|
||||
return currentChildDoc;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
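ParentQuery's ChildScorer above takes the same memoization one step further: instead of caching a yes/no answer it caches the parent's slot in parentIds per ordinal, in a LongArray pre-filled with -1, so a repeated ordinal can jump straight to its score without another hash lookup. A small illustrative model of that ord-to-slot cache with a plain long[] and a bitset (all names invented for the sketch):

import java.util.Arrays;
import java.util.BitSet;
import java.util.function.IntToLongFunction;

// Simplified model of the ord -> parent-slot cache: remember the slot (or a miss)
// for every ordinal that has already been looked up in this segment.
class OrdinalSlotCacheSketch {
    private final BitSet tried;
    private final long[] slots;  // slot in the parent-id hash, or -1 for "no matching parent"

    OrdinalSlotCacheSketch(int maxOrd) {
        tried = new BitSet(maxOrd);
        slots = new long[maxOrd];
        Arrays.fill(slots, -1L);
    }

    long slotFor(int ord, IntToLongFunction findSlot) {
        if (!tried.get(ord)) {
            tried.set(ord);
            slots[ord] = findSlot.applyAsLong(ord); // the hash lookup happens once per ordinal
        }
        return slots[ord];                          // -1 means this parent never matched the parent query
    }
}

The bitset is what stops the lookup from being repeated for ordinals that already turned out to be misses.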
|
||||
|
@ -293,17 +345,30 @@ public class ParentQuery extends Query {
|
|||
if (currentChildDoc == DocIdSetIterator.NO_MORE_DOCS) {
|
||||
return currentChildDoc;
|
||||
}
|
||||
HashedBytesArray uid = typeCache.parentIdByDoc(currentChildDoc);
|
||||
if (uid == null) {
|
||||
|
||||
int ord = (int) ordinals.getOrd(currentChildDoc);
|
||||
if (ord == Ordinals.MISSING_ORDINAL) {
|
||||
return nextDoc();
|
||||
}
|
||||
|
||||
if (uidToScore.containsKey(uid)) {
|
||||
// Can use lget b/c uidToScore is only used by one thread at the time (via CacheRecycler)
|
||||
currentScore = uidToScore.lget();
|
||||
return currentChildDoc;
|
||||
if (!seenOrdinals.get(ord)) {
|
||||
seenOrdinals.set(ord);
|
||||
long parentIdx = parentIds.find(bytesValues.getValueByOrd(ord), bytesValues.currentValueHash());
|
||||
if (parentIdx != -1) {
|
||||
currentScore = scores.get(parentIdx);
|
||||
parentIdsIndex.set(ord, parentIdx);
|
||||
return currentChildDoc;
|
||||
} else {
|
||||
return nextDoc();
|
||||
}
|
||||
} else {
|
||||
return nextDoc();
|
||||
long parentIdx = parentIdsIndex.get(ord);
|
||||
if (parentIdx != -1) {
|
||||
currentScore = scores.get(parentIdx);
|
||||
return currentChildDoc;
|
||||
} else {
|
||||
return nextDoc();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -23,15 +23,20 @@ import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.ToStringUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.cache.recycler.CacheRecycler;
import org.elasticsearch.common.bytes.HashedBytesArray;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.search.EmptyScorer;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.index.fielddata.BytesValues;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
|
@@ -57,6 +62,7 @@ public class TopChildrenQuery extends Query {

private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator();

private final ParentChildIndexFieldData parentChildIndexFieldData;
private final CacheRecycler cacheRecycler;
private final String parentType;
private final String childType;
|
@@ -64,13 +70,15 @@ public class TopChildrenQuery extends Query {
private final int factor;
private final int incrementalFactor;
private final Query originalChildQuery;
private final Filter nonNestedDocsFilter;

// This field will hold the rewritten form of originalChildQuery, so that we can reuse it
private Query rewrittenChildQuery;
private IndexReader rewriteIndexReader;

// Note, the query is expected to already be filtered to only child type docs
public TopChildrenQuery(Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, CacheRecycler cacheRecycler) {
public TopChildrenQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, CacheRecycler cacheRecycler, Filter nonNestedDocsFilter) {
this.parentChildIndexFieldData = parentChildIndexFieldData;
this.originalChildQuery = childQuery;
this.childType = childType;
this.parentType = parentType;
|
@@ -78,6 +86,7 @@ public class TopChildrenQuery extends Query {
this.factor = factor;
this.incrementalFactor = incrementalFactor;
this.cacheRecycler = cacheRecycler;
this.nonNestedDocsFilter = nonNestedDocsFilter;
}

// Rewrite invocation logic:
|
@@ -106,7 +115,6 @@ public class TopChildrenQuery extends Query {
public Weight createWeight(IndexSearcher searcher) throws IOException {
Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs = cacheRecycler.hashMap(-1);
SearchContext searchContext = SearchContext.current();
searchContext.idCache().refresh(searchContext.searcher().getTopReaderContext().leaves());

int parentHitsResolved;
int requestedDocs = (searchContext.from() + searchContext.size());
|
@@ -128,7 +136,11 @@ public class TopChildrenQuery extends Query {
while (true) {
parentDocs.v().clear();
TopDocs topChildDocs = indexSearcher.search(childQuery, numChildDocs);
parentHitsResolved = resolveParentDocuments(topChildDocs, searchContext, parentDocs);
try {
parentHitsResolved = resolveParentDocuments(topChildDocs, searchContext, parentDocs);
} catch (Exception e) {
throw new IOException(e);
}

// check if we found enough docs, if so, break
if (parentHitsResolved >= requestedDocs) {
|
@@ -150,16 +162,18 @@ public class TopChildrenQuery extends Query {
return parentWeight;
}

int resolveParentDocuments(TopDocs topDocs, SearchContext context, Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) {
int resolveParentDocuments(TopDocs topDocs, SearchContext context, Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) throws Exception {
int parentHitsResolved = 0;
Recycler.V<ObjectObjectOpenHashMap<Object, Recycler.V<IntObjectOpenHashMap<ParentDoc>>>> parentDocsPerReader = cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
int readerIndex = ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
AtomicReaderContext subContext = context.searcher().getIndexReader().leaves().get(readerIndex);
BytesValues.WithOrdinals parentValues = parentChildIndexFieldData.load(subContext).getBytesValues(parentType);
int subDoc = scoreDoc.doc - subContext.docBase;

// find the parent id
HashedBytesArray parentId = context.idCache().reader(subContext.reader()).parentIdByDoc(parentType, subDoc);
parentValues.setDocument(subDoc);
BytesRef parentId = parentValues.nextValue();
if (parentId == null) {
// no parent found
continue;
|
@@ -167,9 +181,25 @@ public class TopChildrenQuery extends Query {
// now go over and find the parent doc Id and reader tuple
for (AtomicReaderContext atomicReaderContext : context.searcher().getIndexReader().leaves()) {
AtomicReader indexReader = atomicReaderContext.reader();
int parentDocId = context.idCache().reader(indexReader).docById(parentType, parentId);
Bits liveDocs = indexReader.getLiveDocs();
if (parentDocId != -1 && (liveDocs == null || liveDocs.get(parentDocId))) {
FixedBitSet nonNestedDocs = null;
if (nonNestedDocsFilter != null) {
nonNestedDocs = (FixedBitSet) nonNestedDocsFilter.getDocIdSet(atomicReaderContext, indexReader.getLiveDocs());
}

Terms terms = indexReader.terms(UidFieldMapper.NAME);
if (terms == null) {
continue;
}
TermsEnum termsEnum = terms.iterator(null);
if (!termsEnum.seekExact(Uid.createUidAsBytes(parentType, parentId))) {
continue;
}
DocsEnum docsEnum = termsEnum.docs(indexReader.getLiveDocs(), null, DocsEnum.FLAG_NONE);
int parentDocId = docsEnum.nextDoc();
if (nonNestedDocs != null && !nonNestedDocs.get(parentDocId)) {
parentDocId = nonNestedDocs.nextSetBit(parentDocId);
}
if (parentDocId != DocsEnum.NO_MORE_DOCS) {
// we found a match, add it and break

Recycler.V<IntObjectOpenHashMap<ParentDoc>> readerParentDocs = parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
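Editor's note (not part of the commit): with the id cache gone there is no per-segment uid-to-docid map, so the hunk above locates the parent document by seeking the parent's uid term in each segment's _uid terms dictionary. A minimal sketch of that per-segment lookup, using only the calls shown above:

// Sketch: resolve a parent document id from its uid within one segment.
Terms terms = indexReader.terms(UidFieldMapper.NAME);
if (terms != null) {
    TermsEnum termsEnum = terms.iterator(null);
    if (termsEnum.seekExact(Uid.createUidAsBytes(parentType, parentId))) {
        DocsEnum docsEnum = termsEnum.docs(indexReader.getLiveDocs(), null, DocsEnum.FLAG_NONE);
        int parentDocId = docsEnum.nextDoc();   // first (and only) live doc carrying this uid
        // DocsEnum.NO_MORE_DOCS here means the parent is deleted or absent in this segment
    }
}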
|
|
|
@@ -34,7 +34,6 @@ import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.filter.ShardFilterCacheModule;
import org.elasticsearch.index.cache.id.ShardIdCacheModule;
import org.elasticsearch.index.deletionpolicy.DeletionPolicyModule;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineModule;
|
@@ -153,7 +152,6 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde

// inject workarounds for cyclic dep
indexCache.filter().setIndexService(this);
indexCache.idCache().setIndexService(this);
indexFieldData.setIndexService(this);
}

|
@@ -330,7 +328,6 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde
modules.add(new MergeSchedulerModule(indexSettings));
modules.add(new ShardFilterCacheModule());
modules.add(new ShardFieldDataModule());
modules.add(new ShardIdCacheModule());
modules.add(new TranslogModule(indexSettings));
modules.add(new EngineModule(indexSettings));
modules.add(new IndexShardGatewayModule(injector.getInstance(IndexGateway.class)));
|
|
|
@@ -20,14 +20,12 @@
package org.elasticsearch.index.shard.service;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.filter.ShardFilterCache;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.id.ShardIdCache;
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;
|
@@ -56,6 +54,7 @@ import org.elasticsearch.index.shard.IndexShardComponent;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.termvectors.ShardTermVectorService;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.ShardIndexWarmerService;
import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;
|
@@ -75,8 +74,6 @@ public interface IndexShard extends IndexShardComponent {

ShardFilterCache filterCache();

ShardIdCache idCache();

ShardFieldData fieldData();

ShardRouting routingEntry();
|
|
|
@@ -28,7 +28,6 @@ import org.apache.lucene.util.ThreadInterruptedException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ElasticsearchIllegalStateException;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans;
|
@@ -47,7 +46,6 @@ import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.filter.ShardFilterCache;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.id.ShardIdCache;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
import org.elasticsearch.index.engine.*;
|
@@ -60,6 +58,7 @@ import org.elasticsearch.index.get.ShardGetService;
import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.*;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
|
@@ -77,6 +76,7 @@ import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.termvectors.ShardTermVectorService;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.ShardIndexWarmerService;
import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesLifecycle;
|
@@ -115,7 +115,6 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
private final ShardGetService getService;
private final ShardIndexWarmerService shardWarmerService;
private final ShardFilterCache shardFilterCache;
private final ShardIdCache shardIdCache;
private final ShardFieldData shardFieldData;
private final PercolatorQueriesRegistry percolatorQueriesRegistry;
private final ShardPercolateService shardPercolateService;
|
@@ -146,8 +145,7 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
@Inject
public InternalIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, Engine engine, MergeSchedulerProvider mergeScheduler, Translog translog,
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, ShardIndexingService indexingService, ShardGetService getService, ShardSearchService searchService, ShardIndexWarmerService shardWarmerService,
ShardFilterCache shardFilterCache, ShardIdCache shardIdCache, ShardFieldData shardFieldData,
PercolatorQueriesRegistry percolatorQueriesRegistry, ShardPercolateService shardPercolateService, CodecService codecService,
ShardFilterCache shardFilterCache, ShardFieldData shardFieldData, PercolatorQueriesRegistry percolatorQueriesRegistry, ShardPercolateService shardPercolateService, CodecService codecService,
ShardTermVectorService termVectorService, IndexFieldDataService indexFieldDataService, IndexService indexService) {
super(shardId, indexSettings);
this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle;
|
@@ -167,7 +165,6 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
this.searchService = searchService;
this.shardWarmerService = shardWarmerService;
this.shardFilterCache = shardFilterCache;
this.shardIdCache = shardIdCache;
this.shardFieldData = shardFieldData;
this.percolatorQueriesRegistry = percolatorQueriesRegistry;
this.shardPercolateService = shardPercolateService;
|
@@ -246,11 +243,6 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
return this.shardFilterCache;
}

@Override
public ShardIdCache idCache() {
return this.shardIdCache;
}

@Override
public ShardFieldData fieldData() {
return this.shardFieldData;
|
@@ -559,7 +551,8 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I

@Override
public IdCacheStats idCacheStats() {
return shardIdCache.stats();
long memorySizeInBytes = shardFieldData.stats(ParentFieldMapper.NAME).getFields().get(ParentFieldMapper.NAME);
return new IdCacheStats(memorySizeInBytes);
}

@Override
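Editor's note (not part of the commit): the id_cache stats API is kept, but the number it reports is now derived from the _parent field's field data, as the hunk above shows. A minimal sketch of the derivation, assuming the shard-level field-data stats expose a per-field byte count keyed by field name:

// Sketch: id cache stats become the _parent field data footprint, same wire format as before.
long memorySizeInBytes = shardFieldData.stats(ParentFieldMapper.NAME)
        .getFields().get(ParentFieldMapper.NAME);   // bytes currently held for _parent field data
IdCacheStats stats = new IdCacheStats(memorySizeInBytes);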
|
|
|
@@ -37,7 +37,6 @@ import org.elasticsearch.common.text.StringText;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.docset.DocSetCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.fieldvisitor.JustSourceFieldsVisitor;
|
@@ -488,11 +487,6 @@ public class PercolateContext extends SearchContext {
return indexService.cache().docSet();
}

@Override
public IdCache idCache() {
return indexService.cache().idCache();
}

@Override
public long timeoutInMillis() {
return -1;
|
|
|
@@ -709,32 +709,26 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public TerminationHandle warm(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
final MapperService mapperService = indexShard.mapperService();
final Map<String, FieldMapper<?>> warmUp = new HashMap<String, FieldMapper<?>>();
boolean parentChild = false;
for (DocumentMapper docMapper : mapperService) {
for (FieldMapper<?> fieldMapper : docMapper.mappers().mappers()) {
if (fieldMapper instanceof ParentFieldMapper) {
ParentFieldMapper parentFieldMapper = (ParentFieldMapper) fieldMapper;
if (parentFieldMapper.active()) {
parentChild = true;
}
}
final FieldDataType fieldDataType = fieldMapper.fieldDataType();
if (fieldDataType == null) {
continue;
}
if (fieldDataType.getLoading() != Loading.EAGER) {
continue;
}
final String indexName = fieldMapper.names().indexName();
if (warmUp.containsKey(indexName)) {
continue;
if (fieldMapper instanceof ParentFieldMapper) {
ParentFieldMapper parentFieldMapper = (ParentFieldMapper) fieldMapper;
if (parentFieldMapper.active()) {
warmUp.put(indexName, parentFieldMapper);
}
} else if (fieldDataType.getLoading() != Loading.EAGER && warmUp.containsKey(indexName)) {
warmUp.put(indexName, fieldMapper);
}
warmUp.put(indexName, fieldMapper);
}
}
final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
final Executor executor = threadPool.executor(executor());
final CountDownLatch latch = new CountDownLatch(context.newSearcher().reader().leaves().size() * warmUp.size() + (parentChild ? 1 : 0));
final CountDownLatch latch = new CountDownLatch(context.newSearcher().reader().leaves().size() * warmUp.size());
for (final AtomicReaderContext ctx : context.newSearcher().reader().leaves()) {
for (final FieldMapper<?> fieldMapper : warmUp.values()) {
executor.execute(new Runnable() {
|
@@ -757,28 +751,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
});
}
}

if (parentChild) {
executor.execute(new Runnable() {

@Override
public void run() {
try {
final long start = System.nanoTime();
indexShard.indexService().cache().idCache().refresh(context.newSearcher().reader().leaves());
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed id_cache, took [{}]", TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to warm-up id cache", t);
} finally {
latch.countDown();
}
}

});
}

return new TerminationHandle() {
@Override
public void awaitTermination() throws InterruptedException {
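Editor's note (not part of the commit): the dedicated id-cache warm-up task above is removed. An active _parent mapper is instead put into the regular warmUp map, so _parent field data is loaded per segment by the same executor tasks as other eagerly loaded fields, and the latch no longer needs the extra (parentChild ? 1 : 0) slot.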
|
|
|
@@ -39,7 +39,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.docset.DocSetCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
|
@@ -460,10 +459,6 @@ public class DefaultSearchContext extends SearchContext {
return indexService.fieldData();
}

public IdCache idCache() {
return indexService.cache().idCache();
}

public long timeoutInMillis() {
return timeoutInMillis;
}
|
|
|
@@ -29,7 +29,6 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.docset.DocSetCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMappers;
|
@@ -189,8 +188,6 @@ public abstract class SearchContext implements Releasable {

public abstract IndexFieldDataService fieldData();

public abstract IdCache idCache();

public abstract long timeoutInMillis();

public abstract void timeoutInMillis(long timeoutInMillis);
|
|
|
@@ -1,410 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.cache.id;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.compress.CompressedString;
|
||||
import org.elasticsearch.common.inject.Injector;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexShardMissingException;
|
||||
import org.elasticsearch.index.aliases.IndexAliasesService;
|
||||
import org.elasticsearch.index.analysis.AnalysisService;
|
||||
import org.elasticsearch.index.cache.IndexCache;
|
||||
import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
|
||||
import org.elasticsearch.index.engine.IndexEngine;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldDataService;
|
||||
import org.elasticsearch.index.gateway.IndexGateway;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.MapperTestUtils;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
import org.elasticsearch.index.query.IndexQueryParserService;
|
||||
import org.elasticsearch.index.service.IndexService;
|
||||
import org.elasticsearch.index.settings.IndexSettingsService;
|
||||
import org.elasticsearch.index.shard.service.IndexShard;
|
||||
import org.elasticsearch.index.similarity.SimilarityService;
|
||||
import org.elasticsearch.index.store.IndexStore;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class SimpleIdCacheTests extends ElasticsearchTestCase {
|
||||
|
||||
@Test
|
||||
public void testDeletedDocuments() throws Exception {
|
||||
SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
|
||||
IndexWriter writer = createIndexWriter();
|
||||
// Begins with parent, ends with child docs
|
||||
final Document parent = doc("parent", "1");
|
||||
writer.addDocument(parent);
|
||||
writer.addDocument(childDoc("child", "1", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "2", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "3", "parent", "1"));
|
||||
writer.commit();
|
||||
|
||||
final String parentUid = parent.get("_uid");
|
||||
assertNotNull(parentUid);
|
||||
writer.deleteDocuments(new Term("_uid", parentUid));
|
||||
|
||||
writer.close();
|
||||
DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
|
||||
List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
|
||||
idCache.refresh(leaves);
|
||||
|
||||
assertThat(leaves.size(), equalTo(1));
|
||||
IdReaderCache readerCache = idCache.reader(leaves.get(0).reader());
|
||||
IdReaderTypeCache typeCache = readerCache.type("parent");
|
||||
assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("1"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRefresh() throws Exception {
|
||||
SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
|
||||
IndexWriter writer = createIndexWriter();
|
||||
// Begins with parent, ends with child docs
|
||||
writer.addDocument(doc("parent", "1"));
|
||||
writer.addDocument(childDoc("child", "1", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "2", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "3", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "4", "parent", "1"));
|
||||
writer.commit();
|
||||
|
||||
// Begins with child, ends with parent docs
|
||||
writer.addDocument(childDoc("child", "5", "parent", "2"));
|
||||
writer.addDocument(doc("parent", "2"));
|
||||
writer.addDocument(doc("parent", "3"));
|
||||
writer.addDocument(doc("parent", "4"));
|
||||
writer.addDocument(doc("parent", "5"));
|
||||
writer.commit();
|
||||
|
||||
// Begins with parent, child docs in the middle and ends with parent doc
|
||||
writer.addDocument(doc("parent", "6"));
|
||||
writer.addDocument(childDoc("child", "6", "parent", "6"));
|
||||
writer.addDocument(childDoc("child", "7", "parent", "6"));
|
||||
writer.addDocument(childDoc("child", "8", "parent", "5"));
|
||||
writer.addDocument(childDoc("child", "9", "parent", "4"));
|
||||
writer.addDocument(doc("parent", "7"));
|
||||
writer.commit();
|
||||
|
||||
// Garbage segment
|
||||
writer.addDocument(doc("zzz", "1"));
|
||||
writer.addDocument(doc("xxx", "2"));
|
||||
writer.addDocument(doc("aaa", "3"));
|
||||
writer.addDocument(doc("ccc", "4"));
|
||||
writer.addDocument(doc("parent", "8"));
|
||||
writer.commit();
|
||||
|
||||
writer.close();
|
||||
DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
|
||||
List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
|
||||
idCache.refresh(leaves);
|
||||
|
||||
// Verify simple id cache for segment 1
|
||||
IdReaderCache readerCache = idCache.reader(leaves.get(0).reader());
|
||||
assertThat(readerCache.type("child"), nullValue());
|
||||
IdReaderTypeCache typeCache = readerCache.type("parent");
|
||||
assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("1"));
|
||||
assertThat(typeCache.idByDoc(1), nullValue());
|
||||
assertThat(typeCache.idByDoc(2), nullValue());
|
||||
assertThat(typeCache.idByDoc(3), nullValue());
|
||||
assertThat(typeCache.idByDoc(4), nullValue());
|
||||
|
||||
assertThat(typeCache.parentIdByDoc(0), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(1).toUtf8(), equalTo("1"));
|
||||
assertThat(typeCache.parentIdByDoc(2).toUtf8(), equalTo("1"));
|
||||
assertThat(typeCache.parentIdByDoc(3).toUtf8(), equalTo("1"));
|
||||
assertThat(typeCache.parentIdByDoc(4).toUtf8(), equalTo("1"));
|
||||
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("1"))), equalTo(0));
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("2"))), equalTo(-1));
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("3"))), equalTo(-1));
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("4"))), equalTo(-1));
|
||||
|
||||
// Verify simple id cache for segment 2
|
||||
readerCache = idCache.reader(leaves.get(1).reader());
|
||||
assertThat(readerCache.type("child"), nullValue());
|
||||
typeCache = readerCache.type("parent");
|
||||
assertThat(typeCache.idByDoc(0), nullValue());
|
||||
assertThat(typeCache.idByDoc(1).toUtf8(), equalTo("2"));
|
||||
assertThat(typeCache.idByDoc(2).toUtf8(), equalTo("3"));
|
||||
assertThat(typeCache.idByDoc(3).toUtf8(), equalTo("4"));
|
||||
assertThat(typeCache.idByDoc(4).toUtf8(), equalTo("5"));
|
||||
|
||||
assertThat(typeCache.parentIdByDoc(0).toUtf8(), equalTo("2"));
|
||||
assertThat(typeCache.parentIdByDoc(1), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(2), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(3), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(4), nullValue());
|
||||
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("2"))), equalTo(1));
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("3"))), equalTo(2));
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("4"))), equalTo(3));
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("5"))), equalTo(4));
|
||||
|
||||
// Verify simple id cache for segment 3
|
||||
readerCache = idCache.reader(leaves.get(2).reader());
|
||||
assertThat(readerCache.type("child"), nullValue());
|
||||
typeCache = readerCache.type("parent");
|
||||
assertThat(typeCache.idByDoc(0).toUtf8(), equalTo("6"));
|
||||
assertThat(typeCache.idByDoc(1), nullValue());
|
||||
assertThat(typeCache.idByDoc(2), nullValue());
|
||||
assertThat(typeCache.idByDoc(3), nullValue());
|
||||
assertThat(typeCache.idByDoc(4), nullValue());
|
||||
assertThat(typeCache.idByDoc(5).toUtf8(), equalTo("7"));
|
||||
|
||||
assertThat(typeCache.parentIdByDoc(0), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(1).toUtf8(), equalTo("6"));
|
||||
assertThat(typeCache.parentIdByDoc(2).toUtf8(), equalTo("6"));
|
||||
assertThat(typeCache.parentIdByDoc(3).toUtf8(), equalTo("5"));
|
||||
assertThat(typeCache.parentIdByDoc(4).toUtf8(), equalTo("4"));
|
||||
assertThat(typeCache.parentIdByDoc(5), nullValue());
|
||||
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("6"))), equalTo(0));
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("7"))), equalTo(5));
|
||||
|
||||
// Verify simple id cache for segment 4
|
||||
readerCache = idCache.reader(leaves.get(3).reader());
|
||||
assertThat(readerCache.type("child"), nullValue());
|
||||
typeCache = readerCache.type("parent");
|
||||
assertThat(typeCache.idByDoc(0), nullValue());
|
||||
assertThat(typeCache.idByDoc(1), nullValue());
|
||||
assertThat(typeCache.idByDoc(2), nullValue());
|
||||
assertThat(typeCache.idByDoc(3), nullValue());
|
||||
assertThat(typeCache.idByDoc(4).toUtf8(), equalTo("8"));
|
||||
|
||||
assertThat(typeCache.parentIdByDoc(0), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(1), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(2), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(3), nullValue());
|
||||
assertThat(typeCache.parentIdByDoc(4), nullValue());
|
||||
|
||||
assertThat(typeCache.docById(new HashedBytesArray(Strings.toUTF8Bytes("8"))), equalTo(4));
|
||||
}
|
||||
|
||||
@Test(expected = AssertionError.class)
|
||||
public void testRefresh_tripAssert() throws Exception {
|
||||
assumeTrue(ASSERTIONS_ENABLED);
|
||||
SimpleIdCache idCache = createSimpleIdCache(Tuple.tuple("child", "parent"));
|
||||
IndexWriter writer = createIndexWriter();
|
||||
// Begins with parent, ends with child docs
|
||||
writer.addDocument(doc("parent", "1"));
|
||||
writer.addDocument(childDoc("child", "1", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "2", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "3", "parent", "1"));
|
||||
writer.addDocument(childDoc("child", "4", "parent", "1"));
|
||||
// Doc like this should never end up in the index, just wanna trip an assert here!
|
||||
Document document = new Document();
|
||||
document.add(new StringField(UidFieldMapper.NAME, "parent", Field.Store.NO));
|
||||
writer.addDocument(document);
|
||||
writer.commit();
|
||||
|
||||
writer.close();
|
||||
DirectoryReader topLevelReader = DirectoryReader.open(writer.getDirectory());
|
||||
List<AtomicReaderContext> leaves = topLevelReader.getContext().leaves();
|
||||
idCache.refresh(leaves);
|
||||
}
|
||||
|
||||
private Document doc(String type, String id) {
|
||||
Document parent = new Document();
|
||||
parent.add(new StringField(UidFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", type, id), Field.Store.NO));
|
||||
return parent;
|
||||
}
|
||||
|
||||
private Document childDoc(String type, String id, String parentType, String parentId) {
|
||||
Document parent = new Document();
|
||||
parent.add(new StringField(UidFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", type, id), Field.Store.NO));
|
||||
parent.add(new StringField(ParentFieldMapper.NAME, String.format(Locale.ROOT, "%s#%s", parentType, parentId), Field.Store.NO));
|
||||
return parent;
|
||||
}
|
||||
|
||||
private SimpleIdCache createSimpleIdCache(Tuple<String, String>... documentTypes) throws IOException {
|
||||
Settings settings = ImmutableSettings.EMPTY;
|
||||
Index index = new Index("test");
|
||||
SimpleIdCache idCache = new SimpleIdCache(index, settings);
|
||||
MapperService mapperService = MapperTestUtils.newMapperService();
|
||||
idCache.setIndexService(new StubIndexService(mapperService));
|
||||
|
||||
for (Tuple<String, String> documentType : documentTypes) {
|
||||
String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(documentType.v1())
|
||||
.startObject("_parent").field("type", documentType.v2()).endObject()
|
||||
.endObject().endObject().string();
|
||||
mapperService.merge(documentType.v1(), new CompressedString(defaultMapping), true);
|
||||
}
|
||||
|
||||
return idCache;
|
||||
}
|
||||
|
||||
private IndexWriter createIndexWriter() throws IOException {
|
||||
return new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));
|
||||
}
|
||||
|
||||
public static class StubIndexService implements IndexService {
|
||||
|
||||
private final MapperService mapperService;
|
||||
|
||||
public StubIndexService(MapperService mapperService) {
|
||||
this.mapperService = mapperService;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Injector injector() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexGateway gateway() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexCache cache() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexFieldDataService fieldData() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexSettingsService settingsService() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AnalysisService analysisService() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public MapperService mapperService() {
|
||||
return mapperService;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexQueryParserService queryParserService() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SimilarityService similarityService() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexAliasesService aliasesService() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexEngine engine() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexStore store() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexShard createShard(int sShardId) throws ElasticsearchException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeShard(int shardId, String reason) throws ElasticsearchException {
|
||||
}
|
||||
|
||||
@Override
|
||||
public int numberOfShards() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableSet<Integer> shardIds() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasShard(int shardId) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexShard shard(int shardId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexShard shardSafe(int shardId) throws IndexShardMissingException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Injector shardInjector(int shardId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String indexUUID() {
|
||||
return IndexMetaData.INDEX_UUID_NA_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Index index() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<IndexShard> iterator() {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -23,13 +23,13 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
|||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.*;
|
||||
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
|
||||
import org.elasticsearch.index.mapper.MapperBuilders;
|
||||
import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
import org.elasticsearch.test.index.service.StubIndexService;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
|
@@ -69,6 +69,8 @@ public abstract class AbstractFieldDataTests extends ElasticsearchTestCase {
|
|||
mapper = MapperBuilders.byteField(fieldName).fieldDataSettings(type.getSettings()).build(context);
|
||||
} else if (type.getType().equals("geo_point")) {
|
||||
mapper = MapperBuilders.geoPointField(fieldName).fieldDataSettings(type.getSettings()).build(context);
|
||||
} else if (type.getType().equals("_parent")) {
|
||||
mapper = MapperBuilders.parent().type(fieldName).build(context);
|
||||
} else {
|
||||
throw new UnsupportedOperationException(type.getType());
|
||||
}
|
||||
|
@@ -78,6 +80,8 @@ public abstract class AbstractFieldDataTests extends ElasticsearchTestCase {
|
|||
@Before
|
||||
public void setup() throws Exception {
|
||||
ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
|
||||
MapperService mapperService = MapperTestUtils.newMapperService(ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
|
||||
ifdService.setIndexService(new StubIndexService(mapperService));
|
||||
// LogByteSizeMP to preserve doc ID order
|
||||
writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)).setMergePolicy(new LogByteSizeMergePolicy()));
|
||||
}
|
||||
|
|
|
@@ -29,13 +29,12 @@ import org.elasticsearch.common.settings.ImmutableSettings;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.fielddata.plain.*;
|
||||
import org.elasticsearch.index.mapper.ContentPath;
|
||||
import org.elasticsearch.index.mapper.FieldMapper;
|
||||
import org.elasticsearch.index.mapper.*;
|
||||
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
|
||||
import org.elasticsearch.index.mapper.MapperBuilders;
|
||||
import org.elasticsearch.index.mapper.core.*;
|
||||
import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
import org.elasticsearch.test.index.service.StubIndexService;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
|
@@ -51,6 +50,8 @@ public class IndexFieldDataServiceTests extends ElasticsearchTestCase {
|
|||
@SuppressWarnings("unchecked")
|
||||
public void testGetForFieldDefaults() {
|
||||
final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
|
||||
MapperService mapperService = MapperTestUtils.newMapperService(ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
|
||||
ifdService.setIndexService(new StubIndexService(mapperService));
|
||||
for (boolean docValues : Arrays.asList(true, false)) {
|
||||
final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
|
||||
final StringFieldMapper stringMapper = new StringFieldMapper.Builder("string").tokenized(false).fieldDataSettings(docValues ? DOC_VALUES_SETTINGS : ImmutableSettings.EMPTY).build(ctx);
|
||||
|
@@ -100,6 +101,8 @@ public class IndexFieldDataServiceTests extends ElasticsearchTestCase {
|
|||
@SuppressWarnings("unchecked")
|
||||
public void testByPassDocValues() {
|
||||
final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
|
||||
MapperService mapperService = MapperTestUtils.newMapperService(ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
|
||||
ifdService.setIndexService(new StubIndexService(mapperService));
|
||||
final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
|
||||
final StringFieldMapper stringMapper = MapperBuilders.stringField("string").tokenized(false).fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(ImmutableSettings.builder().put("format", "fst").build()).build(ctx);
|
||||
ifdService.clear();
|
||||
|
@@ -131,6 +134,8 @@ public class IndexFieldDataServiceTests extends ElasticsearchTestCase {
|
|||
|
||||
public void testChangeFieldDataFormat() throws Exception {
|
||||
final IndexFieldDataService ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
|
||||
MapperService mapperService = MapperTestUtils.newMapperService(ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
|
||||
ifdService.setIndexService(new StubIndexService(mapperService));
|
||||
final BuilderContext ctx = new BuilderContext(null, new ContentPath(1));
|
||||
final StringFieldMapper mapper1 = MapperBuilders.stringField("s").tokenized(false).fieldDataSettings(ImmutableSettings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx);
|
||||
final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
|
||||
|
|
|
@@ -0,0 +1,172 @@
|
|||
package org.elasticsearch.index.fielddata;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.common.compress.CompressedString;
|
||||
import org.elasticsearch.common.settings.ImmutableSettings;
|
||||
import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.MapperTestUtils;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
import org.elasticsearch.index.service.IndexService;
|
||||
import org.elasticsearch.test.index.service.StubIndexService;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class ParentChildFieldDataTests extends AbstractFieldDataTests {
|
||||
|
||||
private final String parentType = "parent";
|
||||
private final String childType = "child";
|
||||
private final String grandChildType = "grand-child";
|
||||
|
||||
@Before
|
||||
public void before() throws Exception {
|
||||
MapperService mapperService = MapperTestUtils.newMapperService(ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
|
||||
mapperService.merge(
|
||||
childType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType).string()), true
|
||||
);
|
||||
mapperService.merge(
|
||||
grandChildType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(grandChildType, "_parent", "type=" + childType).string()), true
|
||||
);
|
||||
IndexService indexService = new StubIndexService(mapperService);
|
||||
ifdService.setIndexService(indexService);
|
||||
|
||||
Document d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
|
||||
d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "2"), Field.Store.NO));
|
||||
d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
writer.commit();
|
||||
|
||||
d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "3"), Field.Store.NO));
|
||||
d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
|
||||
d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(parentType, "2"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
|
||||
d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "4"), Field.Store.NO));
|
||||
d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "2"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
|
||||
d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "5"), Field.Store.NO));
|
||||
d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
writer.commit();
|
||||
|
||||
d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(grandChildType, "6"), Field.Store.NO));
|
||||
d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(childType, "2"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
|
||||
d = new Document();
|
||||
d.add(new StringField(UidFieldMapper.NAME, Uid.createUid("other-type", "1"), Field.Store.NO));
|
||||
writer.addDocument(d);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetBytesValues() throws Exception {
|
||||
IndexFieldData indexFieldData = getForField(childType);
|
||||
AtomicFieldData fieldData = indexFieldData.load(refreshReader());
|
||||
assertThat(fieldData.getNumDocs(), equalTo(8));
|
||||
assertThat(fieldData.getMemorySizeInBytes(), greaterThan(0l));
|
||||
|
||||
BytesValues bytesValues = fieldData.getBytesValues(randomBoolean());
|
||||
assertThat(bytesValues.setDocument(0), equalTo(1));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("1"));
|
||||
|
||||
assertThat(bytesValues.setDocument(1), equalTo(2));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("1"));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("2"));
|
||||
|
||||
assertThat(bytesValues.setDocument(2), equalTo(2));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("1"));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("3"));
|
||||
|
||||
assertThat(bytesValues.setDocument(3), equalTo(1));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("2"));
|
||||
|
||||
assertThat(bytesValues.setDocument(4), equalTo(2));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("2"));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("4"));
|
||||
|
||||
assertThat(bytesValues.setDocument(5), equalTo(2));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("1"));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("5"));
|
||||
|
||||
assertThat(bytesValues.setDocument(6), equalTo(1));
|
||||
assertThat(bytesValues.nextValue().utf8ToString(), equalTo("2"));
|
||||
|
||||
assertThat(bytesValues.setDocument(7), equalTo(0));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting() throws Exception {
|
||||
IndexFieldData indexFieldData = getForField(childType);
|
||||
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
|
||||
IndexFieldData.XFieldComparatorSource comparator = indexFieldData.comparatorSource("_last", SortMode.MIN);
|
||||
|
||||
TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField(ParentFieldMapper.NAME, comparator, false)));
|
||||
assertThat(topDocs.totalHits, equalTo(8));
|
||||
assertThat(topDocs.scoreDocs.length, equalTo(8));
|
||||
assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[3].doc, equalTo(5));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("2"));
|
||||
assertThat(topDocs.scoreDocs[5].doc, equalTo(4));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("2"));
|
||||
assertThat(topDocs.scoreDocs[6].doc, equalTo(6));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0]).utf8ToString(), equalTo("2"));
|
||||
assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0]), equalTo(IndexFieldData.XFieldComparatorSource.MAX_TERM));
|
||||
|
||||
topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField(ParentFieldMapper.NAME, comparator, true)));
|
||||
assertThat(topDocs.totalHits, equalTo(8));
|
||||
assertThat(topDocs.scoreDocs.length, equalTo(8));
|
||||
assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("2"));
|
||||
assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("2"));
|
||||
assertThat(topDocs.scoreDocs[2].doc, equalTo(6));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("2"));
|
||||
assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[4].doc, equalTo(1));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[5].doc, equalTo(2));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[6].doc, equalTo(5));
|
||||
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0]).utf8ToString(), equalTo("1"));
|
||||
assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
|
||||
assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], nullValue());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected FieldDataType getFieldDataType() {
|
||||
return new FieldDataType("_parent");
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,125 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.fielddata.plain;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.test.ElasticsearchLuceneTestCase;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.core.IsNull.notNullValue;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class ParentChildFilteredTermsEnumTests extends ElasticsearchLuceneTestCase {
|
||||
|
||||
@BeforeClass
|
||||
public static void before() {
|
||||
forceDefaultCodec();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSimple_twoFieldEachUniqueValue() throws Exception {
|
||||
Directory directory = newDirectory();
|
||||
RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
|
||||
for (int i = 1; i <= 10000; i++) {
|
||||
Document document = new Document();
|
||||
String fieldName = i % 2 == 0 ? "field1" : "field2";
|
||||
document.add(new StringField(fieldName, format(i), Field.Store.NO));
|
||||
indexWriter.addDocument(document);
|
||||
}
|
||||
|
||||
IndexReader indexReader = DirectoryReader.open(indexWriter.w, false);
|
||||
TermsEnum[] compoundTermsEnums = new TermsEnum[]{
|
||||
new ParentChildIntersectTermsEnum(SlowCompositeReaderWrapper.wrap(indexReader), "field1", "field2")
|
||||
};
|
||||
for (TermsEnum termsEnum : compoundTermsEnums) {
|
||||
int expected = 0;
|
||||
for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
|
||||
++expected;
|
||||
assertThat(term.utf8ToString(), equalTo(format(expected)));
|
||||
DocsEnum docsEnum = termsEnum.docs(null, null);
|
||||
assertThat(docsEnum, notNullValue());
|
||||
int docId = docsEnum.nextDoc();
|
||||
assertThat(docId, not(equalTo(-1)));
|
||||
assertThat(docId, not(equalTo(DocsEnum.NO_MORE_DOCS)));
|
||||
assertThat(docsEnum.nextDoc(), equalTo(DocsEnum.NO_MORE_DOCS));
|
||||
}
|
||||
}
|
||||
|
||||
indexWriter.close();
|
||||
indexReader.close();
|
||||
directory.close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDocument_twoFieldsEachSharingValues() throws Exception {
|
||||
Directory directory = newDirectory();
|
||||
RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
|
||||
for (int i = 1; i <= 1000; i++) {
|
||||
Document document = new Document();
|
||||
document.add(new StringField("field1", format(i), Field.Store.NO));
|
||||
indexWriter.addDocument(document);
|
||||
|
||||
for (int j = 0; j < 10; j++) {
|
||||
document = new Document();
|
||||
document.add(new StringField("field2", format(i), Field.Store.NO));
|
||||
indexWriter.addDocument(document);
|
||||
}
|
||||
}
|
||||
|
||||
IndexReader indexReader = DirectoryReader.open(indexWriter.w, false);
|
||||
TermsEnum[] compoundTermsEnums = new TermsEnum[]{
|
||||
new ParentChildIntersectTermsEnum(SlowCompositeReaderWrapper.wrap(indexReader), "field1", "field2")
|
||||
};
|
||||
for (TermsEnum termsEnum : compoundTermsEnums) {
|
||||
int expected = 0;
|
||||
for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
|
||||
++expected;
|
||||
assertThat(term.utf8ToString(), equalTo(format(expected)));
|
||||
DocsEnum docsEnum = termsEnum.docs(null, null);
|
||||
assertThat(docsEnum, notNullValue());
|
||||
int numDocs = 0;
|
||||
for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
|
||||
numDocs++;
|
||||
}
|
||||
assertThat(numDocs, equalTo(11));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
indexWriter.close();
|
||||
indexReader.close();
|
||||
directory.close();
|
||||
}
|
||||
|
||||
static String format(int i) {
|
||||
return String.format(Locale.ROOT, "%06d", i);
|
||||
}
|
||||
}
|
|
@@ -19,11 +19,9 @@
|
|||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.bytes.HashedBytesArray;
|
||||
import org.elasticsearch.test.ElasticsearchTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.hamcrest.MatcherAssert.assertThat;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class UidTests extends ElasticsearchTestCase {
|
||||
|
@ -31,16 +29,16 @@ public class UidTests extends ElasticsearchTestCase {
|
|||
@Test
|
||||
public void testCreateAndSplitId() {
|
||||
BytesRef createUid = Uid.createUidAsBytes("foo", "bar");
|
||||
HashedBytesArray[] splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(createUid);
|
||||
assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8()));
|
||||
assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8()));
|
||||
BytesRef[] splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(createUid);
|
||||
assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString()));
|
||||
assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString()));
|
||||
// split also with an offset
|
||||
BytesRef ref = new BytesRef(createUid.length+10);
|
||||
ref.offset = 9;
|
||||
ref.length = createUid.length;
|
||||
System.arraycopy(createUid.bytes, createUid.offset, ref.bytes, ref.offset, ref.length);
|
||||
splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(ref);
|
||||
assertThat("foo", equalTo(splitUidIntoTypeAndId[0].toUtf8()));
|
||||
assertThat("bar", equalTo(splitUidIntoTypeAndId[1].toUtf8()));
|
||||
assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString()));
|
||||
assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString()));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,6 +45,8 @@ import org.elasticsearch.index.analysis.AnalysisModule;
import org.elasticsearch.index.cache.IndexCacheModule;
import org.elasticsearch.index.codec.CodecModule;
import org.elasticsearch.index.engine.IndexEngineModule;
import org.elasticsearch.index.fielddata.IndexFieldDataModule;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperServiceModule;
import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
@ -57,6 +59,7 @@ import org.elasticsearch.indices.query.IndicesQueriesModule;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.elasticsearch.test.index.service.StubIndexService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
import org.junit.AfterClass;
@ -101,6 +104,7 @@ public class IndexQueryParserFilterCachingTests extends ElasticsearchTestCase {
                new IndexQueryParserModule(settings),
                new IndexNameModule(index),
                new FunctionScoreModule(),
                new IndexFieldDataModule(settings),
                new AbstractModule() {
                    @Override
                    protected void configure() {
@ -110,6 +114,7 @@ public class IndexQueryParserFilterCachingTests extends ElasticsearchTestCase {
        }
        ).createInjector();

        injector.getInstance(IndexFieldDataService.class).setIndexService((new StubIndexService(injector.getInstance(MapperService.class))));
        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
        injector.getInstance(MapperService.class).merge("person", new CompressedString(mapping), true);
        String childMapping = copyToStringFromClasspath("/org/elasticsearch/index/query/child-mapping.json");

@ -52,6 +52,8 @@ import org.elasticsearch.index.cache.IndexCacheModule;
import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;
import org.elasticsearch.index.codec.CodecModule;
import org.elasticsearch.index.engine.IndexEngineModule;
import org.elasticsearch.index.fielddata.IndexFieldDataModule;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperServiceModule;
import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
@ -66,6 +68,7 @@ import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
import org.elasticsearch.indices.query.IndicesQueriesModule;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.elasticsearch.test.index.service.StubIndexService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
import org.hamcrest.Matchers;
@ -116,6 +119,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
                new IndexEngineModule(settings),
                new SimilarityModule(settings),
                new IndexQueryParserModule(settings),
                new IndexFieldDataModule(settings),
                new IndexNameModule(index),
                new FunctionScoreModule(),
                new AbstractModule() {
@ -127,6 +131,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
        }
        ).createInjector();

        injector.getInstance(IndexFieldDataService.class).setIndexService((new StubIndexService(injector.getInstance(MapperService.class))));
        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
        injector.getInstance(MapperService.class).merge("person", new CompressedString(mapping), true);
        injector.getInstance(MapperService.class).documentMapper("person").parse(new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));

@ -29,13 +29,12 @@ import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.*;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
@ -43,6 +42,7 @@ import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.elasticsearch.test.index.service.StubIndexService;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@ -80,6 +80,8 @@ public class FieldDataTermsFilterTests extends ElasticsearchTestCase {

        // create index and fielddata service
        ifdService = new IndexFieldDataService(new Index("test"), new DummyCircuitBreakerService());
        MapperService mapperService = MapperTestUtils.newMapperService(ifdService.index(), ImmutableSettings.Builder.EMPTY_SETTINGS);
        ifdService.setIndexService(new StubIndexService(mapperService));
        writer = new IndexWriter(new RAMDirectory(),
                new IndexWriterConfig(Lucene.VERSION, new StandardAnalyzer(Lucene.VERSION)));

@ -39,10 +39,9 @@ import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.cache.id.SimpleIdCacheTests;
import org.elasticsearch.index.cache.id.simple.SimpleIdCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MapperTestUtils;
import org.elasticsearch.index.mapper.Uid;
@ -52,10 +51,12 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
import org.elasticsearch.index.service.IndexService;
import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
import org.elasticsearch.indices.fielddata.breaker.DummyCircuitBreakerService;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ElasticsearchLuceneTestCase;
import org.elasticsearch.test.index.service.StubIndexService;
import org.elasticsearch.threadpool.ThreadPool;
import org.hamcrest.Description;
import org.hamcrest.StringDescription;
@ -109,7 +110,9 @@ public class ChildrenConstantScoreQueryTests extends ElasticsearchLuceneTestCase
        TermQuery childQuery = new TermQuery(new Term("field1", "value" + (1 + random().nextInt(3))));
        TermFilter parentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
        int shortCircuitParentDocSet = random().nextInt(5);
        ChildrenConstantScoreQuery query = new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, null);
        ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
        ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
        ChildrenConstantScoreQuery query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, null);

        BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
        searcher.search(query, collector);
@ -198,6 +201,8 @@ public class ChildrenConstantScoreQueryTests extends ElasticsearchLuceneTestCase
        );
        ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

        ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
        ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
        Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
        Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
        int max = numUniqueChildValues / 4;
@ -253,12 +258,12 @@ public class ChildrenConstantScoreQueryTests extends ElasticsearchLuceneTestCase
                Query query;
                if (random().nextBoolean()) {
                    // Usage in HasChildQueryParser
                    query = new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
                    query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter);
                } else {
                    // Usage in HasChildFilterParser
                    query = new XConstantScoreQuery(
                            new CustomQueryWrappingFilter(
                                    new ChildrenConstantScoreQuery(childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter)
                                    new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter)
                            )
                    );
                }
@ -325,13 +330,13 @@ public class ChildrenConstantScoreQueryTests extends ElasticsearchLuceneTestCase

    static SearchContext createSearchContext(String indexName, String parentType, String childType) throws IOException {
        final Index index = new Index(indexName);
        final IdCache idCache = new SimpleIdCache(index, ImmutableSettings.EMPTY);
        final CacheRecycler cacheRecycler = new CacheRecycler(ImmutableSettings.EMPTY);
        final PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(ImmutableSettings.EMPTY, new ThreadPool());
        Settings settings = ImmutableSettings.EMPTY;
        MapperService mapperService = MapperTestUtils.newMapperService(index, settings);
        final IndexService indexService = new SimpleIdCacheTests.StubIndexService(mapperService);
        idCache.setIndexService(indexService);
        IndexFieldDataService indexFieldDataService = new IndexFieldDataService(index, new DummyCircuitBreakerService());
        final IndexService indexService = new StubIndexService(mapperService);
        indexFieldDataService.setIndexService(indexService);
        // Id_cache is now registered as document type listener, so we can add mappings.
        mapperService.merge(
                childType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType).string()), true
@ -341,7 +346,7 @@ public class ChildrenConstantScoreQueryTests extends ElasticsearchLuceneTestCase
        NodeSettingsService nodeSettingsService = new NodeSettingsService(settings);
        IndicesFilterCache indicesFilterCache = new IndicesFilterCache(settings, threadPool, cacheRecycler, nodeSettingsService);
        WeightedFilterCache filterCache = new WeightedFilterCache(index, settings, indicesFilterCache);
        return new TestSearchContext(cacheRecycler, pageCacheRecycler, idCache, indexService, filterCache);
        return new TestSearchContext(cacheRecycler, pageCacheRecycler, indexService, filterCache, indexFieldDataService);
    }

}

@ -32,6 +32,7 @@ import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.search.NotFilter;
import org.elasticsearch.common.lucene.search.XFilteredQuery;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
@ -146,6 +147,9 @@ public class ChildrenQueryTests extends ElasticsearchLuceneTestCase {
                ChildrenQueryTests.class.getSimpleName(), searcher
        );
        ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

        ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
        ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
        Filter rawParentFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "parent"));
        Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
        int max = numUniqueChildValues / 4;
@ -199,7 +203,7 @@ public class ChildrenQueryTests extends ElasticsearchLuceneTestCase {
                int shortCircuitParentDocSet = random().nextInt(numParentDocs);
                ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)];
                Filter nonNestedDocsFilter = random().nextBoolean() ? NonNestedDocsFilter.INSTANCE : null;
                Query query = new ChildrenQuery("parent", "child", parentFilter, childQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
                Query query = new ChildrenQuery(parentChildIndexFieldData, "parent", "child", parentFilter, childQuery, scoreType, shortCircuitParentDocSet, nonNestedDocsFilter);
                query = new XFilteredQuery(query, filterMe);
                BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
                int numHits = 1 + random().nextInt(25);

@ -35,6 +35,7 @@ import org.elasticsearch.common.lucene.search.NotFilter;
import org.elasticsearch.common.lucene.search.XConstantScoreQuery;
import org.elasticsearch.common.lucene.search.XFilteredQuery;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
@ -148,6 +149,8 @@ public class ParentConstantScoreQueryTests extends ElasticsearchLuceneTestCase {
        );
        ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

        ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
        ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
        TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
        Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
        int max = numUniqueParentValues / 4;
@ -200,12 +203,12 @@ public class ParentConstantScoreQueryTests extends ElasticsearchLuceneTestCase {
                Query query;
                if (random().nextBoolean()) {
                    // Usage in HasParentQueryParser
                    query = new ParentConstantScoreQuery(parentQuery, "parent", childrenFilter);
                    query = new ParentConstantScoreQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter);
                } else {
                    // Usage in HasParentFilterParser
                    query = new XConstantScoreQuery(
                            new CustomQueryWrappingFilter(
                                    new ParentConstantScoreQuery(parentQuery, "parent", childrenFilter)
                                    new ParentConstantScoreQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter)
                            )
                    );
                }

@ -32,6 +32,7 @@ import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.search.NotFilter;
import org.elasticsearch.common.lucene.search.XFilteredQuery;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
@ -145,6 +146,8 @@ public class ParentQueryTests extends ElasticsearchLuceneTestCase {
        );
        ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));

        ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
        ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
        TermFilter rawChildrenFilter = new TermFilter(new Term(TypeFieldMapper.NAME, "child"));
        Filter rawFilterMe = new NotFilter(new TermFilter(new Term("filter", "me")));
        int max = numUniqueParentValues / 4;
@ -194,7 +197,7 @@ public class ParentQueryTests extends ElasticsearchLuceneTestCase {

                String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
                Query parentQuery = new ConstantScoreQuery(new TermQuery(new Term("field1", parentValue)));
                Query query = new ParentQuery(parentQuery,"parent", childrenFilter);
                Query query = new ParentQuery(parentChildIndexFieldData, parentQuery,"parent", childrenFilter);
                query = new XFilteredQuery(query, filterMe);
                BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
                int numHits = 1 + random().nextInt(25);

@ -29,7 +29,6 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.docset.DocSetCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.id.IdCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.FieldMappers;
@ -67,27 +66,27 @@ public class TestSearchContext extends SearchContext {

    final CacheRecycler cacheRecycler;
    final PageCacheRecycler pageCacheRecycler;
    final IdCache idCache;
    final IndexService indexService;
    final FilterCache filterCache;
    final IndexFieldDataService indexFieldDataService;

    ContextIndexSearcher searcher;
    int size;

    public TestSearchContext(CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler, IdCache idCache, IndexService indexService, FilterCache filterCache) {
    public TestSearchContext(CacheRecycler cacheRecycler, PageCacheRecycler pageCacheRecycler, IndexService indexService, FilterCache filterCache, IndexFieldDataService indexFieldDataService) {
        this.cacheRecycler = cacheRecycler;
        this.pageCacheRecycler = pageCacheRecycler;
        this.idCache = idCache;
        this.indexService = indexService;
        this.filterCache = filterCache;
        this.indexFieldDataService = indexFieldDataService;
    }

    public TestSearchContext() {
        this.cacheRecycler = null;
        this.pageCacheRecycler = null;
        this.idCache = null;
        this.indexService = null;
        this.filterCache = null;
        this.indexFieldDataService = null;
    }

    @Override
@ -332,12 +331,7 @@ public class TestSearchContext extends SearchContext {

    @Override
    public IndexFieldDataService fieldData() {
        return null;
    }

    @Override
    public IdCache idCache() {
        return idCache;
        return indexFieldDataService;
    }

    @Override

@ -284,9 +284,12 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
        client().admin().indices().prepareRefresh().get();

        indicesStatsResponse = client().admin().indices()
                .prepareStats("test").setIdCache(true).get();
                .prepareStats("test").setFieldData(true).get();
        // automatic warm-up has populated the cache since it found a parent field mapper
        assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
        // Even though p/c is field data based the stats stay zero, because _parent field data field is kept
        // track of under id cache stats memory wise for bwc
        assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));

        SearchResponse searchResponse = client().prepareSearch("test")
                .setQuery(constantScoreQuery(hasChildFilter("child", termQuery("c_field", "blue"))))
@ -295,13 +298,15 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
        assertThat(searchResponse.getHits().totalHits(), equalTo(1l));

        indicesStatsResponse = client().admin().indices()
                .prepareStats("test").setIdCache(true).get();
                .prepareStats("test").setFieldData(true).get();
        assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
        assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));

        client().admin().indices().prepareClearCache("test").setIdCache(true).get();
        indicesStatsResponse = client().admin().indices()
                .prepareStats("test").setIdCache(true).get();
                .prepareStats("test").setFieldData(true).get();
        assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), equalTo(0l));
        assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
    }

    @Test
@ -1866,13 +1871,23 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
            assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
        }

        // can't fail, because there is no check, this b/c parent type can be refered by many child types.
        client().prepareSearch("test")
                .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
                .get();
        client().prepareSearch("test")
                .setPostFilter(hasParentFilter("parent", termQuery("p_field", "1")))
                .get();
        try {
            client().prepareSearch("test")
                    .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
                    .get();
            fail();
        } catch (SearchPhaseExecutionException e) {
            assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
        }

        try {
            client().prepareSearch("test")
                    .setPostFilter(hasParentFilter("parent", termQuery("p_field", "1")))
                    .get();
            fail();
        } catch (SearchPhaseExecutionException e) {
            assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
        }
    }

    @Test

@ -0,0 +1,172 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.test.index.service;

import com.google.common.collect.ImmutableSet;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexShardMissingException;
import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.engine.IndexEngine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.gateway.IndexGateway;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.service.IndexService;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.service.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;

import java.util.Iterator;

/**
 */
public class StubIndexService implements IndexService {

    private final MapperService mapperService;

    public StubIndexService(MapperService mapperService) {
        this.mapperService = mapperService;
    }

    @Override
    public Injector injector() {
        return null;
    }

    @Override
    public IndexGateway gateway() {
        return null;
    }

    @Override
    public IndexCache cache() {
        return null;
    }

    @Override
    public IndexFieldDataService fieldData() {
        return null;
    }

    @Override
    public IndexSettingsService settingsService() {
        return null;
    }

    @Override
    public AnalysisService analysisService() {
        return null;
    }

    @Override
    public MapperService mapperService() {
        return mapperService;
    }

    @Override
    public IndexQueryParserService queryParserService() {
        return null;
    }

    @Override
    public SimilarityService similarityService() {
        return null;
    }

    @Override
    public IndexAliasesService aliasesService() {
        return null;
    }

    @Override
    public IndexEngine engine() {
        return null;
    }

    @Override
    public IndexStore store() {
        return null;
    }

    @Override
    public IndexShard createShard(int sShardId) throws ElasticsearchException {
        return null;
    }

    @Override
    public void removeShard(int shardId, String reason) throws ElasticsearchException {
    }

    @Override
    public int numberOfShards() {
        return 0;
    }

    @Override
    public ImmutableSet<Integer> shardIds() {
        return null;
    }

    @Override
    public boolean hasShard(int shardId) {
        return false;
    }

    @Override
    public IndexShard shard(int shardId) {
        return null;
    }

    @Override
    public IndexShard shardSafe(int shardId) throws IndexShardMissingException {
        return null;
    }

    @Override
    public Injector shardInjector(int shardId) {
        return null;
    }

    @Override
    public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException {
        return null;
    }

    @Override
    public String indexUUID() {
        return IndexMetaData.INDEX_UUID_NA_VALUE;
    }

    @Override
    public Index index() {
        return null;
    }

    @Override
    public Iterator<IndexShard> iterator() {
        return null;
    }
}