mirror of https://github.com/apache/lucene.git
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr
commit a4d15862ac
@ -211,6 +211,9 @@ Bug Fixes
* LUCENE-6984: SpanMultiTermQueryWrapper no longer modifies its wrapped query.
  (Alan Woodward, Adrien Grand)

* LUCENE-6998: Fix a couple places to better detect truncated index files
  as corruption. (Robert Muir, Mike McCandless)

Other

* LUCENE-6924: Upgrade randomizedtesting to 2.3.2. (Dawid Weiss)

@ -33,6 +33,7 @@ import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.bkd.BKDReader;

@ -59,8 +60,10 @@ class SimpleTextPointReader extends PointReader {

  public SimpleTextPointReader(SegmentReadState readState) throws IOException {
    // Initialize readers now:
    String fileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointFormat.POINT_EXTENSION);
    dataIn = readState.directory.openInput(fileName, IOContext.DEFAULT);

    // Read index:
    Map<String,Long> fieldToFileOffset = new HashMap<>();

    String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointFormat.POINT_INDEX_EXTENSION);
    try (ChecksumIndexInput in = readState.directory.openChecksumInput(indexFileName, IOContext.DEFAULT)) {
      readLine(in);

@ -70,10 +73,25 @@ class SimpleTextPointReader extends PointReader {
        String fieldName = stripPrefix(FIELD_FP_NAME);
        readLine(in);
        long fp = parseLong(FIELD_FP);
        readers.put(fieldName, initReader(fp));
        fieldToFileOffset.put(fieldName, fp);
      }
      SimpleTextUtil.checkFooter(in);
    }

    boolean success = false;
    String fileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, SimpleTextPointFormat.POINT_EXTENSION);
    dataIn = readState.directory.openInput(fileName, IOContext.DEFAULT);
    try {
      for(Map.Entry<String,Long> ent : fieldToFileOffset.entrySet()) {
        readers.put(ent.getKey(), initReader(ent.getValue()));
      }
      success = true;
    } finally {
      if (success == false) {
        IOUtils.closeWhileHandlingException(this);
      }
    }

    this.readState = readState;
  }

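Both the SimpleText and Lucene60 readers now follow the same open sequence: read the small index file first inside try-with-resources (verifying its checksum), then open the long-lived data file and guard the rest of the constructor with a success flag so a failure closes whatever was already opened. A minimal sketch of that idiom with plain java.io, using a hypothetical class rather than the actual Lucene reader:

import java.io.Closeable;
import java.io.FileInputStream;
import java.io.IOException;

class TwoFileReader implements Closeable {
  private final FileInputStream data;

  TwoFileReader(String indexFile, String dataFile) throws IOException {
    // Phase 1: the per-field offsets live in a small index file that is read and closed immediately.
    try (FileInputStream index = new FileInputStream(indexFile)) {
      index.read(); // the real code parses field -> file-pointer entries here
    }

    // Phase 2: the data file stays open for the reader's lifetime, so guard its initialization.
    data = new FileInputStream(dataFile);
    boolean success = false;
    try {
      data.read(); // the real code seeks to each field's offset and builds its reader here
      success = true;
    } finally {
      if (success == false) {
        try {
          close(); // close quietly so the original exception is the one that propagates
        } catch (IOException suppressed) {
          // ignored: we are already failing
        }
      }
    }
  }

  @Override
  public void close() throws IOException {
    data.close();
  }
}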
|
@ -196,10 +196,14 @@ class SimpleTextPointWriter extends PointWriter {
    SimpleTextUtil.writeNewline(out);
  }

  @Override
  public void finish() throws IOException {
    SimpleTextUtil.writeChecksum(dataOut, scratch);
  }

  @Override
  public void close() throws IOException {
    if (dataOut != null) {
      SimpleTextUtil.writeChecksum(dataOut, scratch);
      dataOut.close();
      dataOut = null;

@ -0,0 +1,33 @@
package org.apache.lucene.codecs.simpletext;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePointFormatTestCase;

/**
 * Tests SimpleText's point format
 */
public class TestSimpleTextPointFormat extends BasePointFormatTestCase {
  private final Codec codec = new SimpleTextCodec();

  @Override
  protected Codec getCodec() {
    return codec;
  }
}

@ -17,7 +17,6 @@ package org.apache.lucene.codecs;
 * limitations under the License.
 */


import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

@ -397,6 +396,9 @@ public final class CodecUtil {
   * @throws IOException if the footer is invalid
   */
  public static long retrieveChecksum(IndexInput in) throws IOException {
    if (in.length() < footerLength()) {
      throw new CorruptIndexException("misplaced codec footer (file truncated?): length=" + in.length() + " but footerLength==" + footerLength(), in);
    }
    in.seek(in.length() - footerLength());
    validateFooter(in);
    return readCRC(in);

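retrieveChecksum now refuses to seek before the start of the file: if the input is shorter than one footer, it throws CorruptIndexException up front ("file truncated?") instead of failing later with a less helpful error. A standalone sketch of the same guard over a plain file, with a hypothetical footer layout rather than Lucene's actual constants:

import java.io.IOException;
import java.io.RandomAccessFile;

class FooterCheck {
  // Hypothetical footer: 4-byte magic + 4-byte algorithm id + 8-byte CRC.
  static final int FOOTER_LENGTH = 4 + 4 + 8;
  static final int FOOTER_MAGIC = 0x0000F007; // arbitrary placeholder for the sketch

  /** Reads the checksum stored in the footer, failing fast if the file is too short. */
  static long retrieveChecksum(RandomAccessFile in) throws IOException {
    if (in.length() < FOOTER_LENGTH) {
      throw new IOException("misplaced codec footer (file truncated?): length=" + in.length()
          + " but footerLength==" + FOOTER_LENGTH);
    }
    in.seek(in.length() - FOOTER_LENGTH);
    if (in.readInt() != FOOTER_MAGIC) {
      throw new IOException("codec footer mismatch (file truncated?)");
    }
    in.readInt();          // algorithm id, not used in this sketch
    return in.readLong();  // the stored CRC
  }
}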
@ -126,5 +126,9 @@ public abstract class PointWriter implements Closeable {
        mergeOneField(mergeState, fieldInfo);
      }
    }
    finish();
  }

  /** Called once at the end before close */
  public abstract void finish() throws IOException;
}

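PointWriter.merge() now calls a new abstract finish() hook once all fields have been merged, so a codec writes its checksum footer exactly once and close() is left to just release resources. A minimal sketch of that write/finish/close lifecycle with hypothetical names, not the real Lucene classes:

import java.io.Closeable;
import java.io.IOException;

abstract class LifecycleWriter implements Closeable {
  private boolean finished;

  abstract void writeFooter() throws IOException;  // checksum footer, written exactly once
  abstract void closeOutput() throws IOException;

  /** Called once at the end, before close(): seals the data file with its footer. */
  final void finish() throws IOException {
    if (finished) {
      throw new IllegalStateException("already finished");
    }
    finished = true;
    writeFooter();
  }

  /** In this sketch close() only releases resources; it does not write the footer. */
  @Override
  public final void close() throws IOException {
    closeOutput();
  }
}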
@ -68,6 +68,13 @@ final class Lucene50CompoundReader extends Directory {
    String entriesFileName = IndexFileNames.segmentFileName(segmentName, "", Lucene50CompoundFormat.ENTRIES_EXTENSION);
    this.entries = readEntries(si.getId(), directory, entriesFileName);
    boolean success = false;

    long expectedLength = CodecUtil.indexHeaderLength(Lucene50CompoundFormat.DATA_CODEC, "");
    for(Map.Entry<String,FileEntry> ent : entries.entrySet()) {
      expectedLength += ent.getValue().length;
    }
    expectedLength += CodecUtil.footerLength();

    handle = directory.openInput(dataFileName, context);
    try {
      CodecUtil.checkIndexHeader(handle, Lucene50CompoundFormat.DATA_CODEC, version, version, si.getId(), "");

@ -77,6 +84,13 @@ final class Lucene50CompoundReader extends Directory {
      // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
      // such as file truncation.
      CodecUtil.retrieveChecksum(handle);

      // We also validate length, because e.g. if you strip 16 bytes off the .cfs we otherwise
      // would not detect it:
      if (handle.length() != expectedLength) {
        throw new CorruptIndexException("length should be " + expectedLength + " bytes, but is " + handle.length() + " instead", handle);
      }

      success = true;
    } finally {
      if (!success) {

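Besides checking the footer structure, the compound-file reader now computes the expected .cfs length as index header + sum of all entry lengths + footer and compares it against the actual file length, so stripping bytes off the end is detected even when the footer itself still parses. A tiny standalone sketch of that arithmetic, with hypothetical header and footer sizes:

import java.util.Map;

class CompoundLengthCheck {
  // Hypothetical fixed sizes; the real values come from the codec's header/footer layout.
  static final long HEADER_LENGTH = 32;
  static final long FOOTER_LENGTH = 16;

  /** Throws if the actual file length differs from what the entry table implies. */
  static void checkLength(Map<String, Long> entryLengths, long actualLength) {
    long expected = HEADER_LENGTH;
    for (long length : entryLengths.values()) {
      expected += length;
    }
    expected += FOOTER_LENGTH;
    if (actualLength != expected) {
      throw new IllegalStateException(
          "length should be " + expected + " bytes, but is " + actualLength + " instead");
    }
  }
}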
|
@ -73,7 +73,8 @@ import org.apache.lucene.index.SegmentWriteState;

public final class Lucene60PointFormat extends PointFormat {

  static final String CODEC_NAME = "Lucene60PointFormat";
  static final String DATA_CODEC_NAME = "Lucene60PointFormatData";
  static final String META_CODEC_NAME = "Lucene60PointFormatMeta";

  /**
   * Filename extension for the leaf blocks

@ -1,6 +1,5 @@
|
|||
package org.apache.lucene.codecs.lucene60;
|
||||
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
|
@ -48,20 +47,20 @@ public class Lucene60PointReader extends PointReader implements Closeable {
|
|||
/** Sole constructor */
|
||||
public Lucene60PointReader(SegmentReadState readState) throws IOException {
|
||||
this.readState = readState;
|
||||
String dataFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name,
|
||||
readState.segmentSuffix,
|
||||
Lucene60PointFormat.DATA_EXTENSION);
|
||||
dataIn = readState.directory.openInput(dataFileName, readState.context);
|
||||
|
||||
|
||||
String indexFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name,
|
||||
readState.segmentSuffix,
|
||||
Lucene60PointFormat.INDEX_EXTENSION);
|
||||
|
||||
boolean success = false;
|
||||
Map<Integer,Long> fieldToFileOffset = new HashMap<>();
|
||||
|
||||
// Read index file
|
||||
try (ChecksumIndexInput indexIn = readState.directory.openChecksumInput(indexFileName, readState.context)) {
|
||||
Throwable priorE = null;
|
||||
try {
|
||||
CodecUtil.checkIndexHeader(indexIn,
|
||||
Lucene60PointFormat.CODEC_NAME,
|
||||
Lucene60PointFormat.META_CODEC_NAME,
|
||||
Lucene60PointFormat.INDEX_VERSION_START,
|
||||
Lucene60PointFormat.INDEX_VERSION_START,
|
||||
readState.segmentInfo.getId(),
|
||||
|
@ -70,16 +69,47 @@ public class Lucene60PointReader extends PointReader implements Closeable {
|
|||
for(int i=0;i<count;i++) {
|
||||
int fieldNumber = indexIn.readVInt();
|
||||
long fp = indexIn.readVLong();
|
||||
fieldToFileOffset.put(fieldNumber, fp);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
priorE = t;
|
||||
} finally {
|
||||
CodecUtil.checkFooter(indexIn, priorE);
|
||||
}
|
||||
}
|
||||
|
||||
String dataFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name,
|
||||
readState.segmentSuffix,
|
||||
Lucene60PointFormat.DATA_EXTENSION);
|
||||
boolean success = false;
|
||||
dataIn = readState.directory.openInput(dataFileName, readState.context);
|
||||
try {
|
||||
|
||||
CodecUtil.checkIndexHeader(dataIn,
|
||||
Lucene60PointFormat.DATA_CODEC_NAME,
|
||||
Lucene60PointFormat.DATA_VERSION_START,
|
||||
Lucene60PointFormat.DATA_VERSION_START,
|
||||
readState.segmentInfo.getId(),
|
||||
readState.segmentSuffix);
|
||||
|
||||
// NOTE: data file is too costly to verify checksum against all the bytes on open,
|
||||
// but for now we at least verify proper structure of the checksum footer: which looks
|
||||
// for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
|
||||
// such as file truncation.
|
||||
CodecUtil.retrieveChecksum(dataIn);
|
||||
|
||||
for(Map.Entry<Integer,Long> ent : fieldToFileOffset.entrySet()) {
|
||||
int fieldNumber = ent.getKey();
|
||||
long fp = ent.getValue();
|
||||
dataIn.seek(fp);
|
||||
BKDReader reader = new BKDReader(dataIn);
|
||||
readers.put(fieldNumber, reader);
|
||||
//reader.verify(readState.segmentInfo.maxDoc());
|
||||
}
|
||||
CodecUtil.checkFooter(indexIn);
|
||||
|
||||
success = true;
|
||||
} finally {
|
||||
if (success == false) {
|
||||
IOUtils.closeWhileHandlingException(dataIn);
|
||||
IOUtils.closeWhileHandlingException(this);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,12 +27,12 @@ import java.util.Map;
|
|||
import org.apache.lucene.codecs.CodecUtil;
|
||||
import org.apache.lucene.codecs.PointReader;
|
||||
import org.apache.lucene.codecs.PointWriter;
|
||||
import org.apache.lucene.index.PointValues.IntersectVisitor;
|
||||
import org.apache.lucene.index.PointValues.Relation;
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.IndexFileNames;
|
||||
import org.apache.lucene.index.MergeState;
|
||||
import org.apache.lucene.index.PointValues.IntersectVisitor;
|
||||
import org.apache.lucene.index.PointValues.Relation;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
@ -47,7 +47,7 @@ public class Lucene60PointWriter extends PointWriter implements Closeable {
|
|||
final SegmentWriteState writeState;
|
||||
final int maxPointsInLeafNode;
|
||||
final double maxMBSortInHeap;
|
||||
private boolean closed;
|
||||
private boolean finished;
|
||||
|
||||
/** Full constructor */
|
||||
public Lucene60PointWriter(SegmentWriteState writeState, int maxPointsInLeafNode, double maxMBSortInHeap) throws IOException {
|
||||
|
@ -62,7 +62,7 @@ public class Lucene60PointWriter extends PointWriter implements Closeable {
|
|||
boolean success = false;
|
||||
try {
|
||||
CodecUtil.writeIndexHeader(dataOut,
|
||||
Lucene60PointFormat.CODEC_NAME,
|
||||
Lucene60PointFormat.DATA_CODEC_NAME,
|
||||
Lucene60PointFormat.DATA_VERSION_CURRENT,
|
||||
writeState.segmentInfo.getId(),
|
||||
writeState.segmentSuffix);
|
||||
|
@ -141,11 +141,17 @@ public class Lucene60PointWriter extends PointWriter implements Closeable {
|
|||
for(int i=0;i<mergeState.pointReaders.length;i++) {
|
||||
PointReader reader = mergeState.pointReaders[i];
|
||||
|
||||
if (reader != null) {
|
||||
|
||||
// we confirmed this up above
|
||||
assert reader instanceof Lucene60PointReader;
|
||||
Lucene60PointReader reader60 = (Lucene60PointReader) reader;
|
||||
if (reader60 != null) {
|
||||
// TODO: I could just use the merged fieldInfo.number instead of resolving to this
|
||||
// reader's FieldInfo, right? Field numbers are always consistent across segments,
|
||||
// since when?
|
||||
|
||||
// NOTE: we cannot just use the merged fieldInfo.number (instead of resolving to this
|
||||
// reader's FieldInfo as we do below) because field numbers can easily be different
|
||||
// when addIndexes(Directory...) copies over segments from another index:
|
||||
|
||||
|
||||
FieldInfos readerFieldInfos = mergeState.fieldInfos[i];
|
||||
FieldInfo readerFieldInfo = readerFieldInfos.fieldInfo(fieldInfo.name);
|
||||
if (readerFieldInfo != null) {
|
||||
|
@ -169,14 +175,17 @@ public class Lucene60PointWriter extends PointWriter implements Closeable {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
finish();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
if (closed == false) {
|
||||
public void finish() throws IOException {
|
||||
if (finished) {
|
||||
throw new IllegalStateException("already finished");
|
||||
}
|
||||
finished = true;
|
||||
CodecUtil.writeFooter(dataOut);
|
||||
dataOut.close();
|
||||
closed = true;
|
||||
|
||||
String indexFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name,
|
||||
writeState.segmentSuffix,
|
||||
|
@ -184,7 +193,7 @@ public class Lucene60PointWriter extends PointWriter implements Closeable {
|
|||
// Write index file
|
||||
try (IndexOutput indexOut = writeState.directory.createOutput(indexFileName, writeState.context)) {
|
||||
CodecUtil.writeIndexHeader(indexOut,
|
||||
Lucene60PointFormat.CODEC_NAME,
|
||||
Lucene60PointFormat.META_CODEC_NAME,
|
||||
Lucene60PointFormat.INDEX_VERSION_CURRENT,
|
||||
writeState.segmentInfo.getId(),
|
||||
writeState.segmentSuffix);
|
||||
|
@ -201,5 +210,9 @@ public class Lucene60PointWriter extends PointWriter implements Closeable {
|
|||
CodecUtil.writeFooter(indexOut);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
dataOut.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -42,6 +42,9 @@ public final class DoublePoint extends Field {
|
|||
|
||||
/** Change the values of this field */
|
||||
public void setDoubleValues(double... point) {
|
||||
if (type.pointDimensionCount() != point.length) {
|
||||
throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
|
||||
}
|
||||
fieldsData = pack(point);
|
||||
}
|
||||
|
||||
|
@ -52,6 +55,9 @@ public final class DoublePoint extends Field {
|
|||
|
||||
@Override
|
||||
public Number numericValue() {
|
||||
if (type.pointDimensionCount() != 1) {
|
||||
throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
|
||||
}
|
||||
BytesRef bytes = (BytesRef) fieldsData;
|
||||
assert bytes.length == RamUsageEstimator.NUM_BYTES_LONG;
|
||||
return NumericUtils.sortableLongToDouble(NumericUtils.bytesToLongDirect(bytes.bytes, bytes.offset));
|
||||
|
|
|
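DoublePoint packs each dimension into a fixed-width, sortable byte encoding, which is why numericValue() only works for single-dimension fields. A small sketch of one common sortable-double encoding (the sign-flip trick over the raw IEEE bits, using plain JDK calls); this illustrates the idea and is not the exact NumericUtils implementation:

class SortableDouble {
  /** Encodes a double so that unsigned comparison of the results matches numeric order. */
  static long toSortableLong(double value) {
    long bits = Double.doubleToLongBits(value);
    // Negative values: flip all bits; non-negative values: flip only the sign bit.
    return bits ^ ((bits >> 63) | Long.MIN_VALUE);
  }

  /** Inverse of toSortableLong. */
  static double fromSortableLong(long sortable) {
    long bits = sortable ^ ((~sortable >> 63) | Long.MIN_VALUE);
    return Double.longBitsToDouble(bits);
  }
}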
@ -42,6 +42,9 @@ public final class FloatPoint extends Field {
|
|||
|
||||
/** Change the values of this field */
|
||||
public void setFloatValues(float... point) {
|
||||
if (type.pointDimensionCount() != point.length) {
|
||||
throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
|
||||
}
|
||||
fieldsData = pack(point);
|
||||
}
|
||||
|
||||
|
@ -52,6 +55,9 @@ public final class FloatPoint extends Field {
|
|||
|
||||
@Override
|
||||
public Number numericValue() {
|
||||
if (type.pointDimensionCount() != 1) {
|
||||
throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
|
||||
}
|
||||
BytesRef bytes = (BytesRef) fieldsData;
|
||||
assert bytes.length == RamUsageEstimator.NUM_BYTES_INT;
|
||||
return NumericUtils.sortableIntToFloat(NumericUtils.bytesToIntDirect(bytes.bytes, bytes.offset));
|
||||
|
|
|
@ -42,6 +42,9 @@ public final class IntPoint extends Field {
|
|||
|
||||
/** Change the values of this field */
|
||||
public void setIntValues(int... point) {
|
||||
if (type.pointDimensionCount() != point.length) {
|
||||
throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
|
||||
}
|
||||
fieldsData = pack(point);
|
||||
}
|
||||
|
||||
|
@ -52,6 +55,9 @@ public final class IntPoint extends Field {
|
|||
|
||||
@Override
|
||||
public Number numericValue() {
|
||||
if (type.pointDimensionCount() != 1) {
|
||||
throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
|
||||
}
|
||||
BytesRef bytes = (BytesRef) fieldsData;
|
||||
assert bytes.length == RamUsageEstimator.NUM_BYTES_INT;
|
||||
return NumericUtils.bytesToInt(bytes.bytes, bytes.offset);
|
||||
|
|
|
@ -42,6 +42,9 @@ public final class LongPoint extends Field {
|
|||
|
||||
/** Change the values of this field */
|
||||
public void setLongValues(long... point) {
|
||||
if (type.pointDimensionCount() != point.length) {
|
||||
throw new IllegalArgumentException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot change to (incoming) " + point.length + " dimensions");
|
||||
}
|
||||
fieldsData = pack(point);
|
||||
}
|
||||
|
||||
|
@ -52,6 +55,9 @@ public final class LongPoint extends Field {
|
|||
|
||||
@Override
|
||||
public Number numericValue() {
|
||||
if (type.pointDimensionCount() != 1) {
|
||||
throw new IllegalStateException("this field (name=" + name + ") uses " + type.pointDimensionCount() + " dimensions; cannot convert to a single numeric value");
|
||||
}
|
||||
BytesRef bytes = (BytesRef) fieldsData;
|
||||
assert bytes.length == RamUsageEstimator.NUM_BYTES_LONG;
|
||||
return NumericUtils.bytesToLong(bytes.bytes, bytes.offset);
|
||||
|
|
|
@ -152,6 +152,9 @@ final class DefaultIndexingChain extends DocConsumer {
|
|||
perField = perField.next;
|
||||
}
|
||||
}
|
||||
if (pointWriter != null) {
|
||||
pointWriter.finish();
|
||||
}
|
||||
success = true;
|
||||
} finally {
|
||||
if (success) {
|
||||
|
|
|
@ -138,7 +138,7 @@ public abstract class IndexReader implements Closeable {
    parentReaders.add(reader);
  }

  private void notifyReaderClosedListeners(Throwable th) {
  private void notifyReaderClosedListeners(Throwable th) throws IOException {
    synchronized(readerClosedListeners) {
      for(ReaderClosedListener listener : readerClosedListeners) {
        try {

@ -151,7 +151,7 @@ public abstract class IndexReader implements Closeable {
          }
        }
      }
      IOUtils.reThrowUnchecked(th);
      IOUtils.reThrow(th);
    }
  }

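With the changed signature, listener failures can surface as checked IOExceptions again: IOUtils.reThrow preserves an IOException (or unchecked throwable) as-is instead of forcing everything through the unchecked path. A rough standalone sketch of what such a rethrow helper does; this illustrates the idea and is not the actual IOUtils code:

import java.io.IOException;

final class Rethrow {
  private Rethrow() {}

  /** Rethrows th with its original type where possible; no-op when th is null. */
  static void reThrow(Throwable th) throws IOException {
    if (th == null) {
      return;
    }
    if (th instanceof IOException) {
      throw (IOException) th;
    }
    if (th instanceof RuntimeException) {
      throw (RuntimeException) th;
    }
    if (th instanceof Error) {
      throw (Error) th;
    }
    throw new RuntimeException(th); // wrap anything else
  }
}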
@ -23,7 +23,8 @@ import java.util.List;
|
|||
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
class MultiPointValues extends PointValues {
|
||||
/** Merges multiple {@link PointValues} into a single one. */
|
||||
public class MultiPointValues extends PointValues {
|
||||
|
||||
private final List<PointValues> subs;
|
||||
private final List<Integer> docBases;
|
||||
|
@ -33,6 +34,7 @@ class MultiPointValues extends PointValues {
|
|||
this.docBases = docBases;
|
||||
}
|
||||
|
||||
/** Returns a {@link PointValues} merging all point values from the provided reader. */
|
||||
public static PointValues get(IndexReader r) {
|
||||
final List<LeafReaderContext> leaves = r.leaves();
|
||||
final int size = leaves.size();
|
||||
|
|
|
@ -1,9 +1,5 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.util.bkd.BKDWriter;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
|
@ -21,6 +17,10 @@ import org.apache.lucene.util.bkd.BKDWriter;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.util.bkd.BKDWriter;
|
||||
|
||||
/** Allows recursively visiting point values indexed with {@link org.apache.lucene.document.IntPoint},
|
||||
* {@link org.apache.lucene.document.FloatPoint}, {@link org.apache.lucene.document.LongPoint}, {@link org.apache.lucene.document.DoublePoint}
|
||||
* or {@link org.apache.lucene.document.BinaryPoint}.
|
||||
|
|
|
@ -166,7 +166,7 @@ final class SegmentCoreReaders {
    }
  }

  private void notifyCoreClosedListeners(Throwable th) {
  private void notifyCoreClosedListeners(Throwable th) throws IOException {
    synchronized(coreClosedListeners) {
      for (CoreClosedListener listener : coreClosedListeners) {
        // SegmentReader uses our instance as its

@ -181,7 +181,7 @@ final class SegmentCoreReaders {
          }
        }
      }
      IOUtils.reThrowUnchecked(th);
      IOUtils.reThrow(th);
    }
  }

@ -0,0 +1,83 @@
|
|||
package org.apache.lucene.codecs.lucene60;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.codecs.Codec;
|
||||
import org.apache.lucene.codecs.FilterCodec;
|
||||
import org.apache.lucene.codecs.PointFormat;
|
||||
import org.apache.lucene.codecs.PointReader;
|
||||
import org.apache.lucene.codecs.PointWriter;
|
||||
import org.apache.lucene.index.BasePointFormatTestCase;
|
||||
import org.apache.lucene.index.SegmentReadState;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
/**
|
||||
* Tests Lucene60PointFormat
|
||||
*/
|
||||
public class TestLucene60PointFormat extends BasePointFormatTestCase {
|
||||
private final Codec codec;
|
||||
|
||||
public TestLucene60PointFormat() {
|
||||
// standard issue
|
||||
Codec defaultCodec = TestUtil.getDefaultCodec();
|
||||
if (random().nextBoolean()) {
|
||||
// randomize parameters
|
||||
int maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 500);
|
||||
double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: using Lucene60PointFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
|
||||
}
|
||||
|
||||
// sneaky impersonation!
|
||||
codec = new FilterCodec(defaultCodec.getName(), defaultCodec) {
|
||||
@Override
|
||||
public PointFormat pointFormat() {
|
||||
return new PointFormat() {
|
||||
@Override
|
||||
public PointWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
|
||||
return new Lucene60PointWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
|
||||
}
|
||||
|
||||
@Override
|
||||
public PointReader fieldsReader(SegmentReadState readState) throws IOException {
|
||||
return new Lucene60PointReader(readState);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
} else {
|
||||
// standard issue
|
||||
codec = defaultCodec;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Codec getCodec() {
|
||||
return codec;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void testMergeStability() throws Exception {
|
||||
assumeFalse("TODO: mess with the parameters and test gets angry!", codec instanceof FilterCodec);
|
||||
super.testMergeStability();
|
||||
}
|
||||
|
||||
}
|
|
@ -32,6 +32,8 @@ import org.apache.lucene.codecs.memory.MemoryPostingsFormat;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.IntPoint;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
|
@ -169,6 +171,9 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
|
||||
doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
|
||||
doc.add(new IntPoint("doc", i));
|
||||
doc.add(new IntPoint("doc2d", i, i));
|
||||
doc.add(new NumericDocValuesField("dv", i));
|
||||
writer.updateDocument(new Term("id", "" + (i%10)), doc);
|
||||
}
|
||||
// Deletes one of the 10 added docs, leaving 9:
|
||||
|
@ -202,6 +207,9 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
|
||||
doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
|
||||
doc.add(new IntPoint("doc", i));
|
||||
doc.add(new IntPoint("doc2d", i, i));
|
||||
doc.add(new NumericDocValuesField("dv", i));
|
||||
writer.updateDocument(new Term("id", "" + (i%10)), doc);
|
||||
}
|
||||
|
||||
|
@ -238,6 +246,9 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
doc.add(newStringField("id", "" + (i % 10), Field.Store.NO));
|
||||
doc.add(newTextField("content", "bbb " + i, Field.Store.NO));
|
||||
doc.add(new IntPoint("doc", i));
|
||||
doc.add(new IntPoint("doc2d", i, i));
|
||||
doc.add(new NumericDocValuesField("dv", i));
|
||||
writer.updateDocument(new Term("id", "" + (i%10)), doc);
|
||||
}
|
||||
|
||||
|
@ -510,6 +521,9 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
for (int i = 0; i < numDocs; i++) {
|
||||
Document doc = new Document();
|
||||
doc.add(newTextField("content", "aaa", Field.Store.NO));
|
||||
doc.add(new IntPoint("doc", i));
|
||||
doc.add(new IntPoint("doc2d", i, i));
|
||||
doc.add(new NumericDocValuesField("dv", i));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
}
|
||||
|
@ -518,6 +532,9 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
for (int i = 0; i < numDocs; i++) {
|
||||
Document doc = new Document();
|
||||
doc.add(newTextField("content", "bbb", Field.Store.NO));
|
||||
doc.add(new IntPoint("doc", i));
|
||||
doc.add(new IntPoint("doc2d", i, i));
|
||||
doc.add(new NumericDocValuesField("dv", i));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
}
|
||||
|
@ -1001,6 +1018,9 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
doc.add(newTextField("content", "aaa", Field.Store.NO));
|
||||
doc.add(newTextField("id", "" + (docStart + i), Field.Store.YES));
|
||||
doc.add(new IntPoint("doc", i));
|
||||
doc.add(new IntPoint("doc2d", i, i));
|
||||
doc.add(new NumericDocValuesField("dv", i));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,147 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.store.BaseDirectoryWrapper;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LineFileDocs;
|
||||
import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
/**
|
||||
* Test that a plain default detects broken index headers early (on opening a reader).
|
||||
*/
|
||||
@SuppressFileSystems("ExtrasFS")
|
||||
public class TestAllFilesCheckIndexHeader extends LuceneTestCase {
|
||||
public void test() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
if (dir instanceof MockDirectoryWrapper) {
|
||||
// otherwise we can have unref'd files left in the index that won't be visited when opening a reader and lead to scary looking false failures:
|
||||
((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
|
||||
}
|
||||
|
||||
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
|
||||
conf.setCodec(TestUtil.getDefaultCodec());
|
||||
|
||||
// Disable CFS 80% of the time so we can truncate individual files, but the other 20% of the time we test truncation of .cfs/.cfe too:
|
||||
if (random().nextInt(5) != 1) {
|
||||
conf.setUseCompoundFile(false);
|
||||
conf.getMergePolicy().setNoCFSRatio(0.0);
|
||||
}
|
||||
|
||||
RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
|
||||
// Use LineFileDocs so we (hopefully) get most Lucene features
|
||||
// tested, e.g. IntPoint was recently added to it:
|
||||
LineFileDocs docs = new LineFileDocs(random());
|
||||
for (int i = 0; i < 100; i++) {
|
||||
riw.addDocument(docs.nextDoc());
|
||||
if (random().nextInt(7) == 0) {
|
||||
riw.commit();
|
||||
}
|
||||
if (random().nextInt(20) == 0) {
|
||||
riw.deleteDocuments(new Term("docid", Integer.toString(i)));
|
||||
}
|
||||
if (random().nextInt(15) == 0) {
|
||||
riw.updateNumericDocValue(new Term("docid", Integer.toString(i)), "docid_intDV", Long.valueOf(i));
|
||||
}
|
||||
}
|
||||
|
||||
if (TEST_NIGHTLY == false) {
|
||||
riw.forceMerge(1);
|
||||
}
|
||||
riw.close();
|
||||
checkIndexHeader(dir);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void checkIndexHeader(Directory dir) throws IOException {
|
||||
for(String name : dir.listAll()) {
|
||||
if (name.equals(IndexWriter.WRITE_LOCK_NAME) == false) {
|
||||
checkOneFile(dir, name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void checkOneFile(Directory dir, String victim) throws IOException {
|
||||
try (BaseDirectoryWrapper dirCopy = newDirectory()) {
|
||||
dirCopy.setCheckIndexOnClose(false);
|
||||
long victimLength = dir.fileLength(victim);
|
||||
int wrongBytes = TestUtil.nextInt(random(), 1, (int) Math.min(100, victimLength));
|
||||
assert victimLength > 0;
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now break file " + victim + " by randomizing first " + wrongBytes + " of " + victimLength);
|
||||
}
|
||||
|
||||
for(String name : dir.listAll()) {
|
||||
if (name.equals(victim) == false) {
|
||||
dirCopy.copyFrom(dir, name, name, IOContext.DEFAULT);
|
||||
} else {
|
||||
|
||||
// Iterate until our randomly generated bytes are indeed different from the first bytes of the file ... the vast majority of the
|
||||
// time this will only require one iteration!
|
||||
while (true) {
|
||||
try(IndexOutput out = dirCopy.createOutput(name, IOContext.DEFAULT);
|
||||
IndexInput in = dir.openInput(name, IOContext.DEFAULT)) {
|
||||
// keeps same file length, but replaces the first wrongBytes with random bytes:
|
||||
byte[] bytes = new byte[wrongBytes];
|
||||
random().nextBytes(bytes);
|
||||
out.writeBytes(bytes, 0, bytes.length);
|
||||
byte[] bytes2 = new byte[wrongBytes];
|
||||
in.readBytes(bytes2, 0, bytes2.length);
|
||||
if (Arrays.equals(bytes, bytes2) == false) {
|
||||
// We successfully randomly generated bytes that differ from the bytes in the file:
|
||||
out.copyBytes(in, victimLength - wrongBytes);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
dirCopy.sync(Collections.singleton(name));
|
||||
}
|
||||
|
||||
try {
|
||||
// NOTE: we .close so that if the test fails (truncation not detected) we don't also get all these confusing errors about open files:
|
||||
DirectoryReader.open(dirCopy).close();
|
||||
fail("wrong bytes not detected after randomizing first " + wrongBytes + " bytes out of " + victimLength + " for file " + victim);
|
||||
} catch (CorruptIndexException | EOFException | IndexFormatTooOldException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// CheckIndex should also fail:
|
||||
try {
|
||||
TestUtil.checkIndex(dirCopy, true, true);
|
||||
fail("wrong bytes not detected after randomizing first " + wrongBytes + " bytes out of " + victimLength + " for file " + victim);
|
||||
} catch (CorruptIndexException | EOFException | IndexFormatTooOldException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.store.BaseDirectoryWrapper;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LineFileDocs;
|
||||
import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
/**
|
||||
* Test that a plain default detects index file truncation early (on opening a reader).
|
||||
*/
|
||||
@SuppressFileSystems("ExtrasFS")
|
||||
public class TestAllFilesDetectTruncation extends LuceneTestCase {
|
||||
public void test() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
if (dir instanceof MockDirectoryWrapper) {
|
||||
// otherwise we can have unref'd files left in the index that won't be visited when opening a reader and lead to scary looking false failures:
|
||||
((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
|
||||
}
|
||||
|
||||
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
|
||||
conf.setCodec(TestUtil.getDefaultCodec());
|
||||
|
||||
// Disable CFS 80% of the time so we can truncate individual files, but the other 20% of the time we test truncation of .cfs/.cfe too:
|
||||
if (random().nextInt(5) != 1) {
|
||||
conf.setUseCompoundFile(false);
|
||||
conf.getMergePolicy().setNoCFSRatio(0.0);
|
||||
}
|
||||
|
||||
RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
|
||||
// Use LineFileDocs so we (hopefully) get most Lucene features
|
||||
// tested, e.g. IntPoint was recently added to it:
|
||||
LineFileDocs docs = new LineFileDocs(random());
|
||||
for (int i = 0; i < 100; i++) {
|
||||
riw.addDocument(docs.nextDoc());
|
||||
if (random().nextInt(7) == 0) {
|
||||
riw.commit();
|
||||
}
|
||||
if (random().nextInt(20) == 0) {
|
||||
riw.deleteDocuments(new Term("docid", Integer.toString(i)));
|
||||
}
|
||||
if (random().nextInt(15) == 0) {
|
||||
riw.updateNumericDocValue(new Term("docid", Integer.toString(i)), "docid_intDV", Long.valueOf(i));
|
||||
}
|
||||
}
|
||||
if (TEST_NIGHTLY == false) {
|
||||
riw.forceMerge(1);
|
||||
}
|
||||
riw.close();
|
||||
checkTruncation(dir);
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void checkTruncation(Directory dir) throws IOException {
|
||||
for(String name : dir.listAll()) {
|
||||
if (name.equals(IndexWriter.WRITE_LOCK_NAME) == false) {
|
||||
truncateOneFile(dir, name);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void truncateOneFile(Directory dir, String victim) throws IOException {
|
||||
try (BaseDirectoryWrapper dirCopy = newDirectory()) {
|
||||
dirCopy.setCheckIndexOnClose(false);
|
||||
long victimLength = dir.fileLength(victim);
|
||||
int lostBytes = TestUtil.nextInt(random(), 1, (int) Math.min(100, victimLength));
|
||||
assert victimLength > 0;
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now truncate file " + victim + " by removing " + lostBytes + " of " + victimLength + " bytes");
|
||||
}
|
||||
|
||||
for(String name : dir.listAll()) {
|
||||
if (name.equals(victim) == false) {
|
||||
dirCopy.copyFrom(dir, name, name, IOContext.DEFAULT);
|
||||
} else {
|
||||
try(IndexOutput out = dirCopy.createOutput(name, IOContext.DEFAULT);
|
||||
IndexInput in = dir.openInput(name, IOContext.DEFAULT)) {
|
||||
out.copyBytes(in, victimLength - lostBytes);
|
||||
}
|
||||
}
|
||||
dirCopy.sync(Collections.singleton(name));
|
||||
}
|
||||
|
||||
try {
|
||||
// NOTE: we .close so that if the test fails (truncation not detected) we don't also get all these confusing errors about open files:
|
||||
DirectoryReader.open(dirCopy).close();
|
||||
fail("truncation not detected after removing " + lostBytes + " bytes out of " + victimLength + " for file " + victim);
|
||||
} catch (CorruptIndexException | EOFException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// CheckIndex should also fail:
|
||||
try {
|
||||
TestUtil.checkIndex(dirCopy, true, true);
|
||||
fail("truncation not detected after removing " + lostBytes + " bytes out of " + victimLength + " for file " + victim);
|
||||
} catch (CorruptIndexException | EOFException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -21,11 +21,9 @@ import java.io.IOException;
|
|||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.codecs.CodecUtil;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.util.LineFileDocs;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
|
@ -38,23 +36,19 @@ public class TestAllFilesHaveChecksumFooter extends LuceneTestCase {
|
|||
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
|
||||
conf.setCodec(TestUtil.getDefaultCodec());
|
||||
RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
|
||||
Document doc = new Document();
|
||||
// these fields should sometimes get term vectors, etc
|
||||
Field idField = newStringField("id", "", Field.Store.NO);
|
||||
Field bodyField = newTextField("body", "", Field.Store.NO);
|
||||
Field dvField = new NumericDocValuesField("dv", 5);
|
||||
doc.add(idField);
|
||||
doc.add(bodyField);
|
||||
doc.add(dvField);
|
||||
// Use LineFileDocs so we (hopefully) get most Lucene features
|
||||
// tested, e.g. IntPoint was recently added to it:
|
||||
LineFileDocs docs = new LineFileDocs(random());
|
||||
for (int i = 0; i < 100; i++) {
|
||||
idField.setStringValue(Integer.toString(i));
|
||||
bodyField.setStringValue(TestUtil.randomUnicodeString(random()));
|
||||
riw.addDocument(doc);
|
||||
riw.addDocument(docs.nextDoc());
|
||||
if (random().nextInt(7) == 0) {
|
||||
riw.commit();
|
||||
}
|
||||
if (random().nextInt(20) == 0) {
|
||||
riw.deleteDocuments(new Term("id", Integer.toString(i)));
|
||||
riw.deleteDocuments(new Term("docid", Integer.toString(i)));
|
||||
}
|
||||
if (random().nextInt(15) == 0) {
|
||||
riw.updateNumericDocValue(new Term("docid", Integer.toString(i)), "docid_intDV", Long.valueOf(i));
|
||||
}
|
||||
}
|
||||
riw.close();
|
||||
|
|
|
@ -23,13 +23,9 @@ import java.util.Map;
|
|||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.codecs.CodecUtil;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.apache.lucene.util.LineFileDocs;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
|
@ -43,32 +39,19 @@ public class TestAllFilesHaveCodecHeader extends LuceneTestCase {
|
|||
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
|
||||
conf.setCodec(TestUtil.getDefaultCodec());
|
||||
RandomIndexWriter riw = new RandomIndexWriter(random(), dir, conf);
|
||||
Document doc = new Document();
|
||||
Field idField = newStringField("id", "", Field.Store.YES);
|
||||
Field bodyField = newTextField("body", "", Field.Store.YES);
|
||||
FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
|
||||
vectorsType.setStoreTermVectors(true);
|
||||
vectorsType.setStoreTermVectorPositions(true);
|
||||
Field vectorsField = new Field("vectors", "", vectorsType);
|
||||
Field dvField = new NumericDocValuesField("dv", 5);
|
||||
doc.add(idField);
|
||||
doc.add(bodyField);
|
||||
doc.add(vectorsField);
|
||||
doc.add(dvField);
|
||||
// Use LineFileDocs so we (hopefully) get most Lucene features
|
||||
// tested, e.g. IntPoint was recently added to it:
|
||||
LineFileDocs docs = new LineFileDocs(random());
|
||||
for (int i = 0; i < 100; i++) {
|
||||
idField.setStringValue(Integer.toString(i));
|
||||
bodyField.setStringValue(TestUtil.randomUnicodeString(random()));
|
||||
dvField.setLongValue(random().nextInt(5));
|
||||
vectorsField.setStringValue(TestUtil.randomUnicodeString(random()));
|
||||
riw.addDocument(doc);
|
||||
riw.addDocument(docs.nextDoc());
|
||||
if (random().nextInt(7) == 0) {
|
||||
riw.commit();
|
||||
}
|
||||
if (random().nextInt(20) == 0) {
|
||||
riw.deleteDocuments(new Term("id", Integer.toString(i)));
|
||||
riw.deleteDocuments(new Term("docid", Integer.toString(i)));
|
||||
}
|
||||
if (random().nextInt(15) == 0) {
|
||||
riw.updateNumericDocValue(new Term("id"), "dv", Long.valueOf(i));
|
||||
riw.updateNumericDocValue(new Term("docid", Integer.toString(i)), "docid_intDV", Long.valueOf(i));
|
||||
}
|
||||
}
|
||||
riw.close();
|
||||
|
|
|
@ -80,6 +80,8 @@ public class TestAtomicUpdate extends LuceneTestCase {
|
|||
Document d = new Document();
|
||||
d.add(new StringField("id", Integer.toString(i), Field.Store.YES));
|
||||
d.add(new TextField("contents", English.intToEnglish(i+10*count), Field.Store.NO));
|
||||
d.add(new IntPoint("doc", i));
|
||||
d.add(new IntPoint("doc2d", i, i));
|
||||
writer.updateDocument(new Term("id", Integer.toString(i)), d);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,6 +20,8 @@ package org.apache.lucene.index;
|
|||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.IntPoint;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.store.BaseDirectoryWrapper;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
@ -35,6 +37,9 @@ public class TestCodecHoldsOpenFiles extends LuceneTestCase {
|
|||
for(int i=0;i<numDocs;i++) {
|
||||
Document doc = new Document();
|
||||
doc.add(newField("foo", "bar", TextField.TYPE_NOT_STORED));
|
||||
doc.add(new IntPoint("doc", i));
|
||||
doc.add(new IntPoint("doc2d", i, i));
|
||||
doc.add(new NumericDocValuesField("dv", i));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.apache.lucene.document.BinaryDocValuesField;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.IntPoint;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.SortedDocValuesField;
|
||||
import org.apache.lucene.document.SortedNumericDocValuesField;
|
||||
|
@ -125,6 +126,8 @@ public class TestIndexWriterExceptions2 extends LuceneTestCase {
|
|||
FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
|
||||
ft.setStoreTermVectors(true);
|
||||
doc.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
|
||||
doc.add(new IntPoint("point", random().nextInt()));
|
||||
doc.add(new IntPoint("point2d", random().nextInt(), random().nextInt()));
|
||||
|
||||
if (random().nextInt(10) > 0) {
|
||||
// single doc
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.apache.lucene.codecs.LiveDocsFormat;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.IntPoint;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.TextField;
|
||||
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
|
||||
|
@ -572,6 +573,8 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
Document doc = new Document();
|
||||
doc.add(newTextField("content", "aaa", Field.Store.NO));
|
||||
doc.add(new NumericDocValuesField("numericdv", 1));
|
||||
doc.add(new IntPoint("point", 1));
|
||||
doc.add(new IntPoint("point2d", 1, 1));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
|
@ -580,6 +583,8 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
|
|||
doc.add(newTextField("content", "aaa " + index, Field.Store.NO));
|
||||
doc.add(newTextField("id", "" + index, Field.Store.NO));
|
||||
doc.add(new NumericDocValuesField("numericdv", 1));
|
||||
doc.add(new IntPoint("point", 1));
|
||||
doc.add(new IntPoint("point2d", 1, 1));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.apache.lucene.document.BinaryDocValuesField;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.IntPoint;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.SortedDocValuesField;
|
||||
import org.apache.lucene.document.SortedNumericDocValuesField;
|
||||
|
@ -124,6 +125,8 @@ public class TestIndexWriterOnVMError extends LuceneTestCase {
|
|||
FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
|
||||
ft.setStoreTermVectors(true);
|
||||
doc.add(newField("text_vectors", TestUtil.randomAnalysisString(random(), 6, true), ft));
|
||||
doc.add(new IntPoint("point", random().nextInt()));
|
||||
doc.add(new IntPoint("point2d", random().nextInt(), random().nextInt()));
|
||||
|
||||
if (random().nextInt(10) > 0) {
|
||||
// single doc
|
||||
|
|
File diff suppressed because it is too large
|
@ -0,0 +1,129 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.store.BaseDirectoryWrapper;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.LineFileDocs;
|
||||
import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
/**
|
||||
* Test that the same file name, but from a different index, is detected as foreign.
|
||||
*/
|
||||
@SuppressFileSystems("ExtrasFS")
|
||||
public class TestSwappedIndexFiles extends LuceneTestCase {
|
||||
public void test() throws Exception {
|
||||
Directory dir1 = newDirectory();
|
||||
Directory dir2 = newDirectory();
|
||||
|
||||
if (dir1 instanceof MockDirectoryWrapper) {
|
||||
// otherwise we can have unref'd files left in the index that won't be visited when opening a reader and lead to scary looking false failures:
|
||||
((MockDirectoryWrapper) dir1).setEnableVirusScanner(false);
|
||||
}
|
||||
if (dir2 instanceof MockDirectoryWrapper) {
|
||||
// otherwise we can have unref'd files left in the index that won't be visited when opening a reader and lead to scary looking false failures:
|
||||
((MockDirectoryWrapper) dir2).setEnableVirusScanner(false);
|
||||
}
|
||||
|
||||
// Disable CFS 80% of the time so we can truncate individual files, but the other 20% of the time we test truncation of .cfs/.cfe too:
|
||||
boolean useCFS = random().nextInt(5) == 1;
|
||||
|
||||
// Use LineFileDocs so we (hopefully) get most Lucene features
|
||||
// tested, e.g. IntPoint was recently added to it:
|
||||
LineFileDocs docs = new LineFileDocs(random());
|
||||
Document doc = docs.nextDoc();
|
||||
long seed = random().nextLong();
|
||||
|
||||
indexOneDoc(seed, dir1, doc, useCFS);
|
||||
indexOneDoc(seed, dir2, doc, useCFS);
|
||||
|
||||
swapFiles(dir1, dir2);
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
private void indexOneDoc(long seed, Directory dir, Document doc, boolean useCFS) throws IOException {
|
||||
Random random = new Random(seed);
|
||||
IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random));
|
||||
conf.setCodec(TestUtil.getDefaultCodec());
|
||||
|
||||
if (useCFS == false) {
|
||||
conf.setUseCompoundFile(false);
|
||||
conf.getMergePolicy().setNoCFSRatio(0.0);
|
||||
} else {
|
||||
conf.setUseCompoundFile(true);
|
||||
conf.getMergePolicy().setNoCFSRatio(1.0);
|
||||
}
|
||||
|
||||
RandomIndexWriter w = new RandomIndexWriter(random, dir, conf);
|
||||
w.addDocument(doc);
|
||||
w.close();
|
||||
}
|
||||
|
||||
private void swapFiles(Directory dir1, Directory dir2) throws IOException {
|
||||
for(String name : dir1.listAll()) {
|
||||
if (name.equals(IndexWriter.WRITE_LOCK_NAME)) {
|
||||
continue;
|
||||
}
|
||||
swapOneFile(dir1, dir2, name);
|
||||
}
|
||||
}
|
||||
|
||||
private void swapOneFile(Directory dir1, Directory dir2, String victim) throws IOException {
|
||||
try (BaseDirectoryWrapper dirCopy = newDirectory()) {
|
||||
dirCopy.setCheckIndexOnClose(false);
|
||||
|
||||
// Copy all files from dir1 to dirCopy, except victim which we copy from dir2:
|
||||
for(String name : dir1.listAll()) {
|
||||
if (name.equals(victim) == false) {
|
||||
dirCopy.copyFrom(dir1, name, name, IOContext.DEFAULT);
|
||||
} else {
|
||||
dirCopy.copyFrom(dir2, name, name, IOContext.DEFAULT);
|
||||
}
|
||||
dirCopy.sync(Collections.singleton(name));
|
||||
}
|
||||
|
||||
try {
|
||||
// NOTE: we .close so that if the test fails (truncation not detected) we don't also get all these confusing errors about open files:
|
||||
DirectoryReader.open(dirCopy).close();
|
||||
fail("wrong file " + victim + " not detected");
|
||||
} catch (CorruptIndexException | EOFException | IndexFormatTooOldException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
// CheckIndex should also fail:
|
||||
try {
|
||||
TestUtil.checkIndex(dirCopy, true, true);
|
||||
fail("wrong file " + victim + " not detected");
|
||||
} catch (CorruptIndexException | EOFException | IndexFormatTooOldException e) {
|
||||
// expected
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -140,6 +140,11 @@ public final class AssertingPointFormat extends PointFormat {
|
|||
in.merge(mergeState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void finish() throws IOException {
|
||||
in.finish();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
in.close();
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.apache.lucene.codecs.FieldInfosFormat;
|
|||
import org.apache.lucene.codecs.FilterCodec;
|
||||
import org.apache.lucene.codecs.LiveDocsFormat;
|
||||
import org.apache.lucene.codecs.NormsFormat;
|
||||
import org.apache.lucene.codecs.PointFormat;
|
||||
import org.apache.lucene.codecs.PostingsFormat;
|
||||
import org.apache.lucene.codecs.SegmentInfoFormat;
|
||||
import org.apache.lucene.codecs.StoredFieldsFormat;
|
||||
|
@ -91,6 +92,11 @@ public class CrankyCodec extends FilterCodec {
|
|||
return new CrankyCompoundFormat(delegate.compoundFormat(), random);
|
||||
}
|
||||
|
||||
@Override
|
||||
public PointFormat pointFormat() {
|
||||
return new CrankyPointFormat(delegate.pointFormat(), random);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Cranky(" + delegate + ")";
|
||||
|
|
|
@ -0,0 +1,176 @@
|
|||
package org.apache.lucene.codecs.cranky;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.lucene.codecs.PointFormat;
|
||||
import org.apache.lucene.codecs.PointReader;
|
||||
import org.apache.lucene.codecs.PointWriter;
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.MergeState;
|
||||
import org.apache.lucene.index.SegmentInfo;
|
||||
import org.apache.lucene.index.SegmentReadState;
|
||||
import org.apache.lucene.index.SegmentWriteState;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
|
||||
class CrankyPointFormat extends PointFormat {
|
||||
PointFormat delegate;
|
||||
Random random;
|
||||
|
||||
CrankyPointFormat(PointFormat delegate, Random random) {
|
||||
this.delegate = delegate;
|
||||
this.random = random;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PointWriter fieldsWriter(SegmentWriteState state) throws IOException {
|
||||
return new CrankyPointWriter(delegate.fieldsWriter(state), random);
|
||||
}
|
||||
|
||||
@Override
|
||||
public PointReader fieldsReader(SegmentReadState state) throws IOException {
|
||||
return new CrankyPointReader(delegate.fieldsReader(state), random);
|
||||
}
|
||||
|
||||
static class CrankyPointWriter extends PointWriter {
|
||||
final PointWriter delegate;
|
||||
final Random random;
|
||||
|
||||
public CrankyPointWriter(PointWriter delegate, Random random) {
|
||||
this.delegate = delegate;
|
||||
this.random = random;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeField(FieldInfo fieldInfo, PointReader values) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
delegate.writeField(fieldInfo, values);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void finish() throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
delegate.finish();
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void merge(MergeState mergeState) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
delegate.merge(mergeState);
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
delegate.close();
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static class CrankyPointReader extends PointReader {
|
||||
final PointReader delegate;
|
||||
final Random random;
|
||||
public CrankyPointReader(PointReader delegate, Random random) {
|
||||
this.delegate = delegate;
|
||||
this.random = random;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void checkIntegrity() throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
delegate.checkIntegrity();
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void intersect(String fieldName, IntersectVisitor visitor) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
delegate.intersect(fieldName, visitor);
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] getMinPackedValue(String fieldName) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
return delegate.getMinPackedValue(fieldName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] getMaxPackedValue(String fieldName) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
return delegate.getMaxPackedValue(fieldName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getNumDimensions(String fieldName) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
return delegate.getNumDimensions(fieldName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getBytesPerDimension(String fieldName) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
return delegate.getBytesPerDimension(fieldName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
delegate.close();
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return delegate.ramBytesUsed();
|
||||
}
|
||||
}
|
||||
}
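
CrankyPointFormat wraps a delegate PointFormat and throws a fake IOException roughly 1% of the time at every entry point, so tests can verify that the points code releases files and temporary data when a write or merge fails. A minimal sketch of how a test might wire it in; the Cranky classes come from this patch, while the surrounding scaffolding (random(), newDirectory(), TestUtil) is the usual LuceneTestCase setup and is illustrative only:

    Random random = random();
    try (Directory dir = newDirectory()) {
      IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random));
      // Wrap the default codec so point reads/writes randomly fail:
      iwc.setCodec(new CrankyCodec(TestUtil.getDefaultCodec(), random));
      try (IndexWriter w = new IndexWriter(dir, iwc)) {
        Document doc = new Document();
        doc.add(new IntPoint("point", 42));
        w.addDocument(doc); // may surface the injected "Fake IOException"
      } catch (IOException expected) {
        // the interesting assertion is that no index files are leaked here
      }
    }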
|
|
@@ -195,6 +195,7 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {

  /** The purpose of this test is to make sure that bulk merge doesn't accumulate useless data over runs. */
  public void testMergeStability() throws Exception {
    assumeTrue("merge is not stable", mergeIsStable());
    Directory dir = newDirectory();
    if (dir instanceof MockDirectoryWrapper) {
      // Else, the virus checker may prevent deletion of files and cause

@@ -240,6 +241,10 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
    dir2.close();
  }

  protected boolean mergeIsStable() {
    return true;
  }

  /** Test the accuracy of the ramBytesUsed estimations. */
  @Slow
  public void testRamBytesUsed() throws IOException {
@ -0,0 +1,929 @@
|
|||
package org.apache.lucene.index;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.math.BigInteger;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.BitSet;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.codecs.Codec;
|
||||
import org.apache.lucene.document.BinaryPoint;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.IntPoint;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.PointValues.IntersectVisitor;
|
||||
import org.apache.lucene.index.PointValues.Relation;
|
||||
import org.apache.lucene.search.ExactPointQuery;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.MockDirectoryWrapper;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.NumericUtils;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
/**
|
||||
* Abstract class to do basic tests for a points format.
|
||||
* NOTE: This test focuses on the points impl, nothing else.
|
||||
* The [stretch] goal is for this test to be
|
||||
* so thorough in testing a new PointFormat that if this
|
||||
* test passes, then all Lucene/Solr tests should also pass. Ie,
|
||||
* if there is some bug in a given PointFormat that this
|
||||
* test fails to catch then this test needs to be improved! */
|
||||
public abstract class BasePointFormatTestCase extends BaseIndexFileFormatTestCase {
|
||||
|
||||
@Override
|
||||
protected void addRandomFields(Document doc) {
|
||||
final int numValues = random().nextInt(3);
|
||||
for (int i = 0; i < numValues; i++) {
|
||||
doc.add(new IntPoint("f", random().nextInt()));
|
||||
}
|
||||
}
|
||||
|
||||
public void testBasic() throws Exception {
|
||||
Directory dir = getDirectory(20);
|
||||
IndexWriterConfig iwc = newIndexWriterConfig();
|
||||
iwc.setMergePolicy(newLogMergePolicy());
|
||||
IndexWriter w = new IndexWriter(dir, iwc);
|
||||
byte[] point = new byte[4];
|
||||
for(int i=0;i<20;i++) {
|
||||
Document doc = new Document();
|
||||
NumericUtils.intToBytes(i, point, 0);
|
||||
doc.add(new BinaryPoint("dim", point));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
w.forceMerge(1);
|
||||
w.close();
|
||||
|
||||
DirectoryReader r = DirectoryReader.open(dir);
|
||||
LeafReader sub = getOnlySegmentReader(r);
|
||||
PointValues values = sub.getPointValues();
|
||||
|
||||
// Simple test: make sure intersect can visit every doc:
|
||||
BitSet seen = new BitSet();
|
||||
values.intersect("dim",
|
||||
new IntersectVisitor() {
|
||||
@Override
|
||||
public Relation compare(byte[] minPacked, byte[] maxPacked) {
|
||||
return Relation.CELL_CROSSES_QUERY;
|
||||
}
|
||||
public void visit(int docID) {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
public void visit(int docID, byte[] packedValue) {
|
||||
seen.set(docID);
|
||||
assertEquals(docID, NumericUtils.bytesToInt(packedValue, 0));
|
||||
}
|
||||
});
|
||||
assertEquals(20, seen.cardinality());
|
||||
IOUtils.close(r, dir);
|
||||
}
|
||||
|
||||
public void testMerge() throws Exception {
|
||||
Directory dir = getDirectory(20);
|
||||
IndexWriterConfig iwc = newIndexWriterConfig();
|
||||
iwc.setMergePolicy(newLogMergePolicy());
|
||||
IndexWriter w = new IndexWriter(dir, iwc);
|
||||
byte[] point = new byte[4];
|
||||
for(int i=0;i<20;i++) {
|
||||
Document doc = new Document();
|
||||
NumericUtils.intToBytes(i, point, 0);
|
||||
doc.add(new BinaryPoint("dim", point));
|
||||
w.addDocument(doc);
|
||||
if (i == 10) {
|
||||
w.commit();
|
||||
}
|
||||
}
|
||||
w.forceMerge(1);
|
||||
w.close();
|
||||
|
||||
DirectoryReader r = DirectoryReader.open(dir);
|
||||
LeafReader sub = getOnlySegmentReader(r);
|
||||
PointValues values = sub.getPointValues();
|
||||
|
||||
// Simple test: make sure intersect can visit every doc:
|
||||
BitSet seen = new BitSet();
|
||||
values.intersect("dim",
|
||||
new IntersectVisitor() {
|
||||
@Override
|
||||
public Relation compare(byte[] minPacked, byte[] maxPacked) {
|
||||
return Relation.CELL_CROSSES_QUERY;
|
||||
}
|
||||
public void visit(int docID) {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
public void visit(int docID, byte[] packedValue) {
|
||||
seen.set(docID);
|
||||
assertEquals(docID, NumericUtils.bytesToInt(packedValue, 0));
|
||||
}
|
||||
});
|
||||
assertEquals(20, seen.cardinality());
|
||||
IOUtils.close(r, dir);
|
||||
}
|
||||
|
||||
public void testAllPointDocsDeletedInSegment() throws Exception {
|
||||
Directory dir = getDirectory(20);
|
||||
IndexWriterConfig iwc = newIndexWriterConfig();
|
||||
IndexWriter w = new IndexWriter(dir, iwc);
|
||||
byte[] point = new byte[4];
|
||||
for(int i=0;i<10;i++) {
|
||||
Document doc = new Document();
|
||||
NumericUtils.intToBytes(i, point, 0);
|
||||
doc.add(new BinaryPoint("dim", point));
|
||||
doc.add(new NumericDocValuesField("id", i));
|
||||
doc.add(newStringField("x", "x", Field.Store.NO));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
w.addDocument(new Document());
|
||||
w.deleteDocuments(new Term("x", "x"));
|
||||
if (random().nextBoolean()) {
|
||||
w.forceMerge(1);
|
||||
}
|
||||
w.close();
|
||||
DirectoryReader r = DirectoryReader.open(dir);
|
||||
assertEquals(1, r.numDocs());
|
||||
PointValues values = MultiPointValues.get(r);
|
||||
Bits liveDocs = MultiFields.getLiveDocs(r);
|
||||
NumericDocValues idValues = MultiDocValues.getNumericValues(r, "id");
|
||||
|
||||
if (values != null) {
|
||||
BitSet seen = new BitSet();
|
||||
values.intersect("dim",
|
||||
new IntersectVisitor() {
|
||||
@Override
|
||||
public Relation compare(byte[] minPacked, byte[] maxPacked) {
|
||||
return Relation.CELL_CROSSES_QUERY;
|
||||
}
|
||||
public void visit(int docID) {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
public void visit(int docID, byte[] packedValue) {
|
||||
if (liveDocs.get(docID)) {
|
||||
seen.set(docID);
|
||||
}
|
||||
assertEquals(idValues.get(docID), NumericUtils.bytesToInt(packedValue, 0));
|
||||
}
|
||||
});
|
||||
assertEquals(0, seen.cardinality());
|
||||
}
|
||||
IOUtils.close(r, dir);
|
||||
}
|
||||
|
||||
/** Make sure we close open files, delete temp files, etc., on exception */
|
||||
public void testWithExceptions() throws Exception {
|
||||
int numDocs = atLeast(10000);
|
||||
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
|
||||
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
|
||||
|
||||
byte[][][] docValues = new byte[numDocs][][];
|
||||
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
byte[][] values = new byte[numDims][];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
values[dim] = new byte[numBytesPerDim];
|
||||
random().nextBytes(values[dim]);
|
||||
}
|
||||
docValues[docID] = values;
|
||||
}
|
||||
|
||||
// Keep retrying until we 1) we allow a big enough heap, and 2) we hit a random IOExc from MDW:
|
||||
boolean done = false;
|
||||
while (done == false) {
|
||||
try (MockDirectoryWrapper dir = newMockFSDirectory(createTempDir())) {
|
||||
try {
|
||||
dir.setRandomIOExceptionRate(0.05);
|
||||
dir.setRandomIOExceptionRateOnOpen(0.05);
|
||||
if (dir instanceof MockDirectoryWrapper) {
|
||||
dir.setEnableVirusScanner(false);
|
||||
}
|
||||
verify(dir, docValues, null, numDims, numBytesPerDim, true);
|
||||
} catch (IllegalStateException ise) {
|
||||
if (ise.getMessage().contains("this writer hit an unrecoverable error")) {
|
||||
Throwable cause = ise.getCause();
|
||||
if (cause != null && cause.getMessage().contains("a random IOException")) {
|
||||
done = true;
|
||||
} else {
|
||||
throw ise;
|
||||
}
|
||||
} else {
|
||||
throw ise;
|
||||
}
|
||||
} catch (AssertionError ae) {
|
||||
if (ae.getMessage().contains("does not exist; files=")) {
|
||||
// OK: likely we threw the random IOExc when IW was asserting the commit files exist
|
||||
done = true;
|
||||
} else {
|
||||
throw ae;
|
||||
}
|
||||
} catch (IllegalArgumentException iae) {
|
||||
// This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry w/ more heap
|
||||
assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
|
||||
} catch (IOException ioe) {
|
||||
String message = ioe.getMessage();
|
||||
if (message.contains("a random IOException") || message.contains("background merge hit exception")) {
|
||||
// BKDWriter should fully clean up after itself:
|
||||
done = true;
|
||||
} else {
|
||||
throw ioe;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testMultiValued() throws Exception {
|
||||
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
|
||||
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
|
||||
|
||||
int numDocs = atLeast(1000);
|
||||
List<byte[][]> docValues = new ArrayList<>();
|
||||
List<Integer> docIDs = new ArrayList<>();
|
||||
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
int numValuesInDoc = TestUtil.nextInt(random(), 1, 5);
|
||||
for(int ord=0;ord<numValuesInDoc;ord++) {
|
||||
docIDs.add(docID);
|
||||
byte[][] values = new byte[numDims][];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
values[dim] = new byte[numBytesPerDim];
|
||||
random().nextBytes(values[dim]);
|
||||
}
|
||||
docValues.add(values);
|
||||
}
|
||||
}
|
||||
|
||||
byte[][][] docValuesArray = docValues.toArray(new byte[docValues.size()][][]);
|
||||
int[] docIDsArray = new int[docIDs.size()];
|
||||
for(int i=0;i<docIDsArray.length;i++) {
|
||||
docIDsArray[i] = docIDs.get(i);
|
||||
}
|
||||
|
||||
verify(docValuesArray, docIDsArray, numDims, numBytesPerDim);
|
||||
}
|
||||
|
||||
public void testAllEqual() throws Exception {
|
||||
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
|
||||
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
|
||||
|
||||
int numDocs = atLeast(1000);
|
||||
byte[][][] docValues = new byte[numDocs][][];
|
||||
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
if (docID == 0) {
|
||||
byte[][] values = new byte[numDims][];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
values[dim] = new byte[numBytesPerDim];
|
||||
random().nextBytes(values[dim]);
|
||||
}
|
||||
docValues[docID] = values;
|
||||
} else {
|
||||
docValues[docID] = docValues[0];
|
||||
}
|
||||
}
|
||||
|
||||
verify(docValues, null, numDims, numBytesPerDim);
|
||||
}
|
||||
|
||||
public void testOneDimEqual() throws Exception {
|
||||
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
|
||||
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
|
||||
|
||||
int numDocs = atLeast(1000);
|
||||
int theEqualDim = random().nextInt(numDims);
|
||||
byte[][][] docValues = new byte[numDocs][][];
|
||||
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
byte[][] values = new byte[numDims][];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
values[dim] = new byte[numBytesPerDim];
|
||||
random().nextBytes(values[dim]);
|
||||
}
|
||||
docValues[docID] = values;
|
||||
if (docID > 0) {
|
||||
docValues[docID][theEqualDim] = docValues[0][theEqualDim];
|
||||
}
|
||||
}
|
||||
|
||||
verify(docValues, null, numDims, numBytesPerDim);
|
||||
}
|
||||
|
||||
// Tests on N-dimensional points where each dimension is a BigInteger
|
||||
public void testBigIntNDims() throws Exception {
|
||||
|
||||
int numDocs = atLeast(1000);
|
||||
try (Directory dir = getDirectory(numDocs)) {
|
||||
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
|
||||
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
|
||||
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
|
||||
// We rely on docIDs not changing:
|
||||
iwc.setMergePolicy(newLogMergePolicy());
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
|
||||
BigInteger[][] docs = new BigInteger[numDocs][];
|
||||
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
BigInteger[] values = new BigInteger[numDims];
|
||||
if (VERBOSE) {
|
||||
System.out.println(" docID=" + docID);
|
||||
}
|
||||
byte[][] bytes = new byte[numDims][];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
values[dim] = randomBigInt(numBytesPerDim);
|
||||
bytes[dim] = new byte[numBytesPerDim];
|
||||
NumericUtils.bigIntToBytes(values[dim], bytes[dim], 0, numBytesPerDim);
|
||||
if (VERBOSE) {
|
||||
System.out.println(" " + dim + " -> " + values[dim]);
|
||||
}
|
||||
}
|
||||
docs[docID] = values;
|
||||
Document doc = new Document();
|
||||
doc.add(new BinaryPoint("field", bytes));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
|
||||
DirectoryReader r = w.getReader();
|
||||
w.close();
|
||||
|
||||
PointValues dimValues = MultiPointValues.get(r);
|
||||
|
||||
int iters = atLeast(100);
|
||||
for(int iter=0;iter<iters;iter++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("\nTEST: iter=" + iter);
|
||||
}
|
||||
|
||||
// Random N dims rect query:
|
||||
BigInteger[] queryMin = new BigInteger[numDims];
|
||||
BigInteger[] queryMax = new BigInteger[numDims];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
queryMin[dim] = randomBigInt(numBytesPerDim);
|
||||
queryMax[dim] = randomBigInt(numBytesPerDim);
|
||||
if (queryMin[dim].compareTo(queryMax[dim]) > 0) {
|
||||
BigInteger x = queryMin[dim];
|
||||
queryMin[dim] = queryMax[dim];
|
||||
queryMax[dim] = x;
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println(" " + dim + "\n min=" + queryMin[dim] + "\n max=" + queryMax[dim]);
|
||||
}
|
||||
}
|
||||
|
||||
final BitSet hits = new BitSet();
|
||||
dimValues.intersect("field", new IntersectVisitor() {
|
||||
@Override
|
||||
public void visit(int docID) {
|
||||
hits.set(docID);
|
||||
//System.out.println("visit docID=" + docID);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void visit(int docID, byte[] packedValue) {
|
||||
//System.out.println("visit check docID=" + docID);
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
BigInteger x = NumericUtils.bytesToBigInt(packedValue, dim, numBytesPerDim);
|
||||
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
|
||||
//System.out.println(" no");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
//System.out.println(" yes");
|
||||
hits.set(docID);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Relation compare(byte[] minPacked, byte[] maxPacked) {
|
||||
boolean crosses = false;
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
BigInteger min = NumericUtils.bytesToBigInt(minPacked, dim, numBytesPerDim);
|
||||
BigInteger max = NumericUtils.bytesToBigInt(maxPacked, dim, numBytesPerDim);
|
||||
assert max.compareTo(min) >= 0;
|
||||
|
||||
if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) {
|
||||
return Relation.CELL_OUTSIDE_QUERY;
|
||||
} else if (min.compareTo(queryMin[dim]) < 0 || max.compareTo(queryMax[dim]) > 0) {
|
||||
crosses = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (crosses) {
|
||||
return Relation.CELL_CROSSES_QUERY;
|
||||
} else {
|
||||
return Relation.CELL_INSIDE_QUERY;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
BigInteger[] docValues = docs[docID];
|
||||
boolean expected = true;
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
BigInteger x = docValues[dim];
|
||||
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
|
||||
expected = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
boolean actual = hits.get(docID);
|
||||
assertEquals("docID=" + docID, expected, actual);
|
||||
}
|
||||
}
|
||||
r.close();
|
||||
}
|
||||
}
|
||||
|
||||
public void testRandomBinaryTiny() throws Exception {
|
||||
doTestRandomBinary(10);
|
||||
}
|
||||
|
||||
public void testRandomBinaryMedium() throws Exception {
|
||||
doTestRandomBinary(10000);
|
||||
}
|
||||
|
||||
@Nightly
|
||||
public void testRandomBinaryBig() throws Exception {
|
||||
assumeFalse("too slow with SimpleText", Codec.getDefault().getName().equals("SimpleText"));
|
||||
doTestRandomBinary(200000);
|
||||
}
|
||||
|
||||
private void doTestRandomBinary(int count) throws Exception {
|
||||
int numDocs = TestUtil.nextInt(random(), count, count*2);
|
||||
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
|
||||
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
|
||||
|
||||
byte[][][] docValues = new byte[numDocs][][];
|
||||
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
byte[][] values = new byte[numDims][];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
values[dim] = new byte[numBytesPerDim];
|
||||
// TODO: sometimes test on a "small" volume too, so we test the high density cases, higher chance of boundary, etc. cases:
|
||||
random().nextBytes(values[dim]);
|
||||
}
|
||||
docValues[docID] = values;
|
||||
}
|
||||
|
||||
verify(docValues, null, numDims, numBytesPerDim);
|
||||
}
|
||||
|
||||
/** docIDs can be null, for the single valued case, else it maps value to docID, but all values for one doc must be adjacent */
|
||||
private void verify(byte[][][] docValues, int[] docIDs, int numDims, int numBytesPerDim) throws Exception {
|
||||
try (Directory dir = getDirectory(docValues.length)) {
|
||||
while (true) {
|
||||
try {
|
||||
verify(dir, docValues, docIDs, numDims, numBytesPerDim, false);
|
||||
return;
|
||||
} catch (IllegalArgumentException iae) {
|
||||
// This just means we got a too-small maxMB for the maxPointsInLeafNode; just retry
|
||||
assertTrue(iae.getMessage().contains("either increase maxMBSortInHeap or decrease maxPointsInLeafNode"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void verify(Directory dir, byte[][][] docValues, int[] ids, int numDims, int numBytesPerDim, boolean expectExceptions) throws Exception {
|
||||
int numValues = docValues.length;
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: numValues=" + numValues + " numDims=" + numDims + " numBytesPerDim=" + numBytesPerDim);
|
||||
}
|
||||
|
||||
// RandomIndexWriter is too slow:
|
||||
boolean useRealWriter = docValues.length > 10000;
|
||||
|
||||
IndexWriterConfig iwc;
|
||||
if (useRealWriter) {
|
||||
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
|
||||
} else {
|
||||
iwc = newIndexWriterConfig();
|
||||
}
|
||||
|
||||
if (expectExceptions) {
|
||||
MergeScheduler ms = iwc.getMergeScheduler();
|
||||
if (ms instanceof ConcurrentMergeScheduler) {
|
||||
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
|
||||
}
|
||||
}
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
|
||||
DirectoryReader r = null;
|
||||
|
||||
// Compute actual min/max values:
|
||||
byte[][] expectedMinValues = new byte[numDims][];
|
||||
byte[][] expectedMaxValues = new byte[numDims][];
|
||||
for(int ord=0;ord<docValues.length;ord++) {
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
if (ord == 0) {
|
||||
expectedMinValues[dim] = new byte[numBytesPerDim];
|
||||
System.arraycopy(docValues[ord][dim], 0, expectedMinValues[dim], 0, numBytesPerDim);
|
||||
expectedMaxValues[dim] = new byte[numBytesPerDim];
|
||||
System.arraycopy(docValues[ord][dim], 0, expectedMaxValues[dim], 0, numBytesPerDim);
|
||||
} else {
|
||||
// TODO: it's cheating that we use StringHelper.compare for "truth": what if it's buggy?
|
||||
if (StringHelper.compare(numBytesPerDim, docValues[ord][dim], 0, expectedMinValues[dim], 0) < 0) {
|
||||
System.arraycopy(docValues[ord][dim], 0, expectedMinValues[dim], 0, numBytesPerDim);
|
||||
}
|
||||
if (StringHelper.compare(numBytesPerDim, docValues[ord][dim], 0, expectedMaxValues[dim], 0) > 0) {
|
||||
System.arraycopy(docValues[ord][dim], 0, expectedMaxValues[dim], 0, numBytesPerDim);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 20% of the time we add into a separate directory, then at some point use
|
||||
// addIndexes to bring the indexed point values to the main directory:
|
||||
Directory saveDir;
|
||||
RandomIndexWriter saveW;
|
||||
int addIndexesAt;
|
||||
if (random().nextInt(5) == 1) {
|
||||
saveDir = dir;
|
||||
saveW = w;
|
||||
dir = getDirectory(numValues);
|
||||
if (useRealWriter) {
|
||||
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
|
||||
} else {
|
||||
iwc = newIndexWriterConfig();
|
||||
}
|
||||
if (expectExceptions) {
|
||||
MergeScheduler ms = iwc.getMergeScheduler();
|
||||
if (ms instanceof ConcurrentMergeScheduler) {
|
||||
((ConcurrentMergeScheduler) ms).setSuppressExceptions();
|
||||
}
|
||||
}
|
||||
w = new RandomIndexWriter(random(), dir, iwc);
|
||||
addIndexesAt = TestUtil.nextInt(random(), 1, numValues-1);
|
||||
} else {
|
||||
saveW = null;
|
||||
saveDir = null;
|
||||
addIndexesAt = 0;
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
Document doc = null;
|
||||
int lastID = -1;
|
||||
for(int ord=0;ord<numValues;ord++) {
|
||||
int id;
|
||||
if (ids == null) {
|
||||
id = ord;
|
||||
} else {
|
||||
id = ids[ord];
|
||||
}
|
||||
if (id != lastID) {
|
||||
if (doc != null) {
|
||||
if (useRealWriter) {
|
||||
w.w.addDocument(doc);
|
||||
} else {
|
||||
w.addDocument(doc);
|
||||
}
|
||||
}
|
||||
doc = new Document();
|
||||
doc.add(new NumericDocValuesField("id", id));
|
||||
}
|
||||
doc.add(new BinaryPoint("field", docValues[ord]));
|
||||
lastID = id;
|
||||
|
||||
if (random().nextInt(30) == 17) {
|
||||
// randomly index some documents without this field
|
||||
if (useRealWriter) {
|
||||
w.w.addDocument(new Document());
|
||||
} else {
|
||||
w.addDocument(new Document());
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println("add empty doc");
|
||||
}
|
||||
}
|
||||
|
||||
if (random().nextInt(30) == 17) {
|
||||
// randomly index some documents with this field, but we will delete them:
|
||||
Document xdoc = new Document();
|
||||
xdoc.add(new BinaryPoint("field", docValues[ord]));
|
||||
xdoc.add(new StringField("nukeme", "yes", Field.Store.NO));
|
||||
if (useRealWriter) {
|
||||
w.w.addDocument(xdoc);
|
||||
} else {
|
||||
w.addDocument(xdoc);
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println("add doc doc-to-delete");
|
||||
}
|
||||
|
||||
if (random().nextInt(5) == 1) {
|
||||
if (useRealWriter) {
|
||||
w.w.deleteDocuments(new Term("nukeme", "yes"));
|
||||
} else {
|
||||
w.deleteDocuments(new Term("nukeme", "yes"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println(" ord=" + ord + " id=" + id);
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
System.out.println(" dim=" + dim + " value=" + new BytesRef(docValues[ord][dim]));
|
||||
}
|
||||
}
|
||||
|
||||
if (saveW != null && ord >= addIndexesAt) {
|
||||
switchIndex(w, dir, saveW);
|
||||
w = saveW;
|
||||
dir = saveDir;
|
||||
saveW = null;
|
||||
saveDir = null;
|
||||
}
|
||||
}
|
||||
w.addDocument(doc);
|
||||
w.deleteDocuments(new Term("nukeme", "yes"));
|
||||
|
||||
if (random().nextBoolean()) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("\nTEST: now force merge");
|
||||
}
|
||||
w.forceMerge(1);
|
||||
}
|
||||
|
||||
r = w.getReader();
|
||||
w.close();
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: reader=" + r);
|
||||
}
|
||||
|
||||
PointValues dimValues = MultiPointValues.get(r);
|
||||
if (VERBOSE) {
|
||||
System.out.println(" dimValues=" + dimValues);
|
||||
}
|
||||
assertNotNull(dimValues);
|
||||
|
||||
NumericDocValues idValues = MultiDocValues.getNumericValues(r, "id");
|
||||
Bits liveDocs = MultiFields.getLiveDocs(r);
|
||||
|
||||
// Verify min/max values are correct:
|
||||
byte[] minValues = dimValues.getMinPackedValue("field");
|
||||
byte[] maxValues = dimValues.getMaxPackedValue("field");
|
||||
byte[] scratch = new byte[numBytesPerDim];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
System.arraycopy(minValues, dim*numBytesPerDim, scratch, 0, scratch.length);
|
||||
//System.out.println("dim=" + dim + " expectedMin=" + new BytesRef(expectedMinValues[dim]) + " min=" + new BytesRef(scratch));
|
||||
assertTrue(Arrays.equals(expectedMinValues[dim], scratch));
|
||||
System.arraycopy(maxValues, dim*numBytesPerDim, scratch, 0, scratch.length);
|
||||
//System.out.println("dim=" + dim + " expectedMax=" + new BytesRef(expectedMaxValues[dim]) + " max=" + new BytesRef(scratch));
|
||||
assertTrue(Arrays.equals(expectedMaxValues[dim], scratch));
|
||||
}
|
||||
|
||||
int iters = atLeast(100);
|
||||
for(int iter=0;iter<iters;iter++) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("\nTEST: iter=" + iter);
|
||||
}
|
||||
|
||||
// Random N dims rect query:
|
||||
byte[][] queryMin = new byte[numDims][];
|
||||
byte[][] queryMax = new byte[numDims][];
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
queryMin[dim] = new byte[numBytesPerDim];
|
||||
random().nextBytes(queryMin[dim]);
|
||||
queryMax[dim] = new byte[numBytesPerDim];
|
||||
random().nextBytes(queryMax[dim]);
|
||||
if (NumericUtils.compare(numBytesPerDim, queryMin[dim], 0, queryMax[dim], 0) > 0) {
|
||||
byte[] x = queryMin[dim];
|
||||
queryMin[dim] = queryMax[dim];
|
||||
queryMax[dim] = x;
|
||||
}
|
||||
}
|
||||
|
||||
if (VERBOSE) {
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
System.out.println(" dim=" + dim + "\n queryMin=" + new BytesRef(queryMin[dim]) + "\n queryMax=" + new BytesRef(queryMax[dim]));
|
||||
}
|
||||
}
|
||||
|
||||
final BitSet hits = new BitSet();
|
||||
|
||||
dimValues.intersect("field", new PointValues.IntersectVisitor() {
|
||||
@Override
|
||||
public void visit(int docID) {
|
||||
if (liveDocs == null || liveDocs.get(docID)) {
|
||||
hits.set((int) idValues.get(docID));
|
||||
}
|
||||
//System.out.println("visit docID=" + docID);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void visit(int docID, byte[] packedValue) {
|
||||
if (liveDocs != null && liveDocs.get(docID) == false) {
|
||||
return;
|
||||
}
|
||||
//System.out.println("visit check docID=" + docID + " id=" + idValues.get(docID));
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
//System.out.println(" dim=" + dim + " value=" + new BytesRef(packedValue, dim*numBytesPerDim, numBytesPerDim));
|
||||
if (NumericUtils.compare(numBytesPerDim, packedValue, dim, queryMin[dim], 0) < 0 ||
|
||||
NumericUtils.compare(numBytesPerDim, packedValue, dim, queryMax[dim], 0) > 0) {
|
||||
//System.out.println(" no");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
//System.out.println(" yes");
|
||||
hits.set((int) idValues.get(docID));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Relation compare(byte[] minPacked, byte[] maxPacked) {
|
||||
boolean crosses = false;
|
||||
//System.out.println("compare");
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
if (NumericUtils.compare(numBytesPerDim, maxPacked, dim, queryMin[dim], 0) < 0 ||
|
||||
NumericUtils.compare(numBytesPerDim, minPacked, dim, queryMax[dim], 0) > 0) {
|
||||
//System.out.println(" query_outside_cell");
|
||||
return Relation.CELL_OUTSIDE_QUERY;
|
||||
} else if (NumericUtils.compare(numBytesPerDim, minPacked, dim, queryMin[dim], 0) < 0 ||
|
||||
NumericUtils.compare(numBytesPerDim, maxPacked, dim, queryMax[dim], 0) > 0) {
|
||||
crosses = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (crosses) {
|
||||
//System.out.println(" query_crosses_cell");
|
||||
return Relation.CELL_CROSSES_QUERY;
|
||||
} else {
|
||||
//System.out.println(" cell_inside_query");
|
||||
return Relation.CELL_INSIDE_QUERY;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
BitSet expected = new BitSet();
|
||||
for(int ord=0;ord<numValues;ord++) {
|
||||
boolean matches = true;
|
||||
for(int dim=0;dim<numDims;dim++) {
|
||||
byte[] x = docValues[ord][dim];
|
||||
if (NumericUtils.compare(numBytesPerDim, x, 0, queryMin[dim], 0) < 0 ||
|
||||
NumericUtils.compare(numBytesPerDim, x, 0, queryMax[dim], 0) > 0) {
|
||||
matches = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (matches) {
|
||||
int id;
|
||||
if (ids == null) {
|
||||
id = ord;
|
||||
} else {
|
||||
id = ids[ord];
|
||||
}
|
||||
expected.set(id);
|
||||
}
|
||||
}
|
||||
|
||||
int limit = Math.max(expected.length(), hits.length());
|
||||
int failCount = 0;
|
||||
int successCount = 0;
|
||||
for(int id=0;id<limit;id++) {
|
||||
if (expected.get(id) != hits.get(id)) {
|
||||
System.out.println("FAIL: id=" + id);
|
||||
failCount++;
|
||||
} else {
|
||||
successCount++;
|
||||
}
|
||||
}
|
||||
|
||||
if (failCount != 0) {
|
||||
for(int docID=0;docID<r.maxDoc();docID++) {
|
||||
System.out.println(" docID=" + docID + " id=" + idValues.get(docID));
|
||||
}
|
||||
|
||||
fail(failCount + " docs failed; " + successCount + " docs succeeded");
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
IOUtils.closeWhileHandlingException(r, w, saveW, saveDir == null ? null : dir);
|
||||
}
|
||||
}
|
||||
|
||||
public void testAddIndexes() throws IOException {
|
||||
Directory dir1 = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir1);
|
||||
Document doc = new Document();
|
||||
doc.add(new IntPoint("int1", 17));
|
||||
w.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new IntPoint("int2", 42));
|
||||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
// Different field number assignments:
|
||||
Directory dir2 = newDirectory();
|
||||
w = new RandomIndexWriter(random(), dir2);
|
||||
doc = new Document();
|
||||
doc.add(new IntPoint("int2", 42));
|
||||
w.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new IntPoint("int1", 17));
|
||||
w.addDocument(doc);
|
||||
w.close();
|
||||
|
||||
Directory dir = newDirectory();
|
||||
w = new RandomIndexWriter(random(), dir);
|
||||
w.addIndexes(new Directory[] {dir1, dir2});
|
||||
w.forceMerge(1);
|
||||
|
||||
DirectoryReader r = w.getReader();
|
||||
IndexSearcher s = newSearcher(r);
|
||||
assertEquals(2, s.count(ExactPointQuery.new1DIntExact("int1", 17)));
|
||||
assertEquals(2, s.count(ExactPointQuery.new1DIntExact("int2", 42)));
|
||||
r.close();
|
||||
w.close();
|
||||
dir.close();
|
||||
dir1.close();
|
||||
dir2.close();
|
||||
}
|
||||
|
||||
private void switchIndex(RandomIndexWriter w, Directory dir, RandomIndexWriter saveW) throws IOException {
|
||||
if (random().nextBoolean()) {
|
||||
// Add via readers:
|
||||
try (DirectoryReader r = w.getReader()) {
|
||||
if (random().nextBoolean()) {
|
||||
// Add via CodecReaders:
|
||||
List<CodecReader> subs = new ArrayList<>();
|
||||
for (LeafReaderContext context : r.leaves()) {
|
||||
subs.add((CodecReader) context.reader());
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now use addIndexes(CodecReader[]) to switch writers");
|
||||
}
|
||||
saveW.addIndexes(subs.toArray(new CodecReader[subs.size()]));
|
||||
} else {
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now use TestUtil.addIndexesSlowly(DirectoryReader[]) to switch writers");
|
||||
}
|
||||
TestUtil.addIndexesSlowly(saveW.w, r);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Add via directory:
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: now use addIndexes(Directory[]) to switch writers");
|
||||
}
|
||||
w.close();
|
||||
saveW.addIndexes(new Directory[] {dir});
|
||||
}
|
||||
w.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private BigInteger randomBigInt(int numBytes) {
|
||||
BigInteger x = new BigInteger(numBytes*8-1, random());
|
||||
if (random().nextBoolean()) {
|
||||
x = x.negate();
|
||||
}
|
||||
return x;
|
||||
}
|
||||
|
||||
private static Directory noVirusChecker(Directory dir) {
|
||||
if (dir instanceof MockDirectoryWrapper) {
|
||||
((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
|
||||
}
|
||||
return dir;
|
||||
}
|
||||
|
||||
private Directory getDirectory(int numPoints) throws IOException {
|
||||
Directory dir;
|
||||
if (numPoints > 100000) {
|
||||
dir = newFSDirectory(createTempDir("TestBKDTree"));
|
||||
} else {
|
||||
dir = newDirectory();
|
||||
}
|
||||
noVirusChecker(dir);
|
||||
//dir = FSDirectory.open(createTempDir());
|
||||
return dir;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean mergeIsStable() {
|
||||
// suppress this test from base class: merges for BKD trees are not stable because the tree created by merge will have a different
|
||||
// structure than the tree created by adding points separately
|
||||
return false;
|
||||
}
|
||||
}
|
|
@@ -1888,6 +1888,7 @@ public abstract class LuceneTestCase extends Assert {
    assertDocValuesEquals(info, leftReader, rightReader);
    assertDeletedDocsEquals(info, leftReader, rightReader);
    assertFieldInfosEquals(info, leftReader, rightReader);
    assertPointsEquals(info, leftReader, rightReader);
  }

  /**

@@ -2533,6 +2534,69 @@ public abstract class LuceneTestCase extends Assert {
    assertEquals(info, left, right);
  }

  // naive silly memory heavy uninversion!! maps docID -> packed values (a Set because a given doc can be multi-valued)
  private Map<Integer,Set<BytesRef>> uninvert(String fieldName, PointValues points) throws IOException {
    final Map<Integer,Set<BytesRef>> docValues = new HashMap<>();
    points.intersect(fieldName, new PointValues.IntersectVisitor() {
        @Override
        public void visit(int docID) {
          throw new UnsupportedOperationException();
        }

        @Override
        public void visit(int docID, byte[] packedValue) throws IOException {
          if (docValues.containsKey(docID) == false) {
            docValues.put(docID, new HashSet<BytesRef>());
          }
          docValues.get(docID).add(new BytesRef(packedValue.clone()));
        }

        @Override
        public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
          // We pretend our query shape is so hairy that it crosses every single cell:
          return PointValues.Relation.CELL_CROSSES_QUERY;
        }
      });
    return docValues;
  }

  public void assertPointsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
    assertPointsEquals(info,
                       MultiFields.getMergedFieldInfos(leftReader),
                       MultiPointValues.get(leftReader),
                       MultiFields.getMergedFieldInfos(rightReader),
                       MultiPointValues.get(rightReader));
  }

  public void assertPointsEquals(String info, FieldInfos fieldInfos1, PointValues points1, FieldInfos fieldInfos2, PointValues points2) throws IOException {
    for(FieldInfo fieldInfo1 : fieldInfos1) {
      if (fieldInfo1.getPointDimensionCount() != 0) {
        FieldInfo fieldInfo2 = fieldInfos2.fieldInfo(fieldInfo1.name);
        // same dimension count?
        assertEquals(info, fieldInfo1.getPointDimensionCount(), fieldInfo2.getPointDimensionCount());
        // same bytes per dimension?
        assertEquals(info, fieldInfo1.getPointNumBytes(), fieldInfo2.getPointNumBytes());

        assertEquals(info + " field=" + fieldInfo1.name,
                     uninvert(fieldInfo1.name, points1),
                     uninvert(fieldInfo1.name, points2));
      }
    }

    // make sure FieldInfos2 doesn't have any point fields that FieldInfo1 didn't have
    for(FieldInfo fieldInfo2 : fieldInfos2) {
      if (fieldInfo2.getPointDimensionCount() != 0) {
        FieldInfo fieldInfo1 = fieldInfos1.fieldInfo(fieldInfo2.name);
        // same dimension count?
        assertEquals(info, fieldInfo2.getPointDimensionCount(), fieldInfo1.getPointDimensionCount());
        // same bytes per dimension?
        assertEquals(info, fieldInfo2.getPointNumBytes(), fieldInfo1.getPointNumBytes());

        // we don't need to uninvert and compare here ... we did that in the first loop above
      }
    }
  }

  /** Returns true if the file exists (can be opened), false
   *  if it cannot be opened, and (unlike Java's
   *  File.exists) throws IOException if there's some
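
The uninvert helper above visits every cell (its compare always answers CELL_CROSSES_QUERY), collecting docID -> packed values so that assertPointsEquals can compare two indexes value for value. A hedged usage sketch; the directory names are hypothetical and only assertPointsEquals comes from the hunk above:

    try (DirectoryReader left = DirectoryReader.open(leftDir);
         DirectoryReader right = DirectoryReader.open(rightDir)) {
      // fails if the two indexes disagree on any point field's dimension count,
      // bytes per dimension, or any (docID, packed value) pair:
      assertPointsEquals("left codec vs right codec", left, right);
    }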
@@ -0,0 +1,31 @@
package org.apache.lucene.codecs.asserting;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePointFormatTestCase;

/** Test AssertingPointFormat directly */
public class TestAssertingPointFormat extends BasePointFormatTestCase {
  private final Codec codec = new AssertingCodec();

  @Override
  protected Codec getCodec() {
    return codec;
  }
}
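
TestAssertingPointFormat is the whole hookup: BasePointFormatTestCase supplies the tests, and the subclass only names the codec under test. Other point format implementations would plug in the same way; for example, a hypothetical SimpleText variant (not part of this hunk) could look like:

public class TestSimpleTextPointFormat extends BasePointFormatTestCase {
  private final Codec codec = new SimpleTextCodec();

  @Override
  protected Codec getCodec() {
    return codec;
  }
}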
@@ -172,6 +172,9 @@ Optimizations
  count. Also includes change to move to the next non-zero term value when selecting a segment
  position. (Keith Laban, Steve Bower, Dennis Gove)

* SOLR-8532: Optimize GraphQuery when maxDepth is set by not collecting edges at the maxDepth level.
  (Kevin Watters via yonik)

Other Changes
----------------------

@@ -576,6 +579,8 @@ Other Changes

* SOLR-8595: Use BinaryRequestWriter by default in HttpSolrClient and ConcurrentUpdateSolrClient. (shalin)

* SOLR-8597: add default, no-op QParserPlugin.init(NamedList) method (Christine Poerschke)

================== 5.4.1 ==================

Bug Fixes
@ -112,11 +112,15 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware {
|
|||
throw new Exception("sql parameter cannot be null");
|
||||
}
|
||||
|
||||
TupleStream tupleStream = SQLTupleStreamParser.parse(sql, numWorkers, workerCollection, workerZkhost,
|
||||
AggregationMode.getMode(mode), includeMetadata);
|
||||
context.numWorkers = numWorkers;
|
||||
context.setSolrClientCache(StreamHandler.clientCache);
|
||||
tupleStream.setStreamContext(context);
|
||||
|
||||
TupleStream tupleStream = SQLTupleStreamParser.parse(sql,
|
||||
numWorkers,
|
||||
workerCollection,
|
||||
workerZkhost,
|
||||
AggregationMode.getMode(mode),
|
||||
includeMetadata,
|
||||
context);
|
||||
|
||||
rsp.add("result-set", new StreamHandler.TimerStream(new ExceptionStream(tupleStream)));
|
||||
} catch(Exception e) {
|
||||
|
@ -148,7 +152,8 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware {
|
|||
String workerCollection,
|
||||
String workerZkhost,
|
||||
AggregationMode aggregationMode,
|
||||
boolean includeMetadata) throws IOException {
|
||||
boolean includeMetadata,
|
||||
StreamContext context) throws IOException {
|
||||
SqlParser parser = new SqlParser();
|
||||
Statement statement = parser.createStatement(sql);
|
||||
|
||||
|
@ -163,12 +168,14 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware {
|
|||
if(aggregationMode == AggregationMode.FACET) {
|
||||
sqlStream = doGroupByWithAggregatesFacets(sqlVistor);
|
||||
} else {
|
||||
context.numWorkers = numWorkers;
|
||||
sqlStream = doGroupByWithAggregates(sqlVistor, numWorkers, workerCollection, workerZkhost);
|
||||
}
|
||||
} else if(sqlVistor.isDistinct) {
|
||||
if(aggregationMode == AggregationMode.FACET) {
|
||||
sqlStream = doSelectDistinctFacets(sqlVistor);
|
||||
} else {
|
||||
context.numWorkers = numWorkers;
|
||||
sqlStream = doSelectDistinct(sqlVistor, numWorkers, workerCollection, workerZkhost);
|
||||
}
|
||||
} else {
|
||||
|
@ -179,6 +186,7 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware {
|
|||
sqlStream = new MetadataStream(sqlStream, sqlVistor);
|
||||
}
|
||||
|
||||
sqlStream.setStreamContext(context);
|
||||
return sqlStream;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,10 +40,6 @@ public class BoostQParserPlugin extends QParserPlugin {
|
|||
public static final String NAME = "boost";
|
||||
public static String BOOSTFUNC = "b";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new QParser(qstr, localParams, params, req) {
|
||||
|
|
|
@ -122,10 +122,6 @@ public class CollapsingQParserPlugin extends QParserPlugin {
|
|||
public static final String HINT_MULTI_DOCVALUES = "multi_docvalues";
|
||||
|
||||
|
||||
public void init(NamedList namedList) {
|
||||
|
||||
}
|
||||
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest request) {
|
||||
return new CollapsingQParser(qstr, localParams, params, request);
|
||||
}
|
||||
|
|
|
@ -40,6 +40,7 @@ public class ComplexPhraseQParserPlugin extends QParserPlugin {
|
|||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
super.init(args);
|
||||
if (args != null) {
|
||||
Object val = args.get("inOrder");
|
||||
if (val != null) {
|
||||
|
|
|
@ -112,10 +112,6 @@ import org.apache.solr.request.SolrQueryRequest;
|
|||
public class DisMaxQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "dismax";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new DisMaxQParser(qstr, localParams, params, req);
|
||||
|
|
|
@ -34,9 +34,6 @@ public class ExportQParserPlugin extends QParserPlugin {
|
|||
|
||||
public static final String NAME = "xport";
|
||||
|
||||
public void init(NamedList namedList) {
|
||||
}
|
||||
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest request) {
|
||||
return new ExportQParser(qstr, localParams, params, request);
|
||||
}
|
||||
|
|
|
@ -28,10 +28,6 @@ import org.apache.solr.request.SolrQueryRequest;
|
|||
public class ExtendedDismaxQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "edismax";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new ExtendedDismaxQParser(qstr, localParams, params, req);
|
||||
|
|
|
@ -34,10 +34,6 @@ import org.apache.solr.schema.SchemaField;
|
|||
public class FieldQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "field";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new QParser(qstr, localParams, params, req) {
|
||||
|
|
|
@ -28,10 +28,6 @@ import org.apache.solr.request.SolrQueryRequest;
|
|||
public class FunctionQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "func";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new FunctionQParser(qstr, localParams, params, req);
|
||||
|
|
|
@ -38,10 +38,6 @@ import org.apache.solr.search.function.*;
|
|||
public class FunctionRangeQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "frange";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new QParser(qstr, localParams, params, req) {
|
||||
|
|
|
@ -56,10 +56,6 @@ public class HashQParserPlugin extends QParserPlugin {
|
|||
public static final String NAME = "hash";
|
||||
|
||||
|
||||
public void init(NamedList params) {
|
||||
|
||||
}
|
||||
|
||||
public QParser createParser(String query, SolrParams localParams, SolrParams params, SolrQueryRequest request) {
|
||||
return new HashQParser(query, localParams, params, request);
|
||||
}
|
||||
|
|
|
@ -60,10 +60,6 @@ import org.apache.solr.util.RefCounted;
|
|||
public class JoinQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "join";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new QParser(qstr, localParams, params, req) {
|
||||
|
|
|
@ -36,10 +36,6 @@ import java.util.List;
|
|||
public class LuceneQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "lucene";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new LuceneQParser(qstr, localParams, params, req);
|
||||
|
|
|
@ -34,10 +34,6 @@ import org.apache.solr.request.SolrQueryRequest;
|
|||
public class NestedQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "query";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new QParser(qstr, localParams, params, req) {
|
||||
|
|
|
@ -28,10 +28,6 @@ import org.apache.solr.request.SolrQueryRequest;
|
|||
public class OldLuceneQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "lucenePlusSort";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new OldLuceneQParser(qstr, localParams, params, req);
|
||||
|
|
|
@ -34,10 +34,6 @@ import org.apache.solr.schema.SchemaField;
|
|||
public class PrefixQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "prefix";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new QParser(qstr, localParams, params, req) {
|
||||
|
|
|
@@ -82,6 +82,10 @@ public abstract class QParserPlugin implements NamedListInitializedPlugin, SolrI
  /** return a {@link QParser} */
  public abstract QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req);

  @Override
  public void init( NamedList args ) {
  }

  @Override
  public String getName() {
    // TODO: ideally use the NAME property that each qparser plugin has
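
This default no-op init(NamedList) in the QParserPlugin base class (SOLR-8597) is what allows the empty overrides removed throughout the surrounding hunks to be deleted. After the change a plugin only has to implement createParser; an illustrative sketch with a hypothetical plugin name, reusing the stock LuceneQParser:

public class MyQParserPlugin extends QParserPlugin {
  @Override
  public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new LuceneQParser(qstr, localParams, params, req);
  }
}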
@ -36,10 +36,6 @@ import org.apache.solr.request.SolrQueryRequest;
|
|||
public class RawQParserPlugin extends QParserPlugin {
|
||||
public static final String NAME = "raw";
|
||||
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new QParser(qstr, localParams, params, req) {
|
||||
|
|
|
@ -64,9 +64,6 @@ public class ReRankQParserPlugin extends QParserPlugin {
|
|||
public static final String NAME = "rerank";
|
||||
private static Query defaultQuery = new MatchAllDocsQuery();
|
||||
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
public QParser createParser(String query, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
return new ReRankQParser(query, localParams, params, req);
|
||||
}
|
||||
|
|
|
@ -91,11 +91,6 @@ public class SimpleQParserPlugin extends QParserPlugin {
|
|||
OPERATORS.put(SimpleParams.NEAR_OPERATOR, SimpleQueryParser.NEAR_OPERATOR);
|
||||
}
|
||||
|
||||
/** No initialization is necessary so this method is empty. */
|
||||
@Override
|
||||
public void init(NamedList args) {
|
||||
}
|
||||
|
||||
/** Returns a QParser that will create a query by using Lucene's SimpleQueryParser. */
|
||||
@Override
|
||||
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
|
||||
|
|
|
@@ -31,9 +31,4 @@ public class SpatialBoxQParserPlugin extends SpatialFilterQParserPlugin {
    return new SpatialFilterQParser(qstr, localParams, params, req, true);
  }

  @Override
  public void init(NamedList args) {

  }

}
@@ -56,10 +56,5 @@ public class SpatialFilterQParserPlugin extends QParserPlugin {
    return new SpatialFilterQParser(qstr, localParams, params, req, false);
  }

  @Override
  public void init(NamedList args) {

  }

}

@@ -48,10 +48,6 @@ public class SurroundQParserPlugin extends QParserPlugin {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
  public static final String NAME = "surround";

  @Override
  public void init(NamedList args) {
  }

  @Override
  public QParser createParser(String qstr, SolrParams localParams,
      SolrParams params, SolrQueryRequest req) {
@@ -151,10 +151,6 @@ public class SwitchQParserPlugin extends QParserPlugin {
   */
  public static String SWITCH_DEFAULT = "default";

  @Override
  public void init(NamedList args) {
  }

  @Override
  public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new QParser(qstr, localParams, params, req) {
@@ -43,10 +43,6 @@ import org.apache.solr.schema.FieldType;
public class TermQParserPlugin extends QParserPlugin {
  public static final String NAME = "term";

  @Override
  public void init(NamedList args) {
  }

  @Override
  public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new QParser(qstr, localParams, params, req) {
@@ -58,10 +58,6 @@ public class TermsQParserPlugin extends QParserPlugin {
  /** Choose the internal algorithm */
  private static final String METHOD = "method";

  @Override
  public void init(NamedList args) {
  }

  private static enum Method {
    termsFilter {
      @Override
@@ -58,10 +58,6 @@ public class XmlQParserPlugin extends QParserPlugin {

  }

  @Override
  public void init(NamedList args) {
  }

  public QParser createParser(String qstr, SolrParams localParams,
      SolrParams params, SolrQueryRequest req) {
    return new XmlQParser(qstr, localParams, params, req);
@@ -41,9 +41,5 @@ public class BlockJoinParentQParserPlugin extends QParserPlugin {
  protected QParser createBJQParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new BlockJoinParentQParser(qstr, localParams, params, req);
  }

  @Override
  public void init(NamedList args) {
  }
}

@@ -31,10 +31,6 @@ public class GraphQParserPlugin extends QParserPlugin {
  // Graph Query Parser parser name
  public static final String NAME = "graph";

  @Override
  public void init(NamedList args) {
  }

  @Override
  public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    // return the graph query parser for this request.
@@ -135,8 +135,8 @@ public class GraphQuery extends Query {
  SolrIndexSearcher fromSearcher;
  private float queryNorm = 1.0F;
  private float queryWeight = 1.0F;
  int frontierSize = 0;
  public int currentDepth = 0;
  private int frontierSize = 0;
  private int currentDepth = -1;
  private Filter filter;
  private DocSet resultSet;

@@ -177,69 +177,82 @@ public class GraphQuery extends Query {
   * @throws IOException - if a sub search fails... maybe other cases too! :)
   */
  private DocSet getDocSet() throws IOException {
    DocSet fromSet = null;
    FixedBitSet seedResultBits = null;
    // Size that the bit set needs to be.
    int capacity = fromSearcher.getRawReader().maxDoc();
    // The bit set to contain the results that match the query.
    FixedBitSet resultBits = new FixedBitSet(capacity);
    // The measure of how deep in the graph we have gone.
    currentDepth = 0;
    // this holds the result at each level
    BitDocSet fromSet = null;
    // the root docs if we return root is false
    FixedBitSet rootBits = null;
    // the initial query for the frontier for the first query
    Query frontierQuery = q;
    // Find all documents in this graph that are leaf nodes to speed traversal
    // TODO: speed this up in the future with HAS_FIELD type queries
    BooleanQuery.Builder leafNodeQuery = new BooleanQuery.Builder();
    WildcardQuery edgeQuery = new WildcardQuery(new Term(toField, "*"));
    leafNodeQuery.add(edgeQuery, Occur.MUST_NOT);
    DocSet leafNodes = fromSearcher.getDocSet(leafNodeQuery.build());
    DocSet leafNodes = resolveLeafNodes(toField);
    // Start the breadth first graph traversal.

    do {
      // Increment how far we have gone in the frontier.
      currentDepth++;
      // if we are at the max level we don't need the graph terms collector.
      // TODO validate that the join case works properly.
      if (maxDepth != -1 && currentDepth >= maxDepth) {
        // if we've reached the max depth, don't worry about collecting edges.
        fromSet = fromSearcher.getDocSetBits(frontierQuery);
        // explicitly the frontier size is zero now so we can break
        frontierSize = 0;
      } else {
        // when we're not at the max depth level, we need to collect edges
        // Create the graph result collector for this level
        GraphTermsCollector graphResultCollector = new GraphTermsCollector(toField,capacity, resultBits, leafNodes);
        // traverse the level!
        fromSearcher.search(frontierQuery, graphResultCollector);
        fromSet = graphResultCollector.getDocSet();
        // All edge ids on the frontier.
        BytesRefHash collectorTerms = graphResultCollector.getCollectorTerms();
        frontierSize = collectorTerms.size();
        // The resulting doc set from the frontier.
        fromSet = graphResultCollector.getDocSet();
        if (seedResultBits == null) {
          // grab a copy of the seed bits (these are the "rootNodes")
          seedResultBits = ((BitDocSet)fromSet).getBits().clone();
        }
        Integer fs = new Integer(frontierSize);
        FrontierQuery fq = buildFrontierQuery(collectorTerms, fs);
        FrontierQuery fq = buildFrontierQuery(collectorTerms, frontierSize);
        if (fq == null) {
          // in case we get null back, make sure we know we're done at this level.
          fq = new FrontierQuery(null, 0);
        }
        frontierSize = 0;
        } else {
          frontierQuery = fq.getQuery();
          frontierSize = fq.getFrontierSize();
        }
      }
      if (currentDepth == 0 && !returnRoot) {
        // grab a copy of the root bits but only if we need it.
        rootBits = fromSet.getBits();
      }
      // Add the bits from this level to the result set.
      resultBits.or(((BitDocSet)fromSet).getBits());
      // Increment how far we have gone in the frontier.
      currentDepth++;
      // Break out if we have reached our max depth
      if (currentDepth >= maxDepth && maxDepth != -1) {
      resultBits.or(fromSet.getBits());
      // test if we discovered any new edges, if not , we're done.
      if ((maxDepth != -1 && currentDepth >= maxDepth)) {
        break;
      }
      // test if we discovered any new edges, if not , we're done.
    } while (frontierSize > 0);
    // helper bit set operations on the final result set
    if (!returnRoot) {
      resultBits.andNot(seedResultBits);
      resultBits.andNot(rootBits);
    }
    // this is the final resulting filter.
    BitDocSet resultSet = new BitDocSet(resultBits);
    // If we only want to return leaf nodes do that here.
    if (onlyLeafNodes) {
      return resultSet.intersection(leafNodes);
    } else {
      // create a doc set off the bits that we found.
      return resultSet;
    }
  }

  private DocSet resolveLeafNodes(String field) throws IOException {
    BooleanQuery.Builder leafNodeQuery = new BooleanQuery.Builder();
    WildcardQuery edgeQuery = new WildcardQuery(new Term(field, "*"));
    leafNodeQuery.add(edgeQuery, Occur.MUST_NOT);
    DocSet leafNodes = fromSearcher.getDocSet(leafNodeQuery.build());
    return leafNodes;
  }

  /** Build an automaton to represent the frontier query */
  private Automaton buildAutomaton(BytesRefHash termBytesHash) {
    // need top pass a sorted set of terms to the autn builder (maybe a better way to avoid this?)
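Taken as a whole, the rewritten getDocSet() above keeps the same breadth-first idea but moves the depth increment to the end of each pass (the counter now starts at -1), only collects edge terms when another level will actually be visited, and factors leaf-node resolution into resolveLeafNodes(). For orientation only, a plain-Java sketch of that depth-limited frontier traversal follows; it uses an ordinary adjacency map instead of Lucene doc sets and bit sets, so it illustrates the control flow, not the actual GraphQuery implementation, and all names in it are assumptions.

import java.util.*;

// Illustrative analogue of the frontier loop in GraphQuery.getDocSet():
// expand level by level, OR each level into the result, stop at maxDepth
// (-1 means unlimited) or when no new nodes are discovered.
public final class FrontierTraversalSketch {
  public static Set<Integer> traverse(Map<Integer, List<Integer>> edges,
                                      Set<Integer> roots, int maxDepth, boolean returnRoot) {
    Set<Integer> result = new HashSet<>();
    Set<Integer> frontier = new HashSet<>(roots);
    int depth = 0;
    while (!frontier.isEmpty()) {
      result.addAll(frontier);                    // resultBits.or(fromSet)
      if (maxDepth != -1 && depth >= maxDepth) {
        break;                                    // depth limit reached, stop expanding
      }
      Set<Integer> next = new HashSet<>();
      for (int node : frontier) {
        next.addAll(edges.getOrDefault(node, Collections.emptyList()));
      }
      next.removeAll(result);                     // only newly discovered nodes stay on the frontier
      frontier = next;
      depth++;
    }
    if (!returnRoot) {
      result.removeAll(roots);                    // resultBits.andNot(rootBits)
    }
    return result;
  }
}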
@@ -108,7 +108,7 @@ class GraphTermsCollector extends SimpleCollector implements Collector {
    numHits++;
  }

  public DocSet getDocSet() {
  public BitDocSet getDocSet() {
    if (bits == null) {
      // TODO: this shouldn't happen
      bits = new FixedBitSet(maxDoc);
@@ -206,10 +206,6 @@ public class ScoreJoinQParserPlugin extends QParserPlugin {
    }
  }

  @Override
  public void init(NamedList args) {
  }


  @Override
  public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
@@ -28,11 +28,6 @@ import org.apache.solr.search.QParserPlugin;
public class MLTQParserPlugin extends QParserPlugin {
  public static final String NAME = "mlt";

  @Override
  public void init(NamedList args) {

  }

  @Override
  public QParser createParser(String qstr, SolrParams localParams,
      SolrParams params, SolrQueryRequest req) {
@@ -79,7 +79,7 @@ public class ConnectionReuseTest extends AbstractFullDistribZkTestBase {

    SolrClient client;
    HttpClient httpClient = HttpClientUtil.createClient(null);
    int rndClient = 0;//random().nextInt(3);
    int rndClient = random().nextInt(3);
    if (rndClient == 0) {
      client = new ConcurrentUpdateSolrClient(url.toString(), httpClient, 6, 1); // currently only testing with 1 thread
    } else if (rndClient == 1) {
@@ -30,10 +30,6 @@ public class FooQParserPlugin extends QParserPlugin {
  public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new FooQParser(qstr, localParams, params, req);
  }

  @Override
  public void init(NamedList args) {
  }
}

class FooQParser extends QParser {
@@ -41,10 +41,6 @@ import java.io.IOException;
public class TestAnalyticsQParserPlugin extends QParserPlugin {


  public void init(NamedList params) {

  }

  public QParser createParser(String query, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new TestAnalyticsQueryParser(query, localParams, params, req);
  }
@@ -72,10 +72,6 @@ import org.junit.Ignore;
public class TestRankQueryPlugin extends QParserPlugin {


  public void init(NamedList params) {

  }

  public QParser createParser(String query, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new TestRankQueryParser(query, localParams, params, req);
  }
@@ -77,6 +77,29 @@ public class GraphQueryTest extends SolrTestCaseJ4 {
    qr = createRequest(g4Query);
    assertQ(qr,"//*[@numFound='2']");

    String g5Query = "{!graph from=\"node_id\" to=\"edge_id\" returnRoot=\"true\" returnOnlyLeaf=\"false\" maxDepth=0}id:doc_8";
    qr = createRequest(g5Query);
    assertQ(qr,"//*[@numFound='1']");

    String g6Query = "{!graph from=\"node_id\" to=\"edge_id\" returnRoot=\"true\" returnOnlyLeaf=\"false\" maxDepth=1}id:doc_8";
    qr = createRequest(g6Query);
    assertQ(qr,"//*[@numFound='3']");

    String g7Query = "{!graph from=\"node_id\" to=\"edge_id\" returnRoot=\"false\" returnOnlyLeaf=\"false\" maxDepth=1}id:doc_8";
    qr = createRequest(g7Query);
    assertQ(qr,"//*[@numFound='2']");

    String g8Query = "{!graph from=\"node_id\" to=\"edge_id\" returnRoot=\"false\" returnOnlyLeaf=\"true\" maxDepth=2}id:doc_8";
    qr = createRequest(g8Query);
    assertQ(qr,"//*[@numFound='1']");

    String g9Query = "{!graph from=\"node_id\" to=\"edge_id\" maxDepth=1}id:doc_1";
    qr = createRequest(g9Query);
    assertQ(qr,"//*[@numFound='2']");

    String g10Query = "{!graph from=\"node_id\" to=\"edge_id\" returnRoot=false maxDepth=1}id:doc_1";
    qr = createRequest(g10Query);
    assertQ(qr,"//*[@numFound='1']");
  }

  private SolrQueryRequest createRequest(String query) {
@@ -53,6 +53,7 @@ class ResultSetImpl implements ResultSet {
  private boolean done;
  private boolean closed;
  private SQLWarning currentWarning;
  private boolean wasLastValueNull;

  ResultSetImpl(StatementImpl statement) {
    this.statement = statement;
@@ -66,15 +67,11 @@ class ResultSetImpl implements ResultSet {
      if(isMetadata == null || !isMetadata.equals(true)) {
        throw new RuntimeException("First tuple is not a metadata tuple");
      }
    } catch (IOException e) {
      throw new RuntimeException("Couldn't get metadata tuple");
    }

    try {
      this.firstTuple = this.solrStream.read();
      this.solrStream.pushBack(firstTuple);
    } catch (IOException e) {
      throw new RuntimeException("Couldn't get first tuple.");
      throw new RuntimeException("Couldn't read first tuple", e);
    }

    this.resultSetMetaData = new ResultSetMetaDataImpl(this);
@@ -122,8 +119,7 @@ class ResultSetImpl implements ResultSet {

  @Override
  public boolean wasNull() throws SQLException {
    // TODO implement logic to check if last value was null
    return false;
    return this.wasLastValueNull;
  }

  @Override
@@ -208,24 +204,38 @@ class ResultSetImpl implements ResultSet {

  @Override
  public String getString(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return tuple.getString(columnLabel);
    String value = tuple.getString(columnLabel);
    if(value.equals(String.valueOf((Object)null))) {
      this.wasLastValueNull = true;
      return null;
    }
    return value;
  }

  @Override
  public boolean getBoolean(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return (boolean)getObject(columnLabel);
    Object value = getObject(columnLabel);
    if(value == null) {
      this.wasLastValueNull = true;
      return false;
    }
    return (boolean)value;
  }

  @Override
  public byte getByte(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    Number number = (Number)getObject(columnLabel);
    if(number == null) {
      this.wasLastValueNull = true;
      return 0;
    } else {
      return number.byteValue();
@@ -234,9 +244,12 @@ class ResultSetImpl implements ResultSet {

  @Override
  public short getShort(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    Number number = (Number)getObject(columnLabel);
    if(number == null) {
      this.wasLastValueNull = true;
      return 0;
    } else {
      return number.shortValue();
@@ -245,10 +258,12 @@ class ResultSetImpl implements ResultSet {

  @Override
  public int getInt(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    Number number = (Number)getObject(columnLabel);
    if(number == null) {
      this.wasLastValueNull = true;
      return 0;
    } else {
      return number.intValue();
@@ -257,10 +272,12 @@ class ResultSetImpl implements ResultSet {

  @Override
  public long getLong(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    Number number = (Number)getObject(columnLabel);
    if(number == null) {
      this.wasLastValueNull = true;
      return 0L;
    } else {
      return number.longValue();
@@ -269,10 +286,12 @@ class ResultSetImpl implements ResultSet {

  @Override
  public float getFloat(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    Number number = (Number)getObject(columnLabel);
    if(number == null) {
      this.wasLastValueNull = true;
      return 0.0F;
    } else {
      return number.floatValue();
@@ -281,10 +300,12 @@ class ResultSetImpl implements ResultSet {

  @Override
  public double getDouble(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    Number number = (Number)getObject(columnLabel);
    if(number == null) {
      this.wasLastValueNull = true;
      return 0.0D;
    } else {
      return number.doubleValue();
@@ -298,30 +319,54 @@ class ResultSetImpl implements ResultSet {

  @Override
  public byte[] getBytes(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return (byte[]) getObject(columnLabel);
    Object value = getObject(columnLabel);
    if(value == null) {
      this.wasLastValueNull = true;
      return null;
    }
    return (byte[])value;
  }

  @Override
  public Date getDate(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return (Date)getObject(columnLabel);
    Object value = getObject(columnLabel);
    if(value == null) {
      this.wasLastValueNull = true;
      return null;
    }
    return (Date)value;
  }

  @Override
  public Time getTime(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return (Time)getObject(columnLabel);
    Object value = getObject(columnLabel);
    if(value == null) {
      this.wasLastValueNull = true;
      return null;
    }
    return (Time)value;
  }

  @Override
  public Timestamp getTimestamp(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return (Timestamp)getObject(columnLabel);
    Object value = getObject(columnLabel);
    if(value == null) {
      this.wasLastValueNull = true;
      return null;
    }
    return (Timestamp)value;
  }

  @Override
@@ -376,9 +421,15 @@ class ResultSetImpl implements ResultSet {

  @Override
  public Object getObject(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return this.tuple.get(columnLabel);
    Object value = this.tuple.get(columnLabel);
    if(value == null) {
      this.wasLastValueNull = true;
      return null;
    }
    return value;
  }

  @Override
@@ -403,9 +454,15 @@ class ResultSetImpl implements ResultSet {

  @Override
  public BigDecimal getBigDecimal(String columnLabel) throws SQLException {
    this.wasLastValueNull = false;
    checkClosed();

    return (BigDecimal)getObject(columnLabel);
    Object value = this.getObject(columnLabel);
    if(value == null) {
      this.wasLastValueNull = true;
      return null;
    }
    return (BigDecimal)value;
  }

  @Override
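All of the ResultSetImpl hunks above follow one pattern: each getter first clears wasLastValueNull, then records whether the value it just read was SQL NULL, so that wasNull() (rewritten earlier in this diff) can report it per the JDBC contract. A small hedged sketch of how a caller uses that behaviour; the connection string, collection name and the testnull_i column are placeholders borrowed from the test below, not a guaranteed setup.

import java.sql.*;

// Illustrative only: distinguishing a real 0 from SQL NULL via wasNull().
public class WasNullExample {
  public static void main(String[] args) throws Exception {
    try (Connection con = DriverManager.getConnection(
             "jdbc:solr://localhost:9983?collection=collection1");
         Statement stmt = con.createStatement();
         ResultSet rs = stmt.executeQuery(
             "select id, testnull_i from collection1 order by a_i desc limit 10")) {
      while (rs.next()) {
        long v = rs.getLong("testnull_i");   // primitive getters return 0 for NULL
        if (rs.wasNull()) {
          System.out.println(rs.getString("id") + ": testnull_i is NULL");
        } else {
          System.out.println(rs.getString("id") + ": testnull_i = " + v);
        }
      }
    }
  }
}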
@@ -92,16 +92,16 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {

    waitForRecoveriesToFinish(false);

    indexr(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1");
    indexr(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2");
    indexr(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3");
    indexr(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4");
    indexr(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5");
    indexr(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6");
    indexr(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7");
    indexr(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8");
    indexr(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9");
    indexr(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10");
    indexr(id, "0", "a_s", "hello0", "a_i", "0", "a_f", "1", "testnull_i", null);
    indexr(id, "2", "a_s", "hello0", "a_i", "2", "a_f", "2", "testnull_i", "2");
    indexr(id, "3", "a_s", "hello3", "a_i", "3", "a_f", "3", "testnull_i", null);
    indexr(id, "4", "a_s", "hello4", "a_i", "4", "a_f", "4", "testnull_i", "4");
    indexr(id, "1", "a_s", "hello0", "a_i", "1", "a_f", "5", "testnull_i", null);
    indexr(id, "5", "a_s", "hello3", "a_i", "10", "a_f", "6", "testnull_i", "6");
    indexr(id, "6", "a_s", "hello4", "a_i", "11", "a_f", "7", "testnull_i", null);
    indexr(id, "7", "a_s", "hello3", "a_i", "12", "a_f", "8", "testnull_i", "8");
    indexr(id, "8", "a_s", "hello3", "a_i", "13", "a_f", "9", "testnull_i", null);
    indexr(id, "9", "a_s", "hello0", "a_i", "14", "a_f", "10", "testnull_i", "10");

    commit();

@@ -355,11 +355,28 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {

  private void testDriverMetadata() throws Exception {
    String collection = DEFAULT_COLLECTION;
    String connectionString = "jdbc:solr://" + zkServer.getZkAddress() + "?collection=" + collection +
        "&username=&password=&testKey1=testValue&testKey2";
    String sql = "select id, a_i, a_s, a_f as my_float_col from " + collection + " order by a_i desc limit 2";

    try (Connection con = DriverManager.getConnection(connectionString)) {
    String connectionString1 = "jdbc:solr://" + zkServer.getZkAddress() + "?collection=" + collection +
        "&username=&password=&testKey1=testValue&testKey2";
    Properties properties1 = new Properties();

    String sql = "select id, a_i, a_s, a_f as my_float_col, testnull_i from " + collection +
        " order by a_i desc";

    String connectionString2 = "jdbc:solr://" + zkServer.getZkAddress() + "?collection=" + collection +
        "&aggregationMode=map_reduce&numWorkers=2&username=&password=&testKey1=testValue&testKey2";
    Properties properties2 = new Properties();

    String sql2 = sql + " limit 2";

    //testJDBCMethods(collection, connectionString1, properties1, sql);
    //testJDBCMethods(collection, connectionString2, properties2, sql);
    testJDBCMethods(collection, connectionString1, properties1, sql2);
    testJDBCMethods(collection, connectionString2, properties2, sql2);
  }

  private void testJDBCMethods(String collection, String connectionString, Properties properties, String sql) throws Exception {
    try (Connection con = DriverManager.getConnection(connectionString, properties)) {
      assertTrue(con.isValid(DEFAULT_CONNECTION_TIMEOUT));
      assertEquals(collection, con.getCatalog());

@@ -407,32 +424,37 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {

    assertNotNull(resultSetMetaData);

    assertEquals(4, resultSetMetaData.getColumnCount());
    assertEquals(5, resultSetMetaData.getColumnCount());

    assertEquals("id", resultSetMetaData.getColumnName(1));
    assertEquals("a_i", resultSetMetaData.getColumnName(2));
    assertEquals("a_s", resultSetMetaData.getColumnName(3));
    assertEquals("a_f", resultSetMetaData.getColumnName(4));
    assertEquals("testnull_i", resultSetMetaData.getColumnName(5));

    assertEquals("id", resultSetMetaData.getColumnLabel(1));
    assertEquals("a_i", resultSetMetaData.getColumnLabel(2));
    assertEquals("a_s", resultSetMetaData.getColumnLabel(3));
    assertEquals("my_float_col", resultSetMetaData.getColumnLabel(4));
    assertEquals("testnull_i", resultSetMetaData.getColumnLabel(5));

    assertEquals("id".length(), resultSetMetaData.getColumnDisplaySize(1));
    assertEquals("a_i".length(), resultSetMetaData.getColumnDisplaySize(2));
    assertEquals("a_s".length(), resultSetMetaData.getColumnDisplaySize(3));
    assertEquals("my_float_col".length(), resultSetMetaData.getColumnDisplaySize(4));
    assertEquals("testnull_i".length(), resultSetMetaData.getColumnDisplaySize(5));

    assertEquals("Long", resultSetMetaData.getColumnTypeName(1));
    assertEquals("Long", resultSetMetaData.getColumnTypeName(2));
    assertEquals("String", resultSetMetaData.getColumnTypeName(3));
    assertEquals("Double", resultSetMetaData.getColumnTypeName(4));
    assertEquals("Long", resultSetMetaData.getColumnTypeName(5));

    assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(1));
    assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(2));
    assertEquals(Types.VARCHAR, resultSetMetaData.getColumnType(3));
    assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(4));
    assertEquals(Types.DOUBLE, resultSetMetaData.getColumnType(5));
  }

  private void checkResultSet(ResultSet rs) throws Exception {
@@ -443,72 +465,199 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {
    assertTrue(rs.next());

    assertEquals(14L, rs.getObject("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(14L, rs.getObject(2));
    assertFalse(rs.wasNull());
    assertEquals(14L, rs.getLong("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(14L, rs.getLong(2));
    assertFalse(rs.wasNull());
    assertEquals(14D, rs.getDouble("a_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(14D, rs.getDouble(2), 0);
    assertFalse(rs.wasNull());
    assertEquals(14f, rs.getFloat("a_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(14f, rs.getFloat(2), 0);
    assertFalse(rs.wasNull());
    assertEquals(14, rs.getShort("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(14, rs.getShort(2));
    assertFalse(rs.wasNull());
    assertEquals(14, rs.getByte("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(14, rs.getByte(2));
    assertFalse(rs.wasNull());

    assertEquals("hello0", rs.getObject("a_s"));
    assertFalse(rs.wasNull());
    assertEquals("hello0", rs.getObject(3));
    assertFalse(rs.wasNull());
    assertEquals("hello0", rs.getString("a_s"));
    assertFalse(rs.wasNull());
    assertEquals("hello0", rs.getString(3));
    assertFalse(rs.wasNull());

    assertEquals(10D, rs.getObject("my_float_col"));
    assertFalse(rs.wasNull());
    assertEquals(10D, rs.getObject(4));
    assertFalse(rs.wasNull());
    assertEquals(10D, rs.getDouble("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10D, rs.getDouble(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(10F, rs.getFloat("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10F, rs.getFloat(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getInt("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getInt(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(10L, rs.getLong("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10L, rs.getLong(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getShort("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getShort(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getByte("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getByte(4), 0);
    assertFalse(rs.wasNull());

    assertEquals(10L, rs.getObject("testnull_i"));
    assertFalse(rs.wasNull());
    assertEquals(10L, rs.getObject(5));
    assertFalse(rs.wasNull());
    assertEquals("10", rs.getString("testnull_i"));
    assertFalse(rs.wasNull());
    assertEquals("10", rs.getString(5));
    assertFalse(rs.wasNull());
    assertEquals(10D, rs.getDouble("testnull_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10D, rs.getDouble(5), 0);
    assertFalse(rs.wasNull());
    assertEquals(10F, rs.getFloat("testnull_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10F, rs.getFloat(5), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getInt("testnull_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getInt(5), 0);
    assertFalse(rs.wasNull());
    assertEquals(10L, rs.getLong("testnull_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10L, rs.getLong(5), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getShort("testnull_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getShort(5), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getByte("testnull_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(10, rs.getByte(5), 0);
    assertFalse(rs.wasNull());


    assertTrue(rs.next());

    assertEquals(13L, rs.getObject("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(13L, rs.getObject(2));
    assertFalse(rs.wasNull());
    assertEquals(13L, rs.getLong("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(13L, rs.getLong(2));
    assertFalse(rs.wasNull());
    assertEquals(13D, rs.getDouble("a_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(13D, rs.getDouble(2), 0);
    assertFalse(rs.wasNull());
    assertEquals(13f, rs.getFloat("a_i"), 0);
    assertFalse(rs.wasNull());
    assertEquals(13f, rs.getFloat(2), 0);
    assertFalse(rs.wasNull());
    assertEquals(13, rs.getShort("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(13, rs.getShort(2));
    assertFalse(rs.wasNull());
    assertEquals(13, rs.getByte("a_i"));
    assertFalse(rs.wasNull());
    assertEquals(13, rs.getByte(2));
    assertFalse(rs.wasNull());

    assertEquals("hello3", rs.getObject("a_s"));
    assertFalse(rs.wasNull());
    assertEquals("hello3", rs.getObject(3));
    assertFalse(rs.wasNull());
    assertEquals("hello3", rs.getString("a_s"));
    assertFalse(rs.wasNull());
    assertEquals("hello3", rs.getString(3));
    assertFalse(rs.wasNull());

    assertEquals(9D, rs.getObject("my_float_col"));
    assertFalse(rs.wasNull());
    assertEquals(9D, rs.getObject(4));
    assertFalse(rs.wasNull());
    assertEquals(9D, rs.getDouble("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(9D, rs.getDouble(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(9F, rs.getFloat("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(9F, rs.getFloat(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(9, rs.getInt("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(9, rs.getInt(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(9L, rs.getLong("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(9L, rs.getLong(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(9, rs.getShort("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(9, rs.getShort(4), 0);
    assertFalse(rs.wasNull());
    assertEquals(9, rs.getByte("my_float_col"), 0);
    assertFalse(rs.wasNull());
    assertEquals(9, rs.getByte(4), 0);
    assertFalse(rs.wasNull());

    assertEquals(null, rs.getObject("testnull_i"));
    assertTrue(rs.wasNull());
    assertEquals(null, rs.getObject(5));
    assertTrue(rs.wasNull());
    assertEquals(null, rs.getString("testnull_i"));
    assertTrue(rs.wasNull());
    assertEquals(null, rs.getString(5));
    assertTrue(rs.wasNull());
    assertEquals(0D, rs.getDouble("testnull_i"), 0);
    assertTrue(rs.wasNull());
    assertEquals(0D, rs.getDouble(5), 0);
    assertTrue(rs.wasNull());
    assertEquals(0F, rs.getFloat("testnull_i"), 0);
    assertTrue(rs.wasNull());
    assertEquals(0F, rs.getFloat(5), 0);
    assertTrue(rs.wasNull());
    assertEquals(0, rs.getInt("testnull_i"));
    assertTrue(rs.wasNull());
    assertEquals(0, rs.getInt(5));
    assertTrue(rs.wasNull());
    assertEquals(0L, rs.getLong("testnull_i"));
    assertTrue(rs.wasNull());
    assertEquals(0L, rs.getLong(5));
    assertTrue(rs.wasNull());
    assertEquals(0, rs.getShort("testnull_i"));
    assertTrue(rs.wasNull());
    assertEquals(0, rs.getShort(5));
    assertTrue(rs.wasNull());
    assertEquals(0, rs.getByte("testnull_i"));
    assertTrue(rs.wasNull());
    assertEquals(0, rs.getByte(5));
    assertTrue(rs.wasNull());

    assertFalse(rs.next());
  }