clear up some nocommits
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene4547@1439232 13f79535-47bb-0310-9956-ffa450edef68
commit ad8934b205 (parent d7ad96f234)
[Lucene40DocValuesReader.java]
@@ -36,6 +36,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.PagedBytes;
 import org.apache.lucene.util.packed.PackedInts;
 
 /**
@@ -308,9 +309,9 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
                             Lucene40DocValuesFormat.BYTES_FIXED_STRAIGHT_VERSION_START,
                             Lucene40DocValuesFormat.BYTES_FIXED_STRAIGHT_VERSION_CURRENT);
       final int fixedLength = input.readInt();
-      // nocommit? can the current impl even handle > 2G?
-      final byte bytes[] = new byte[state.segmentInfo.getDocCount() * fixedLength];
-      input.readBytes(bytes, 0, bytes.length);
+      PagedBytes bytes = new PagedBytes(16);
+      bytes.copy(input, fixedLength * (long)state.segmentInfo.getDocCount());
+      final PagedBytes.Reader bytesReader = bytes.freeze(true);
       if (input.getFilePointer() != input.length()) {
         throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.getFilePointer() + " vs size " + input.length() + " (resource: " + input + ")");
       }
@@ -318,9 +319,7 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
       return new BinaryDocValues() {
         @Override
         public void get(int docID, BytesRef result) {
-          result.bytes = bytes;
-          result.offset = docID * fixedLength;
-          result.length = fixedLength;
+          bytesReader.fillSlice(result, fixedLength * (long)docID, fixedLength);
         }
       };
     } finally {
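Note on the pattern applied above and repeated through the rest of this file: a Java byte[] is indexed by int, so the old code capped a field's data at ~2GB (and could silently overflow in docCount * fixedLength). PagedBytes stores the same data in fixed-size pages addressed by a long, so the read path becomes copy, then freeze, then fillSlice. A minimal, self-contained sketch of that lifecycle follows; it assumes the post-patch Lucene 4.x APIs, and the directory, file name, and values are made up for illustration:

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PagedBytes;

public class PagedBytesSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical setup: three 4-byte values in a scratch file.
    int fixedLength = 4, docCount = 3;
    Directory dir = new RAMDirectory();
    IndexOutput out = dir.createOutput("vals", IOContext.DEFAULT);
    out.writeBytes(new byte[] {1,2,3,4, 5,6,7,8, 9,10,11,12}, fixedLength * docCount);
    out.close();

    // The new read path: page the bytes in (long count, so no 2GB cap),
    // freeze into an immutable Reader, then slice by long address.
    IndexInput in = dir.openInput("vals", IOContext.DEFAULT);
    PagedBytes bytes = new PagedBytes(16);            // 2^16-byte pages
    bytes.copy(in, fixedLength * (long) docCount);
    PagedBytes.Reader reader = bytes.freeze(true);
    in.close();

    BytesRef result = new BytesRef();
    reader.fillSlice(result, fixedLength * (long) 2, fixedLength);  // docID 2
    System.out.println(result);                       // [9 a b c]
    dir.close();
  }
}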
@@ -347,10 +346,10 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
       CodecUtil.checkHeader(index, Lucene40DocValuesFormat.BYTES_VAR_STRAIGHT_CODEC_NAME_IDX,
                             Lucene40DocValuesFormat.BYTES_VAR_STRAIGHT_VERSION_START,
                             Lucene40DocValuesFormat.BYTES_VAR_STRAIGHT_VERSION_CURRENT);
-      // nocommit? can the current impl even handle > 2G?
       long totalBytes = index.readVLong();
-      final byte bytes[] = new byte[(int)totalBytes];
-      data.readBytes(bytes, 0, bytes.length);
+      PagedBytes bytes = new PagedBytes(16);
+      bytes.copy(data, totalBytes);
+      final PagedBytes.Reader bytesReader = bytes.freeze(true);
       final PackedInts.Reader reader = PackedInts.getReader(index);
       if (data.getFilePointer() != data.length()) {
         throw new CorruptIndexException("did not read all bytes from file \"" + dataName + "\": read " + data.getFilePointer() + " vs size " + data.length() + " (resource: " + data + ")");
@@ -364,9 +363,7 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
         public void get(int docID, BytesRef result) {
           long startAddress = reader.get(docID);
           long endAddress = reader.get(docID+1);
-          result.bytes = bytes;
-          result.offset = (int)startAddress;
-          result.length = (int)(endAddress - startAddress);
+          bytesReader.fillSlice(result, startAddress, (int)(endAddress - startAddress));
         }
       };
     } finally {
@@ -396,9 +393,9 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
 
       final int fixedLength = data.readInt();
       final int valueCount = index.readInt();
-      // nocommit? can the current impl even handle > 2G?
-      final byte bytes[] = new byte[fixedLength * valueCount];
-      data.readBytes(bytes, 0, bytes.length);
+      PagedBytes bytes = new PagedBytes(16);
+      bytes.copy(data, fixedLength * (long) valueCount);
+      final PagedBytes.Reader bytesReader = bytes.freeze(true);
       final PackedInts.Reader reader = PackedInts.getReader(index);
       if (data.getFilePointer() != data.length()) {
         throw new CorruptIndexException("did not read all bytes from file \"" + dataName + "\": read " + data.getFilePointer() + " vs size " + data.length() + " (resource: " + data + ")");
@@ -410,10 +407,8 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
       return new BinaryDocValues() {
         @Override
         public void get(int docID, BytesRef result) {
-          int ord = (int)reader.get(docID);
-          result.bytes = bytes;
-          result.offset = ord * fixedLength;
-          result.length = fixedLength;
+          final long offset = fixedLength * reader.get(docID);
+          bytesReader.fillSlice(result, offset, fixedLength);
         }
       };
     } finally {
@@ -442,9 +437,9 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
                             Lucene40DocValuesFormat.BYTES_VAR_DEREF_VERSION_CURRENT);
 
       final long totalBytes = index.readLong();
-      // nocommit? can the current impl even handle > 2G?
-      final byte bytes[] = new byte[(int)totalBytes];
-      data.readBytes(bytes, 0, bytes.length);
+      final PagedBytes bytes = new PagedBytes(16);
+      bytes.copy(data, totalBytes);
+      final PagedBytes.Reader bytesReader = bytes.freeze(true);
       final PackedInts.Reader reader = PackedInts.getReader(index);
       if (data.getFilePointer() != data.length()) {
         throw new CorruptIndexException("did not read all bytes from file \"" + dataName + "\": read " + data.getFilePointer() + " vs size " + data.length() + " (resource: " + data + ")");
@@ -456,16 +451,17 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
       return new BinaryDocValues() {
         @Override
         public void get(int docID, BytesRef result) {
-          int startAddress = (int)reader.get(docID);
-          result.bytes = bytes;
-          result.offset = startAddress;
-          if ((bytes[startAddress] & 128) == 0) {
+          long startAddress = reader.get(docID);
+          BytesRef lengthBytes = new BytesRef();
+          bytesReader.fillSlice(lengthBytes, startAddress, 1);
+          byte code = lengthBytes.bytes[lengthBytes.offset];
+          if ((code & 128) == 0) {
             // length is 1 byte
-            result.offset++;
-            result.length = bytes[startAddress];
+            bytesReader.fillSlice(result, startAddress + 1, (int) code);
           } else {
-            result.offset += 2;
-            result.length = ((bytes[startAddress] & 0x7f) << 8) | ((bytes[startAddress+1] & 0xff));
+            bytesReader.fillSlice(lengthBytes, startAddress + 1, 1);
+            int length = ((code & 0x7f) << 8) | (lengthBytes.bytes[lengthBytes.offset] & 0xff);
+            bytesReader.fillSlice(result, startAddress + 2, length);
           }
         }
       };
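The BYTES_VAR_DEREF values above are self-describing: each one starts with a 1- or 2-byte length header. If the high bit of the first byte is clear, that byte is the length; if set, a 15-bit length is split across two bytes. With the backing store now behind PagedBytes.Reader instead of a raw array, the patch pulls the header byte(s) out via fillSlice before slicing the payload. A standalone sketch of just the header rule, in plain Java (the names are mine, not Lucene's):

public class VarDerefLengthSketch {
  /** Returns {headerBytes, valueLength} decoded at data[pos]. */
  static int[] decodeLength(byte[] data, int pos) {
    byte code = data[pos];
    if ((code & 128) == 0) {
      return new int[] { 1, code };                              // length < 128
    }
    int length = ((code & 0x7f) << 8) | (data[pos + 1] & 0xff);  // length < 32768
    return new int[] { 2, length };
  }

  public static void main(String[] args) {
    byte[] small = { 3, 'a', 'b', 'c' };
    byte[] big = { (byte) 0x81, 0x00 };  // 0x0100 = 256
    System.out.println(decodeLength(small, 0)[0] + " " + decodeLength(small, 0)[1]); // 1 3
    System.out.println(decodeLength(big, 0)[0] + " " + decodeLength(big, 0)[1]);     // 2 256
  }
}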
@@ -530,9 +526,9 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
       final int fixedLength = data.readInt();
       final int valueCount = index.readInt();
 
-      // nocommit? can the current impl even handle > 2G?
-      final byte[] bytes = new byte[fixedLength*valueCount];
-      data.readBytes(bytes, 0, bytes.length);
+      PagedBytes bytes = new PagedBytes(16);
+      bytes.copy(data, fixedLength * (long) valueCount);
+      final PagedBytes.Reader bytesReader = bytes.freeze(true);
       final PackedInts.Reader reader = PackedInts.getReader(index);
 
       return correctBuggyOrds(new SortedDocValues() {
@@ -543,9 +539,7 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
 
         @Override
         public void lookupOrd(int ord, BytesRef result) {
-          result.bytes = bytes;
-          result.offset = ord * fixedLength;
-          result.length = fixedLength;
+          bytesReader.fillSlice(result, fixedLength * (long) ord, fixedLength);
         }
 
         @Override
@@ -564,10 +558,9 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
                             Lucene40DocValuesFormat.BYTES_VAR_SORTED_VERSION_CURRENT);
 
       long maxAddress = index.readLong();
-      // nocommit? can the current impl even handle > 2G?
-      final byte[] bytes = new byte[(int)maxAddress];
-      data.readBytes(bytes, 0, bytes.length);
-
+      PagedBytes bytes = new PagedBytes(16);
+      bytes.copy(data, maxAddress);
+      final PagedBytes.Reader bytesReader = bytes.freeze(true);
 
       final PackedInts.Reader addressReader = PackedInts.getReader(index);
       final PackedInts.Reader ordsReader = PackedInts.getReader(index);
@@ -583,9 +576,7 @@ final class Lucene40DocValuesReader extends DocValuesProducer {
         public void lookupOrd(int ord, BytesRef result) {
           long startAddress = addressReader.get(ord);
           long endAddress = addressReader.get(ord+1);
-          result.bytes = bytes;
-          result.offset = (int)startAddress;
-          result.length = (int)(endAddress - startAddress);
+          bytesReader.fillSlice(result, startAddress, (int)(endAddress - startAddress));
         }
 
         @Override
[Lucene42DocValuesConsumer.java]
@@ -202,7 +202,4 @@ class Lucene42DocValuesConsumer extends DocValuesConsumer {
       fst.save(data);
       meta.writeVInt((int)ord);
     }
-
-    // nocommit: can/should we make override merge + make it smarter to pull the values
-    // directly from disk for fields that arent already loaded up in ram?
   }
[Lucene42DocValuesProducer.java]
@@ -35,6 +35,7 @@ import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.PagedBytes;
 import org.apache.lucene.util.fst.BytesRefFSTEnum;
 import org.apache.lucene.util.fst.BytesRefFSTEnum.InputOutput;
 import org.apache.lucene.util.fst.FST;
@@ -181,17 +182,15 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
   private BinaryDocValues loadBinary(FieldInfo field) throws IOException {
     BinaryEntry entry = binaries.get(field.number);
     data.seek(entry.offset);
-    assert entry.numBytes < Integer.MAX_VALUE; // nocommit
-    final byte[] bytes = new byte[(int)entry.numBytes];
-    data.readBytes(bytes, 0, bytes.length);
+    PagedBytes bytes = new PagedBytes(16);
+    bytes.copy(data, entry.numBytes);
+    final PagedBytes.Reader bytesReader = bytes.freeze(true);
     if (entry.minLength == entry.maxLength) {
       final int fixedLength = entry.minLength;
       return new BinaryDocValues() {
         @Override
         public void get(int docID, BytesRef result) {
-          result.bytes = bytes;
-          result.offset = docID * fixedLength;
-          result.length = fixedLength;
+          bytesReader.fillSlice(result, fixedLength * (long)docID, fixedLength);
         }
       };
     } else {
@@ -201,9 +200,7 @@ class Lucene42DocValuesProducer extends DocValuesProducer {
         public void get(int docID, BytesRef result) {
           long startAddress = docID == 0 ? 0 : addresses.get(docID-1);
           long endAddress = addresses.get(docID);
-          result.bytes = bytes;
-          result.offset = (int) startAddress;
-          result.length = (int) (endAddress - startAddress);
+          bytesReader.fillSlice(result, startAddress, (int) (endAddress - startAddress));
         }
       };
     }
[BinaryDocValuesWriter.java]
@@ -37,11 +37,9 @@ class BinaryDocValuesWriter extends DocValuesWriter {
   private int addedValues = 0;
   private final BytesRef emptyBytesRef = new BytesRef();
 
-  // nocommit this needs to update bytesUsed?
-
   public BinaryDocValuesWriter(FieldInfo fieldInfo, Counter iwBytesUsed) {
     this.fieldInfo = fieldInfo;
-    this.bytesRefArray = new BytesRefArray(iwBytesUsed); // nocommit: test that this thing really accounts correctly
+    this.bytesRefArray = new BytesRefArray(iwBytesUsed);
   }
 
   public void addValue(int docID, BytesRef value) {
[CheckIndex.java]
@@ -1293,7 +1293,6 @@ public class CheckIndex {
   }
 
   private static void checkBinaryDocValues(String fieldName, AtomicReader reader, BinaryDocValues dv) {
-    // nocommit what else to check ...
     BytesRef scratch = new BytesRef();
     for (int i = 0; i < reader.maxDoc(); i++) {
       dv.get(i, scratch);
@@ -1360,7 +1359,6 @@ public class CheckIndex {
   }
 
   private static void checkNumericDocValues(String fieldName, AtomicReader reader, NumericDocValues ndv) {
-    // nocommit what else to check!
    for (int i = 0; i < reader.maxDoc(); i++) {
       ndv.get(i);
     }
[NumericDocValuesWriter.java]
@@ -24,8 +24,6 @@ import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.packed.AppendingLongBuffer;
 
-// nocommit pick numeric or number ... then fix all places ...
-
 /** Buffers up pending long per doc, then flushes when
  *  segment flushes. */
 class NumericDocValuesWriter extends DocValuesWriter {
[ByteBlockPool.java]
@@ -280,7 +280,7 @@ public final class ByteBlockPool {
 
   // Fill in a BytesRef from term's length & bytes encoded in
   // byte block
-  public final BytesRef setBytesRef(BytesRef term, int textStart) {
+  public final void setBytesRef(BytesRef term, int textStart) {
     final byte[] bytes = term.bytes = buffers[textStart >> BYTE_BLOCK_SHIFT];
     int pos = textStart & BYTE_BLOCK_MASK;
     if ((bytes[pos] & 0x80) == 0) {
@@ -293,7 +293,6 @@ public final class ByteBlockPool {
       term.offset = pos+2;
     }
     assert term.length >= 0;
-    return term;
   }
 
   /**
@@ -322,10 +321,10 @@ public final class ByteBlockPool {
 
   /**
    * Copies bytes from the pool starting at the given offset with the given
-   * length into the given {@link BytesRef} at offset <tt>0</tt> and returns it.
+   * length into the given {@link BytesRef} at offset <tt>0</tt>.
    * <p>Note: this method allows to copy across block boundaries.</p>
    */
-  public final BytesRef copyFrom(final BytesRef bytes, final int offset, final int length) {
+  public final void copyFrom(final BytesRef bytes, final int offset, final int length) {
     bytes.offset = 0;
     bytes.grow(length);
     bytes.length = length;
@@ -349,7 +348,6 @@ public final class ByteBlockPool {
         overflow = overflow - BYTE_BLOCK_SIZE;
       }
     } while (true);
-    return bytes;
   }
 }
 
[BytesRefArray.java]
@@ -38,13 +38,6 @@ public final class BytesRefArray {
   private int currentOffset = 0;
   private final Counter bytesUsed;
 
-  /**
-   * Creates a new {@link BytesRefArray}
-   */
-  public BytesRefArray() {
-    this(Counter.newCounter(false));
-  }
-
   /**
    * Creates a new {@link BytesRefArray} with a counter to track allocated bytes
    */
@@ -112,17 +105,6 @@ public final class BytesRefArray {
 
   }
 
-  /**
-   * Returns the number internally used bytes to hold the appended bytes in
-   * memory
-   *
-   * @return the number internally used bytes to hold the appended bytes in
-   *         memory
-   */
-  public long bytesUsed() {
-    return bytesUsed.get();
-  }
-
   private int[] sort(final Comparator<BytesRef> comp) {
     final int[] orderedEntries = new int[size()];
     for (int i = 0; i < orderedEntries.length; i++) {
[BytesRefHash.java]
@@ -118,7 +118,8 @@ public final class BytesRefHash {
   public BytesRef get(int ord, BytesRef ref) {
     assert bytesStart != null : "bytesStart is null - not initialized";
     assert ord < bytesStart.length: "ord exceeds byteStart len: " + bytesStart.length;
-    return pool.setBytesRef(ref, bytesStart[ord]);
+    pool.setBytesRef(ref, bytesStart[ord]);
+    return ref;
   }
 
   /**
@@ -171,8 +172,9 @@ public final class BytesRefHash {
       protected int compare(int i, int j) {
         final int ord1 = compact[i], ord2 = compact[j];
         assert bytesStart.length > ord1 && bytesStart.length > ord2;
-        return comp.compare(pool.setBytesRef(scratch1, bytesStart[ord1]),
-            pool.setBytesRef(scratch2, bytesStart[ord2]));
+        pool.setBytesRef(scratch1, bytesStart[ord1]);
+        pool.setBytesRef(scratch2, bytesStart[ord2]);
+        return comp.compare(scratch1, scratch2);
       }
 
       @Override
@@ -186,8 +188,8 @@ public final class BytesRefHash {
       protected int comparePivot(int j) {
         final int ord = compact[j];
         assert bytesStart.length > ord;
-        return comp.compare(pivot,
-            pool.setBytesRef(scratch2, bytesStart[ord]));
+        pool.setBytesRef(scratch2, bytesStart[ord]);
+        return comp.compare(pivot, scratch2);
       }
 
       private final BytesRef pivot = new BytesRef(),
@@ -197,7 +199,8 @@ public final class BytesRefHash {
   }
 
   private boolean equals(int ord, BytesRef b) {
-    return pool.setBytesRef(scratch1, bytesStart[ord]).bytesEquals(b);
+    pool.setBytesRef(scratch1, bytesStart[ord]);
+    return scratch1.bytesEquals(b);
  }
 
   private boolean shrink(int targetSize) {
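The three BytesRefHash call sites above all change for the same reason: ByteBlockPool.setBytesRef (modified later in this commit) no longer returns the scratch BytesRef it fills, so the fluent comp.compare(pool.setBytesRef(...), ...) chains become fill-then-compare statements. A toy illustration of the post-change caller idiom; the setBytesRef here is a simplified stand-in, not Lucene's implementation:

import java.nio.charset.StandardCharsets;
import org.apache.lucene.util.BytesRef;

public class VoidMutatorSketch {
  // Stand-in for the post-patch ByteBlockPool.setBytesRef: fills 'term'
  // in place and intentionally returns nothing.
  static void setBytesRef(BytesRef term, byte[] pool, int start, int length) {
    term.bytes = pool;
    term.offset = start;
    term.length = length;
  }

  public static void main(String[] args) {
    byte[] pool = "hello world".getBytes(StandardCharsets.UTF_8);
    BytesRef scratch1 = new BytesRef();
    BytesRef scratch2 = new BytesRef();

    // Fill each scratch ref, then compare them as separate statements,
    // mirroring the rewritten BytesRefHash.compare above.
    setBytesRef(scratch1, pool, 0, 5);   // "hello"
    setBytesRef(scratch2, pool, 6, 5);   // "world"
    System.out.println(scratch1.compareTo(scratch2) < 0);  // true
  }
}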
[PagedBytes.java]
@@ -55,7 +55,7 @@ public final class PagedBytes {
     private final int blockMask;
     private final int blockSize;
 
-    public Reader(PagedBytes pagedBytes) {
+    private Reader(PagedBytes pagedBytes) {
       blocks = new byte[pagedBytes.blocks.size()][];
       for(int i=0;i<blocks.length;i++) {
         blocks[i] = pagedBytes.blocks.get(i);
@@ -78,7 +78,7 @@ public final class PagedBytes {
      * </p>
      * @lucene.internal
      **/
-    public BytesRef fillSlice(BytesRef b, long start, int length) {
+    public void fillSlice(BytesRef b, long start, int length) {
       assert length >= 0: "length=" + length;
       assert length <= blockSize+1;
       final int index = (int) (start >> blockBits);
@@ -95,7 +95,6 @@ public final class PagedBytes {
         System.arraycopy(blocks[index], offset, b.bytes, 0, blockSize-offset);
         System.arraycopy(blocks[1+index], 0, b.bytes, blockSize-offset, length-(blockSize-offset));
       }
-      return b;
     }
 
     /**
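For reference, the address arithmetic inside these Reader methods: a long start address splits into a page index (start >> blockBits) and an intra-page offset (start & blockMask), and since fillSlice enforces length <= blockSize+1 a slice spans at most two pages, hence the single two-arraycopy fallback above. A toy sketch of the same arithmetic with 16-byte pages (plain Java; the names are mine):

public class BlockAddressSketch {
  public static void main(String[] args) {
    final int blockBits = 4;                 // toy 16-byte pages (Lucene uses e.g. 2^16)
    final int blockSize = 1 << blockBits;
    final int blockMask = blockSize - 1;

    byte[][] blocks = new byte[2][blockSize];
    for (int i = 0; i < 2 * blockSize; i++) {
      blocks[i >> blockBits][i & blockMask] = (byte) i;
    }

    long start = 14;                         // 4-byte slice straddling both pages
    int length = 4;
    int index = (int) (start >> blockBits);  // which page
    int offset = (int) (start & blockMask);  // where inside it

    byte[] result = new byte[length];
    if (blockSize - offset >= length) {
      System.arraycopy(blocks[index], offset, result, 0, length);  // one page
    } else {                                 // split across the page border
      int head = blockSize - offset;
      System.arraycopy(blocks[index], offset, result, 0, head);
      System.arraycopy(blocks[index + 1], 0, result, head, length - head);
    }
    for (byte b : result) System.out.print(b + " ");  // 14 15 16 17
    System.out.println();
  }
}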
@@ -105,12 +104,10 @@ public final class PagedBytes {
      * borders.
      * </p>
      *
-     * @return the given {@link BytesRef}
-     *
      * @lucene.internal
      **/
     // nocommit: move this shit and any other vint bogusness to fieldcacheimpl!
-    public BytesRef fill(BytesRef b, long start) {
+    public void fill(BytesRef b, long start) {
       final int index = (int) (start >> blockBits);
       final int offset = (int) (start & blockMask);
       final byte[] block = b.bytes = blocks[index];
@@ -123,7 +120,6 @@ public final class PagedBytes {
         b.offset = offset+2;
         assert b.length > 0;
       }
-      return b;
     }
   }
 
|
@ -208,7 +204,7 @@ public final class PagedBytes {
|
||||||
blockEnd.add(upto);
|
blockEnd.add(upto);
|
||||||
frozen = true;
|
frozen = true;
|
||||||
currentBlock = null;
|
currentBlock = null;
|
||||||
return new Reader(this);
|
return new PagedBytes.Reader(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getPointer() {
|
public long getPointer() {
|
||||||
|
|
|
[TestBytesRefArray.java]
@@ -30,7 +30,7 @@ public class TestBytesRefArray extends LuceneTestCase {
 
   public void testAppend() throws IOException {
     Random random = random();
-    BytesRefArray list = new BytesRefArray();
+    BytesRefArray list = new BytesRefArray(Counter.newCounter());
     List<String> stringList = new ArrayList<String>();
     for (int j = 0; j < 2; j++) {
       if (j > 0 && random.nextBoolean()) {
@@ -71,7 +71,7 @@ public class TestBytesRefArray extends LuceneTestCase {
 
   public void testSort() throws IOException {
     Random random = random();
-    BytesRefArray list = new BytesRefArray();
+    BytesRefArray list = new BytesRefArray(Counter.newCounter());
     List<String> stringList = new ArrayList<String>();
 
     for (int j = 0; j < 2; j++) {
[DistinctValuesCollectorTest.java]
@@ -356,9 +356,6 @@ public class DistinctValuesCollectorTest extends AbstractGroupingTestCase {
         case NUMERIC:
           valuesField = new NumericDocValuesField(dvField, Integer.parseInt(value));
           break;
-        /* nocommit: case FLOAT_64:
-          valuesField = new DoubleDocValuesField(dvField, Double.parseDouble(value));
-          break; */
         case BINARY:
           valuesField = new BinaryDocValuesField(dvField, new BytesRef(value));
           break;
[BufferingTermFreqIteratorWrapper.java]
@@ -23,6 +23,7 @@ import org.apache.lucene.search.spell.TermFreqIterator;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefArray;
+import org.apache.lucene.util.Counter;
 
 /**
  * This wrapper buffers incoming elements.
|
@ -31,7 +32,7 @@ import org.apache.lucene.util.BytesRefArray;
|
||||||
public class BufferingTermFreqIteratorWrapper implements TermFreqIterator {
|
public class BufferingTermFreqIteratorWrapper implements TermFreqIterator {
|
||||||
// TODO keep this for now
|
// TODO keep this for now
|
||||||
/** buffered term entries */
|
/** buffered term entries */
|
||||||
protected BytesRefArray entries = new BytesRefArray();
|
protected BytesRefArray entries = new BytesRefArray(Counter.newCounter());
|
||||||
/** current buffer position */
|
/** current buffer position */
|
||||||
protected int curPos = -1;
|
protected int curPos = -1;
|
||||||
/** buffered weights, parallel with {@link #entries} */
|
/** buffered weights, parallel with {@link #entries} */
|
||||||
|
|
|
[InMemorySorter.java]
@@ -22,6 +22,7 @@ import java.util.Comparator;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
 import org.apache.lucene.util.BytesRefArray;
+import org.apache.lucene.util.Counter;
 
 /**
  * An {@link BytesRefSorter} that keeps all the entries in memory.
|
||||||
* @lucene.internal
|
* @lucene.internal
|
||||||
*/
|
*/
|
||||||
public final class InMemorySorter implements BytesRefSorter {
|
public final class InMemorySorter implements BytesRefSorter {
|
||||||
private final BytesRefArray buffer = new BytesRefArray();
|
private final BytesRefArray buffer = new BytesRefArray(Counter.newCounter());
|
||||||
private boolean closed = false;
|
private boolean closed = false;
|
||||||
private final Comparator<BytesRef> comparator;
|
private final Comparator<BytesRef> comparator;
|
||||||
|
|
||||||
|
|
|
[Sort.java]
@@ -155,7 +155,8 @@ public final class Sort {
   private final BufferSize ramBufferSize;
   private final File tempDirectory;
 
-  private final BytesRefArray buffer = new BytesRefArray();
+  private final Counter bufferBytesUsed = Counter.newCounter();
+  private final BytesRefArray buffer = new BytesRefArray(bufferBytesUsed);
   private SortInfo sortInfo;
   private int maxTempFiles;
   private final Comparator<BytesRef> comparator;
@@ -396,7 +397,7 @@ public final class Sort {
       buffer.append(scratch);
       // Account for the created objects.
       // (buffer slots do not account to buffer size.)
-      if (ramBufferSize.bytes < buffer.bytesUsed()) {
+      if (ramBufferSize.bytes < bufferBytesUsed.get()) {
         break;
       }
     }
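These two Sort.java hunks are the flip side of removing BytesRefArray.bytesUsed(): the caller now owns the Counter, hands it to the array, and polls it to decide when the buffered data has outgrown its RAM budget. A compact sketch of that accounting pattern; the budget and entries are made up, and the APIs are the post-patch ones:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefArray;
import org.apache.lucene.util.Counter;

public class BufferBudgetSketch {
  public static void main(String[] args) {
    // The caller owns the counter and shares it with the array,
    // mirroring the rewritten Sort.java above.
    Counter bufferBytesUsed = Counter.newCounter();
    BytesRefArray buffer = new BytesRefArray(bufferBytesUsed);

    long ramBudget = 256 * 1024;             // hypothetical 256KB budget
    BytesRef scratch = new BytesRef("some term");
    int appended = 0;
    while (bufferBytesUsed.get() <= ramBudget) {
      buffer.append(scratch);                // allocations are charged to the counter
      appended++;
    }
    // Budget exceeded: time to spill/flush, as Sort.java does.
    System.out.println(appended + " entries buffered in " + bufferBytesUsed.get() + " bytes");
  }
}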
[SchemaCodecFactory.java]
@@ -55,7 +55,7 @@ public class SchemaCodecFactory extends CodecFactory implements SchemaAware {
         }
         return super.getPostingsFormatForField(field);
       }
-      // nocommit: dv too
+      // TODO: when dv support is added to solr, add it here too
     };
   }
 