From 8d8e83518c27a64ccc3e730975b83fddd45e97d7 Mon Sep 17 00:00:00 2001
From: Michael McCandless
Date: Wed, 6 Aug 2008 09:56:00 +0000
Subject: [PATCH] LUCENE-1301: fix cause of rare NPE in TestIndexWriterExceptions

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@683206 13f79535-47bb-0310-9956-ffa450edef68
---
 .../org/apache/lucene/index/TermsHash.java    | 29 +++++++------------
 .../lucene/index/TermsHashPerThread.java      |  9 ++++--
 2 files changed, 17 insertions(+), 21 deletions(-)

diff --git a/src/java/org/apache/lucene/index/TermsHash.java b/src/java/org/apache/lucene/index/TermsHash.java
index 068720dfbcf..437ce8204dd 100644
--- a/src/java/org/apache/lucene/index/TermsHash.java
+++ b/src/java/org/apache/lucene/index/TermsHash.java
@@ -44,11 +44,11 @@ final class TermsHash extends InvertedDocConsumer {
   final int postingsFreeChunk;
   final DocumentsWriter docWriter;
 
-  TermsHash primaryTermsHash;
+  private TermsHash primaryTermsHash;
 
-  RawPostingList[] postingsFreeList = new RawPostingList[1];
-  int postingsFreeCount;
-  int postingsAllocCount;
+  private RawPostingList[] postingsFreeList = new RawPostingList[1];
+  private int postingsFreeCount;
+  private int postingsAllocCount;
   boolean trackAllocations;
 
   public TermsHash(final DocumentsWriter docWriter, boolean trackAllocations, final TermsHashConsumer consumer, final TermsHash nextTermsHash) {
@@ -176,17 +176,6 @@ final class TermsHash extends InvertedDocConsumer {
     return any;
   }
 
-  // USE ONLY FOR DEBUGGING!
-  /*
-  public String getPostingText() {
-    char[] text = charPool.buffers[p.textStart >> CHAR_BLOCK_SHIFT];
-    int upto = p.textStart & CHAR_BLOCK_MASK;
-    while(text[upto] != 0xffff)
-      upto++;
-    return new String(text, p.textStart, upto-(p.textStart & BYTE_BLOCK_MASK));
-  }
-  */
-
   synchronized public void recyclePostings(final RawPostingList[] postings, final int numPostings) {
 
     assert postings.length >= numPostings;
@@ -219,19 +208,21 @@ final class TermsHash extends InvertedDocConsumer {
                      postings, 0, numToCopy);
 
     // Directly allocate the remainder if any
-    if (numToCopy < postings.length) {
+    if (numToCopy != postings.length) {
       final int extra = postings.length - numToCopy;
       final int newPostingsAllocCount = postingsAllocCount + extra;
 
-      if (newPostingsAllocCount > postingsFreeList.length)
-        postingsFreeList = new RawPostingList[ArrayUtil.getNextSize(newPostingsAllocCount)];
-
       consumer.createPostings(postings, numToCopy, extra);
       assert docWriter.writer.testPoint("TermsHash.getPostings after create");
       postingsAllocCount += extra;
 
       if (trackAllocations)
         docWriter.bytesAllocated(extra * bytesPerPosting);
+
+      if (newPostingsAllocCount > postingsFreeList.length)
+        // Pre-allocate the postingsFreeList so it's large
+        // enough to hold all postings we've given out
+        postingsFreeList = new RawPostingList[ArrayUtil.getNextSize(newPostingsAllocCount)];
     }
 
     postingsFreeCount -= numToCopy;
diff --git a/src/java/org/apache/lucene/index/TermsHashPerThread.java b/src/java/org/apache/lucene/index/TermsHashPerThread.java
index 4997438b267..03a08220591 100644
--- a/src/java/org/apache/lucene/index/TermsHashPerThread.java
+++ b/src/java/org/apache/lucene/index/TermsHashPerThread.java
@@ -74,8 +74,13 @@ final class TermsHashPerThread extends InvertedDocConsumerPerThread {
     assert freePostingsCount == 0;
     termsHash.getPostings(freePostings);
     freePostingsCount = freePostings.length;
-    for(int i=0;i