LUCENE-4172: clean up redundant throws clauses

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1355069 13f79535-47bb-0310-9956-ffa450edef68
Steven Rowe 2012-06-28 16:39:25 +00:00
parent 306081e328
commit fd16190940
402 changed files with 1102 additions and 1417 deletions
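The diffs below all apply the same pattern: an overriding or private method declares a checked exception that its body can never raise, so the clause is dropped. As a minimal sketch (class names here are hypothetical, not from the commit), Java allows an override to declare fewer checked exceptions than the method it overrides, so removing the unused clause changes nothing for callers:

```java
import java.io.IOException;

abstract class TokenSource {
  // The base contract allows implementations to do I/O.
  public abstract boolean incrementToken() throws IOException;
}

class FixedTokenSource extends TokenSource {
  private boolean exhausted;

  @Override
  public boolean incrementToken() { // "throws IOException" dropped: the body never throws it
    if (exhausted) {
      return false;
    }
    exhausted = true;
    return true;
  }
}
```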

View File

@ -205,7 +205,7 @@ public final class CJKBigramFilter extends TokenFilter {
/**
* refills buffers with new data from the current token.
*/
private void refill() throws IOException {
private void refill() {
// compact buffers to keep them smallish if they become large
// just a safety check, but technically we only need the last codepoint
if (bufferLen > 64) {

View File

@ -64,7 +64,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
static final int ELEM_HYPHEN = 4;
public PatternParser() throws HyphenationException {
public PatternParser() {
token = new StringBuilder();
parser = createParser();
parser.setContentHandler(this);
@ -74,7 +74,7 @@ public class PatternParser extends DefaultHandler implements PatternConsumer {
}
public PatternParser(PatternConsumer consumer) throws HyphenationException {
public PatternParser(PatternConsumer consumer) {
this();
this.consumer = consumer;
}

View File

@ -648,7 +648,7 @@ public class TernaryTree implements Cloneable {
}
public static void main(String[] args) throws Exception {
public static void main(String[] args) {
TernaryTree tt = new TernaryTree();
tt.insert("Carlos", 'C');
tt.insert("Car", 'r');

View File

@ -17,7 +17,6 @@ package org.apache.lucene.analysis.core;
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
@ -122,7 +121,7 @@ public final class StopFilter extends FilteringTokenFilter {
* Returns the next input Token whose term() is not a stop word.
*/
@Override
protected boolean accept() throws IOException {
protected boolean accept() {
return !stopWords.contains(termAtt.buffer(), 0, termAtt.length());
}

View File

@ -48,7 +48,7 @@ public final class TypeTokenFilter extends FilteringTokenFilter {
* When the useWhiteList parameter is set to true then accept the token if its type is contained in the stopTypes
*/
@Override
protected boolean accept() throws IOException {
protected boolean accept() {
return useWhiteList == stopTypes.contains(typeAttribute.type());
}
}

View File

@ -19,15 +19,13 @@ package org.apache.lucene.analysis.miscellaneous;
import org.apache.lucene.analysis.TokenStream;
import java.io.IOException;
/**
* An always exhausted token stream.
*/
public final class EmptyTokenStream extends TokenStream {
@Override
public final boolean incrementToken() throws IOException {
public final boolean incrementToken() {
return false;
}

View File

@ -17,9 +17,6 @@
package org.apache.lucene.analysis.miscellaneous;
import java.io.IOException;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.FilteringTokenFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@ -43,7 +40,7 @@ public final class KeepWordFilter extends FilteringTokenFilter {
}
@Override
public boolean accept() throws IOException {
public boolean accept() {
return words.contains(termAtt.buffer(), 0, termAtt.length());
}
}

View File

@ -17,10 +17,7 @@ package org.apache.lucene.analysis.miscellaneous;
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.util.FilteringTokenFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@ -48,7 +45,7 @@ public final class LengthFilter extends FilteringTokenFilter {
}
@Override
public boolean accept() throws IOException {
public boolean accept() {
final int len = termAtt.length();
return (len >= min && len <= max);
}

View File

@ -17,8 +17,6 @@ package org.apache.lucene.analysis.miscellaneous;
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
@ -46,7 +44,7 @@ public final class SingleTokenTokenStream extends TokenStream {
}
@Override
public final boolean incrementToken() throws IOException {
public final boolean incrementToken() {
if (exhausted) {
return false;
} else {
@ -58,7 +56,7 @@ public final class SingleTokenTokenStream extends TokenStream {
}
@Override
public void reset() throws IOException {
public void reset() {
exhausted = false;
}

View File

@ -84,7 +84,7 @@ public final class PatternTokenizer extends Tokenizer {
}
@Override
public boolean incrementToken() throws IOException {
public boolean incrementToken() {
if (index >= str.length()) return false;
clearAttributes();
if (group >= 0) {
@ -130,7 +130,7 @@ public final class PatternTokenizer extends Tokenizer {
}
@Override
public void end() throws IOException {
public void end() {
final int ofs = correctOffset(str.length());
offsetAtt.setOffset(ofs, ofs);
}

View File

@ -212,7 +212,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
}
@Override
public final boolean incrementToken() throws IOException {
public final boolean incrementToken() {
// lazy init the iterator
if (it == null) {
it = cachedStates.iterator();
@ -228,7 +228,7 @@ public final class TeeSinkTokenFilter extends TokenFilter {
}
@Override
public final void end() throws IOException {
public final void end() {
if (finalState != null) {
restoreState(finalState);
}

View File

@ -92,7 +92,7 @@ public class WordnetSynonymParser extends SynonymMap.Builder {
return analyze(analyzer, text, reuse);
}
private void addInternal(CharsRef synset[], int size) throws IOException {
private void addInternal(CharsRef synset[], int size) {
if (size <= 1) {
return; // nothing to do
}

View File

@ -331,7 +331,7 @@ public final class WikipediaTokenizer extends Tokenizer {
}
@Override
public void end() throws IOException {
public void end() {
// set final offset
final int finalOffset = correctOffset(scanner.yychar() + scanner.yylength());
this.offsetAtt.setOffset(finalOffset, finalOffset);

View File

@ -1,6 +1,5 @@
package org.apache.lucene.analysis.core;
import java.io.IOException;
import java.io.Reader;
import java.nio.CharBuffer;
@ -68,7 +67,7 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
CharStream wrappedStream = new CharStream() {
@Override
public void mark(int readAheadLimit) throws IOException {
public void mark(int readAheadLimit) {
throw new UnsupportedOperationException("mark(int)");
}
@ -78,32 +77,32 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
}
@Override
public int read() throws IOException {
public int read() {
throw new UnsupportedOperationException("read()");
}
@Override
public int read(char[] cbuf) throws IOException {
public int read(char[] cbuf) {
throw new UnsupportedOperationException("read(char[])");
}
@Override
public int read(CharBuffer target) throws IOException {
public int read(CharBuffer target) {
throw new UnsupportedOperationException("read(CharBuffer)");
}
@Override
public boolean ready() throws IOException {
public boolean ready() {
throw new UnsupportedOperationException("ready()");
}
@Override
public void reset() throws IOException {
public void reset() {
throw new UnsupportedOperationException("reset()");
}
@Override
public long skip(long n) throws IOException {
public long skip(long n) {
throw new UnsupportedOperationException("skip(long)");
}
@ -113,12 +112,12 @@ public class TestBugInSomething extends BaseTokenStreamTestCase {
}
@Override
public void close() throws IOException {
public void close() {
throw new UnsupportedOperationException("close()");
}
@Override
public int read(char[] arg0, int arg1, int arg2) throws IOException {
public int read(char[] arg0, int arg1, int arg2) {
throw new UnsupportedOperationException("read(char[], int, int)");
}
};

View File

@ -224,7 +224,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
}
@AfterClass
public static void afterClass() throws Exception {
public static void afterClass() {
tokenizers = null;
tokenfilters = null;
charfilters = null;

View File

@ -17,8 +17,6 @@ package org.apache.lucene.analysis.position;
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.shingle.ShingleFilter;
@ -38,7 +36,7 @@ public class PositionFilterTest extends BaseTokenStreamTestCase {
}
@Override
public final boolean incrementToken() throws IOException {
public final boolean incrementToken() {
clearAttributes();
if (index < testToken.length) {
termAtt.setEmpty().append(testToken[index++]);

View File

@ -54,7 +54,7 @@ public class ShingleFilterTest extends BaseTokenStreamTestCase {
}
@Override
public final boolean incrementToken() throws IOException {
public final boolean incrementToken() {
clearAttributes();
if (index < testToken.length) {
Token t = testToken[index++];

View File

@ -109,7 +109,7 @@ public final class ICUTokenizer extends Tokenizer {
}
@Override
public void end() throws IOException {
public void end() {
final int finalOffset = (length < 0) ? offset : offset + length;
offsetAtt.setOffset(correctOffset(finalOffset), correctOffset(finalOffset));
}

View File

@ -52,7 +52,7 @@ public class GenerateHTMLStripCharFilterSupplementaryMacros {
+ " */" + NL + NL;
public static void main(String args[]) throws Exception {
public static void main(String args[]) {
outputHeader();
outputMacro("ID_Start_Supp", "[:ID_Start:]");
outputMacro("ID_Continue_Supp", "[:ID_Continue:]");

View File

@ -55,7 +55,7 @@ public class GenerateJFlexSupplementaryMacros {
+ " */" + NL + NL;
public static void main(String args[]) throws Exception {
public static void main(String args[]) {
outputHeader();
outputMacro("ALetterSupp", "[:WordBreak=ALetter:]");
outputMacro("FormatSupp", "[:WordBreak=Format:]");

View File

@ -17,7 +17,6 @@ package org.apache.lucene.analysis.ja;
* limitations under the License.
*/
import java.io.IOException;
import java.util.Set;
import org.apache.lucene.analysis.ja.tokenattributes.PartOfSpeechAttribute;
@ -37,7 +36,7 @@ public final class JapanesePartOfSpeechStopFilter extends FilteringTokenFilter {
}
@Override
protected boolean accept() throws IOException {
protected boolean accept() {
final String pos = posAtt.getPartOfSpeech();
return pos == null || !stopTags.contains(pos);
}

View File

@ -224,7 +224,7 @@ public abstract class BinaryDictionaryWriter {
lastWordId = wordId;
}
protected final String getBaseFileName(String baseDir) throws IOException {
protected final String getBaseFileName(String baseDir) {
return baseDir + File.separator + implClazz.getName().replace('.', File.separatorChar);
}

View File

@ -55,7 +55,7 @@ public class DictionaryBuilder {
System.out.println("done");
}
public static void main(String[] args) throws IOException, ClassNotFoundException {
public static void main(String[] args) throws IOException {
DictionaryFormat format;
if (args[0].equalsIgnoreCase("ipadic")) {
format = DictionaryFormat.IPADIC;

View File

@ -123,7 +123,7 @@ public final class SentenceTokenizer extends Tokenizer {
}
@Override
public void end() throws IOException {
public void end() {
// set final offset
final int finalOffset = correctOffset(tokenEnd);
offsetAtt.setOffset(finalOffset, finalOffset);

View File

@ -148,8 +148,7 @@ class BigramDictionary extends AbstractDictionary {
* @throws IOException
* @throws UnsupportedEncodingException
*/
public void loadFromFile(String dctFilePath) throws FileNotFoundException,
IOException, UnsupportedEncodingException {
public void loadFromFile(String dctFilePath) throws IOException {
int i, cnt, length, total = 0;
// The file only counted 6763 Chinese characters plus 5 reserved slots 3756~3760.
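The second flavor of cleanup visible in this hunk collapses sibling exceptions into their common supertype: FileNotFoundException and UnsupportedEncodingException both extend IOException, so listing them next to it adds nothing to the contract. A minimal sketch, with a hypothetical class name, of why the narrower clause is equivalent for callers:

```java
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;

class DictionaryLoader {
  // Before: throws FileNotFoundException, IOException, UnsupportedEncodingException
  // After:  throws IOException -- both removed types are IOException subclasses,
  // so the compile-time contract for callers is unchanged.
  public int loadFromFile(String path) throws IOException {
    InputStreamReader reader = new InputStreamReader(
        new FileInputStream(path), "GB2312"); // FileNotFoundException / UnsupportedEncodingException possible here
    try {
      return reader.read(); // plain IOException possible here
    } finally {
      reader.close();
    }
  }
}
```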

View File

@ -190,8 +190,7 @@ class WordDictionary extends AbstractDictionary {
* @throws IOException
* @throws UnsupportedEncodingException
*/
private int loadMainDataFromFile(String dctFilePath)
throws FileNotFoundException, IOException, UnsupportedEncodingException {
private int loadMainDataFromFile(String dctFilePath) throws IOException {
int i, cnt, length, total = 0;
// The file only counted 6763 Chinese characters plus 5 reserved slots 3756~3760.
// The 3756th is used (as a header) to store information.

View File

@ -20,7 +20,6 @@ package org.apache.lucene.benchmark.byTask.tasks;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexDeletionPolicy;
import org.apache.lucene.index.IndexWriter;
@ -34,7 +33,6 @@ import org.apache.lucene.index.NoDeletionPolicy;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.NoMergeScheduler;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.Version;
import java.io.BufferedOutputStream;
@ -174,7 +172,7 @@ public class CreateIndexTask extends PerfTask {
return iwConf;
}
public static IndexWriter configureWriter(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit) throws CorruptIndexException, LockObtainFailedException, IOException {
public static IndexWriter configureWriter(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit) throws IOException {
IndexWriterConfig iwc = createWriterConfig(config, runData, mode, commit);
String infoStreamVal = config.get("writer.info.stream", null);
if (infoStreamVal != null) {

View File

@ -22,9 +22,7 @@ import java.util.List;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.StoredFieldVisitor.Status;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.IndexInput;
/**
* Utility: extract doc names from an index
@ -52,12 +50,12 @@ public class DocNameExtractor {
final List<String> name = new ArrayList<String>();
searcher.getIndexReader().document(docid, new StoredFieldVisitor() {
@Override
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
public void stringField(FieldInfo fieldInfo, String value) {
name.add(value);
}
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
public Status needsField(FieldInfo fieldInfo) {
if (!name.isEmpty()) {
return Status.STOP;
} else if (fieldInfo.name.equals(docNameField)) {

View File

@ -35,14 +35,14 @@ public abstract class BenchmarkTestCase extends LuceneTestCase {
private static File WORKDIR;
@BeforeClass
public static void beforeClassBenchmarkTestCase() throws Exception {
public static void beforeClassBenchmarkTestCase() {
WORKDIR = _TestUtil.getTempDir("benchmark");
WORKDIR.delete();
WORKDIR.mkdirs();
}
@AfterClass
public static void afterClassBenchmarkTestCase() throws Exception {
public static void afterClassBenchmarkTestCase() {
WORKDIR = null;
}

View File

@ -18,7 +18,6 @@ package org.apache.lucene.benchmark.byTask.feeds;
*/
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Properties;
@ -46,12 +45,11 @@ public class DocMakerTest extends BenchmarkTestCase {
private boolean finish = false;
@Override
public void close() throws IOException {
public void close() {
}
@Override
public DocData getNextDocData(DocData docData) throws NoMoreDataException,
IOException {
public DocData getNextDocData(DocData docData) throws NoMoreDataException {
if (finish) {
throw new NoMoreDataException();
}

View File

@ -61,14 +61,14 @@ public final class CachingTokenFilter extends TokenFilter {
}
@Override
public final void end() throws IOException {
public final void end() {
if (finalState != null) {
restoreState(finalState);
}
}
@Override
public void reset() throws IOException {
public void reset() {
if(cache != null) {
iterator = cache.iterator();
}

View File

@ -18,7 +18,6 @@ package org.apache.lucene.codecs;
*/
import java.io.IOException;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.TreeMap;
@ -30,7 +29,6 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FieldsEnum;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@ -713,7 +711,7 @@ public class BlockTermsReader extends FieldsProducer {
}
@Override
public void seekExact(BytesRef target, TermState otherState) throws IOException {
public void seekExact(BytesRef target, TermState otherState) {
//System.out.println("BTR.seekExact termState target=" + target.utf8ToString() + " " + target + " this=" + this);
assert otherState != null && otherState instanceof BlockTermState;
assert !doOrd || ((BlockTermState) otherState).ord < numTerms;

View File

@ -466,7 +466,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
}
@Override
public int getDocCount() throws IOException {
public int getDocCount() {
return docCount;
}
@ -863,7 +863,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
}
@Override
public BytesRef term() throws IOException {
public BytesRef term() {
return term;
}
@ -1156,22 +1156,22 @@ public class BlockTreeTermsReader extends FieldsProducer {
}
@Override
public boolean seekExact(BytesRef text, boolean useCache) throws IOException {
public boolean seekExact(BytesRef text, boolean useCache) {
throw new UnsupportedOperationException();
}
@Override
public void seekExact(long ord) throws IOException {
public void seekExact(long ord) {
throw new UnsupportedOperationException();
}
@Override
public long ord() throws IOException {
public long ord() {
throw new UnsupportedOperationException();
}
@Override
public SeekStatus seekCeil(BytesRef text, boolean useCache) throws IOException {
public SeekStatus seekCeil(BytesRef text, boolean useCache) {
throw new UnsupportedOperationException();
}
}
@ -2144,7 +2144,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
}
@Override
public void seekExact(BytesRef target, TermState otherState) throws IOException {
public void seekExact(BytesRef target, TermState otherState) {
// if (DEBUG) {
// System.out.println("BTTR.seekExact termState seg=" + segment + " target=" + target.utf8ToString() + " " + target + " state=" + otherState);
// }
@ -2174,7 +2174,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
}
@Override
public void seekExact(long ord) throws IOException {
public void seekExact(long ord) {
throw new UnsupportedOperationException();
}
@ -2351,7 +2351,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
// }
}
void rewind() throws IOException {
void rewind() {
// Force reload:
fp = fpOrig;

View File

@ -42,7 +42,7 @@ public final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum
int doc = -1;
private MergeState mergeState;
MappingMultiDocsAndPositionsEnum reset(MultiDocsAndPositionsEnum postingsEnum) throws IOException {
MappingMultiDocsAndPositionsEnum reset(MultiDocsAndPositionsEnum postingsEnum) {
this.numSubs = postingsEnum.getNumSubs();
this.subs = postingsEnum.getSubs();
upto = -1;
@ -73,7 +73,7 @@ public final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum
}
@Override
public int advance(int target) throws IOException {
public int advance(int target) {
throw new UnsupportedOperationException();
}

View File

@ -41,7 +41,7 @@ public final class MappingMultiDocsEnum extends DocsEnum {
int doc = -1;
private MergeState mergeState;
MappingMultiDocsEnum reset(MultiDocsEnum docsEnum) throws IOException {
MappingMultiDocsEnum reset(MultiDocsEnum docsEnum) {
this.numSubs = docsEnum.getNumSubs();
this.subs = docsEnum.getSubs();
upto = -1;
@ -72,7 +72,7 @@ public final class MappingMultiDocsEnum extends DocsEnum {
}
@Override
public int advance(int target) throws IOException {
public int advance(int target) {
throw new UnsupportedOperationException();
}

View File

@ -251,7 +251,7 @@ public abstract class MultiLevelSkipListReader {
}
@Override
public void close() throws IOException {
public void close() {
data = null;
}
@ -266,18 +266,18 @@ public abstract class MultiLevelSkipListReader {
}
@Override
public byte readByte() throws IOException {
public byte readByte() {
return data[pos++];
}
@Override
public void readBytes(byte[] b, int offset, int len) throws IOException {
public void readBytes(byte[] b, int offset, int len) {
System.arraycopy(data, pos, b, offset, len);
pos += len;
}
@Override
public void seek(long pos) throws IOException {
public void seek(long pos) {
this.pos = (int) (pos - pointer);
}

View File

@ -19,7 +19,6 @@ package org.apache.lucene.codecs;
import java.io.Closeable;
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.StoredFieldVisitor;
/**
@ -33,7 +32,7 @@ import org.apache.lucene.index.StoredFieldVisitor;
public abstract class StoredFieldsReader implements Cloneable, Closeable {
/** Visit the stored fields for document <code>n</code> */
public abstract void visitDocument(int n, StoredFieldVisitor visitor) throws CorruptIndexException, IOException;
public abstract void visitDocument(int n, StoredFieldVisitor visitor) throws IOException;
public abstract StoredFieldsReader clone();
}

View File

@ -89,8 +89,7 @@ public abstract class FixedIntBlockIndexInput extends IntIndexInput {
private final int blockSize;
private final IntsRef bulkResult = new IntsRef();
public Reader(final IndexInput in, final int[] pending, final BlockReader blockReader)
throws IOException {
public Reader(final IndexInput in, final int[] pending, final BlockReader blockReader) {
this.in = in;
this.pending = pending;
this.blockSize = pending.length;

View File

@ -92,15 +92,14 @@ public abstract class VariableIntBlockIndexInput extends IntIndexInput {
private final BlockReader blockReader;
private final IntsRef bulkResult = new IntsRef();
public Reader(final IndexInput in, final int[] pending, final BlockReader blockReader)
throws IOException {
public Reader(final IndexInput in, final int[] pending, final BlockReader blockReader) {
this.in = in;
this.pending = pending;
bulkResult.ints = pending;
this.blockReader = blockReader;
}
void seek(final long fp, final int upto) throws IOException {
void seek(final long fp, final int upto) {
// TODO: should we do this in real-time, not lazy?
pendingFP = fp;
pendingUpto = upto;

View File

@ -39,7 +39,7 @@ public class Lucene40DocValuesConsumer extends DocValuesWriterBase {
public final static String DOC_VALUES_SEGMENT_SUFFIX = "dv";
public Lucene40DocValuesConsumer(PerDocWriteState state, String segmentSuffix) throws IOException {
public Lucene40DocValuesConsumer(PerDocWriteState state, String segmentSuffix) {
super(state);
this.segmentSuffix = segmentSuffix;
mainDirectory = state.directory;

View File

@ -96,7 +96,7 @@ public class Lucene40NormsFormat extends NormsFormat {
public static class Lucene40NormsDocValuesConsumer extends Lucene40DocValuesConsumer {
public Lucene40NormsDocValuesConsumer(PerDocWriteState state,
String segmentSuffix) throws IOException {
String segmentSuffix) {
super(state, segmentSuffix);
}

View File

@ -319,7 +319,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
protected boolean skipped;
protected final Bits liveDocs;
SegmentDocsEnumBase(IndexInput startFreqIn, Bits liveDocs) throws IOException {
SegmentDocsEnumBase(IndexInput startFreqIn, Bits liveDocs) {
this.startFreqIn = startFreqIn;
this.freqIn = (IndexInput)startFreqIn.clone();
this.liveDocs = liveDocs;
@ -353,7 +353,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
}
@Override
public final int freq() throws IOException {
public final int freq() {
assert !indexOmitsTF;
return freq;
}
@ -499,7 +499,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
private final class AllDocsSegmentDocsEnum extends SegmentDocsEnumBase {
AllDocsSegmentDocsEnum(IndexInput startFreqIn) throws IOException {
AllDocsSegmentDocsEnum(IndexInput startFreqIn) {
super(startFreqIn, null);
assert liveDocs == null;
}
@ -576,7 +576,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
private final class LiveDocsSegmentDocsEnum extends SegmentDocsEnumBase {
LiveDocsSegmentDocsEnum(IndexInput startFreqIn, Bits liveDocs) throws IOException {
LiveDocsSegmentDocsEnum(IndexInput startFreqIn, Bits liveDocs) {
super(startFreqIn, liveDocs);
assert liveDocs != null;
}
@ -696,7 +696,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
Lucene40SkipListReader skipper;
private long lazyProxPointer;
public SegmentDocsAndPositionsEnum(IndexInput freqIn, IndexInput proxIn) throws IOException {
public SegmentDocsAndPositionsEnum(IndexInput freqIn, IndexInput proxIn) {
startFreqIn = freqIn;
this.freqIn = (IndexInput) freqIn.clone();
this.proxIn = (IndexInput) proxIn.clone();
@ -772,7 +772,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
}
@Override
public int freq() throws IOException {
public int freq() {
return freq;
}
@ -853,12 +853,12 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
}
@Override
public int startOffset() throws IOException {
public int startOffset() {
return -1;
}
@Override
public int endOffset() throws IOException {
public int endOffset() {
return -1;
}
@ -909,7 +909,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
int offsetLength;
int startOffset;
public SegmentFullPositionsEnum(IndexInput freqIn, IndexInput proxIn) throws IOException {
public SegmentFullPositionsEnum(IndexInput freqIn, IndexInput proxIn) {
startFreqIn = freqIn;
this.freqIn = (IndexInput) freqIn.clone();
this.proxIn = (IndexInput) proxIn.clone();

View File

@ -136,7 +136,7 @@ public final class Lucene40StoredFieldsReader extends StoredFieldsReader impleme
indexStream.seek(HEADER_LENGTH_IDX + docID * 8L);
}
public final void visitDocument(int n, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
public final void visitDocument(int n, StoredFieldVisitor visitor) throws IOException {
seekIndex(n);
fieldsStream.seek(indexStream.readLong());

View File

@ -22,12 +22,10 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.StoredFieldsReader;
import org.apache.lucene.codecs.StoredFieldsWriter;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MergePolicy.MergeAbortedException;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.store.Directory;
@ -209,7 +207,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
}
@Override
public void finish(FieldInfos fis, int numDocs) throws IOException {
public void finish(FieldInfos fis, int numDocs) {
if (HEADER_LENGTH_IDX+((long) numDocs)*8 != indexStream.getFilePointer())
// This is most likely a bug in Sun JRE 1.6.0_04/_05;
// we detect that the bug has struck, here, and
@ -255,7 +253,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
private int copyFieldsWithDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
throws IOException, MergeAbortedException, CorruptIndexException {
throws IOException {
int docCount = 0;
final int maxDoc = reader.reader.maxDoc();
final Bits liveDocs = reader.liveDocs;
@ -309,7 +307,7 @@ public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
private int copyFieldsNoDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
throws IOException, MergeAbortedException, CorruptIndexException {
throws IOException {
final int maxDoc = reader.reader.maxDoc();
int docCount = 0;
if (matchingFieldsReader != null) {

View File

@ -25,7 +25,6 @@ import java.util.Map;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.TermVectorsReader;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
@ -96,7 +95,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader {
}
public Lucene40TermVectorsReader(Directory d, SegmentInfo si, FieldInfos fieldInfos, IOContext context)
throws CorruptIndexException, IOException {
throws IOException {
final String segment = si.name;
final int size = si.getDocCount();
@ -252,7 +251,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader {
private int fieldUpto;
@Override
public String next() throws IOException {
public String next() {
if (fieldNumbers != null && fieldUpto < fieldNumbers.length) {
return fieldInfos.fieldInfo(fieldNumbers[fieldUpto++]).name;
} else {
@ -365,7 +364,7 @@ public class Lucene40TermVectorsReader extends TermVectorsReader {
private int[] endOffsets;
// NOTE: tvf is pre-positioned by caller
public TVTermsEnum() throws IOException {
public TVTermsEnum() {
this.origTVF = Lucene40TermVectorsReader.this.tvf;
tvf = (IndexInput) origTVF.clone();
}

View File

@ -27,7 +27,6 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.MergePolicy.MergeAbortedException;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.store.DataInput;
@ -284,7 +283,7 @@ public final class Lucene40TermVectorsWriter extends TermVectorsWriter {
final MergeState.IndexReaderAndLiveDocs reader,
int rawDocLengths[],
int rawDocLengths2[])
throws IOException, MergeAbortedException {
throws IOException {
final int maxDoc = reader.reader.maxDoc();
final Bits liveDocs = reader.liveDocs;
int totalNumDocs = 0;
@ -337,7 +336,7 @@ public final class Lucene40TermVectorsWriter extends TermVectorsWriter {
final MergeState.IndexReaderAndLiveDocs reader,
int rawDocLengths[],
int rawDocLengths2[])
throws IOException, MergeAbortedException {
throws IOException {
final int maxDoc = reader.reader.maxDoc();
if (matchingVectorsReader != null) {
// We can bulk-copy because the fieldInfos are "congruent"
@ -362,7 +361,7 @@ public final class Lucene40TermVectorsWriter extends TermVectorsWriter {
}
@Override
public void finish(FieldInfos fis, int numDocs) throws IOException {
public void finish(FieldInfos fis, int numDocs) {
if (HEADER_LENGTH_INDEX+((long) numDocs)*16 != tvx.getFilePointer())
// This is most likely a bug in Sun JRE 1.6.0_04/_05;
// we detect that the bug has struck, here, and
@ -382,7 +381,7 @@ public final class Lucene40TermVectorsWriter extends TermVectorsWriter {
}
@Override
public Comparator<BytesRef> getComparator() throws IOException {
public Comparator<BytesRef> getComparator() {
return BytesRef.getUTF8SortedAsUnicodeComparator();
}
}

View File

@ -121,14 +121,11 @@ public final class Bytes {
* {@link Type#BYTES_VAR_SORTED}.
* @param context I/O Context
* @return a new {@link Writer} instance
* @throws IOException
* if the files for the writer can not be created.
* @see PackedInts#getReader(org.apache.lucene.store.DataInput)
*/
public static DocValuesConsumer getWriter(Directory dir, String id, Mode mode,
boolean fixedSize, Comparator<BytesRef> sortComparator,
Counter bytesUsed, IOContext context, float acceptableOverheadRatio)
throws IOException {
Counter bytesUsed, IOContext context, float acceptableOverheadRatio) {
// TODO -- i shouldn't have to specify fixed? can
// track itself & do the write thing at write time?
if (sortComparator == null) {
@ -244,7 +241,7 @@ public final class Bytes {
private final IOContext context;
protected BytesWriterBase(Directory dir, String id, String codecNameIdx, String codecNameDat,
int version, Counter bytesUsed, IOContext context, Type type) throws IOException {
int version, Counter bytesUsed, IOContext context, Type type) {
super(bytesUsed, type);
this.id = id;
this.dir = dir;
@ -388,21 +385,19 @@ public final class Bytes {
protected long maxBytes = 0;
protected DerefBytesWriterBase(Directory dir, String id, String codecNameIdx, String codecNameDat,
int codecVersion, Counter bytesUsed, IOContext context, Type type)
throws IOException {
int codecVersion, Counter bytesUsed, IOContext context, Type type) {
this(dir, id, codecNameIdx, codecNameDat, codecVersion, new DirectTrackingAllocator(
ByteBlockPool.BYTE_BLOCK_SIZE, bytesUsed), bytesUsed, context, PackedInts.DEFAULT, type);
}
protected DerefBytesWriterBase(Directory dir, String id, String codecNameIdx, String codecNameDat,
int codecVersion, Counter bytesUsed, IOContext context, float acceptableOverheadRatio, Type type)
throws IOException {
int codecVersion, Counter bytesUsed, IOContext context, float acceptableOverheadRatio, Type type) {
this(dir, id, codecNameIdx, codecNameDat, codecVersion, new DirectTrackingAllocator(
ByteBlockPool.BYTE_BLOCK_SIZE, bytesUsed), bytesUsed, context, acceptableOverheadRatio, type);
}
protected DerefBytesWriterBase(Directory dir, String id, String codecNameIdx, String codecNameDat, int codecVersion, Allocator allocator,
Counter bytesUsed, IOContext context, float acceptableOverheadRatio, Type type) throws IOException {
Counter bytesUsed, IOContext context, float acceptableOverheadRatio, Type type) {
super(dir, id, codecNameIdx, codecNameDat, codecVersion, bytesUsed, context, type);
hash = new BytesRefHash(new ByteBlockPool(allocator),
BytesRefHash.DEFAULT_CAPACITY, new TrackingDirectBytesStartArray(

View File

@ -126,7 +126,7 @@ abstract class DirectSource extends Source {
private static final class BytesToFloat extends ToNumeric {
@Override
long toLong(IndexInput input) throws IOException {
long toLong(IndexInput input) {
throw new UnsupportedOperationException("ints are not supported");
}
@ -137,7 +137,7 @@ abstract class DirectSource extends Source {
private static final class BytesToDouble extends ToNumeric {
@Override
long toLong(IndexInput input) throws IOException {
long toLong(IndexInput input) {
throw new UnsupportedOperationException("ints are not supported");
}
@ -153,7 +153,7 @@ abstract class DirectSource extends Source {
return input.readLong();
}
double toDouble(IndexInput input) throws IOException {
double toDouble(IndexInput input) {
throw new UnsupportedOperationException("doubles are not supported");
}
}

View File

@ -46,8 +46,7 @@ class FixedDerefBytesImpl {
static final int VERSION_CURRENT = VERSION_START;
public static class Writer extends DerefBytesWriterBase {
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context)
throws IOException {
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context) {
super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, Type.BYTES_FIXED_DEREF);
}

View File

@ -58,7 +58,7 @@ class FixedSortedBytesImpl {
private final Comparator<BytesRef> comp;
public Writer(Directory dir, String id, Comparator<BytesRef> comp,
Counter bytesUsed, IOContext context, float acceptableOverheadRatio) throws IOException {
Counter bytesUsed, IOContext context, float acceptableOverheadRatio) {
super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, acceptableOverheadRatio, Type.BYTES_FIXED_SORTED);
this.comp = comp;
}

View File

@ -62,12 +62,12 @@ class FixedStraightBytesImpl {
private final ByteBlockPool pool;
protected FixedBytesWriterBase(Directory dir, String id, String codecNameDat,
int version, Counter bytesUsed, IOContext context) throws IOException {
int version, Counter bytesUsed, IOContext context) {
this(dir, id, codecNameDat, version, bytesUsed, context, Type.BYTES_FIXED_STRAIGHT);
}
protected FixedBytesWriterBase(Directory dir, String id, String codecNameDat,
int version, Counter bytesUsed, IOContext context, Type type) throws IOException {
int version, Counter bytesUsed, IOContext context, Type type) {
super(dir, id, null, codecNameDat, version, bytesUsed, context, type);
pool = new ByteBlockPool(new DirectTrackingAllocator(bytesUsed));
pool.nextBuffer();
@ -140,11 +140,11 @@ class FixedStraightBytesImpl {
private boolean hasMerged;
private IndexOutput datOut;
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context) throws IOException {
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context) {
super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
}
public Writer(Directory dir, String id, String codecNameDat, int version, Counter bytesUsed, IOContext context) throws IOException {
public Writer(Directory dir, String id, String codecNameDat, int version, Counter bytesUsed, IOContext context) {
super(dir, id, codecNameDat, version, bytesUsed, context);
}

View File

@ -47,7 +47,7 @@ public class Floats {
protected static final int VERSION_CURRENT = VERSION_START;
public static DocValuesConsumer getWriter(Directory dir, String id, Counter bytesUsed,
IOContext context, Type type) throws IOException {
IOContext context, Type type) {
return new FloatsWriter(dir, id, bytesUsed, context, type);
}
@ -72,7 +72,7 @@ public class Floats {
private final int size;
private final DocValuesArraySource template;
public FloatsWriter(Directory dir, String id, Counter bytesUsed,
IOContext context, Type type) throws IOException {
IOContext context, Type type) {
super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
size = typeToSize(type);
this.bytesRef = new BytesRef(size);

View File

@ -46,7 +46,7 @@ public final class Ints {
}
public static DocValuesConsumer getWriter(Directory dir, String id, Counter bytesUsed,
Type type, IOContext context) throws IOException {
Type type, IOContext context) {
return type == Type.VAR_INTS ? new PackedIntValues.PackedIntsWriter(dir, id,
bytesUsed, context) : new IntsWriter(dir, id, bytesUsed, context, type);
}
@ -92,12 +92,12 @@ public final class Ints {
private final DocValuesArraySource template;
public IntsWriter(Directory dir, String id, Counter bytesUsed,
IOContext context, Type valueType) throws IOException {
IOContext context, Type valueType) {
this(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context, valueType);
}
protected IntsWriter(Directory dir, String id, String codecName,
int version, Counter bytesUsed, IOContext context, Type valueType) throws IOException {
int version, Counter bytesUsed, IOContext context, Type valueType) {
super(dir, id, codecName, version, bytesUsed, context);
size = typeToSize(valueType);
this.bytesRef = new BytesRef(size);

View File

@ -57,7 +57,7 @@ class PackedIntValues {
private int lastDocId = -1;
protected PackedIntsWriter(Directory dir, String id, Counter bytesUsed,
IOContext context) throws IOException {
IOContext context) {
super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context, Type.VAR_INTS);
bytesRef = new BytesRef(8);
}

View File

@ -57,8 +57,7 @@ class VarDerefBytesImpl {
* order and merge them in a streamed fashion.
*/
static class Writer extends DerefBytesWriterBase {
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context)
throws IOException {
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context) {
super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, Type.BYTES_VAR_DEREF);
size = 0;
}

View File

@ -60,7 +60,7 @@ final class VarSortedBytesImpl {
private final Comparator<BytesRef> comp;
public Writer(Directory dir, String id, Comparator<BytesRef> comp,
Counter bytesUsed, IOContext context, float acceptableOverheadRatio) throws IOException {
Counter bytesUsed, IOContext context, float acceptableOverheadRatio) {
super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, acceptableOverheadRatio, Type.BYTES_VAR_SORTED);
this.comp = comp;
size = 0;

View File

@ -64,8 +64,7 @@ class VarStraightBytesImpl {
private final ByteBlockPool pool;
private IndexOutput datOut;
private boolean merge = false;
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context)
throws IOException {
public Writer(Directory dir, String id, Counter bytesUsed, IOContext context) {
super(dir, id, CODEC_NAME_IDX, CODEC_NAME_DAT, VERSION_CURRENT, bytesUsed, context, Type.BYTES_VAR_STRAIGHT);
pool = new ByteBlockPool(new DirectTrackingAllocator(bytesUsed));
docToAddress = new long[1];

View File

@ -83,11 +83,10 @@ abstract class Writer extends DocValuesConsumer {
* docvalues of type {@link Type#BYTES_FIXED_SORTED} and
* {@link Type#BYTES_VAR_SORTED}.
* @return a new {@link Writer} instance for the given {@link Type}
* @throws IOException
* @see PackedInts#getReader(org.apache.lucene.store.DataInput, float)
*/
public static DocValuesConsumer create(Type type, String id, Directory directory,
Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, float acceptableOverheadRatio) throws IOException {
Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, float acceptableOverheadRatio) {
if (comp == null) {
comp = BytesRef.getUTF8SortedAsUnicodeComparator();
}

View File

@ -427,7 +427,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
}
@Override
public int freq() throws IOException {
public int freq() {
assert indexOptions != IndexOptions.DOCS_ONLY;
return freq;
}
@ -627,7 +627,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
}
@Override
public int freq() throws IOException {
public int freq() {
return freq;
}
}
@ -647,7 +647,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
fstEnum = new BytesRefFSTEnum<BytesRef>(fst);
}
private void decodeMetaData() throws IOException {
private void decodeMetaData() {
if (!didDecode) {
buffer.reset(current.output.bytes, 0, current.output.length);
docFreq = buffer.readVInt();
@ -696,7 +696,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
}
@Override
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, boolean needsFreqs) throws IOException {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, boolean needsFreqs) {
decodeMetaData();
FSTDocsEnum docsEnum;
@ -714,7 +714,7 @@ public class MemoryPostingsFormat extends PostingsFormat {
}
@Override
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, boolean needsOffsets) throws IOException {
public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, boolean needsOffsets) {
boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
if (needsOffsets && !hasOffsets) {
@ -757,13 +757,13 @@ public class MemoryPostingsFormat extends PostingsFormat {
}
@Override
public int docFreq() throws IOException {
public int docFreq() {
decodeMetaData();
return docFreq;
}
@Override
public long totalTermFreq() throws IOException {
public long totalTermFreq() {
decodeMetaData();
return totalTermFreq;
}
@ -817,17 +817,17 @@ public class MemoryPostingsFormat extends PostingsFormat {
}
@Override
public long getSumDocFreq() throws IOException {
public long getSumDocFreq() {
return sumDocFreq;
}
@Override
public int getDocCount() throws IOException {
public int getDocCount() {
return docCount;
}
@Override
public long size() throws IOException {
public long size() {
return termCount;
}

View File

@ -85,7 +85,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
private final SegmentWriteState segmentWriteState;
public FieldsWriter(SegmentWriteState state) throws IOException {
public FieldsWriter(SegmentWriteState state) {
segmentWriteState = state;
}
@ -206,7 +206,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
}
@Override
public String next() throws IOException {
public String next() {
if (it.hasNext()) {
current = it.next();
} else {

View File

@ -52,7 +52,7 @@ public class PulsingPostingsReader extends PostingsReaderBase {
final PostingsReaderBase wrappedPostingsReader;
int maxPositions;
public PulsingPostingsReader(PostingsReaderBase wrappedPostingsReader) throws IOException {
public PulsingPostingsReader(PostingsReaderBase wrappedPostingsReader) {
this.wrappedPostingsReader = wrappedPostingsReader;
}

View File

@ -92,7 +92,7 @@ public final class PulsingPostingsWriter extends PostingsWriterBase {
/** If the total number of positions (summed across all docs
* for this term) is <= maxPositions, then the postings are
* inlined into terms dict */
public PulsingPostingsWriter(int maxPositions, PostingsWriterBase wrappedPostingsWriter) throws IOException {
public PulsingPostingsWriter(int maxPositions, PostingsWriterBase wrappedPostingsWriter) {
pending = new Position[maxPositions];
for(int i=0;i<maxPositions;i++) {
pending[i] = new Position();

View File

@ -17,8 +17,6 @@ package org.apache.lucene.codecs.sep;
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.codecs.lucene40.values.DocValuesWriterBase;
import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.store.Directory;
@ -31,7 +29,7 @@ import org.apache.lucene.store.Directory;
public class SepDocValuesConsumer extends DocValuesWriterBase {
private final Directory directory;
public SepDocValuesConsumer(PerDocWriteState state) throws IOException {
public SepDocValuesConsumer(PerDocWriteState state) {
super(state);
this.directory = state.directory;
}

View File

@ -105,7 +105,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private boolean ended;
private final BytesRefFSTEnum<PairOutputs.Pair<Long,PairOutputs.Pair<Long,Long>>> fstEnum;
public SimpleTextTermsEnum(FST<PairOutputs.Pair<Long,PairOutputs.Pair<Long,Long>>> fst, IndexOptions indexOptions) throws IOException {
public SimpleTextTermsEnum(FST<PairOutputs.Pair<Long,PairOutputs.Pair<Long,Long>>> fst, IndexOptions indexOptions) {
this.indexOptions = indexOptions;
fstEnum = new BytesRefFSTEnum<PairOutputs.Pair<Long,PairOutputs.Pair<Long,Long>>>(fst);
}

View File

@ -99,7 +99,7 @@ public class SimpleTextLiveDocsFormat extends LiveDocsFormat {
}
}
private int parseIntAt(BytesRef bytes, int offset, CharsRef scratch) throws IOException {
private int parseIntAt(BytesRef bytes, int offset, CharsRef scratch) {
UnicodeUtil.UTF8toUTF16(bytes.bytes, bytes.offset+offset, bytes.length-offset, scratch);
return ArrayUtil.parseInt(scratch.chars, 0, scratch.length);
}

View File

@ -95,8 +95,7 @@ public class SimpleTextNormsFormat extends NormsFormat {
public static class SimpleTextNormsPerDocConsumer extends
SimpleTextPerDocConsumer {
public SimpleTextNormsPerDocConsumer(PerDocWriteState state)
throws IOException {
public SimpleTextNormsPerDocConsumer(PerDocWriteState state) {
super(state, NORMS_SEG_SUFFIX);
}

View File

@ -32,8 +32,7 @@ class SimpleTextPerDocConsumer extends PerDocConsumer {
protected final PerDocWriteState state;
protected final String segmentSuffix;
public SimpleTextPerDocConsumer(PerDocWriteState state, String segmentSuffix)
throws IOException {
public SimpleTextPerDocConsumer(PerDocWriteState state, String segmentSuffix) {
this.state = state;
this.segmentSuffix = segmentSuffix;
}

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.ArrayList;
import org.apache.lucene.codecs.StoredFieldsReader;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@ -88,7 +87,7 @@ public class SimpleTextStoredFieldsReader extends StoredFieldsReader {
}
@Override
public void visitDocument(int n, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
public void visitDocument(int n, StoredFieldVisitor visitor) throws IOException {
in.seek(offsets.get(n));
readLine();
assert StringHelper.startsWith(scratch, NUM);
@ -181,7 +180,7 @@ public class SimpleTextStoredFieldsReader extends StoredFieldsReader {
SimpleTextUtil.readLine(in, scratch);
}
private int parseIntAt(int offset) throws IOException {
private int parseIntAt(int offset) {
UnicodeUtil.UTF8toUTF16(scratch.bytes, scratch.offset+offset, scratch.length-offset, scratchUTF16);
return ArrayUtil.parseInt(scratchUTF16.chars, 0, scratchUTF16.length);
}

View File

@ -204,7 +204,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
SimpleTextUtil.readLine(in, scratch);
}
private int parseIntAt(int offset) throws IOException {
private int parseIntAt(int offset) {
UnicodeUtil.UTF8toUTF16(scratch.bytes, scratch.offset+offset, scratch.length-offset, scratchUTF16);
return ArrayUtil.parseInt(scratchUTF16.chars, 0, scratchUTF16.length);
}
@ -217,7 +217,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private class SimpleTVFields extends Fields {
private final SortedMap<String,SimpleTVTerms> fields;
SimpleTVFields(SortedMap<String,SimpleTVTerms> fields) throws IOException {
SimpleTVFields(SortedMap<String,SimpleTVTerms> fields) {
this.fields = fields;
}
@ -228,7 +228,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private Map.Entry<String,SimpleTVTerms> current = null;
@Override
public String next() throws IOException {
public String next() {
if (!iterator.hasNext()) {
return null;
} else {
@ -238,7 +238,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
@Override
public Terms terms() throws IOException {
public Terms terms() {
return current.getValue();
}
};

View File

@ -469,7 +469,7 @@ public class Field implements IndexableField {
boolean used;
@Override
public boolean incrementToken() throws IOException {
public boolean incrementToken() {
if (used) {
return false;
}
@ -480,7 +480,7 @@ public class Field implements IndexableField {
}
@Override
public void reset() throws IOException {
public void reset() {
used = false;
}
};

View File

@ -75,7 +75,7 @@ class AutomatonTermsEnum extends FilteredTermsEnum {
* <p>
* @param compiled CompiledAutomaton
*/
public AutomatonTermsEnum(TermsEnum tenum, CompiledAutomaton compiled) throws IOException {
public AutomatonTermsEnum(TermsEnum tenum, CompiledAutomaton compiled) {
super(tenum);
this.finite = compiled.finite;
this.runAutomaton = compiled.runAutomaton;

View File

@ -67,7 +67,7 @@ public abstract class BaseCompositeReader<R extends IndexReader> extends Composi
* cloned and not protected for modification, the subclass is responsible
* to do this.
*/
protected BaseCompositeReader(R[] subReaders) throws IOException {
protected BaseCompositeReader(R[] subReaders) {
this.subReaders = subReaders;
this.subReadersList = Collections.unmodifiableList(Arrays.asList(subReaders));
starts = new int[subReaders.length + 1]; // build starts array
@ -112,7 +112,7 @@ public abstract class BaseCompositeReader<R extends IndexReader> extends Composi
}
@Override
public final void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
public final void document(int docID, StoredFieldVisitor visitor) throws IOException {
ensureOpen();
final int i = readerIndex(docID); // find subreader num
subReaders[i].document(docID - starts[i], visitor); // dispatch to subreader

View File

@ -398,7 +398,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
MergePolicy.OneMerge runningMerge;
private volatile boolean done;
public MergeThread(IndexWriter writer, MergePolicy.OneMerge startMerge) throws IOException {
public MergeThread(IndexWriter writer, MergePolicy.OneMerge startMerge) {
this.tWriter = writer;
this.startMerge = startMerge;
}

View File

@ -56,10 +56,9 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
/** Returns a IndexReader reading the index in the given
* Directory
* @param directory the index directory
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final Directory directory) throws CorruptIndexException, IOException {
public static DirectoryReader open(final Directory directory) throws IOException {
return StandardDirectoryReader.open(directory, null, DEFAULT_TERMS_INDEX_DIVISOR);
}
@ -76,10 +75,9 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
* memory usage, at the expense of higher latency when
* loading a TermInfo. The default value is 1. Set this
* to -1 to skip loading the terms index entirely.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws IOException {
return StandardDirectoryReader.open(directory, null, termInfosIndexDivisor);
}
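Dropping CorruptIndexException from these signatures does not change what callers must handle, because CorruptIndexException is a subclass of IOException; code that wants to distinguish a corrupt index can still catch it explicitly. A minimal caller-side sketch (the helper name and error handling are illustrative, not part of this commit):

```java
import java.io.IOException;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;

class OpenReaderSketch {
  // Opens a reader over an existing index and reports its size.
  static int countDocs(Directory dir) {
    try {
      DirectoryReader reader = DirectoryReader.open(dir);
      try {
        return reader.maxDoc();
      } finally {
        reader.close();
      }
    } catch (CorruptIndexException e) {
      // Still catchable specifically, even though it no longer appears in the throws clause.
      return -1;
    } catch (IOException e) {
      return -1;
    }
  }
}
```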
@ -102,17 +100,16 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
*
* @lucene.experimental
*/
public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException {
public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws IOException {
return writer.getReader(applyAllDeletes);
}
/** Expert: returns an IndexReader reading the index in the given
* {@link IndexCommit}.
* @param commit the commit point to open
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final IndexCommit commit) throws CorruptIndexException, IOException {
public static DirectoryReader open(final IndexCommit commit) throws IOException {
return StandardDirectoryReader.open(commit.getDirectory(), commit, DEFAULT_TERMS_INDEX_DIVISOR);
}
@ -129,10 +126,9 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
* memory usage, at the expense of higher latency when
* loading a TermInfo. The default value is 1. Set this
* to -1 to skip loading the terms index entirely.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws IOException {
return StandardDirectoryReader.open(commit.getDirectory(), commit, termInfosIndexDivisor);
}
@ -311,9 +307,8 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
* Returns <code>true</code> if an index exists at the specified directory.
* @param directory the directory to check for an index
* @return <code>true</code> if an index exists; <code>false</code> otherwise
* @throws IOException if there is a problem with accessing the index
*/
public static boolean indexExists(Directory directory) throws IOException {
public static boolean indexExists(Directory directory) {
try {
new SegmentInfos().read(directory);
return true;
@ -331,7 +326,7 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
* Subclasses of {@code DirectoryReader} should take care to not allow
* modification of this internal array, e.g. {@link #doOpenIfChanged()}.
*/
protected DirectoryReader(Directory directory, AtomicReader[] segmentReaders) throws CorruptIndexException, IOException {
protected DirectoryReader(Directory directory, AtomicReader[] segmentReaders) {
super(segmentReaders);
this.directory = directory;
}
@ -348,32 +343,29 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
* If this reader does not support reopen, return {@code null}, so
* client code is happy. This should be consistent with {@link #isCurrent}
* (should always return {@code true}) if reopen is not supported.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @return null if there are no changes; else, a new
* DirectoryReader instance.
*/
protected abstract DirectoryReader doOpenIfChanged() throws CorruptIndexException, IOException;
protected abstract DirectoryReader doOpenIfChanged() throws IOException;
/** Implement this method to support {@link #openIfChanged(DirectoryReader,IndexCommit)}.
* If this reader does not support reopen from a specific {@link IndexCommit},
* throw {@link UnsupportedOperationException}.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @return null if there are no changes; else, a new
* DirectoryReader instance.
*/
protected abstract DirectoryReader doOpenIfChanged(final IndexCommit commit) throws CorruptIndexException, IOException;
protected abstract DirectoryReader doOpenIfChanged(final IndexCommit commit) throws IOException;
/** Implement this method to support {@link #openIfChanged(DirectoryReader,IndexWriter,boolean)}.
* If this reader does not support reopen from {@link IndexWriter},
* throw {@link UnsupportedOperationException}.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @return null if there are no changes; else, a new
* DirectoryReader instance.
*/
protected abstract DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException;
protected abstract DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws IOException;
/**
* Version number when this IndexReader was opened.
@ -407,16 +399,15 @@ public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader>
* {@link #openIfChanged} to get a new reader that sees the
* changes.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public abstract boolean isCurrent() throws CorruptIndexException, IOException;
public abstract boolean isCurrent() throws IOException;
/**
* Expert: return the IndexCommit that this reader has opened.
* <p/>
* @lucene.experimental
*/
public abstract IndexCommit getIndexCommit() throws CorruptIndexException, IOException;
public abstract IndexCommit getIndexCommit() throws IOException;
}
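The reopen contract above is easiest to see from the caller's side; an illustrative refresh pattern (dir is assumed to be an open Directory):

    DirectoryReader reader = DirectoryReader.open(dir);
    // ... later, when a fresher view is wanted ...
    if (!reader.isCurrent()) {
      DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
      if (newReader != null) {   // null means nothing changed after all
        reader.close();
        reader = newReader;
      }
    }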
View File
@ -175,7 +175,7 @@ public class DocTermOrds {
/** Subclass inits w/ this, but be sure you then call
* uninvert, only once */
protected DocTermOrds(String field, int maxTermDocFreq, int indexIntervalBits) throws IOException {
protected DocTermOrds(String field, int maxTermDocFreq, int indexIntervalBits) {
//System.out.println("DTO init field=" + field + " maxTDFreq=" + maxTermDocFreq);
this.field = field;
this.maxTermDocFreq = maxTermDocFreq;
@ -703,7 +703,7 @@ public class DocTermOrds {
}
@Override
public long ord() throws IOException {
public long ord() {
return ordBase + ord;
}
View File
@ -134,7 +134,7 @@ final class DocumentsWriter {
final Codec codec;
DocumentsWriter(Codec codec, LiveIndexWriterConfig config, Directory directory, IndexWriter writer, FieldNumbers globalFieldNumbers,
BufferedDeletesStream bufferedDeletesStream) throws IOException {
BufferedDeletesStream bufferedDeletesStream) {
this.codec = codec;
this.directory = directory;
this.indexWriter = writer;
@ -200,7 +200,7 @@ final class DocumentsWriter {
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
synchronized void abort() throws IOException {
synchronized void abort() {
boolean success = false;
synchronized (this) {
deleteQueue.clear();
@ -219,8 +219,6 @@ final class DocumentsWriter {
if (perThread.isActive()) { // we might be closed
try {
perThread.dwpt.abort();
} catch (IOException ex) {
// continue
} finally {
perThread.dwpt.checkAndResetHasAborted();
flushControl.doOnAbort(perThread);
@ -276,7 +274,7 @@ final class DocumentsWriter {
flushControl.setClosed();
}
private boolean preUpdate() throws CorruptIndexException, IOException {
private boolean preUpdate() throws IOException {
ensureOpen();
boolean maybeMerge = false;
if (flushControl.anyStalledThreads() || flushControl.numQueuedFlushes() > 0) {
@ -325,7 +323,7 @@ final class DocumentsWriter {
}
boolean updateDocuments(final Iterable<? extends Iterable<? extends IndexableField>> docs, final Analyzer analyzer,
final Term delTerm) throws CorruptIndexException, IOException {
final Term delTerm) throws IOException {
boolean maybeMerge = preUpdate();
final ThreadState perThread = flushControl.obtainAndLock();
@ -356,7 +354,7 @@ final class DocumentsWriter {
}
boolean updateDocument(final Iterable<? extends IndexableField> doc, final Analyzer analyzer,
final Term delTerm) throws CorruptIndexException, IOException {
final Term delTerm) throws IOException {
boolean maybeMerge = preUpdate();
View File
@ -16,7 +16,7 @@ package org.apache.lucene.index;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.IdentityHashMap;
import java.util.List;
@ -570,21 +570,13 @@ final class DocumentsWriterFlushControl {
try {
for (DocumentsWriterPerThread dwpt : flushQueue) {
doAfterFlush(dwpt);
try {
dwpt.abort();
} catch (IOException ex) {
// continue
}
dwpt.abort();
}
for (BlockedFlush blockedFlush : blockedFlushes) {
flushingWriters
.put(blockedFlush.dwpt, Long.valueOf(blockedFlush.bytes));
doAfterFlush(blockedFlush.dwpt);
try {
blockedFlush.dwpt.abort();
} catch (IOException ex) {
// continue
}
blockedFlush.dwpt.abort();
}
} finally {
fullFlush = false;
View File
@ -135,7 +135,7 @@ class DocumentsWriterPerThread {
* updating the index files) and must discard all
* currently buffered docs. This resets our state,
* discarding any docs added since last flush. */
void abort() throws IOException {
void abort() {
hasAborted = aborting = true;
try {
if (infoStream.isEnabled("DWPT")) {
@ -352,7 +352,7 @@ class DocumentsWriterPerThread {
return docCount;
}
private void finishDocument(Term delTerm) throws IOException {
private void finishDocument(Term delTerm) {
/*
* here we actually finish the document in two steps 1. push the delete into
* the queue and update our slice. 2. increment the DWPT private document
@ -412,7 +412,7 @@ class DocumentsWriterPerThread {
}
/** Reset after a flush */
private void doAfterFlush() throws IOException {
private void doAfterFlush() {
segmentInfo = null;
consumer.doAfterFlush();
directory.getCreatedFiles().clear();
View File
@ -348,7 +348,7 @@ public class FilterAtomicReader extends AtomicReader {
}
@Override
public void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException {
public void document(int docID, StoredFieldVisitor visitor) throws IOException {
ensureOpen();
in.document(docID, visitor);
}
View File
@ -82,7 +82,7 @@ public abstract class FilteredTermsEnum extends TermsEnum {
* <P>You can only use this method, if you keep the default
* implementation of {@link #nextSeekTerm}.
*/
protected final void setInitialSeekTerm(BytesRef term) throws IOException {
protected final void setInitialSeekTerm(BytesRef term) {
this.initialSeekTerm = term;
}
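setInitialSeekTerm is meant to be called from a subclass constructor; a sketch of a prefix-accepting enum, similar in spirit to the built-in PrefixTermsEnum, shown only to illustrate the hook (class and variable names are placeholders):

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.StringHelper;

    final class MyPrefixTermsEnum extends FilteredTermsEnum {
      private final BytesRef prefix;

      MyPrefixTermsEnum(TermsEnum tenum, BytesRef prefix) {
        super(tenum);
        this.prefix = prefix;
        setInitialSeekTerm(prefix);     // start the enumeration at the prefix
      }

      @Override
      protected AcceptStatus accept(BytesRef term) {
        // terms arrive in sorted order, so the first non-matching term ends the enumeration
        return StringHelper.startsWith(term, prefix) ? AcceptStatus.YES : AcceptStatus.END;
      }
    }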
View File
@ -114,10 +114,10 @@ final class FreqProxTermsWriter extends TermsHashConsumer {
}
@Override
void finishDocument(TermsHash termsHash) throws IOException {
void finishDocument(TermsHash termsHash) {
}
@Override
void startDocument() throws IOException {
void startDocument() {
}
}
View File
@ -76,7 +76,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
boolean hasPayloads;
@Override
void skippingLongTerm() throws IOException {}
void skippingLongTerm() {}
public int compareTo(FreqProxTermsWriterPerField other) {
return fieldInfo.name.compareTo(other.fieldInfo.name);
@ -326,7 +326,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
* instances) found in this field and serialize them
* into a single RAM segment. */
void flush(String fieldName, FieldsConsumer consumer, final SegmentWriteState state)
throws CorruptIndexException, IOException {
throws IOException {
if (!fieldInfo.isIndexed()) {
return; // nothing to flush, don't bother the codec with the unindexed field
View File
@ -118,11 +118,10 @@ final class IndexFileDeleter {
* the Directory, incref the files they reference, call
* the policy to let it delete commits. This will remove
* any files not referenced by any of the commits.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos,
InfoStream infoStream, IndexWriter writer) throws CorruptIndexException, IOException {
InfoStream infoStream, IndexWriter writer) throws IOException {
this.infoStream = infoStream;
this.writer = writer;
@ -469,14 +468,14 @@ final class IndexFileDeleter {
}
}
void incRef(Collection<String> files) throws IOException {
void incRef(Collection<String> files) {
assert locked();
for(final String file : files) {
incRef(file);
}
}
void incRef(String fileName) throws IOException {
void incRef(String fileName) {
assert locked();
RefCount rc = getRefCount(fileName);
if (infoStream.isEnabled("IFD")) {
@ -661,7 +660,7 @@ final class IndexFileDeleter {
}
@Override
public Collection<String> getFileNames() throws IOException {
public Collection<String> getFileNames() {
return files;
}
View File
@ -28,7 +28,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.search.SearcherManager; // javadocs
import org.apache.lucene.store.*;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@ -316,7 +316,7 @@ public abstract class IndexReader implements Closeable {
* simply want to load all fields, use {@link
* #document(int)}. If you want to load a subset, use
* {@link DocumentStoredFieldVisitor}. */
public abstract void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException;
public abstract void document(int docID, StoredFieldVisitor visitor) throws IOException;
/**
* Returns the stored fields of the <code>n</code><sup>th</sup>
@ -340,7 +340,7 @@ public abstract class IndexReader implements Closeable {
// TODO: we need a separate StoredField, so that the
// Document returned here contains that class not
// IndexableField
public final Document document(int docID) throws CorruptIndexException, IOException {
public final Document document(int docID) throws IOException {
final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
document(docID, visitor);
return visitor.getDocument();
@ -351,7 +351,7 @@ public abstract class IndexReader implements Closeable {
* fields. Note that this is simply sugar for {@link
* DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}.
*/
public final Document document(int docID, Set<String> fieldsToLoad) throws CorruptIndexException, IOException {
public final Document document(int docID, Set<String> fieldsToLoad) throws IOException {
final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad);
document(docID, visitor);
return visitor.getDocument();
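Both sugar methods delegate to the visitor variant above; a small illustrative sketch (reader is any open IndexReader, the field name is a placeholder):

    Document whole = reader.document(docID);                                    // load all stored fields
    Document partial = reader.document(docID, Collections.singleton("title"));  // decode only what is needed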
View File
@ -498,7 +498,6 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* Obtain a ReadersAndLiveDocs instance from the
* readerPool. If create is true, you must later call
* {@link #release(ReadersAndLiveDocs)}.
* @throws IOException
*/
public synchronized ReadersAndLiveDocs get(SegmentInfoPerCommit info, boolean create) {
@ -530,7 +529,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* If the reader isn't being pooled, the segmentInfo's
* delCount is returned.
*/
public int numDeletedDocs(SegmentInfoPerCommit info) throws IOException {
public int numDeletedDocs(SegmentInfoPerCommit info) {
ensureOpen(false);
int delCount = info.getDelCount();
@ -572,19 +571,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @param conf
* the configuration settings according to which IndexWriter should
* be initialized.
* @throws CorruptIndexException
* if the index is corrupt
* @throws LockObtainFailedException
* if another writer has this index open (<code>write.lock</code>
* could not be obtained)
* @throws IOException
* if the directory cannot be read/written to, or if it does not
* exist and <code>conf.getOpenMode()</code> is
* <code>OpenMode.APPEND</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, IndexWriterConfig conf)
throws CorruptIndexException, LockObtainFailedException, IOException {
public IndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
config = new LiveIndexWriterConfig(conf.clone());
directory = d;
analyzer = config.getAnalyzer();
@ -760,7 +753,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
return config;
}
private void messageState() throws IOException {
private void messageState() {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "\ndir=" + directory + "\n" +
"index=" + segString() + "\n" +
@ -808,10 +801,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* you should immediately close the writer, again. See <a
* href="#OOME">above</a> for details.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void close() throws CorruptIndexException, IOException {
public void close() throws IOException {
close(true);
}
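A minimal create-and-close sketch (path, analyzer and Version constant are placeholders for whatever the application uses):

    Directory dir = FSDirectory.open(new File("/path/to/index"));
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40,
        new StandardAnalyzer(Version.LUCENE_40));
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = new IndexWriter(dir, iwc);
    try {
      // ... add, update or delete documents ...
    } finally {
      writer.close();   // commits pending changes and, by default, waits for running merges
    }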
@ -838,7 +830,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* finished (which should be at most a few seconds), and
* then return.
*/
public void close(boolean waitForMerges) throws CorruptIndexException, IOException {
public void close(boolean waitForMerges) throws IOException {
// Ensure that only one thread actually gets to do the
// closing, and make sure no commit is also in progress:
@ -877,7 +869,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
}
private void closeInternal(boolean waitForMerges, boolean doFlush) throws CorruptIndexException, IOException {
private void closeInternal(boolean waitForMerges, boolean doFlush) throws IOException {
try {
@ -987,7 +979,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* are not counted. If you really need these to be
* counted you should call {@link #commit()} first.
* @see #numDocs */
public synchronized int numDocs() throws IOException {
public synchronized int numDocs() {
ensureOpen();
int count;
if (docWriter != null)
@ -1001,7 +993,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
return count;
}
public synchronized boolean hasDeletions() throws IOException {
public synchronized boolean hasDeletions() {
ensureOpen();
if (bufferedDeletesStream.any()) {
return true;
@ -1059,7 +1051,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void addDocument(Iterable<? extends IndexableField> doc) throws CorruptIndexException, IOException {
public void addDocument(Iterable<? extends IndexableField> doc) throws IOException {
addDocument(doc, analyzer);
}
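Usage is a one-liner; field construction is left out here because the 4.0 field API was still settling at this revision:

    Document doc = new Document();
    // ... add fields to doc ...
    writer.addDocument(doc);                                              // analyzed with the writer's configured analyzer
    writer.addDocument(doc, new WhitespaceAnalyzer(Version.LUCENE_40));   // or override the analyzer per document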
@ -1078,7 +1070,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void addDocument(Iterable<? extends IndexableField> doc, Analyzer analyzer) throws CorruptIndexException, IOException {
public void addDocument(Iterable<? extends IndexableField> doc, Analyzer analyzer) throws IOException {
updateDocument(null, doc, analyzer);
}
@ -1123,7 +1115,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
*
* @lucene.experimental
*/
public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws CorruptIndexException, IOException {
public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
addDocuments(docs, analyzer);
}
@ -1138,7 +1130,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
*
* @lucene.experimental
*/
public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws CorruptIndexException, IOException {
public void addDocuments(Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
updateDocuments(null, docs, analyzer);
}
@ -1155,7 +1147,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
*
* @lucene.experimental
*/
public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws CorruptIndexException, IOException {
public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs) throws IOException {
updateDocuments(delTerm, docs, analyzer);
}
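The block variants keep the given documents adjacent in the index, which block-join style queries rely on; an illustrative sketch (the child/parent documents and the blockId field are assumptions):

    List<Document> block = new ArrayList<Document>();
    block.add(childDoc1);
    block.add(childDoc2);
    block.add(parentDoc);                                       // parent conventionally comes last in the block
    writer.addDocuments(block);                                 // first-time add
    writer.updateDocuments(new Term("blockId", "42"), block);   // later, atomically replace the whole block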
@ -1173,7 +1165,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
*
* @lucene.experimental
*/
public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws CorruptIndexException, IOException {
public void updateDocuments(Term delTerm, Iterable<? extends Iterable<? extends IndexableField>> docs, Analyzer analyzer) throws IOException {
ensureOpen();
try {
boolean success = false;
@ -1207,7 +1199,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
public void deleteDocuments(Term term) throws IOException {
ensureOpen();
try {
docWriter.deleteTerms(term);
@ -1230,7 +1222,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
public void deleteDocuments(Term... terms) throws IOException {
ensureOpen();
try {
docWriter.deleteTerms(terms);
@ -1250,7 +1242,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
public void deleteDocuments(Query query) throws IOException {
ensureOpen();
try {
docWriter.deleteQueries(query);
@ -1272,7 +1264,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
public void deleteDocuments(Query... queries) throws IOException {
ensureOpen();
try {
docWriter.deleteQueries(queries);
@ -1298,7 +1290,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws CorruptIndexException, IOException {
public void updateDocument(Term term, Iterable<? extends IndexableField> doc) throws IOException {
ensureOpen();
updateDocument(term, doc, getAnalyzer());
}
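updateDocument is an atomic delete-then-add keyed on a term; a short sketch with an illustrative id field:

    writer.updateDocument(new Term("id", "42"), newDoc);                // replace (or add) the document carrying id=42
    writer.deleteDocuments(new Term("id", "43"));                        // delete by term
    writer.deleteDocuments(new TermQuery(new Term("category", "old")));  // or by query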
@ -1322,7 +1314,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws IOException if there is a low-level IO error
*/
public void updateDocument(Term term, Iterable<? extends IndexableField> doc, Analyzer analyzer)
throws CorruptIndexException, IOException {
throws IOException {
ensureOpen();
try {
boolean success = false;
@ -1463,7 +1455,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @param maxNumSegments maximum number of segments left
* in the index after merging finishes
*/
public void forceMerge(int maxNumSegments) throws CorruptIndexException, IOException {
public void forceMerge(int maxNumSegments) throws IOException {
forceMerge(maxNumSegments, true);
}
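forceMerge replaces the old optimize(); it is expensive and normally reserved for indexes that will not change again. Illustrative calls:

    writer.forceMerge(1);          // merge down to a single segment and wait for it to finish
    writer.forceMerge(5, false);   // or ask for at most five segments and return without waiting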
@ -1477,7 +1469,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
public void forceMerge(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
public void forceMerge(int maxNumSegments, boolean doWait) throws IOException {
ensureOpen();
if (maxNumSegments < 1)
@ -1588,7 +1580,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* {@link MergePolicy.MergeAbortedException}.
*/
public void forceMergeDeletes(boolean doWait)
throws CorruptIndexException, IOException {
throws IOException {
ensureOpen();
flush(true, true);
@ -1673,7 +1665,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
public void forceMergeDeletes() throws CorruptIndexException, IOException {
public void forceMergeDeletes() throws IOException {
forceMergeDeletes(true);
}
@ -1691,18 +1683,18 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
public final void maybeMerge() throws CorruptIndexException, IOException {
public final void maybeMerge() throws IOException {
maybeMerge(-1);
}
private final void maybeMerge(int maxNumSegments) throws CorruptIndexException, IOException {
private final void maybeMerge(int maxNumSegments) throws IOException {
ensureOpen(false);
updatePendingMerges(maxNumSegments);
mergeScheduler.merge(this);
}
private synchronized void updatePendingMerges(int maxNumSegments)
throws CorruptIndexException, IOException {
throws IOException {
assert maxNumSegments == -1 || maxNumSegments > 0;
if (stopMerges) {
@ -1920,7 +1912,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
}
private synchronized void finishMerges(boolean waitForMerges) throws IOException {
private synchronized void finishMerges(boolean waitForMerges) {
if (!waitForMerges) {
stopMerges = true;
@ -2086,7 +2078,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
return newSegment;
}
synchronized void publishFrozenDeletes(FrozenBufferedDeletes packet) throws IOException {
synchronized void publishFrozenDeletes(FrozenBufferedDeletes packet) {
assert packet != null && packet.any();
synchronized (bufferedDeletesStream) {
bufferedDeletesStream.push(packet);
@ -2195,7 +2187,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
public void addIndexes(Directory... dirs) throws IOException {
ensureOpen();
noDupDirs(dirs);
@ -2279,7 +2271,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @throws IOException
* if there is a low-level IO error
*/
public void addIndexes(IndexReader... readers) throws CorruptIndexException, IOException {
public void addIndexes(IndexReader... readers) throws IOException {
ensureOpen();
int numDocs = 0;
@ -2467,7 +2459,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* href="#OOME">above</a> for details.</p>
*
* @see #prepareCommit(Map) */
public final void prepareCommit() throws CorruptIndexException, IOException {
public final void prepareCommit() throws IOException {
ensureOpen();
prepareCommit(null);
}
@ -2502,7 +2494,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* only "stick" if there are actually changes in the
* index to commit.
*/
public final void prepareCommit(Map<String,String> commitUserData) throws CorruptIndexException, IOException {
public final void prepareCommit(Map<String,String> commitUserData) throws IOException {
ensureOpen(false);
synchronized(commitLock) {
@ -2633,7 +2625,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @see #prepareCommit
* @see #commit(Map)
*/
public final void commit() throws CorruptIndexException, IOException {
public final void commit() throws IOException {
commit(null);
}
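prepareCommit/commit give two-phase commit across resources; a sketch of coordinating with some other transactional store (the database object is purely hypothetical):

    try {
      writer.prepareCommit();    // phase one: flush and sync, but do not make the commit visible
      database.prepareCommit();  // hypothetical second resource
      writer.commit();           // phase two: publish the already-prepared segments
      database.commit();
    } catch (Exception e) {
      writer.rollback();         // drops all changes since the last commit and closes the writer
      database.rollback();
    }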
@ -2646,14 +2638,14 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
public final void commit(Map<String,String> commitUserData) throws CorruptIndexException, IOException {
public final void commit(Map<String,String> commitUserData) throws IOException {
ensureOpen();
commitInternal(commitUserData);
}
private final void commitInternal(Map<String,String> commitUserData) throws CorruptIndexException, IOException {
private final void commitInternal(Map<String,String> commitUserData) throws IOException {
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "commit: start");
@ -2681,7 +2673,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
}
private synchronized final void finishCommit() throws CorruptIndexException, IOException {
private synchronized final void finishCommit() throws IOException {
if (pendingCommit != null) {
try {
@ -2727,7 +2719,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* deletes or docs were flushed) if necessary
* @param applyAllDeletes whether pending deletes should also
*/
protected final void flush(boolean triggerMerge, boolean applyAllDeletes) throws CorruptIndexException, IOException {
protected final void flush(boolean triggerMerge, boolean applyAllDeletes) throws IOException {
// NOTE: this method cannot be sync'd because
// maybeMerge() in turn calls mergeScheduler.merge which
@ -2743,7 +2735,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
}
private boolean doFlush(boolean applyAllDeletes) throws CorruptIndexException, IOException {
private boolean doFlush(boolean applyAllDeletes) throws IOException {
if (hitOOM) {
throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
}
@ -2850,7 +2842,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
return docWriter.getNumDocs();
}
private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) throws IOException {
private synchronized void ensureValidMerge(MergePolicy.OneMerge merge) {
for(SegmentInfoPerCommit info : merge.segments) {
if (!segmentInfos.contains(info)) {
throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.info.name + ") that is not in the current index " + segString(), directory);
@ -3110,8 +3102,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
*
* @lucene.experimental
*/
public void merge(MergePolicy.OneMerge merge)
throws CorruptIndexException, IOException {
public void merge(MergePolicy.OneMerge merge) throws IOException {
boolean success = false;
@ -3173,7 +3164,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* are now participating in a merge, and true is
* returned. Else (the merge conflicts) false is
* returned. */
final synchronized boolean registerMerge(MergePolicy.OneMerge merge) throws MergePolicy.MergeAbortedException, IOException {
final synchronized boolean registerMerge(MergePolicy.OneMerge merge) throws IOException {
if (merge.registerDone) {
return true;
@ -3356,7 +3347,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
/** Does finishing for a merge, which is fast but holds
* the synchronized lock on IndexWriter instance. */
final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException {
final synchronized void mergeFinish(MergePolicy.OneMerge merge) {
// forceMerge, addIndexes or finishMerges may be waiting
// on merges to finish.
@ -3417,8 +3408,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
/** Does the actual (time-consuming) work of the merge,
* but without holding synchronized lock on IndexWriter
* instance */
private int mergeMiddle(MergePolicy.OneMerge merge)
throws CorruptIndexException, IOException {
private int mergeMiddle(MergePolicy.OneMerge merge) throws IOException {
merge.checkAborted(directory);
@ -3660,12 +3650,12 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
/** @lucene.internal */
public synchronized String segString() throws IOException {
public synchronized String segString() {
return segString(segmentInfos);
}
/** @lucene.internal */
public synchronized String segString(Iterable<SegmentInfoPerCommit> infos) throws IOException {
public synchronized String segString(Iterable<SegmentInfoPerCommit> infos) {
final StringBuilder buffer = new StringBuilder();
for(final SegmentInfoPerCommit info : infos) {
if (buffer.length() > 0) {
@ -3677,7 +3667,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
}
/** @lucene.internal */
public synchronized String segString(SegmentInfoPerCommit info) throws IOException {
public synchronized String segString(SegmentInfoPerCommit info) {
return info.toString(info.info.dir, numDeletedDocs(info) - info.getDelCount());
}
View File
@ -420,7 +420,7 @@ public abstract class LogMergePolicy extends MergePolicy {
*/
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
throws CorruptIndexException, IOException {
throws IOException {
final List<SegmentInfoPerCommit> segments = segmentInfos.asList();
final int numSegments = segments.size();
View File
@ -299,7 +299,7 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
* the total set of segments in the index
*/
public abstract MergeSpecification findMerges(SegmentInfos segmentInfos)
throws CorruptIndexException, IOException;
throws IOException;
/**
* Determine what set of merge operations is necessary in
@ -324,7 +324,7 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
*/
public abstract MergeSpecification findForcedMerges(
SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException;
throws IOException;
/**
* Determine what set of merge operations is necessary in order to expunge all
@ -334,7 +334,7 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
* the total set of segments in the index
*/
public abstract MergeSpecification findForcedDeletesMerges(
SegmentInfos segmentInfos) throws CorruptIndexException, IOException;
SegmentInfos segmentInfos) throws IOException;
/**
* Release all resources for the policy.
View File
@ -29,10 +29,8 @@ import java.io.IOException;
public abstract class MergeScheduler {
/** Run the merges provided by {@link IndexWriter#getNextMerge()}. */
public abstract void merge(IndexWriter writer)
throws CorruptIndexException, IOException;
public abstract void merge(IndexWriter writer) throws IOException;
/** Close this MergeScheduler. */
public abstract void close()
throws CorruptIndexException, IOException;
public abstract void close() throws IOException;
}
View File
@ -247,7 +247,7 @@ public class MergeState {
* @lucene.internal */
static final MergeState.CheckAbort NONE = new MergeState.CheckAbort(null, null) {
@Override
public void work(double units) throws MergePolicy.MergeAbortedException {
public void work(double units) {
// do nothing
}
};
View File
@ -51,7 +51,7 @@ public class MultiDocValues extends DocValues {
return reader.normValues(field);
}
public boolean stopLoadingOnNull(AtomicReader reader, String field) throws IOException {
public boolean stopLoadingOnNull(AtomicReader reader, String field) {
// for norms we drop all norms if one leaf reader has no norms and the field is present
FieldInfos fieldInfos = reader.getFieldInfos();
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
@ -79,7 +79,7 @@ public class MultiDocValues extends DocValues {
return reader.docValues(field);
}
public boolean stopLoadingOnNull(AtomicReader reader, String field) throws IOException {
public boolean stopLoadingOnNull(AtomicReader reader, String field) {
return false;
}
}
@ -437,7 +437,7 @@ public class MultiDocValues extends DocValues {
ordToOffset = type == Type.BYTES_VAR_SORTED ? new long[2] : null;
}
@Override
public void consume(BytesRef ref, int ord, long offset) throws IOException {
public void consume(BytesRef ref, int ord, long offset) {
pagedBytes.copy(ref);
if (ordToOffset != null) {
if (ord+1 >= ordToOffset.length) {
View File
@ -46,7 +46,7 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
return this.parent == parent;
}
public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) throws IOException {
public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
this.numSubs = numSubs;
this.subs = new EnumWithSlice[subs.length];
for(int i=0;i<subs.length;i++) {
View File
@ -42,7 +42,7 @@ public final class MultiDocsEnum extends DocsEnum {
subDocsEnum = new DocsEnum[subReaderCount];
}
MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) throws IOException {
MultiDocsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
this.numSubs = numSubs;
this.subs = new EnumWithSlice[subs.length];
View File
@ -110,7 +110,7 @@ public final class MultiFieldsEnum extends FieldsEnum {
final int index;
String current;
public FieldsEnumWithSlice(FieldsEnum fields, ReaderSlice slice, int index) throws IOException {
public FieldsEnumWithSlice(FieldsEnum fields, ReaderSlice slice, int index) {
this.slice = slice;
this.index = index;
assert slice.length >= 0: "length=" + slice.length;
View File
@ -45,7 +45,7 @@ public class MultiReader extends BaseCompositeReader<IndexReader> {
* <p>Note that all subreaders are closed if this MultiReader is closed.</p>
* @param subReaders set of (sub)readers
*/
public MultiReader(IndexReader... subReaders) throws IOException {
public MultiReader(IndexReader... subReaders) {
this(subReaders, true);
}
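A sketch of composing two independent indexes into one logical view (paths are placeholders):

    DirectoryReader r1 = DirectoryReader.open(FSDirectory.open(new File("/indexes/a")));
    DirectoryReader r2 = DirectoryReader.open(FSDirectory.open(new File("/indexes/b")));
    MultiReader multi = new MultiReader(r1, r2);       // closes both subreaders when it is closed
    IndexSearcher searcher = new IndexSearcher(multi);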
@ -55,7 +55,7 @@ public class MultiReader extends BaseCompositeReader<IndexReader> {
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
*/
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) throws IOException {
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
super(subReaders.clone());
this.closeSubReaders = closeSubReaders;
if (!closeSubReaders) {
View File
@ -95,7 +95,7 @@ public final class MultiTerms extends Terms {
}
@Override
public long size() throws IOException {
public long size() {
return -1;
}
View File
@ -258,12 +258,12 @@ public final class MultiTermsEnum extends TermsEnum {
}
@Override
public void seekExact(long ord) throws IOException {
public void seekExact(long ord) {
throw new UnsupportedOperationException();
}
@Override
public long ord() throws IOException {
public long ord() {
throw new UnsupportedOperationException();
}
View File
@ -17,7 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.IOException;
import java.util.List;
/**
@ -34,8 +33,8 @@ public final class NoDeletionPolicy implements IndexDeletionPolicy {
// keep private to avoid instantiation
}
public void onCommit(List<? extends IndexCommit> commits) throws IOException {}
public void onCommit(List<? extends IndexCommit> commits) {}
public void onInit(List<? extends IndexCommit> commits) throws IOException {}
public void onInit(List<? extends IndexCommit> commits) {}
}
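NoDeletionPolicy keeps every commit forever; the same two hooks can implement any retention scheme. A hedged sketch of a policy that keeps only the newest commit (roughly what the default KeepOnlyLastCommitDeletionPolicy does), installed via IndexWriterConfig.setIndexDeletionPolicy; the class name is illustrative:

    class KeepNewestOnly implements IndexDeletionPolicy {
      public void onInit(List<? extends IndexCommit> commits) {
        onCommit(commits);
      }
      public void onCommit(List<? extends IndexCommit> commits) {
        // commits are ordered oldest to newest; drop everything but the last one
        for (int i = 0; i < commits.size() - 1; i++) {
          commits.get(i).delete();
        }
      }
    }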
View File
@ -17,7 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.IOException;
import java.util.Map;
/**
@ -54,17 +53,14 @@ public final class NoMergePolicy extends MergePolicy {
public void close() {}
@Override
public MergeSpecification findMerges(SegmentInfos segmentInfos)
throws CorruptIndexException, IOException { return null; }
public MergeSpecification findMerges(SegmentInfos segmentInfos) { return null; }
@Override
public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException { return null; }
int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge) { return null; }
@Override
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
throws CorruptIndexException, IOException { return null; }
public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos) { return null; }
@Override
public boolean useCompoundFile(SegmentInfos segments, SegmentInfoPerCommit newSegment) { return useCompoundFile; }
View File
@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.IOException;
/**
* A {@link MergeScheduler} which never executes any merges. It is also a
* singleton and can be accessed through {@link NoMergeScheduler#INSTANCE}. Use
@ -42,5 +40,5 @@ public final class NoMergeScheduler extends MergeScheduler {
public void close() {}
@Override
public void merge(IndexWriter writer) throws CorruptIndexException, IOException {}
public void merge(IndexWriter writer) {}
}
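Both no-op implementations are typically installed through the writer configuration; an illustrative sketch (dir and analyzer are assumed to exist):

    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    iwc.setMergePolicy(NoMergePolicy.COMPOUND_FILES);   // never select any merges
    iwc.setMergeScheduler(NoMergeScheduler.INSTANCE);   // and never run any
    IndexWriter writer = new IndexWriter(dir, iwc);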
View File
@ -87,10 +87,10 @@ final class NormsConsumer extends InvertedDocEndConsumer {
}
@Override
void finishDocument() throws IOException {}
void finishDocument() {}
@Override
void startDocument() throws IOException {}
void startDocument() {}
@Override
InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField,
Some files were not shown because too many files have changed in this diff.